author     Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 00:22:29 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 00:22:29 +0300
commit     d635a69dd4981cc51f90293f5f64268620ed1565 (patch)
tree       5e0a758b402ea7d624c25c3a343545dd29e80f31 /net
parent     ac73e3dc8acd0a3be292755db30388c3580f5674 (diff)
parent     efd5a1584537698220578227e6467638307c2a0b (diff)
download   linux-d635a69dd4981cc51f90293f5f64268620ed1565.tar.xz
Merge tag 'net-next-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:
 "Core:

   - support "prefer busy polling" NAPI operation mode, where we defer softirq
     for some time expecting applications to periodically busy poll (see the
     busy-polling sketch after this message)

   - AF_XDP: improve efficiency by more batching and hindering the adjacency
     cache prefetcher

   - af_packet: make packet_fanout.arr size configurable up to 64K

   - tcp: optimize TCP zero copy receive in presence of partial or unaligned
     reads, making zero copy a performance win for much smaller messages

   - XDP: add bulk APIs for returning / freeing frames (see the bulk-return
     sketch after this message)

   - sched: support fragmenting IP packets as they come out of conntrack

   - net: allow virtual netdevs to forward UDP L4 and fraglist GSO skbs

  BPF:

   - switch from crude rlimit-based to memcg-based memory accounting

   - BPF type format information for kernel modules and related tracing
     enhancements

   - implement task local storage for BPF LSM (see the task-storage sketch
     after this message)

   - allow the FENTRY/FEXIT/RAW_TP tracing programs to use bpf_sk_storage

  Protocols:

   - mptcp: improve multiple xmit streams support, memory accounting and many
     smaller improvements

   - TLS: support CHACHA20-POLY1305 cipher

   - seg6: add support for SRv6 End.DT4/DT6 behavior

   - sctp: Implement RFC 6951: UDP Encapsulation of SCTP

   - ppp_generic: add ability to bridge channels directly

   - bridge: Connectivity Fault Management (CFM) support as is defined in
     IEEE 802.1Q section 12.14

  Drivers:

   - mlx5: make use of the new auxiliary bus to organize the driver internals

   - mlx5: more accurate port TX timestamping support

   - mlxsw:
      - improve the efficiency of offloaded next hop updates by using the new
        nexthop object API
      - support blackhole nexthops
      - support IEEE 802.1ad (Q-in-Q) bridging

   - rtw88: major bluetooth co-existence improvements

   - iwlwifi: support new 6 GHz frequency band

   - ath11k: Fast Initial Link Setup (FILS)

   - mt7915: dual band concurrent (DBDC) support

   - net: ipa: add basic support for IPA v4.5

  Refactor:

   - a few pieces of in_interrupt() cleanup work from Sebastian Andrzej
     Siewior

   - phy: add support for shared interrupts; get rid of multiple driver APIs
     and have the drivers write a full IRQ handler, slight growth of driver
     code should be compensated by the simpler API which also allows shared
     IRQs

   - add common code for handling netdev per-cpu counters

   - move TX packet re-allocation from Ethernet switch tag drivers to a
     central place

   - improve efficiency and rename nla_strlcpy

   - number of W=1 warning cleanups as we now catch those in a patchwork
     build bot

  Old code removal:

   - wan: delete the DLCI / SDLA drivers

   - wimax: move to staging

   - wifi: remove old WDS wifi bridging support"

* tag 'net-next-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1922 commits)
  net: hns3: fix expression that is currently always true
  net: fix proc_fs init handling in af_packet and tls
  nfc: pn533: convert comma to semicolon
  af_vsock: Assign the vsock transport considering the vsock address flags
  af_vsock: Set VMADDR_FLAG_TO_HOST flag on the receive path
  vsock_addr: Check for supported flag values
  vm_sockets: Add VMADDR_FLAG_TO_HOST vsock flag
  vm_sockets: Add flags field in the vsock address data structure
  net: Disable NETIF_F_HW_TLS_TX when HW_CSUM is disabled
  tcp: Add logic to check for SYN w/ data in tcp_simple_retransmit
  net: mscc: ocelot: install MAC addresses in .ndo_set_rx_mode from process context
  nfc: s3fwrn5: Release the nfc firmware
  net: vxget: clean up sparse warnings
  mlxsw: spectrum_router: Use eXtended mezzanine to offload IPv4 router
  mlxsw: spectrum: Set KVH XLT cache mode for Spectrum2/3
  mlxsw: spectrum_router_xm: Introduce basic XM cache flushing
  mlxsw: reg: Add Router LPM Cache Enable Register
  mlxsw: reg: Add Router LPM Cache ML Delete Register
  mlxsw: spectrum_router_xm: Implement L-value tracking for M-index
  mlxsw: reg: Add XM Router M Table Register
  ...
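[Editor's busy-polling sketch] The "prefer busy polling" item pairs the long-standing SO_BUSY_POLL knob with two new socket options, SO_PREFER_BUSY_POLL and SO_BUSY_POLL_BUDGET. A minimal userspace sketch follows; the numeric fallback values mirror asm-generic/socket.h and are an assumption for builds against older headers:

	/* Opt a socket into the deferred-softirq busy-poll mode. */
	#include <stdio.h>
	#include <sys/socket.h>

	#ifndef SO_PREFER_BUSY_POLL
	#define SO_PREFER_BUSY_POLL 69	/* assumption: asm-generic/socket.h value */
	#endif
	#ifndef SO_BUSY_POLL_BUDGET
	#define SO_BUSY_POLL_BUDGET 70	/* assumption: asm-generic/socket.h value */
	#endif

	int enable_prefer_busy_poll(int fd)
	{
		int usecs = 100;	/* classic SO_BUSY_POLL spin time */
		int prefer = 1;		/* defer softirq; the app promises to poll */
		int budget = 64;	/* packets processed per busy-poll round */

		if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs)) ||
		    setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &prefer, sizeof(prefer)) ||
		    setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget, sizeof(budget))) {
			perror("setsockopt");
			return -1;
		}
		return 0;
	}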
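[Editor's bulk-return sketch] The XDP bulk APIs (xdp_frame_bulk_init(), xdp_return_frame_bulk(), xdp_flush_frame_bulk(), see net/core/xdp.c in the diffstat) let a driver free TX-completed frames in batches rather than one allocator call per frame. A kernel-side sketch of a TX-completion loop; struct example_ring, its completed[] array, and the function name are hypothetical driver details:

	#include <linux/rcupdate.h>
	#include <net/xdp.h>

	struct example_ring {			/* hypothetical driver state */
		struct xdp_frame *completed[256];
	};

	static void example_clean_xdp_tx_ring(struct example_ring *ring, int ndesc)
	{
		struct xdp_frame_bulk bq;
		int i;

		xdp_frame_bulk_init(&bq);
		rcu_read_lock();	/* bulk free resolves memory models under RCU */

		for (i = 0; i < ndesc; i++) {
			/* Frames are queued per memory model; the helper flushes
			 * automatically when the batch fills or the model changes.
			 */
			xdp_return_frame_bulk(ring->completed[i], &bq);
		}

		xdp_flush_frame_bulk(&bq);	/* release anything still queued */
		rcu_read_unlock();
	}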
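[Editor's task-storage sketch] Task local storage gives BPF LSM programs per-task data tied to the task's lifetime, via the new BPF_MAP_TYPE_TASK_STORAGE map and the bpf_task_storage_get() helper. A libbpf-style sketch, assuming a typical vmlinux.h setup; the lsm/file_open hook and the per-task counter are illustrative, not from this merge:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	struct {
		__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);
		__type(key, int);
		__type(value, __u64);
	} opens_per_task SEC(".maps");

	SEC("lsm/file_open")
	int BPF_PROG(count_file_opens, struct file *file)
	{
		__u64 *cnt;

		/* Allocate storage for the current task on first use. */
		cnt = bpf_task_storage_get(&opens_per_task,
					   bpf_get_current_task_btf(), 0,
					   BPF_LOCAL_STORAGE_GET_F_CREATE);
		if (cnt)
			__sync_fetch_and_add(cnt, 1);

		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";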
Diffstat (limited to 'net')
-rw-r--r-- net/9p/client.c | 6
-rw-r--r-- net/9p/trans_common.c | 4
-rw-r--r-- net/9p/trans_fd.c | 4
-rw-r--r-- net/9p/trans_rdma.c | 2
-rw-r--r-- net/9p/trans_virtio.c | 9
-rw-r--r-- net/Kconfig | 2
-rw-r--r-- net/Makefile | 1
-rw-r--r-- net/appletalk/aarp.c | 18
-rw-r--r-- net/appletalk/ddp.c | 7
-rw-r--r-- net/atm/raw.c | 12
-rw-r--r-- net/batman-adv/Kconfig | 27
-rw-r--r-- net/batman-adv/Makefile | 3
-rw-r--r-- net/batman-adv/bat_algo.c | 34
-rw-r--r-- net/batman-adv/bat_algo.h | 5
-rw-r--r-- net/batman-adv/bat_iv_ogm.c | 229
-rw-r--r-- net/batman-adv/bat_v.c | 247
-rw-r--r-- net/batman-adv/bat_v_elp.c | 1
-rw-r--r-- net/batman-adv/bat_v_ogm.c | 1
-rw-r--r-- net/batman-adv/bridge_loop_avoidance.c | 130
-rw-r--r-- net/batman-adv/bridge_loop_avoidance.h | 16
-rw-r--r-- net/batman-adv/debugfs.c | 442
-rw-r--r-- net/batman-adv/debugfs.h | 73
-rw-r--r-- net/batman-adv/distributed-arp-table.c | 55
-rw-r--r-- net/batman-adv/distributed-arp-table.h | 2
-rw-r--r-- net/batman-adv/fragmentation.c | 3
-rw-r--r-- net/batman-adv/gateway_client.c | 39
-rw-r--r-- net/batman-adv/gateway_client.h | 2
-rw-r--r-- net/batman-adv/hard-interface.c | 35
-rw-r--r-- net/batman-adv/hard-interface.h | 25
-rw-r--r-- net/batman-adv/icmp_socket.c | 392
-rw-r--r-- net/batman-adv/icmp_socket.h | 38
-rw-r--r-- net/batman-adv/log.c | 209
-rw-r--r-- net/batman-adv/main.c | 46
-rw-r--r-- net/batman-adv/main.h | 5
-rw-r--r-- net/batman-adv/multicast.c | 111
-rw-r--r-- net/batman-adv/multicast.h | 3
-rw-r--r-- net/batman-adv/netlink.c | 1
-rw-r--r-- net/batman-adv/network-coding.c | 87
-rw-r--r-- net/batman-adv/network-coding.h | 13
-rw-r--r-- net/batman-adv/originator.c | 121
-rw-r--r-- net/batman-adv/originator.h | 4
-rw-r--r-- net/batman-adv/routing.c | 10
-rw-r--r-- net/batman-adv/soft-interface.c | 137
-rw-r--r-- net/batman-adv/soft-interface.h | 1
-rw-r--r-- net/batman-adv/sysfs.c | 1272
-rw-r--r-- net/batman-adv/sysfs.h | 93
-rw-r--r-- net/batman-adv/tp_meter.c | 1
-rw-r--r-- net/batman-adv/translation-table.c | 212
-rw-r--r-- net/batman-adv/translation-table.h | 3
-rw-r--r-- net/batman-adv/types.h | 66
-rw-r--r-- net/bluetooth/hci_conn.c | 12
-rw-r--r-- net/bluetooth/hci_core.c | 53
-rw-r--r-- net/bluetooth/hci_debugfs.c | 50
-rw-r--r-- net/bluetooth/hci_event.c | 44
-rw-r--r-- net/bluetooth/hci_request.c | 303
-rw-r--r-- net/bluetooth/hci_request.h | 2
-rw-r--r-- net/bluetooth/hidp/core.c | 2
-rw-r--r-- net/bluetooth/l2cap_core.c | 10
-rw-r--r-- net/bluetooth/mgmt.c | 436
-rw-r--r-- net/bluetooth/mgmt_config.c | 187
-rw-r--r-- net/bluetooth/sco.c | 5
-rw-r--r-- net/bluetooth/smp.c | 44
-rw-r--r-- net/bluetooth/smp.h | 2
-rw-r--r-- net/bridge/Kconfig | 11
-rw-r--r-- net/bridge/Makefile | 2
-rw-r--r-- net/bridge/br.c | 5
-rw-r--r-- net/bridge/br_cfm.c | 867
-rw-r--r-- net/bridge/br_cfm_netlink.c | 726
-rw-r--r-- net/bridge/br_device.c | 39
-rw-r--r-- net/bridge/br_if.c | 1
-rw-r--r-- net/bridge/br_input.c | 41
-rw-r--r-- net/bridge/br_mdb.c | 30
-rw-r--r-- net/bridge/br_mrp.c | 59
-rw-r--r-- net/bridge/br_mrp_netlink.c | 2
-rw-r--r-- net/bridge/br_multicast.c | 13
-rw-r--r-- net/bridge/br_netlink.c | 117
-rw-r--r-- net/bridge/br_private.h | 107
-rw-r--r-- net/bridge/br_private_cfm.h | 147
-rw-r--r-- net/bridge/br_private_mrp.h | 2
-rw-r--r-- net/bridge/br_vlan.c | 31
-rw-r--r-- net/bridge/netfilter/Kconfig | 4
-rw-r--r-- net/bridge/netfilter/nft_reject_bridge.c | 255
-rw-r--r-- net/can/af_can.c | 2
-rw-r--r-- net/can/gw.c | 80
-rw-r--r-- net/can/isotp.c | 42
-rw-r--r-- net/can/j1939/main.c | 4
-rw-r--r-- net/core/bpf_sk_storage.c | 136
-rw-r--r-- net/core/datagram.c | 2
-rw-r--r-- net/core/dev.c | 137
-rw-r--r-- net/core/dev_ioctl.c | 2
-rw-r--r-- net/core/devlink.c | 44
-rw-r--r-- net/core/fib_rules.c | 4
-rw-r--r-- net/core/filter.c | 25
-rw-r--r-- net/core/flow_dissector.c | 2
-rw-r--r-- net/core/netclassid_cgroup.c | 3
-rw-r--r-- net/core/netprio_cgroup.c | 3
-rw-r--r-- net/core/page_pool.c | 70
-rw-r--r-- net/core/rtnetlink.c | 36
-rw-r--r-- net/core/skbuff.c | 18
-rw-r--r-- net/core/sock.c | 32
-rw-r--r-- net/core/sock_map.c | 42
-rw-r--r-- net/core/xdp.c | 57
-rw-r--r-- net/dcb/dcbnl.c | 16
-rw-r--r-- net/dccp/ackvec.c | 5
-rw-r--r-- net/dccp/ccid.c | 2
-rw-r--r-- net/dccp/ccids/ccid2.c | 5
-rw-r--r-- net/dccp/ccids/ccid3.c | 6
-rw-r--r-- net/dccp/ccids/lib/loss_interval.c | 3
-rw-r--r-- net/dccp/ccids/lib/packet_history.c | 3
-rw-r--r-- net/dccp/feat.c | 6
-rw-r--r-- net/dccp/output.c | 9
-rw-r--r-- net/dccp/qpolicy.c | 6
-rw-r--r-- net/dccp/timer.c | 12
-rw-r--r-- net/decnet/dn_dev.c | 2
-rw-r--r-- net/dsa/Kconfig | 11
-rw-r--r-- net/dsa/Makefile | 4
-rw-r--r-- net/dsa/dsa.c | 7
-rw-r--r-- net/dsa/dsa_priv.h | 2
-rw-r--r-- net/dsa/master.c | 7
-rw-r--r-- net/dsa/slave.c | 96
-rw-r--r-- net/dsa/tag_ar9331.c | 3
-rw-r--r-- net/dsa/tag_brcm.c | 3
-rw-r--r-- net/dsa/tag_dsa.c | 332
-rw-r--r-- net/dsa/tag_edsa.c | 206
-rw-r--r-- net/dsa/tag_gswip.c | 5
-rw-r--r-- net/dsa/tag_hellcreek.c | 64
-rw-r--r-- net/dsa/tag_ksz.c | 73
-rw-r--r-- net/dsa/tag_lan9303.c | 9
-rw-r--r-- net/dsa/tag_mtk.c | 3
-rw-r--r-- net/dsa/tag_ocelot.c | 7
-rw-r--r-- net/dsa/tag_qca.c | 3
-rw-r--r-- net/dsa/tag_trailer.c | 31
-rw-r--r-- net/ethernet/eth.c | 6
-rw-r--r-- net/ethtool/ioctl.c | 2
-rw-r--r-- net/ieee802154/nl-mac.c | 2
-rw-r--r-- net/ipv4/af_inet.c | 2
-rw-r--r-- net/ipv4/bpf_tcp_ca.c | 3
-rw-r--r-- net/ipv4/devinet.c | 5
-rw-r--r-- net/ipv4/fib_semantics.c | 7
-rw-r--r-- net/ipv4/fib_trie.c | 9
-rw-r--r-- net/ipv4/inet_fragment.c | 47
-rw-r--r-- net/ipv4/ip_gre.c | 6
-rw-r--r-- net/ipv4/ip_tunnel_core.c | 9
-rw-r--r-- net/ipv4/ip_vti.c | 2
-rw-r--r-- net/ipv4/ipconfig.c | 14
-rw-r--r-- net/ipv4/ipip.c | 2
-rw-r--r-- net/ipv4/metrics.c | 2
-rw-r--r-- net/ipv4/netfilter/ipt_REJECT.c | 3
-rw-r--r-- net/ipv4/netfilter/nf_reject_ipv4.c | 134
-rw-r--r-- net/ipv4/netfilter/nft_reject_ipv4.c | 3
-rw-r--r-- net/ipv4/nexthop.c | 255
-rw-r--r-- net/ipv4/proc.c | 1
-rw-r--r-- net/ipv4/route.c | 15
-rw-r--r-- net/ipv4/tcp.c | 603
-rw-r--r-- net/ipv4/tcp_input.c | 48
-rw-r--r-- net/ipv4/tcp_ipv4.c | 23
-rw-r--r-- net/ipv4/tcp_lp.c | 7
-rw-r--r-- net/ipv4/tcp_minisocks.c | 2
-rw-r--r-- net/ipv4/tcp_output.c | 22
-rw-r--r-- net/ipv4/tcp_recovery.c | 3
-rw-r--r-- net/ipv4/udp.c | 10
-rw-r--r-- net/ipv4/udp_diag.c | 2
-rw-r--r-- net/ipv4/udp_offload.c | 5
-rw-r--r-- net/ipv6/addrconf.c | 1
-rw-r--r-- net/ipv6/af_inet6.c | 2
-rw-r--r-- net/ipv6/calipso.c | 4
-rw-r--r-- net/ipv6/exthdrs.c | 5
-rw-r--r-- net/ipv6/ip6_gre.c | 6
-rw-r--r-- net/ipv6/ip6_tunnel.c | 47
-rw-r--r-- net/ipv6/ip6_vti.c | 3
-rw-r--r-- net/ipv6/ipv6_sockglue.c | 2
-rw-r--r-- net/ipv6/mcast.c | 2
-rw-r--r-- net/ipv6/netfilter/ip6t_REJECT.c | 2
-rw-r--r-- net/ipv6/netfilter/nf_reject_ipv6.c | 144
-rw-r--r-- net/ipv6/netfilter/nft_reject_ipv6.c | 3
-rw-r--r-- net/ipv6/proc.c | 2
-rw-r--r-- net/ipv6/route.c | 9
-rw-r--r-- net/ipv6/rpl.c | 2
-rw-r--r-- net/ipv6/rpl_iptunnel.c | 9
-rw-r--r-- net/ipv6/seg6_local.c | 590
-rw-r--r-- net/ipv6/sit.c | 2
-rw-r--r-- net/ipv6/tcp_ipv6.c | 9
-rw-r--r-- net/ipv6/udp.c | 8
-rw-r--r-- net/ipv6/udp_offload.c | 8
-rw-r--r-- net/iucv/af_iucv.c | 8
-rw-r--r-- net/l3mdev/l3mdev.c | 1
-rw-r--r-- net/lapb/lapb_iface.c | 82
-rw-r--r-- net/lapb/lapb_timer.c | 11
-rw-r--r-- net/llc/llc_conn.c | 2
-rw-r--r-- net/mac80211/agg-rx.c | 8
-rw-r--r-- net/mac80211/agg-tx.c | 12
-rw-r--r-- net/mac80211/cfg.c | 33
-rw-r--r-- net/mac80211/chan.c | 74
-rw-r--r-- net/mac80211/debugfs.c | 2
-rw-r--r-- net/mac80211/debugfs_key.c | 2
-rw-r--r-- net/mac80211/debugfs_netdev.c | 17
-rw-r--r-- net/mac80211/debugfs_sta.c | 4
-rw-r--r-- net/mac80211/ieee80211_i.h | 24
-rw-r--r-- net/mac80211/iface.c | 54
-rw-r--r-- net/mac80211/key.c | 49
-rw-r--r-- net/mac80211/main.c | 22
-rw-r--r-- net/mac80211/mesh.c | 30
-rw-r--r-- net/mac80211/mlme.c | 123
-rw-r--r-- net/mac80211/pm.c | 15
-rw-r--r-- net/mac80211/rx.c | 41
-rw-r--r-- net/mac80211/trace.h | 23
-rw-r--r-- net/mac80211/tx.c | 60
-rw-r--r-- net/mac80211/util.c | 73
-rw-r--r-- net/mac80211/vht.c | 14
-rw-r--r-- net/mac80211/wme.c | 18
-rw-r--r-- net/mac802154/main.c | 8
-rw-r--r-- net/mpls/af_mpls.c | 2
-rw-r--r-- net/mptcp/ctrl.c | 14
-rw-r--r-- net/mptcp/mptcp_diag.c | 2
-rw-r--r-- net/mptcp/options.c | 218
-rw-r--r-- net/mptcp/pm.c | 72
-rw-r--r-- net/mptcp/pm_netlink.c | 84
-rw-r--r-- net/mptcp/protocol.c | 1813
-rw-r--r-- net/mptcp/protocol.h | 192
-rw-r--r-- net/mptcp/subflow.c | 165
-rw-r--r-- net/netfilter/Kconfig | 10
-rw-r--r-- net/netfilter/Makefile | 1
-rw-r--r-- net/netfilter/ipset/ip_set_core.c | 6
-rw-r--r-- net/netfilter/ipset/ip_set_hash_gen.h | 45
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ip.c | 7
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipmac.c | 6
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipmark.c | 7
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipport.c | 7
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipportip.c | 7
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipportnet.c | 7
-rw-r--r-- net/netfilter/ipset/ip_set_hash_mac.c | 6
-rw-r--r-- net/netfilter/ipset/ip_set_hash_net.c | 7
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netiface.c | 11
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netnet.c | 7
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netport.c | 7
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netportnet.c | 7
-rw-r--r-- net/netfilter/ipvs/ip_vs_core.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_sync.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 31
-rw-r--r-- net/netfilter/nf_conntrack_proto_dccp.c | 13
-rw-r--r-- net/netfilter/nf_conntrack_proto_sctp.c | 13
-rw-r--r-- net/netfilter/nf_conntrack_proto_tcp.c | 19
-rw-r--r-- net/netfilter/nf_tables_api.c | 259
-rw-r--r-- net/netfilter/nfnetlink_acct.c | 40
-rw-r--r-- net/netfilter/nfnetlink_cthelper.c | 4
-rw-r--r-- net/netfilter/nft_ct.c | 2
-rw-r--r-- net/netfilter/nft_dynset.c | 156
-rw-r--r-- net/netfilter/nft_log.c | 2
-rw-r--r-- net/netfilter/nft_reject.c | 12
-rw-r--r-- net/netfilter/nft_reject_inet.c | 74
-rw-r--r-- net/netfilter/nft_reject_netdev.c | 189
-rw-r--r-- net/netfilter/nft_set_hash.c | 27
-rw-r--r-- net/netfilter/xt_nfacct.c | 2
-rw-r--r-- net/netlabel/netlabel_calipso.c | 1
-rw-r--r-- net/netlabel/netlabel_mgmt.c | 2
-rw-r--r-- net/nfc/Kconfig | 2
-rw-r--r-- net/nfc/core.c | 10
-rw-r--r-- net/nfc/digital_core.c | 3
-rw-r--r-- net/nfc/nci/core.c | 20
-rw-r--r-- net/nfc/nci/hci.c | 9
-rw-r--r-- net/nfc/nci/ntf.c | 21
-rw-r--r-- net/nfc/nci/rsp.c | 81
-rw-r--r-- net/nfc/netlink.c | 2
-rw-r--r-- net/openvswitch/actions.c | 15
-rw-r--r-- net/openvswitch/conntrack.c | 14
-rw-r--r-- net/openvswitch/flow.c | 4
-rw-r--r-- net/openvswitch/meter.c | 2
-rw-r--r-- net/openvswitch/vport-internal_dev.c | 29
-rw-r--r-- net/openvswitch/vport.c | 4
-rw-r--r-- net/packet/af_packet.c | 40
-rw-r--r-- net/packet/internal.h | 5
-rw-r--r-- net/qrtr/mhi.c | 6
-rw-r--r-- net/qrtr/ns.c | 8
-rw-r--r-- net/qrtr/qrtr.c | 49
-rw-r--r-- net/rfkill/core.c | 41
-rw-r--r-- net/rxrpc/Makefile | 1
-rw-r--r-- net/rxrpc/ar-internal.h | 63
-rw-r--r-- net/rxrpc/call_accept.c | 14
-rw-r--r-- net/rxrpc/conn_client.c | 6
-rw-r--r-- net/rxrpc/conn_event.c | 8
-rw-r--r-- net/rxrpc/conn_object.c | 2
-rw-r--r-- net/rxrpc/conn_service.c | 2
-rw-r--r-- net/rxrpc/insecure.c | 19
-rw-r--r-- net/rxrpc/key.c | 658
-rw-r--r-- net/rxrpc/recvmsg.c | 2
-rw-r--r-- net/rxrpc/rxkad.c | 256
-rw-r--r-- net/rxrpc/security.c | 98
-rw-r--r-- net/rxrpc/sendmsg.c | 45
-rw-r--r-- net/rxrpc/server_key.c | 143
-rw-r--r-- net/sched/Kconfig | 8
-rw-r--r-- net/sched/Makefile | 1
-rw-r--r-- net/sched/act_api.c | 93
-rw-r--r-- net/sched/act_bpf.c | 2
-rw-r--r-- net/sched/act_ct.c | 9
-rw-r--r-- net/sched/act_ipt.c | 2
-rw-r--r-- net/sched/act_mirred.c | 21
-rw-r--r-- net/sched/act_simple.c | 4
-rw-r--r-- net/sched/cls_api.c | 36
-rw-r--r-- net/sched/cls_rsvp.h | 2
-rw-r--r-- net/sched/cls_u32.c | 11
-rw-r--r-- net/sched/em_cmp.c | 2
-rw-r--r-- net/sched/sch_api.c | 6
-rw-r--r-- net/sched/sch_atm.c | 8
-rw-r--r-- net/sched/sch_cbs.c | 1
-rw-r--r-- net/sched/sch_frag.c | 150
-rw-r--r-- net/sched/sch_pie.c | 2
-rw-r--r-- net/sched/sch_taprio.c | 1
-rw-r--r-- net/sctp/Kconfig | 1
-rw-r--r-- net/sctp/associola.c | 4
-rw-r--r-- net/sctp/ipv6.c | 44
-rw-r--r-- net/sctp/offload.c | 6
-rw-r--r-- net/sctp/output.c | 22
-rw-r--r-- net/sctp/protocol.c | 142
-rw-r--r-- net/sctp/sm_make_chunk.c | 21
-rw-r--r-- net/sctp/sm_statefuns.c | 52
-rw-r--r-- net/sctp/socket.c | 116
-rw-r--r-- net/sctp/sysctl.c | 62
-rw-r--r-- net/sctp/transport.c | 4
-rw-r--r-- net/smc/Makefile | 2
-rw-r--r-- net/smc/af_smc.c | 100
-rw-r--r-- net/smc/smc_cdc.c | 6
-rw-r--r-- net/smc/smc_clc.c | 5
-rw-r--r-- net/smc/smc_clc.h | 6
-rw-r--r-- net/smc/smc_core.c | 399
-rw-r--r-- net/smc/smc_core.h | 50
-rw-r--r-- net/smc/smc_diag.c | 23
-rw-r--r-- net/smc/smc_ib.c | 200
-rw-r--r-- net/smc/smc_ib.h | 6
-rw-r--r-- net/smc/smc_ism.c | 99
-rw-r--r-- net/smc/smc_ism.h | 6
-rw-r--r-- net/smc/smc_netlink.c | 85
-rw-r--r-- net/smc/smc_netlink.h | 32
-rw-r--r-- net/smc/smc_pnet.c | 2
-rw-r--r-- net/smc/smc_wr.c | 14
-rw-r--r-- net/socket.c | 53
-rw-r--r-- net/sunrpc/rpc_pipe.c | 3
-rw-r--r-- net/tipc/addr.c | 7
-rw-r--r-- net/tipc/addr.h | 1
-rw-r--r-- net/tipc/bearer.c | 27
-rw-r--r-- net/tipc/bearer.h | 10
-rw-r--r-- net/tipc/core.c | 2
-rw-r--r-- net/tipc/core.h | 15
-rw-r--r-- net/tipc/crypto.c | 55
-rw-r--r-- net/tipc/crypto.h | 6
-rw-r--r-- net/tipc/discover.c | 5
-rw-r--r-- net/tipc/group.c | 3
-rw-r--r-- net/tipc/group.h | 3
-rw-r--r-- net/tipc/link.c | 48
-rw-r--r-- net/tipc/msg.c | 29
-rw-r--r-- net/tipc/name_distr.c | 48
-rw-r--r-- net/tipc/name_distr.h | 2
-rw-r--r-- net/tipc/name_table.c | 57
-rw-r--r-- net/tipc/name_table.h | 9
-rw-r--r-- net/tipc/net.c | 2
-rw-r--r-- net/tipc/netlink_compat.c | 7
-rw-r--r-- net/tipc/node.c | 60
-rw-r--r-- net/tipc/socket.c | 221
-rw-r--r-- net/tipc/socket.h | 2
-rw-r--r-- net/tipc/subscr.c | 13
-rw-r--r-- net/tipc/subscr.h | 16
-rw-r--r-- net/tipc/topsrv.c | 6
-rw-r--r-- net/tipc/trace.c | 2
-rw-r--r-- net/tipc/udp_media.c | 8
-rw-r--r-- net/tls/tls_device.c | 6
-rw-r--r-- net/tls/tls_device_fallback.c | 13
-rw-r--r-- net/tls/tls_main.c | 3
-rw-r--r-- net/tls/tls_proc.c | 3
-rw-r--r-- net/tls/tls_sw.c | 34
-rw-r--r-- net/vmw_vsock/af_vsock.c | 24
-rw-r--r-- net/vmw_vsock/vsock_addr.c | 4
-rw-r--r-- net/wimax/Kconfig | 40
-rw-r--r-- net/wimax/Makefile | 13
-rw-r--r-- net/wimax/debug-levels.h | 29
-rw-r--r-- net/wimax/debugfs.c | 38
-rw-r--r-- net/wimax/id-table.c | 130
-rw-r--r-- net/wimax/op-msg.c | 391
-rw-r--r-- net/wimax/op-reset.c | 108
-rw-r--r-- net/wimax/op-rfkill.c | 431
-rw-r--r-- net/wimax/op-state-get.c | 52
-rw-r--r-- net/wimax/stack.c | 609
-rw-r--r-- net/wimax/wimax-internal.h | 85
-rw-r--r-- net/wireless/chan.c | 6
-rw-r--r-- net/wireless/core.c | 8
-rw-r--r-- net/wireless/core.h | 2
-rw-r--r-- net/wireless/mlme.c | 26
-rw-r--r-- net/wireless/nl80211.c | 324
-rw-r--r-- net/wireless/nl80211.h | 8
-rw-r--r-- net/wireless/rdev-ops.h | 22
-rw-r--r-- net/wireless/reg.c | 10
-rw-r--r-- net/wireless/scan.c | 23
-rw-r--r-- net/wireless/trace.h | 36
-rw-r--r-- net/wireless/util.c | 89
-rw-r--r-- net/wireless/wext-compat.c | 154
-rw-r--r-- net/x25/af_x25.c | 44
-rw-r--r-- net/x25/x25_dev.c | 13
-rw-r--r-- net/x25/x25_link.c | 52
-rw-r--r-- net/x25/x25_route.c | 10
-rw-r--r-- net/xdp/xsk.c | 114
-rw-r--r-- net/xdp/xsk.h | 2
-rw-r--r-- net/xdp/xsk_buff_pool.c | 13
-rw-r--r-- net/xdp/xsk_queue.h | 93
-rw-r--r-- net/xdp/xskmap.c | 35
-rw-r--r-- net/xfrm/xfrm_input.c | 7
-rw-r--r-- net/xfrm/xfrm_interface.c | 19
-rw-r--r-- net/xfrm/xfrm_user.c | 74
405 files changed, 12807 insertions, 10696 deletions
diff --git a/net/9p/client.c b/net/9p/client.c
index 09f1ec589b80..785a7bb6a539 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -412,8 +412,9 @@ static void p9_tag_cleanup(struct p9_client *c)
/**
* p9_client_cb - call back from transport to client
- * c: client state
- * req: request received
+ * @c: client state
+ * @req: request received
+ * @status: request status, one of REQ_STATUS_*
*
*/
void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
@@ -555,6 +556,7 @@ out_err:
* p9_check_zc_errors - check 9p packet for error return and process it
* @c: current client instance
* @req: request to parse and check for error conditions
+ * @uidata: external buffer containing error
* @in_hdrlen: Size of response protocol buffer.
*
* returns error code if one is discovered, otherwise returns 0
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index 3dff68f05fb9..6ea5ea548cd4 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -17,7 +17,9 @@
#include "trans_common.h"
/**
- * p9_release_pages - Release pages after the transaction.
+ * p9_release_pages - Release pages after the transaction.
+ * @pages: array of pages to be put
+ * @nr_pages: size of array
*/
void p9_release_pages(struct page **pages, int nr_pages)
{
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 8f528e783a6c..fa158397bb63 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -45,7 +45,7 @@ static struct p9_trans_module p9_fd_trans;
* @rfd: file descriptor for reading (trans=fd)
* @wfd: file descriptor for writing (trans=fd)
* @port: port to connect to (trans=tcp)
- *
+ * @privport: port is privileged
*/
struct p9_fd_opts {
@@ -95,6 +95,8 @@ struct p9_poll_wait {
* @err: error state
* @req_list: accounting for requests which have been sent
* @unsent_req_list: accounting for requests that haven't been sent
+ * @rreq: read request
+ * @wreq: write request
* @req: current request being processed (if any)
* @tmp_buf: temporary buffer to read in header
* @rc: temporary fcall for reading current frame
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 2885ff9c76f0..af0a8a6cd3fd 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -99,6 +99,7 @@ struct p9_rdma_req;
/**
* struct p9_rdma_context - Keeps track of in-process WR
*
+ * @cqe: completion queue entry
* @busa: Bus address to unmap when the WR completes
* @req: Keeps track of requests (send)
* @rc: Keepts track of replies (receive)
@@ -115,6 +116,7 @@ struct p9_rdma_context {
/**
* struct p9_rdma_opts - Collection of mount options
* @port: port of connection
+ * @privport: Whether a privileged port may be used
* @sq_depth: The requested depth of the SQ. This really doesn't need
* to be any deeper than the number of threads used in the client
* @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index a3cd90a74012..93f2f8654882 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -50,7 +50,11 @@ static atomic_t vp_pinned = ATOMIC_INIT(0);
* @client: client instance
* @vdev: virtio dev associated with this channel
* @vq: virtio queue associated with this channel
+ * @ring_bufs_avail: flag to indicate there is some available in the ring buf
+ * @vc_wq: wait queue for waiting for thing to be added to ring buf
+ * @p9_max_pages: maximum number of pinned pages
* @sg: scatter gather list which is used to pack a request (protected?)
+ * @chan_list: linked list of channels
*
* We keep all per-channel information in a structure.
* This structure is allocated within the devices dev->mem space.
@@ -74,8 +78,8 @@ struct virtio_chan {
unsigned long p9_max_pages;
/* Scatterlist: can be too big for stack. */
struct scatterlist sg[VIRTQUEUE_NUM];
- /*
- * tag name to identify a mount null terminated
+ /**
+ * @tag: name to identify a mount null terminated
*/
char *tag;
@@ -204,6 +208,7 @@ static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req)
* this takes a list of pages.
* @sg: scatter/gather list to pack into
* @start: which segment of the sg_list to start at
+ * @limit: maximum number of pages in sg list.
* @pdata: a list of pages to add into sg.
* @nr_pages: number of pages to pack into the scatter/gather list
* @offs: amount of data in the beginning of first page _not_ to pack
diff --git a/net/Kconfig b/net/Kconfig
index d6567162c1cf..f4c32d982af6 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -386,8 +386,6 @@ source "net/mac80211/Kconfig"
endif # WIRELESS
-source "net/wimax/Kconfig"
-
source "net/rfkill/Kconfig"
source "net/9p/Kconfig"
source "net/caif/Kconfig"
diff --git a/net/Makefile b/net/Makefile
index 5744bf1997fd..d96b0aa8f39f 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -66,7 +66,6 @@ obj-$(CONFIG_MAC802154) += mac802154/
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_SYSCTL) += sysctl_net.o
endif
-obj-$(CONFIG_WIMAX) += wimax/
obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/
obj-$(CONFIG_CEPH_LIB) += ceph/
obj-$(CONFIG_BATMAN_ADV) += batman-adv/
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 45f584171de7..be18af481d7d 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -44,15 +44,15 @@ int sysctl_aarp_resolve_time = AARP_RESOLVE_TIME;
/* Lists of aarp entries */
/**
* struct aarp_entry - AARP entry
- * @last_sent - Last time we xmitted the aarp request
- * @packet_queue - Queue of frames wait for resolution
- * @status - Used for proxy AARP
- * expires_at - Entry expiry time
- * target_addr - DDP Address
- * dev - Device to use
- * hwaddr - Physical i/f address of target/router
- * xmit_count - When this hits 10 we give up
- * next - Next entry in chain
+ * @last_sent: Last time we xmitted the aarp request
+ * @packet_queue: Queue of frames wait for resolution
+ * @status: Used for proxy AARP
+ * @expires_at: Entry expiry time
+ * @target_addr: DDP Address
+ * @dev: Device to use
+ * @hwaddr: Physical i/f address of target/router
+ * @xmit_count: When this hits 10 we give up
+ * @next: Next entry in chain
*/
struct aarp_entry {
/* These first two are only used for unresolved entries */
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 1d48708c5a2e..ca1a0d07a087 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1407,9 +1407,10 @@ drop:
/**
* atalk_rcv - Receive a packet (in skb) from device dev
- * @skb - packet received
- * @dev - network device where the packet comes from
- * @pt - packet type
+ * @skb: packet received
+ * @dev: network device where the packet comes from
+ * @pt: packet type
+ * @orig_dev: the original receive net device
*
* Receive a packet (in skb) from device dev. This has come from the SNAP
* decoder, and on entry skb->transport_header is the DDP header, skb->len
diff --git a/net/atm/raw.c b/net/atm/raw.c
index b3ba44aab0ee..2b5f78a7ec3e 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -54,6 +54,8 @@ static int atm_send_aal0(struct atm_vcc *vcc, struct sk_buff *skb)
kfree_skb(skb);
return -EADDRNOTAVAIL;
}
+ if (vcc->dev->ops->send_bh)
+ return vcc->dev->ops->send_bh(vcc, skb);
return vcc->dev->ops->send(vcc, skb);
}
@@ -71,7 +73,10 @@ int atm_init_aal34(struct atm_vcc *vcc)
vcc->push = atm_push_raw;
vcc->pop = atm_pop_raw;
vcc->push_oam = NULL;
- vcc->send = vcc->dev->ops->send;
+ if (vcc->dev->ops->send_bh)
+ vcc->send = vcc->dev->ops->send_bh;
+ else
+ vcc->send = vcc->dev->ops->send;
return 0;
}
@@ -80,7 +85,10 @@ int atm_init_aal5(struct atm_vcc *vcc)
vcc->push = atm_push_raw;
vcc->pop = atm_pop_raw;
vcc->push_oam = NULL;
- vcc->send = vcc->dev->ops->send;
+ if (vcc->dev->ops->send_bh)
+ vcc->send = vcc->dev->ops->send_bh;
+ else
+ vcc->send = vcc->dev->ops->send;
return 0;
}
EXPORT_SYMBOL(atm_init_aal5);
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index c762758a4649..993afd5ff7bb 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -76,37 +76,14 @@ config BATMAN_ADV_MCAST
reduce the air overhead while improving the reliability of
multicast messages.
-config BATMAN_ADV_DEBUGFS
- bool "batman-adv debugfs entries"
- depends on BATMAN_ADV
- depends on DEBUG_FS
- help
- Enable this to export routing related debug tables via debugfs.
- The information for each soft-interface and used hard-interface can be
- found under batman_adv/
-
- If unsure, say N.
-
config BATMAN_ADV_DEBUG
bool "B.A.T.M.A.N. debugging"
depends on BATMAN_ADV
help
This is an option for use by developers; most people should
say N here. This enables compilation of support for
- outputting debugging information to the debugfs log or tracing
- buffer. The output is controlled via the batadv netdev specific
- log_level setting.
-
-config BATMAN_ADV_SYSFS
- bool "batman-adv sysfs entries"
- depends on BATMAN_ADV
- help
- Say Y here if you want to enable batman-adv device configuration and
- status interface through sysfs attributes. It is replaced by the
- batadv generic netlink family but still used by various userspace
- tools and scripts.
-
- If unsure, say Y.
+ outputting debugging information to the tracing buffer. The output is
+ controlled via the batadv netdev specific log_level setting.
config BATMAN_ADV_TRACING
bool "B.A.T.M.A.N. tracing support"
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index daa49af7ff40..8010c34b987c 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -11,14 +11,12 @@ batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_elp.o
batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_ogm.o
batman-adv-y += bitarray.o
batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
-batman-adv-$(CONFIG_BATMAN_ADV_DEBUGFS) += debugfs.o
batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
batman-adv-y += fragmentation.o
batman-adv-y += gateway_client.o
batman-adv-y += gateway_common.o
batman-adv-y += hard-interface.o
batman-adv-y += hash.o
-batman-adv-$(CONFIG_BATMAN_ADV_DEBUGFS) += icmp_socket.o
batman-adv-$(CONFIG_BATMAN_ADV_DEBUG) += log.o
batman-adv-y += main.o
batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
@@ -28,7 +26,6 @@ batman-adv-y += originator.o
batman-adv-y += routing.o
batman-adv-y += send.o
batman-adv-y += soft-interface.o
-batman-adv-$(CONFIG_BATMAN_ADV_SYSFS) += sysfs.o
batman-adv-$(CONFIG_BATMAN_ADV_TRACING) += trace.o
batman-adv-y += tp_meter.o
batman-adv-y += translation-table.o
diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c
index 382fbe51fd34..c5f404f6892f 100644
--- a/net/batman-adv/bat_algo.c
+++ b/net/batman-adv/bat_algo.c
@@ -11,7 +11,6 @@
#include <linux/moduleparam.h>
#include <linux/netlink.h>
#include <linux/printk.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/string.h>
@@ -34,7 +33,13 @@ void batadv_algo_init(void)
INIT_HLIST_HEAD(&batadv_algo_list);
}
-static struct batadv_algo_ops *batadv_algo_get(char *name)
+/**
+ * batadv_algo_get() - Search for algorithm with specific name
+ * @name: algorithm name to find
+ *
+ * Return: Pointer to batadv_algo_ops on success, NULL otherwise
+ */
+struct batadv_algo_ops *batadv_algo_get(const char *name)
{
struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
@@ -97,7 +102,7 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
*
* Return: 0 on success or negative error number in case of failure
*/
-int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
+int batadv_algo_select(struct batadv_priv *bat_priv, const char *name)
{
struct batadv_algo_ops *bat_algo_ops;
@@ -110,29 +115,6 @@ int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
return 0;
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-
-/**
- * batadv_algo_seq_print_text() - Print the supported algorithms in a seq file
- * @seq: seq file to print on
- * @offset: not used
- *
- * Return: always 0
- */
-int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct batadv_algo_ops *bat_algo_ops;
-
- seq_puts(seq, "Available routing algorithms:\n");
-
- hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
- seq_printf(seq, " * %s\n", bat_algo_ops->name);
- }
-
- return 0;
-}
-#endif
-
static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
struct batadv_algo_ops *bat_algo_ops;
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 686a60bc9492..43b045ac8ac7 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -10,7 +10,6 @@
#include "main.h"
#include <linux/netlink.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/types.h>
@@ -18,9 +17,9 @@ extern char batadv_routing_algo[];
extern struct list_head batadv_hardif_list;
void batadv_algo_init(void);
+struct batadv_algo_ops *batadv_algo_get(const char *name);
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
-int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
-int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_algo_select(struct batadv_priv *bat_priv, const char *name);
int batadv_algo_dump(struct sk_buff *msg, struct netlink_callback *cb);
#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 206d0b424712..168621c9a081 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -32,7 +32,6 @@
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -1780,106 +1779,6 @@ free_skb:
return ret;
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_iv_ogm_orig_print_neigh() - print neighbors for the originator table
- * @orig_node: the orig_node for which the neighbors are printed
- * @if_outgoing: outgoing interface for these entries
- * @seq: debugfs table seq_file struct
- *
- * Must be called while holding an rcu lock.
- */
-static void
-batadv_iv_ogm_orig_print_neigh(struct batadv_orig_node *orig_node,
- struct batadv_hard_iface *if_outgoing,
- struct seq_file *seq)
-{
- struct batadv_neigh_node *neigh_node;
- struct batadv_neigh_ifinfo *n_ifinfo;
-
- hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
- n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
- if (!n_ifinfo)
- continue;
-
- seq_printf(seq, " %pM (%3i)",
- neigh_node->addr,
- n_ifinfo->bat_iv.tq_avg);
-
- batadv_neigh_ifinfo_put(n_ifinfo);
- }
-}
-
-/**
- * batadv_iv_ogm_orig_print() - print the originator table
- * @bat_priv: the bat priv with all the soft interface information
- * @seq: debugfs table seq_file struct
- * @if_outgoing: the outgoing interface for which this should be printed
- */
-static void batadv_iv_ogm_orig_print(struct batadv_priv *bat_priv,
- struct seq_file *seq,
- struct batadv_hard_iface *if_outgoing)
-{
- struct batadv_neigh_node *neigh_node;
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- int last_seen_msecs, last_seen_secs;
- struct batadv_orig_node *orig_node;
- struct batadv_neigh_ifinfo *n_ifinfo;
- unsigned long last_seen_jiffies;
- struct hlist_head *head;
- int batman_count = 0;
- u32 i;
-
- seq_puts(seq,
- " Originator last-seen (#/255) Nexthop [outgoingIF]: Potential nexthops ...\n");
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- neigh_node = batadv_orig_router_get(orig_node,
- if_outgoing);
- if (!neigh_node)
- continue;
-
- n_ifinfo = batadv_neigh_ifinfo_get(neigh_node,
- if_outgoing);
- if (!n_ifinfo)
- goto next;
-
- if (n_ifinfo->bat_iv.tq_avg == 0)
- goto next;
-
- last_seen_jiffies = jiffies - orig_node->last_seen;
- last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
- last_seen_secs = last_seen_msecs / 1000;
- last_seen_msecs = last_seen_msecs % 1000;
-
- seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
- orig_node->orig, last_seen_secs,
- last_seen_msecs, n_ifinfo->bat_iv.tq_avg,
- neigh_node->addr,
- neigh_node->if_incoming->net_dev->name);
-
- batadv_iv_ogm_orig_print_neigh(orig_node, if_outgoing,
- seq);
- seq_putc(seq, '\n');
- batman_count++;
-
-next:
- batadv_neigh_node_put(neigh_node);
- if (n_ifinfo)
- batadv_neigh_ifinfo_put(n_ifinfo);
- }
- rcu_read_unlock();
- }
-
- if (batman_count == 0)
- seq_puts(seq, "No batman nodes in range ...\n");
-}
-#endif
-
/**
* batadv_iv_ogm_neigh_get_tq_avg() - Get the TQ average for a neighbour on a
* given outgoing interface.
@@ -2109,59 +2008,6 @@ batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
cb->args[2] = sub;
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_iv_hardif_neigh_print() - print a single hop neighbour node
- * @seq: neighbour table seq_file struct
- * @hardif_neigh: hardif neighbour information
- */
-static void
-batadv_iv_hardif_neigh_print(struct seq_file *seq,
- struct batadv_hardif_neigh_node *hardif_neigh)
-{
- int last_secs, last_msecs;
-
- last_secs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen) / 1000;
- last_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen) % 1000;
-
- seq_printf(seq, " %10s %pM %4i.%03is\n",
- hardif_neigh->if_incoming->net_dev->name,
- hardif_neigh->addr, last_secs, last_msecs);
-}
-
-/**
- * batadv_iv_ogm_neigh_print() - print the single hop neighbour list
- * @bat_priv: the bat priv with all the soft interface information
- * @seq: neighbour table seq_file struct
- */
-static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
- struct seq_file *seq)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_hardif_neigh_node *hardif_neigh;
- struct batadv_hard_iface *hard_iface;
- int batman_count = 0;
-
- seq_puts(seq, " IF Neighbor last-seen\n");
-
- rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
- if (hard_iface->soft_iface != net_dev)
- continue;
-
- hlist_for_each_entry_rcu(hardif_neigh,
- &hard_iface->neigh_list, list) {
- batadv_iv_hardif_neigh_print(seq, hardif_neigh);
- batman_count++;
- }
- }
- rcu_read_unlock();
-
- if (batman_count == 0)
- seq_puts(seq, "No batman nodes in range ...\n");
-}
-#endif
-
/**
* batadv_iv_ogm_neigh_diff() - calculate tq difference of two neighbors
* @neigh1: the first neighbor object of the comparison
@@ -2557,72 +2403,6 @@ out:
return ret;
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/* fails if orig_node has no router */
-static int batadv_iv_gw_write_buffer_text(struct batadv_priv *bat_priv,
- struct seq_file *seq,
- const struct batadv_gw_node *gw_node)
-{
- struct batadv_gw_node *curr_gw;
- struct batadv_neigh_node *router;
- struct batadv_neigh_ifinfo *router_ifinfo = NULL;
- int ret = -1;
-
- router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
- if (!router)
- goto out;
-
- router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
- if (!router_ifinfo)
- goto out;
-
- curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
-
- seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
- (curr_gw == gw_node ? "=>" : " "),
- gw_node->orig_node->orig,
- router_ifinfo->bat_iv.tq_avg, router->addr,
- router->if_incoming->net_dev->name,
- gw_node->bandwidth_down / 10,
- gw_node->bandwidth_down % 10,
- gw_node->bandwidth_up / 10,
- gw_node->bandwidth_up % 10);
- ret = seq_has_overflowed(seq) ? -1 : 0;
-
- if (curr_gw)
- batadv_gw_node_put(curr_gw);
-out:
- if (router_ifinfo)
- batadv_neigh_ifinfo_put(router_ifinfo);
- if (router)
- batadv_neigh_node_put(router);
- return ret;
-}
-
-static void batadv_iv_gw_print(struct batadv_priv *bat_priv,
- struct seq_file *seq)
-{
- struct batadv_gw_node *gw_node;
- int gw_count = 0;
-
- seq_puts(seq,
- " Gateway (#/255) Nexthop [outgoingIF]: advertised uplink bandwidth\n");
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) {
- /* fails if orig_node has no router */
- if (batadv_iv_gw_write_buffer_text(bat_priv, seq, gw_node) < 0)
- continue;
-
- gw_count++;
- }
- rcu_read_unlock();
-
- if (gw_count == 0)
- seq_puts(seq, "No gateways in range ...\n");
-}
-#endif
-
/**
* batadv_iv_gw_dump_entry() - Dump a gateway into a message
* @msg: Netlink message to dump into
@@ -2747,24 +2527,15 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.neigh = {
.cmp = batadv_iv_ogm_neigh_cmp,
.is_similar_or_better = batadv_iv_ogm_neigh_is_sob,
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- .print = batadv_iv_neigh_print,
-#endif
.dump = batadv_iv_ogm_neigh_dump,
},
.orig = {
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- .print = batadv_iv_ogm_orig_print,
-#endif
.dump = batadv_iv_ogm_orig_dump,
},
.gw = {
.init_sel_class = batadv_iv_init_sel_class,
.get_best_gw_node = batadv_iv_gw_get_best_gw_node,
.is_eligible = batadv_iv_gw_is_eligible,
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- .print = batadv_iv_gw_print,
-#endif
.dump = batadv_iv_gw_dump,
},
};
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 0ecaf1bb0068..e4455babe4c2 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -13,14 +13,13 @@
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jiffies.h>
-#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
@@ -119,92 +118,6 @@ batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
batadv_v_elp_throughput_metric_update);
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_v_orig_print_neigh() - print neighbors for the originator table
- * @orig_node: the orig_node for which the neighbors are printed
- * @if_outgoing: outgoing interface for these entries
- * @seq: debugfs table seq_file struct
- *
- * Must be called while holding an rcu lock.
- */
-static void
-batadv_v_orig_print_neigh(struct batadv_orig_node *orig_node,
- struct batadv_hard_iface *if_outgoing,
- struct seq_file *seq)
-{
- struct batadv_neigh_node *neigh_node;
- struct batadv_neigh_ifinfo *n_ifinfo;
-
- hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
- n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
- if (!n_ifinfo)
- continue;
-
- seq_printf(seq, " %pM (%9u.%1u)",
- neigh_node->addr,
- n_ifinfo->bat_v.throughput / 10,
- n_ifinfo->bat_v.throughput % 10);
-
- batadv_neigh_ifinfo_put(n_ifinfo);
- }
-}
-
-/**
- * batadv_v_hardif_neigh_print() - print a single ELP neighbour node
- * @seq: neighbour table seq_file struct
- * @hardif_neigh: hardif neighbour information
- */
-static void
-batadv_v_hardif_neigh_print(struct seq_file *seq,
- struct batadv_hardif_neigh_node *hardif_neigh)
-{
- int last_secs, last_msecs;
- u32 throughput;
-
- last_secs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen) / 1000;
- last_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen) % 1000;
- throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
-
- seq_printf(seq, "%pM %4i.%03is (%9u.%1u) [%10s]\n",
- hardif_neigh->addr, last_secs, last_msecs, throughput / 10,
- throughput % 10, hardif_neigh->if_incoming->net_dev->name);
-}
-
-/**
- * batadv_v_neigh_print() - print the single hop neighbour list
- * @bat_priv: the bat priv with all the soft interface information
- * @seq: neighbour table seq_file struct
- */
-static void batadv_v_neigh_print(struct batadv_priv *bat_priv,
- struct seq_file *seq)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_hardif_neigh_node *hardif_neigh;
- struct batadv_hard_iface *hard_iface;
- int batman_count = 0;
-
- seq_puts(seq,
- " Neighbor last-seen ( throughput) [ IF]\n");
-
- rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
- if (hard_iface->soft_iface != net_dev)
- continue;
-
- hlist_for_each_entry_rcu(hardif_neigh,
- &hard_iface->neigh_list, list) {
- batadv_v_hardif_neigh_print(seq, hardif_neigh);
- batman_count++;
- }
- }
- rcu_read_unlock();
-
- if (batman_count == 0)
- seq_puts(seq, "No batman nodes in range ...\n");
-}
-#endif
-
/**
* batadv_v_neigh_dump_neigh() - Dump a neighbour into a message
* @msg: Netlink message to dump into
@@ -337,75 +250,6 @@ batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
cb->args[1] = idx;
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_v_orig_print() - print the originator table
- * @bat_priv: the bat priv with all the soft interface information
- * @seq: debugfs table seq_file struct
- * @if_outgoing: the outgoing interface for which this should be printed
- */
-static void batadv_v_orig_print(struct batadv_priv *bat_priv,
- struct seq_file *seq,
- struct batadv_hard_iface *if_outgoing)
-{
- struct batadv_neigh_node *neigh_node;
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- int last_seen_msecs, last_seen_secs;
- struct batadv_orig_node *orig_node;
- struct batadv_neigh_ifinfo *n_ifinfo;
- unsigned long last_seen_jiffies;
- struct hlist_head *head;
- int batman_count = 0;
- u32 i;
-
- seq_puts(seq,
- " Originator last-seen ( throughput) Nexthop [outgoingIF]: Potential nexthops ...\n");
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- neigh_node = batadv_orig_router_get(orig_node,
- if_outgoing);
- if (!neigh_node)
- continue;
-
- n_ifinfo = batadv_neigh_ifinfo_get(neigh_node,
- if_outgoing);
- if (!n_ifinfo)
- goto next;
-
- last_seen_jiffies = jiffies - orig_node->last_seen;
- last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
- last_seen_secs = last_seen_msecs / 1000;
- last_seen_msecs = last_seen_msecs % 1000;
-
- seq_printf(seq, "%pM %4i.%03is (%9u.%1u) %pM [%10s]:",
- orig_node->orig, last_seen_secs,
- last_seen_msecs,
- n_ifinfo->bat_v.throughput / 10,
- n_ifinfo->bat_v.throughput % 10,
- neigh_node->addr,
- neigh_node->if_incoming->net_dev->name);
-
- batadv_v_orig_print_neigh(orig_node, if_outgoing, seq);
- seq_putc(seq, '\n');
- batman_count++;
-
-next:
- batadv_neigh_node_put(neigh_node);
- if (n_ifinfo)
- batadv_neigh_ifinfo_put(n_ifinfo);
- }
- rcu_read_unlock();
- }
-
- if (batman_count == 0)
- seq_puts(seq, "No batman nodes in range ...\n");
-}
-#endif
-
/**
* batadv_v_orig_dump_subentry() - Dump an originator subentry into a message
* @msg: Netlink message to dump into
@@ -685,13 +529,6 @@ static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
return count;
}
-static ssize_t batadv_v_show_sel_class(struct batadv_priv *bat_priv, char *buff)
-{
- u32 class = atomic_read(&bat_priv->gw.sel_class);
-
- return sprintf(buff, "%u.%u MBit\n", class / 10, class % 10);
-}
-
/**
* batadv_v_gw_throughput_get() - retrieve the GW-bandwidth for a given GW
* @gw_node: the GW to retrieve the metric for
@@ -829,78 +666,6 @@ out:
return ret;
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/* fails if orig_node has no router */
-static int batadv_v_gw_write_buffer_text(struct batadv_priv *bat_priv,
- struct seq_file *seq,
- const struct batadv_gw_node *gw_node)
-{
- struct batadv_gw_node *curr_gw;
- struct batadv_neigh_node *router;
- struct batadv_neigh_ifinfo *router_ifinfo = NULL;
- int ret = -1;
-
- router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
- if (!router)
- goto out;
-
- router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
- if (!router_ifinfo)
- goto out;
-
- curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
-
- seq_printf(seq, "%s %pM (%9u.%1u) %pM [%10s]: %u.%u/%u.%u MBit\n",
- (curr_gw == gw_node ? "=>" : " "),
- gw_node->orig_node->orig,
- router_ifinfo->bat_v.throughput / 10,
- router_ifinfo->bat_v.throughput % 10, router->addr,
- router->if_incoming->net_dev->name,
- gw_node->bandwidth_down / 10,
- gw_node->bandwidth_down % 10,
- gw_node->bandwidth_up / 10,
- gw_node->bandwidth_up % 10);
- ret = seq_has_overflowed(seq) ? -1 : 0;
-
- if (curr_gw)
- batadv_gw_node_put(curr_gw);
-out:
- if (router_ifinfo)
- batadv_neigh_ifinfo_put(router_ifinfo);
- if (router)
- batadv_neigh_node_put(router);
- return ret;
-}
-
-/**
- * batadv_v_gw_print() - print the gateway list
- * @bat_priv: the bat priv with all the soft interface information
- * @seq: gateway table seq_file struct
- */
-static void batadv_v_gw_print(struct batadv_priv *bat_priv,
- struct seq_file *seq)
-{
- struct batadv_gw_node *gw_node;
- int gw_count = 0;
-
- seq_puts(seq,
- " Gateway ( throughput) Nexthop [outgoingIF]: advertised uplink bandwidth\n");
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) {
- /* fails if orig_node has no router */
- if (batadv_v_gw_write_buffer_text(bat_priv, seq, gw_node) < 0)
- continue;
-
- gw_count++;
- }
- rcu_read_unlock();
-
- if (gw_count == 0)
- seq_puts(seq, "No gateways in range ...\n");
-}
-#endif
-
/**
* batadv_v_gw_dump_entry() - Dump a gateway into a message
* @msg: Netlink message to dump into
@@ -1046,26 +811,16 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
.hardif_init = batadv_v_hardif_neigh_init,
.cmp = batadv_v_neigh_cmp,
.is_similar_or_better = batadv_v_neigh_is_sob,
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- .print = batadv_v_neigh_print,
-#endif
.dump = batadv_v_neigh_dump,
},
.orig = {
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- .print = batadv_v_orig_print,
-#endif
.dump = batadv_v_orig_dump,
},
.gw = {
.init_sel_class = batadv_v_init_sel_class,
.store_sel_class = batadv_v_store_sel_class,
- .show_sel_class = batadv_v_show_sel_class,
.get_best_gw_node = batadv_v_gw_get_best_gw_node,
.is_eligible = batadv_v_gw_is_eligible,
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- .print = batadv_v_gw_print,
-#endif
.dump = batadv_v_gw_dump,
},
};
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 79a7dfc32e76..0512ea6cd818 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -18,6 +18,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
+#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/nl80211.h>
#include <linux/prandom.h>
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 8c1148fc73d7..798d659855d0 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -18,6 +18,7 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
+#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/prandom.h>
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ba0027d1f2df..d2de12e527ba 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -28,7 +28,6 @@
#include <linux/preempt.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -2115,69 +2114,6 @@ out:
return ret;
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq file
- * @seq: seq file to print on
- * @offset: not used
- *
- * Return: always 0
- */
-int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
- struct batadv_bla_backbone_gw *backbone_gw;
- struct batadv_bla_claim *claim;
- struct batadv_hard_iface *primary_if;
- struct hlist_head *head;
- u16 backbone_crc;
- u32 i;
- bool is_own;
- u8 *primary_addr;
-
- primary_if = batadv_seq_print_text_primary_if_get(seq);
- if (!primary_if)
- goto out;
-
- primary_addr = primary_if->net_dev->dev_addr;
- seq_printf(seq,
- "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
- net_dev->name, primary_addr,
- ntohs(bat_priv->bla.claim_dest.group));
- seq_puts(seq,
- " Client VID Originator [o] (CRC )\n");
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(claim, head, hash_entry) {
- backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
-
- is_own = batadv_compare_eth(backbone_gw->orig,
- primary_addr);
-
- spin_lock_bh(&backbone_gw->crc_lock);
- backbone_crc = backbone_gw->crc;
- spin_unlock_bh(&backbone_gw->crc_lock);
- seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
- claim->addr, batadv_print_vid(claim->vid),
- backbone_gw->orig,
- (is_own ? 'x' : ' '),
- backbone_crc);
-
- batadv_backbone_gw_put(backbone_gw);
- }
- rcu_read_unlock();
- }
-out:
- if (primary_if)
- batadv_hardif_put(primary_if);
- return 0;
-}
-#endif
-
/**
* batadv_bla_claim_dump_entry() - dump one entry of the claim table
* to a netlink socket
@@ -2348,72 +2284,6 @@ out:
return ret;
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
- * seq file
- * @seq: seq file to print on
- * @offset: not used
- *
- * Return: always 0
- */
-int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
- struct batadv_bla_backbone_gw *backbone_gw;
- struct batadv_hard_iface *primary_if;
- struct hlist_head *head;
- int secs, msecs;
- u16 backbone_crc;
- u32 i;
- bool is_own;
- u8 *primary_addr;
-
- primary_if = batadv_seq_print_text_primary_if_get(seq);
- if (!primary_if)
- goto out;
-
- primary_addr = primary_if->net_dev->dev_addr;
- seq_printf(seq,
- "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
- net_dev->name, primary_addr,
- ntohs(bat_priv->bla.claim_dest.group));
- seq_puts(seq, " Originator VID last seen (CRC )\n");
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
- msecs = jiffies_to_msecs(jiffies -
- backbone_gw->lasttime);
- secs = msecs / 1000;
- msecs = msecs % 1000;
-
- is_own = batadv_compare_eth(backbone_gw->orig,
- primary_addr);
- if (is_own)
- continue;
-
- spin_lock_bh(&backbone_gw->crc_lock);
- backbone_crc = backbone_gw->crc;
- spin_unlock_bh(&backbone_gw->crc_lock);
-
- seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
- backbone_gw->orig,
- batadv_print_vid(backbone_gw->vid), secs,
- msecs, backbone_crc);
- }
- rcu_read_unlock();
- }
-out:
- if (primary_if)
- batadv_hardif_put(primary_if);
- return 0;
-}
-#endif
-
/**
* batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
* netlink socket
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index a81c41b636f9..7dc6d3571925 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -12,7 +12,6 @@
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/types.h>
@@ -41,10 +40,7 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
struct batadv_orig_node *orig_node,
int hdr_size);
-int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb);
-int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
- void *offset);
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb);
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
unsigned short vid);
@@ -84,18 +80,6 @@ static inline bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
return false;
}
-static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
- void *offset)
-{
- return 0;
-}
-
-static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
- void *offset)
-{
- return 0;
-}
-
static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
u8 *orig, unsigned short vid)
{
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
deleted file mode 100644
index 452856c27d20..000000000000
--- a/net/batman-adv/debugfs.c
+++ /dev/null
@@ -1,442 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2010-2020 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- */
-
-#include "debugfs.h"
-#include "main.h"
-
-#include <asm/current.h>
-#include <linux/dcache.h>
-#include <linux/debugfs.h>
-#include <linux/errno.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/netdevice.h>
-#include <linux/printk.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/stat.h>
-#include <linux/stddef.h>
-#include <linux/stringify.h>
-#include <linux/sysfs.h>
-#include <net/net_namespace.h>
-
-#include "bat_algo.h"
-#include "bridge_loop_avoidance.h"
-#include "distributed-arp-table.h"
-#include "gateway_client.h"
-#include "icmp_socket.h"
-#include "log.h"
-#include "multicast.h"
-#include "network-coding.h"
-#include "originator.h"
-#include "translation-table.h"
-
-static struct dentry *batadv_debugfs;
-
-/**
- * batadv_debugfs_deprecated() - Log use of deprecated batadv debugfs access
- * @file: file which was accessed
- * @alt: explanation what can be used as alternative
- */
-void batadv_debugfs_deprecated(struct file *file, const char *alt)
-{
- struct dentry *dentry = file_dentry(file);
- const char *name = dentry->d_name.name;
-
- pr_warn_ratelimited(DEPRECATED "%s (pid %d) Use of debugfs file \"%s\".\n%s",
- current->comm, task_pid_nr(current), name, alt);
-}
-
-static int batadv_algorithms_open(struct inode *inode, struct file *file)
-{
- batadv_debugfs_deprecated(file,
- "Use genl command BATADV_CMD_GET_ROUTING_ALGOS instead\n");
- return single_open(file, batadv_algo_seq_print_text, NULL);
-}
-
-static int neighbors_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
-
- batadv_debugfs_deprecated(file,
- "Use genl command BATADV_CMD_GET_NEIGHBORS instead\n");
- return single_open(file, batadv_hardif_neigh_seq_print_text, net_dev);
-}
-
-static int batadv_originators_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
-
- batadv_debugfs_deprecated(file,
- "Use genl command BATADV_CMD_GET_ORIGINATORS instead\n");
- return single_open(file, batadv_orig_seq_print_text, net_dev);
-}
-
-/**
- * batadv_originators_hardif_open() - handles debugfs output for the originator
- * table of a hard interface
- * @inode: inode pointer to debugfs file
- * @file: pointer to the seq_file
- *
- * Return: 0 on success or negative error number in case of failure
- */
-static int batadv_originators_hardif_open(struct inode *inode,
- struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
-
- batadv_debugfs_deprecated(file,
- "Use genl command BATADV_CMD_GET_HARDIFS instead\n");
- return single_open(file, batadv_orig_hardif_seq_print_text, net_dev);
-}
-
-static int batadv_gateways_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
-
- batadv_debugfs_deprecated(file,
- "Use genl command BATADV_CMD_GET_GATEWAYS instead\n");
- return single_open(file, batadv_gw_client_seq_print_text, net_dev);
-}
-
-static int batadv_transtable_global_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
-
- batadv_debugfs_deprecated(file,
- "Use genl command BATADV_CMD_GET_TRANSTABLE_GLOBAL instead\n");
- return single_open(file, batadv_tt_global_seq_print_text, net_dev);
-}
-
-#ifdef CONFIG_BATMAN_ADV_BLA
-static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
-
- batadv_debugfs_deprecated(file,
- "Use genl command BATADV_CMD_GET_BLA_CLAIM instead\n");
- return single_open(file, batadv_bla_claim_table_seq_print_text,
- net_dev);
-}
-
-static int batadv_bla_backbone_table_open(struct inode *inode,
- struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
-
- batadv_debugfs_deprecated(file,
- "Use genl command BATADV_CMD_GET_BLA_BACKBONE instead\n");
- return single_open(file, batadv_bla_backbone_table_seq_print_text,
- net_dev);
-}
-
-#endif
-
-#ifdef CONFIG_BATMAN_ADV_DAT
-/**
- * batadv_dat_cache_open() - Prepare file handler for reads from dat_cache
- * @inode: inode which was opened
- * @file: file handle to be initialized
- *
- * Return: 0 on success or negative error number in case of failure
- */
-static int batadv_dat_cache_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
-
- batadv_debugfs_deprecated(file,
- "Use genl command BATADV_CMD_GET_DAT_CACHE instead\n");
- return single_open(file, batadv_dat_cache_seq_print_text, net_dev);
-}
-#endif
-
-static int batadv_transtable_local_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
-
- batadv_debugfs_deprecated(file,
- "Use genl command BATADV_CMD_GET_TRANSTABLE_LOCAL instead\n");
- return single_open(file, batadv_tt_local_seq_print_text, net_dev);
-}
-
-struct batadv_debuginfo {
- struct attribute attr;
- const struct file_operations fops;
-};
-
-#ifdef CONFIG_BATMAN_ADV_NC
-static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
-
- batadv_debugfs_deprecated(file, "");
- return single_open(file, batadv_nc_nodes_seq_print_text, net_dev);
-}
-#endif
-
-#ifdef CONFIG_BATMAN_ADV_MCAST
-/**
- * batadv_mcast_flags_open() - prepare file handler for reads from mcast_flags
- * @inode: inode which was opened
- * @file: file handle to be initialized
- *
- * Return: 0 on success or negative error number in case of failure
- */
-static int batadv_mcast_flags_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
-
- batadv_debugfs_deprecated(file,
- "Use genl command BATADV_CMD_GET_MCAST_FLAGS instead\n");
- return single_open(file, batadv_mcast_flags_seq_print_text, net_dev);
-}
-#endif
-
-#define BATADV_DEBUGINFO(_name, _mode, _open) \
-struct batadv_debuginfo batadv_debuginfo_##_name = { \
- .attr = { \
- .name = __stringify(_name), \
- .mode = _mode, \
- }, \
- .fops = { \
- .owner = THIS_MODULE, \
- .open = _open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- }, \
-}
-
-/* the following attributes are general and therefore they will be directly
- * placed in the BATADV_DEBUGFS_SUBDIR subdirectory of debugfs
- */
-static BATADV_DEBUGINFO(routing_algos, 0444, batadv_algorithms_open);
-
-static struct batadv_debuginfo *batadv_general_debuginfos[] = {
- &batadv_debuginfo_routing_algos,
- NULL,
-};
-
-/* The following attributes are per soft interface */
-static BATADV_DEBUGINFO(neighbors, 0444, neighbors_open);
-static BATADV_DEBUGINFO(originators, 0444, batadv_originators_open);
-static BATADV_DEBUGINFO(gateways, 0444, batadv_gateways_open);
-static BATADV_DEBUGINFO(transtable_global, 0444, batadv_transtable_global_open);
-#ifdef CONFIG_BATMAN_ADV_BLA
-static BATADV_DEBUGINFO(bla_claim_table, 0444, batadv_bla_claim_table_open);
-static BATADV_DEBUGINFO(bla_backbone_table, 0444,
- batadv_bla_backbone_table_open);
-#endif
-#ifdef CONFIG_BATMAN_ADV_DAT
-static BATADV_DEBUGINFO(dat_cache, 0444, batadv_dat_cache_open);
-#endif
-static BATADV_DEBUGINFO(transtable_local, 0444, batadv_transtable_local_open);
-#ifdef CONFIG_BATMAN_ADV_NC
-static BATADV_DEBUGINFO(nc_nodes, 0444, batadv_nc_nodes_open);
-#endif
-#ifdef CONFIG_BATMAN_ADV_MCAST
-static BATADV_DEBUGINFO(mcast_flags, 0444, batadv_mcast_flags_open);
-#endif
-
-static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
- &batadv_debuginfo_neighbors,
- &batadv_debuginfo_originators,
- &batadv_debuginfo_gateways,
- &batadv_debuginfo_transtable_global,
-#ifdef CONFIG_BATMAN_ADV_BLA
- &batadv_debuginfo_bla_claim_table,
- &batadv_debuginfo_bla_backbone_table,
-#endif
-#ifdef CONFIG_BATMAN_ADV_DAT
- &batadv_debuginfo_dat_cache,
-#endif
- &batadv_debuginfo_transtable_local,
-#ifdef CONFIG_BATMAN_ADV_NC
- &batadv_debuginfo_nc_nodes,
-#endif
-#ifdef CONFIG_BATMAN_ADV_MCAST
- &batadv_debuginfo_mcast_flags,
-#endif
- NULL,
-};
-
-#define BATADV_HARDIF_DEBUGINFO(_name, _mode, _open) \
-struct batadv_debuginfo batadv_hardif_debuginfo_##_name = { \
- .attr = { \
- .name = __stringify(_name), \
- .mode = _mode, \
- }, \
- .fops = { \
- .owner = THIS_MODULE, \
- .open = _open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- }, \
-}
-
-static BATADV_HARDIF_DEBUGINFO(originators, 0444,
- batadv_originators_hardif_open);
-
-static struct batadv_debuginfo *batadv_hardif_debuginfos[] = {
- &batadv_hardif_debuginfo_originators,
- NULL,
-};
-
-/**
- * batadv_debugfs_init() - Initialize soft interface independent debugfs entries
- */
-void batadv_debugfs_init(void)
-{
- struct batadv_debuginfo **bat_debug;
-
- batadv_debugfs = debugfs_create_dir(BATADV_DEBUGFS_SUBDIR, NULL);
-
- for (bat_debug = batadv_general_debuginfos; *bat_debug; ++bat_debug)
- debugfs_create_file(((*bat_debug)->attr).name,
- S_IFREG | ((*bat_debug)->attr).mode,
- batadv_debugfs, NULL, &(*bat_debug)->fops);
-}
-
-/**
- * batadv_debugfs_destroy() - Remove all debugfs entries
- */
-void batadv_debugfs_destroy(void)
-{
- debugfs_remove_recursive(batadv_debugfs);
- batadv_debugfs = NULL;
-}
-
-/**
- * batadv_debugfs_add_hardif() - creates the base directory for a hard interface
- * in debugfs.
- * @hard_iface: hard interface which should be added.
- */
-void batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
-{
- struct net *net = dev_net(hard_iface->net_dev);
- struct batadv_debuginfo **bat_debug;
-
- if (net != &init_net)
- return;
-
- hard_iface->debug_dir = debugfs_create_dir(hard_iface->net_dev->name,
- batadv_debugfs);
-
- for (bat_debug = batadv_hardif_debuginfos; *bat_debug; ++bat_debug)
- debugfs_create_file(((*bat_debug)->attr).name,
- S_IFREG | ((*bat_debug)->attr).mode,
- hard_iface->debug_dir, hard_iface->net_dev,
- &(*bat_debug)->fops);
-}
-
-/**
- * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif
- * @hard_iface: hard interface which was renamed
- */
-void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
-{
- const char *name = hard_iface->net_dev->name;
- struct dentry *dir;
-
- dir = hard_iface->debug_dir;
- if (!dir)
- return;
-
- debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
-}
-
-/**
- * batadv_debugfs_del_hardif() - delete the base directory for a hard interface
- * in debugfs.
- * @hard_iface: hard interface which is deleted.
- */
-void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
-{
- struct net *net = dev_net(hard_iface->net_dev);
-
- if (net != &init_net)
- return;
-
- if (batadv_debugfs) {
- debugfs_remove_recursive(hard_iface->debug_dir);
- hard_iface->debug_dir = NULL;
- }
-}
-
-/**
- * batadv_debugfs_add_meshif() - Initialize interface dependent debugfs entries
- * @dev: netdev struct of the soft interface
- *
- * Return: 0 on success or negative error number in case of failure
- */
-int batadv_debugfs_add_meshif(struct net_device *dev)
-{
- struct batadv_priv *bat_priv = netdev_priv(dev);
- struct batadv_debuginfo **bat_debug;
- struct net *net = dev_net(dev);
-
- if (net != &init_net)
- return 0;
-
- bat_priv->debug_dir = debugfs_create_dir(dev->name, batadv_debugfs);
-
- batadv_socket_setup(bat_priv);
-
- if (batadv_debug_log_setup(bat_priv) < 0)
- goto rem_attr;
-
- for (bat_debug = batadv_mesh_debuginfos; *bat_debug; ++bat_debug)
- debugfs_create_file(((*bat_debug)->attr).name,
- S_IFREG | ((*bat_debug)->attr).mode,
- bat_priv->debug_dir, dev,
- &(*bat_debug)->fops);
-
- batadv_nc_init_debugfs(bat_priv);
-
- return 0;
-rem_attr:
- debugfs_remove_recursive(bat_priv->debug_dir);
- bat_priv->debug_dir = NULL;
- return -ENOMEM;
-}
-
-/**
- * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif
- * @dev: net_device which was renamed
- */
-void batadv_debugfs_rename_meshif(struct net_device *dev)
-{
- struct batadv_priv *bat_priv = netdev_priv(dev);
- const char *name = dev->name;
- struct dentry *dir;
-
- dir = bat_priv->debug_dir;
- if (!dir)
- return;
-
- debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
-}
-
-/**
- * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries
- * @dev: netdev struct of the soft interface
- */
-void batadv_debugfs_del_meshif(struct net_device *dev)
-{
- struct batadv_priv *bat_priv = netdev_priv(dev);
- struct net *net = dev_net(dev);
-
- if (net != &init_net)
- return;
-
- batadv_debug_log_cleanup(bat_priv);
-
- if (batadv_debugfs) {
- debugfs_remove_recursive(bat_priv->debug_dir);
- bat_priv->debug_dir = NULL;
- }
-}
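
Every removed handler above wraps single_open() around a *_seq_print_text()
callback, with the net_device stashed in inode->i_private. A minimal sketch of
that seq_file idiom, using hypothetical example_* names rather than anything
from this patch:

#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/seq_file.h>

/* show callback: produces the whole file content in one pass */
static int example_show(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = seq->private;

	seq_printf(seq, "table for %s\n", net_dev->name);
	return 0;
}

/* open callback: bind the show function and pass i_private through */
static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}
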
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
deleted file mode 100644
index 7e2e8f586f42..000000000000
--- a/net/batman-adv/debugfs.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2010-2020 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- */
-
-#ifndef _NET_BATMAN_ADV_DEBUGFS_H_
-#define _NET_BATMAN_ADV_DEBUGFS_H_
-
-#include "main.h"
-
-#include <linux/fs.h>
-#include <linux/netdevice.h>
-
-#define BATADV_DEBUGFS_SUBDIR "batman_adv"
-
-#if IS_ENABLED(CONFIG_BATMAN_ADV_DEBUGFS)
-
-void batadv_debugfs_deprecated(struct file *file, const char *alt);
-void batadv_debugfs_init(void);
-void batadv_debugfs_destroy(void);
-int batadv_debugfs_add_meshif(struct net_device *dev);
-void batadv_debugfs_rename_meshif(struct net_device *dev);
-void batadv_debugfs_del_meshif(struct net_device *dev);
-void batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
-void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface);
-void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
-
-#else
-
-static inline void batadv_debugfs_deprecated(struct file *file, const char *alt)
-{
-}
-
-static inline void batadv_debugfs_init(void)
-{
-}
-
-static inline void batadv_debugfs_destroy(void)
-{
-}
-
-static inline int batadv_debugfs_add_meshif(struct net_device *dev)
-{
- return 0;
-}
-
-static inline void batadv_debugfs_rename_meshif(struct net_device *dev)
-{
-}
-
-static inline void batadv_debugfs_del_meshif(struct net_device *dev)
-{
-}
-
-static inline
-void batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
-{
-}
-
-static inline
-void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
-{
-}
-
-static inline
-void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
-{
-}
-
-#endif
-
-#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */
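
The else branch of the deleted header shows the usual Kconfig stub pattern:
callers invoke the functions unconditionally, and when the option is off the
empty inlines compile away. The same idiom, sketched with a hypothetical
option name:

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)

void example_feature_init(void);

#else

/* no-op stub: callers need no #ifdef and the call is optimized away */
static inline void example_feature_init(void)
{
}

#endif
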
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 0e6e53e9b5f3..fd7ba6bbdf85 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -26,7 +26,6 @@
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -842,60 +841,6 @@ void batadv_dat_free(struct batadv_priv *bat_priv)
batadv_dat_hash_free(bat_priv);
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_dat_cache_seq_print_text() - print the local DAT hash table
- * @seq: seq file to print on
- * @offset: not used
- *
- * Return: always 0
- */
-int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->dat.hash;
- struct batadv_dat_entry *dat_entry;
- struct batadv_hard_iface *primary_if;
- struct hlist_head *head;
- unsigned long last_seen_jiffies;
- int last_seen_msecs, last_seen_secs, last_seen_mins;
- u32 i;
-
- primary_if = batadv_seq_print_text_primary_if_get(seq);
- if (!primary_if)
- goto out;
-
- seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name);
- seq_puts(seq,
- " IPv4 MAC VID last-seen\n");
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
- last_seen_jiffies = jiffies - dat_entry->last_update;
- last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
- last_seen_mins = last_seen_msecs / 60000;
- last_seen_msecs = last_seen_msecs % 60000;
- last_seen_secs = last_seen_msecs / 1000;
-
- seq_printf(seq, " * %15pI4 %pM %4i %6i:%02i\n",
- &dat_entry->ip, dat_entry->mac_addr,
- batadv_print_vid(dat_entry->vid),
- last_seen_mins, last_seen_secs);
- }
- rcu_read_unlock();
- }
-
-out:
- if (primary_if)
- batadv_hardif_put(primary_if);
- return 0;
-}
-#endif
-
/**
* batadv_dat_cache_dump_entry() - dump one entry of the DAT cache table to a
* netlink socket
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index 4e031661682a..e980fb45693a 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -12,7 +12,6 @@
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <uapi/linux/batadv_packet.h>
@@ -74,7 +73,6 @@ batadv_dat_init_own_addr(struct batadv_priv *bat_priv,
int batadv_dat_init(struct batadv_priv *bat_priv);
void batadv_dat_free(struct batadv_priv *bat_priv);
-int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset);
int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb);
/**
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 1f1f5b0873b2..e522f1fcfd9a 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -14,8 +14,8 @@
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
-#include <linux/kernel.h>
#include <linux/lockdep.h>
+#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -27,7 +27,6 @@
#include "originator.h"
#include "routing.h"
#include "send.h"
-#include "soft-interface.h"
/**
* batadv_frag_clear_chain() - delete entries in the fragment buffer chain
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index ef3f85b576c4..cffe72f4edd7 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -25,7 +25,6 @@
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -511,44 +510,6 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv)
spin_unlock_bh(&bat_priv->gw.list_lock);
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-
-/**
- * batadv_gw_client_seq_print_text() - Print the gateway table in a seq file
- * @seq: seq file to print on
- * @offset: not used
- *
- * Return: always 0
- */
-int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hard_iface *primary_if;
-
- primary_if = batadv_seq_print_text_primary_if_get(seq);
- if (!primary_if)
- return 0;
-
- seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
- BATADV_SOURCE_VERSION, primary_if->net_dev->name,
- primary_if->net_dev->dev_addr, net_dev->name,
- bat_priv->algo_ops->name);
-
- batadv_hardif_put(primary_if);
-
- if (!bat_priv->algo_ops->gw.print) {
- seq_puts(seq,
- "No printing function for this routing protocol\n");
- return 0;
- }
-
- bat_priv->algo_ops->gw.print(bat_priv, seq);
-
- return 0;
-}
-#endif
-
/**
* batadv_gw_dump() - Dump gateways into a message
* @msg: Netlink message to dump into
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 88b5dba84354..2fbc500f0ac1 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -10,7 +10,6 @@
#include "main.h"
#include <linux/netlink.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <uapi/linux/batadv_packet.h>
@@ -31,7 +30,6 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv);
void batadv_gw_node_put(struct batadv_gw_node *gw_node);
struct batadv_gw_node *
batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv);
-int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb);
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
enum batadv_dhcp_recipient
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 33904595fc56..0f186ddc15e3 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -18,6 +18,7 @@
#include <linux/kref.h>
#include <linux/limits.h>
#include <linux/list.h>
+#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
@@ -31,14 +32,12 @@
#include "bat_v.h"
#include "bridge_loop_avoidance.h"
-#include "debugfs.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "log.h"
#include "originator.h"
#include "send.h"
#include "soft-interface.h"
-#include "sysfs.h"
#include "translation-table.h"
/**
@@ -846,11 +845,8 @@ static size_t batadv_hardif_cnt(const struct net_device *soft_iface)
/**
* batadv_hardif_disable_interface() - Remove hard interface from soft interface
* @hard_iface: hard interface to be removed
- * @autodel: whether to delete soft interface when it doesn't contain any other
- * slave interfaces
*/
-void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
- enum batadv_hard_if_cleanup autodel)
+void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hard_iface *primary_if = NULL;
@@ -888,13 +884,9 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
/* nobody uses this interface anymore */
- if (batadv_hardif_cnt(hard_iface->soft_iface) <= 1) {
+ if (batadv_hardif_cnt(hard_iface->soft_iface) <= 1)
batadv_gw_check_client_stop(bat_priv);
- if (autodel == BATADV_IF_CLEANUP_AUTO)
- batadv_softif_destroy_sysfs(hard_iface->soft_iface);
- }
-
hard_iface->soft_iface = NULL;
batadv_hardif_put(hard_iface);
@@ -907,7 +899,6 @@ static struct batadv_hard_iface *
batadv_hardif_add_interface(struct net_device *net_dev)
{
struct batadv_hard_iface *hard_iface;
- int ret;
ASSERT_RTNL();
@@ -920,16 +911,10 @@ batadv_hardif_add_interface(struct net_device *net_dev)
if (!hard_iface)
goto release_dev;
- ret = batadv_sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
- if (ret)
- goto free_if;
-
hard_iface->net_dev = net_dev;
hard_iface->soft_iface = NULL;
hard_iface->if_status = BATADV_IF_NOT_IN_USE;
- batadv_debugfs_add_hardif(hard_iface);
-
INIT_LIST_HEAD(&hard_iface->list);
INIT_HLIST_HEAD(&hard_iface->neigh_list);
@@ -953,8 +938,6 @@ batadv_hardif_add_interface(struct net_device *net_dev)
return hard_iface;
-free_if:
- kfree(hard_iface);
release_dev:
dev_put(net_dev);
out:
@@ -967,15 +950,12 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
/* first deactivate interface */
if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
- batadv_hardif_disable_interface(hard_iface,
- BATADV_IF_CLEANUP_KEEP);
+ batadv_hardif_disable_interface(hard_iface);
if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
return;
hard_iface->if_status = BATADV_IF_TO_BE_REMOVED;
- batadv_debugfs_del_hardif(hard_iface);
- batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
batadv_hardif_put(hard_iface);
}
@@ -993,13 +973,9 @@ static int batadv_hard_if_event_softif(unsigned long event,
switch (event) {
case NETDEV_REGISTER:
- batadv_sysfs_add_meshif(net_dev);
bat_priv = netdev_priv(net_dev);
batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
break;
- case NETDEV_CHANGENAME:
- batadv_debugfs_rename_meshif(net_dev);
- break;
}
return NOTIFY_DONE;
@@ -1064,9 +1040,6 @@ static int batadv_hard_if_event(struct notifier_block *this,
if (batadv_is_wifi_hardif(hard_iface))
hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
break;
- case NETDEV_CHANGENAME:
- batadv_debugfs_rename_hardif(hard_iface);
- break;
default:
break;
}
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index b1855d9d0b06..f4b8e9efef19 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -42,12 +42,6 @@ enum batadv_hard_if_state {
/** @BATADV_IF_TO_BE_ACTIVATED: interface is getting activated */
BATADV_IF_TO_BE_ACTIVATED,
-
- /**
- * @BATADV_IF_I_WANT_YOU: interface is queued up (using sysfs) to be
- * added as a slave interface of a batman-adv soft interface
- */
- BATADV_IF_I_WANT_YOU,
};
/**
@@ -73,22 +67,6 @@ enum batadv_hard_if_bcast {
BATADV_HARDIF_BCAST_DUPORIG,
};
-/**
- * enum batadv_hard_if_cleanup - Cleanup modes for soft_iface after slave removal
- */
-enum batadv_hard_if_cleanup {
- /**
- * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
- */
- BATADV_IF_CLEANUP_KEEP,
-
- /**
- * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after the last slave was
- * removed
- */
- BATADV_IF_CLEANUP_AUTO,
-};
-
extern struct notifier_block batadv_hard_if_notifier;
struct net_device *batadv_get_real_netdev(struct net_device *net_device);
@@ -98,8 +76,7 @@ struct batadv_hard_iface*
batadv_hardif_get_by_netdev(const struct net_device *net_dev);
int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
struct net *net, const char *iface_name);
-void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
- enum batadv_hard_if_cleanup autodel);
+void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface);
int batadv_hardif_min_mtu(struct net_device *soft_iface);
void batadv_update_min_mtu(struct net_device *soft_iface);
void batadv_hardif_release(struct kref *ref);
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
deleted file mode 100644
index 8bdabc03b0b2..000000000000
--- a/net/batman-adv/icmp_socket.c
+++ /dev/null
@@ -1,392 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2007-2020 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- */
-
-#include "icmp_socket.h"
-#include "main.h"
-
-#include <linux/atomic.h>
-#include <linux/compiler.h>
-#include <linux/debugfs.h>
-#include <linux/errno.h>
-#include <linux/etherdevice.h>
-#include <linux/eventpoll.h>
-#include <linux/export.h>
-#include <linux/fcntl.h>
-#include <linux/fs.h>
-#include <linux/gfp.h>
-#include <linux/if_ether.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/pkt_sched.h>
-#include <linux/poll.h>
-#include <linux/printk.h>
-#include <linux/sched.h> /* for linux/wait.h */
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-#include <linux/wait.h>
-#include <uapi/linux/batadv_packet.h>
-
-#include "debugfs.h"
-#include "hard-interface.h"
-#include "log.h"
-#include "originator.h"
-#include "send.h"
-
-static struct batadv_socket_client *batadv_socket_client_hash[256];
-
-static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
- struct batadv_icmp_header *icmph,
- size_t icmp_len);
-
-/**
- * batadv_socket_init() - Initialize soft interface independent socket data
- */
-void batadv_socket_init(void)
-{
- memset(batadv_socket_client_hash, 0, sizeof(batadv_socket_client_hash));
-}
-
-static int batadv_socket_open(struct inode *inode, struct file *file)
-{
- unsigned int i;
- struct batadv_socket_client *socket_client;
-
- if (!try_module_get(THIS_MODULE))
- return -EBUSY;
-
- batadv_debugfs_deprecated(file, "");
-
- stream_open(inode, file);
-
- socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL);
- if (!socket_client) {
- module_put(THIS_MODULE);
- return -ENOMEM;
- }
-
- for (i = 0; i < ARRAY_SIZE(batadv_socket_client_hash); i++) {
- if (!batadv_socket_client_hash[i]) {
- batadv_socket_client_hash[i] = socket_client;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(batadv_socket_client_hash)) {
- pr_err("Error - can't add another packet client: maximum number of clients reached\n");
- kfree(socket_client);
- module_put(THIS_MODULE);
- return -EXFULL;
- }
-
- INIT_LIST_HEAD(&socket_client->queue_list);
- socket_client->queue_len = 0;
- socket_client->index = i;
- socket_client->bat_priv = inode->i_private;
- spin_lock_init(&socket_client->lock);
- init_waitqueue_head(&socket_client->queue_wait);
-
- file->private_data = socket_client;
-
- return 0;
-}
-
-static int batadv_socket_release(struct inode *inode, struct file *file)
-{
- struct batadv_socket_client *client = file->private_data;
- struct batadv_socket_packet *packet, *tmp;
-
- spin_lock_bh(&client->lock);
-
- /* for all packets in the queue ... */
- list_for_each_entry_safe(packet, tmp, &client->queue_list, list) {
- list_del(&packet->list);
- kfree(packet);
- }
-
- batadv_socket_client_hash[client->index] = NULL;
- spin_unlock_bh(&client->lock);
-
- kfree(client);
- module_put(THIS_MODULE);
-
- return 0;
-}
-
-static ssize_t batadv_socket_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct batadv_socket_client *socket_client = file->private_data;
- struct batadv_socket_packet *socket_packet;
- size_t packet_len;
- int error;
-
- if ((file->f_flags & O_NONBLOCK) && socket_client->queue_len == 0)
- return -EAGAIN;
-
- if (!buf || count < sizeof(struct batadv_icmp_packet))
- return -EINVAL;
-
- error = wait_event_interruptible(socket_client->queue_wait,
- socket_client->queue_len);
-
- if (error)
- return error;
-
- spin_lock_bh(&socket_client->lock);
-
- socket_packet = list_first_entry(&socket_client->queue_list,
- struct batadv_socket_packet, list);
- list_del(&socket_packet->list);
- socket_client->queue_len--;
-
- spin_unlock_bh(&socket_client->lock);
-
- packet_len = min(count, socket_packet->icmp_len);
- error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);
-
- kfree(socket_packet);
-
- if (error)
- return -EFAULT;
-
- return packet_len;
-}
-
-static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
- size_t len, loff_t *off)
-{
- struct batadv_socket_client *socket_client = file->private_data;
- struct batadv_priv *bat_priv = socket_client->bat_priv;
- struct batadv_hard_iface *primary_if = NULL;
- struct sk_buff *skb;
- struct batadv_icmp_packet_rr *icmp_packet_rr;
- struct batadv_icmp_header *icmp_header;
- struct batadv_orig_node *orig_node = NULL;
- struct batadv_neigh_node *neigh_node = NULL;
- size_t packet_len = sizeof(struct batadv_icmp_packet);
- u8 *addr;
-
- if (len < sizeof(struct batadv_icmp_header)) {
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: invalid packet size\n");
- return -EINVAL;
- }
-
- primary_if = batadv_primary_if_get_selected(bat_priv);
-
- if (!primary_if) {
- len = -EFAULT;
- goto out;
- }
-
- if (len >= BATADV_ICMP_MAX_PACKET_SIZE)
- packet_len = BATADV_ICMP_MAX_PACKET_SIZE;
- else
- packet_len = len;
-
- skb = netdev_alloc_skb_ip_align(NULL, packet_len + ETH_HLEN);
- if (!skb) {
- len = -ENOMEM;
- goto out;
- }
-
- skb->priority = TC_PRIO_CONTROL;
- skb_reserve(skb, ETH_HLEN);
- icmp_header = skb_put(skb, packet_len);
-
- if (copy_from_user(icmp_header, buff, packet_len)) {
- len = -EFAULT;
- goto free_skb;
- }
-
- if (icmp_header->packet_type != BATADV_ICMP) {
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
- len = -EINVAL;
- goto free_skb;
- }
-
- switch (icmp_header->msg_type) {
- case BATADV_ECHO_REQUEST:
- if (len < sizeof(struct batadv_icmp_packet)) {
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: invalid packet size\n");
- len = -EINVAL;
- goto free_skb;
- }
-
- if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
- goto dst_unreach;
-
- orig_node = batadv_orig_hash_find(bat_priv, icmp_header->dst);
- if (!orig_node)
- goto dst_unreach;
-
- neigh_node = batadv_orig_router_get(orig_node,
- BATADV_IF_DEFAULT);
- if (!neigh_node)
- goto dst_unreach;
-
- if (!neigh_node->if_incoming)
- goto dst_unreach;
-
- if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
- goto dst_unreach;
-
- icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmp_header;
- if (packet_len == sizeof(*icmp_packet_rr)) {
- addr = neigh_node->if_incoming->net_dev->dev_addr;
- ether_addr_copy(icmp_packet_rr->rr[0], addr);
- }
-
- break;
- default:
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: got unknown message type\n");
- len = -EINVAL;
- goto free_skb;
- }
-
- icmp_header->uid = socket_client->index;
-
- if (icmp_header->version != BATADV_COMPAT_VERSION) {
- icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
- icmp_header->version = BATADV_COMPAT_VERSION;
- batadv_socket_add_packet(socket_client, icmp_header,
- packet_len);
- goto free_skb;
- }
-
- ether_addr_copy(icmp_header->orig, primary_if->net_dev->dev_addr);
-
- batadv_send_unicast_skb(skb, neigh_node);
- goto out;
-
-dst_unreach:
- icmp_header->msg_type = BATADV_DESTINATION_UNREACHABLE;
- batadv_socket_add_packet(socket_client, icmp_header, packet_len);
-free_skb:
- kfree_skb(skb);
-out:
- if (primary_if)
- batadv_hardif_put(primary_if);
- if (neigh_node)
- batadv_neigh_node_put(neigh_node);
- if (orig_node)
- batadv_orig_node_put(orig_node);
- return len;
-}
-
-static __poll_t batadv_socket_poll(struct file *file, poll_table *wait)
-{
- struct batadv_socket_client *socket_client = file->private_data;
-
- poll_wait(file, &socket_client->queue_wait, wait);
-
- if (socket_client->queue_len > 0)
- return EPOLLIN | EPOLLRDNORM;
-
- return 0;
-}
-
-static const struct file_operations batadv_fops = {
- .owner = THIS_MODULE,
- .open = batadv_socket_open,
- .release = batadv_socket_release,
- .read = batadv_socket_read,
- .write = batadv_socket_write,
- .poll = batadv_socket_poll,
- .llseek = no_llseek,
-};
-
-/**
- * batadv_socket_setup() - Create debugfs "socket" file
- * @bat_priv: the bat priv with all the soft interface information
- */
-void batadv_socket_setup(struct batadv_priv *bat_priv)
-{
- debugfs_create_file(BATADV_ICMP_SOCKET, 0600, bat_priv->debug_dir,
- bat_priv, &batadv_fops);
-}
-
-/**
- * batadv_socket_add_packet() - schedule an icmp packet to be sent to
- * userspace on an icmp socket.
- * @socket_client: the socket this packet belongs to
- * @icmph: pointer to the header of the icmp packet
- * @icmp_len: total length of the icmp packet
- */
-static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
- struct batadv_icmp_header *icmph,
- size_t icmp_len)
-{
- struct batadv_socket_packet *socket_packet;
- size_t len;
-
- socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
-
- if (!socket_packet)
- return;
-
- len = icmp_len;
- /* check the maximum length before filling the buffer */
- if (len > sizeof(socket_packet->icmp_packet))
- len = sizeof(socket_packet->icmp_packet);
-
- INIT_LIST_HEAD(&socket_packet->list);
- memcpy(&socket_packet->icmp_packet, icmph, len);
- socket_packet->icmp_len = len;
-
- spin_lock_bh(&socket_client->lock);
-
- /* while waiting for the lock the socket_client could have been
- * deleted
- */
- if (!batadv_socket_client_hash[icmph->uid]) {
- spin_unlock_bh(&socket_client->lock);
- kfree(socket_packet);
- return;
- }
-
- list_add_tail(&socket_packet->list, &socket_client->queue_list);
- socket_client->queue_len++;
-
- if (socket_client->queue_len > 100) {
- socket_packet = list_first_entry(&socket_client->queue_list,
- struct batadv_socket_packet,
- list);
-
- list_del(&socket_packet->list);
- kfree(socket_packet);
- socket_client->queue_len--;
- }
-
- spin_unlock_bh(&socket_client->lock);
-
- wake_up(&socket_client->queue_wait);
-}
-
-/**
- * batadv_socket_receive_packet() - schedule an icmp packet to be received
- * locally and sent to userspace.
- * @icmph: pointer to the header of the icmp packet
- * @icmp_len: total length of the icmp packet
- */
-void batadv_socket_receive_packet(struct batadv_icmp_header *icmph,
- size_t icmp_len)
-{
- struct batadv_socket_client *hash;
-
- hash = batadv_socket_client_hash[icmph->uid];
- if (hash)
- batadv_socket_add_packet(hash, icmph, icmp_len);
-}
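
For context, the removed "socket" file was driven from userspace (e.g. by
batctl ping) with plain read()/write() calls: write one batadv_icmp_packet,
then block in read() for the queued reply. A hedged userspace sketch, assuming
the default debugfs mount point and an installed uapi batadv_packet.h header:

#include <fcntl.h>
#include <unistd.h>
#include <linux/batadv_packet.h>

/* send an echo request and wait for the reply on the deprecated socket */
static int example_ping(struct batadv_icmp_packet *pkt)
{
	const char *path = "/sys/kernel/debug/batman_adv/bat0/socket";
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;

	if (write(fd, pkt, sizeof(*pkt)) != (ssize_t)sizeof(*pkt) ||
	    read(fd, pkt, sizeof(*pkt)) != (ssize_t)sizeof(*pkt)) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}
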
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
deleted file mode 100644
index 6abd0f4742ef..000000000000
--- a/net/batman-adv/icmp_socket.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2020 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- */
-
-#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
-#define _NET_BATMAN_ADV_ICMP_SOCKET_H_
-
-#include "main.h"
-
-#include <linux/types.h>
-#include <uapi/linux/batadv_packet.h>
-
-#define BATADV_ICMP_SOCKET "socket"
-
-void batadv_socket_setup(struct batadv_priv *bat_priv);
-
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-
-void batadv_socket_init(void);
-void batadv_socket_receive_packet(struct batadv_icmp_header *icmph,
- size_t icmp_len);
-
-#else
-
-static inline void batadv_socket_init(void)
-{
-}
-
-static inline void
-batadv_socket_receive_packet(struct batadv_icmp_header *icmph, size_t icmp_len)
-{
-}
-
-#endif
-
-#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index c0ca5fbe5b08..b7e9923b11a2 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -7,214 +7,10 @@
#include "log.h"
#include "main.h"
-#include <linux/compiler.h>
-#include <linux/debugfs.h>
-#include <linux/errno.h>
-#include <linux/eventpoll.h>
-#include <linux/export.h>
-#include <linux/fcntl.h>
-#include <linux/fs.h>
-#include <linux/gfp.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/sched.h> /* for linux/wait.h */
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/stddef.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/wait.h>
#include <stdarg.h>
-#include "debugfs.h"
#include "trace.h"
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-
-#define BATADV_LOG_BUFF_MASK (batadv_log_buff_len - 1)
-
-static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
-
-static char *batadv_log_char_addr(struct batadv_priv_debug_log *debug_log,
- size_t idx)
-{
- return &debug_log->log_buff[idx & BATADV_LOG_BUFF_MASK];
-}
-
-static void batadv_emit_log_char(struct batadv_priv_debug_log *debug_log,
- char c)
-{
- char *char_addr;
-
- char_addr = batadv_log_char_addr(debug_log, debug_log->log_end);
- *char_addr = c;
- debug_log->log_end++;
-
- if (debug_log->log_end - debug_log->log_start > batadv_log_buff_len)
- debug_log->log_start = debug_log->log_end - batadv_log_buff_len;
-}
-
-__printf(2, 3)
-static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
- const char *fmt, ...)
-{
- va_list args;
- static char debug_log_buf[256];
- char *p;
-
- if (!debug_log)
- return 0;
-
- spin_lock_bh(&debug_log->lock);
- va_start(args, fmt);
- vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
- va_end(args);
-
- for (p = debug_log_buf; *p != 0; p++)
- batadv_emit_log_char(debug_log, *p);
-
- spin_unlock_bh(&debug_log->lock);
-
- wake_up(&debug_log->queue_wait);
-
- return 0;
-}
-
-static int batadv_log_open(struct inode *inode, struct file *file)
-{
- if (!try_module_get(THIS_MODULE))
- return -EBUSY;
-
- batadv_debugfs_deprecated(file,
- "Use tracepoint batadv:batadv_dbg instead\n");
-
- stream_open(inode, file);
- file->private_data = inode->i_private;
- return 0;
-}
-
-static int batadv_log_release(struct inode *inode, struct file *file)
-{
- module_put(THIS_MODULE);
- return 0;
-}
-
-static bool batadv_log_empty(struct batadv_priv_debug_log *debug_log)
-{
- return !(debug_log->log_start - debug_log->log_end);
-}
-
-static ssize_t batadv_log_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct batadv_priv *bat_priv = file->private_data;
- struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
- int error, i = 0;
- char *char_addr;
- char c;
-
- if ((file->f_flags & O_NONBLOCK) && batadv_log_empty(debug_log))
- return -EAGAIN;
-
- if (!buf)
- return -EINVAL;
-
- if (count == 0)
- return 0;
-
- if (!access_ok(buf, count))
- return -EFAULT;
-
- error = wait_event_interruptible(debug_log->queue_wait,
- (!batadv_log_empty(debug_log)));
-
- if (error)
- return error;
-
- spin_lock_bh(&debug_log->lock);
-
- while ((!error) && (i < count) &&
- (debug_log->log_start != debug_log->log_end)) {
- char_addr = batadv_log_char_addr(debug_log,
- debug_log->log_start);
- c = *char_addr;
-
- debug_log->log_start++;
-
- spin_unlock_bh(&debug_log->lock);
-
- error = __put_user(c, buf);
-
- spin_lock_bh(&debug_log->lock);
-
- buf++;
- i++;
- }
-
- spin_unlock_bh(&debug_log->lock);
-
- if (!error)
- return i;
-
- return error;
-}
-
-static __poll_t batadv_log_poll(struct file *file, poll_table *wait)
-{
- struct batadv_priv *bat_priv = file->private_data;
- struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
-
- poll_wait(file, &debug_log->queue_wait, wait);
-
- if (!batadv_log_empty(debug_log))
- return EPOLLIN | EPOLLRDNORM;
-
- return 0;
-}
-
-static const struct file_operations batadv_log_fops = {
- .open = batadv_log_open,
- .release = batadv_log_release,
- .read = batadv_log_read,
- .poll = batadv_log_poll,
- .llseek = no_llseek,
- .owner = THIS_MODULE,
-};
-
-/**
- * batadv_debug_log_setup() - Initialize debug log
- * @bat_priv: the bat priv with all the soft interface information
- *
- * Return: 0 on success or negative error number in case of failure
- */
-int batadv_debug_log_setup(struct batadv_priv *bat_priv)
-{
- bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
- if (!bat_priv->debug_log)
- return -ENOMEM;
-
- spin_lock_init(&bat_priv->debug_log->lock);
- init_waitqueue_head(&bat_priv->debug_log->queue_wait);
-
- debugfs_create_file("log", 0400, bat_priv->debug_dir, bat_priv,
- &batadv_log_fops);
- return 0;
-}
-
-/**
- * batadv_debug_log_cleanup() - Destroy debug log
- * @bat_priv: the bat priv with all the soft interface information
- */
-void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
-{
- kfree(bat_priv->debug_log);
- bat_priv->debug_log = NULL;
-}
-
-#endif /* CONFIG_BATMAN_ADV_DEBUGFS */
-
/**
* batadv_debug_log() - Add debug log entry
* @bat_priv: the bat priv with all the soft interface information
@@ -232,11 +28,6 @@ int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- batadv_fdebug_log(bat_priv->debug_log, "[%10u] %pV",
- jiffies_to_msecs(jiffies), &vaf);
-#endif
-
trace_batadv_dbg(bat_priv, &vaf);
va_end(args);
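
After this change batadv_debug_log() feeds exclusively into the
batadv:batadv_dbg tracepoint via the %pV / struct va_format mechanism. A
minimal sketch of that idiom, with a hypothetical consumer that prints instead
of tracing:

#include <linux/kernel.h>
#include <linux/printk.h>

static __printf(1, 2) void example_log(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	/* "%pV" lets printk expand the packaged format + args in one call */
	pr_debug("example: %pV", &vaf);

	va_end(args);
}
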
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 70fee9b42e25..ed9d87ce3407 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -23,12 +23,12 @@
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -44,12 +44,10 @@
#include "bat_iv_ogm.h"
#include "bat_v.h"
#include "bridge_loop_avoidance.h"
-#include "debugfs.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
-#include "icmp_socket.h"
#include "log.h"
#include "multicast.h"
#include "netlink.h"
@@ -113,9 +111,6 @@ static int __init batadv_init(void)
if (!batadv_event_workqueue)
goto err_create_wq;
- batadv_socket_init();
- batadv_debugfs_init();
-
register_netdevice_notifier(&batadv_hard_if_notifier);
rtnl_link_register(&batadv_link_ops);
batadv_netlink_register();
@@ -133,7 +128,6 @@ err_create_wq:
static void __exit batadv_exit(void)
{
- batadv_debugfs_destroy();
batadv_netlink_unregister();
rtnl_link_unregister(&batadv_link_ops);
unregister_netdevice_notifier(&batadv_hard_if_notifier);
@@ -305,44 +299,6 @@ bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
return is_my_mac;
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_seq_print_text_primary_if_get() - called from a debugfs table printing
- * function that requires the primary interface
- * @seq: debugfs table seq_file struct
- *
- * Return: primary interface if found or NULL otherwise.
- */
-struct batadv_hard_iface *
-batadv_seq_print_text_primary_if_get(struct seq_file *seq)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hard_iface *primary_if;
-
- primary_if = batadv_primary_if_get_selected(bat_priv);
-
- if (!primary_if) {
- seq_printf(seq,
- "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
- net_dev->name);
- goto out;
- }
-
- if (primary_if->if_status == BATADV_IF_ACTIVE)
- goto out;
-
- seq_printf(seq,
- "BATMAN mesh %s disabled - primary interface not active\n",
- net_dev->name);
- batadv_hardif_put(primary_if);
- primary_if = NULL;
-
-out:
- return primary_if;
-}
-#endif
-
/**
* batadv_max_header_len() - calculate maximum encapsulation overhead for a
* payload packet
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index a47dc332d796..288201630ceb 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2020.4"
+#define BATADV_SOURCE_VERSION "2021.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -212,7 +212,6 @@ enum batadv_uev_type {
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <uapi/linux/batadv_packet.h>
@@ -243,8 +242,6 @@ extern struct workqueue_struct *batadv_event_workqueue;
int batadv_mesh_init(struct net_device *soft_iface);
void batadv_mesh_free(struct net_device *soft_iface);
bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr);
-struct batadv_hard_iface *
-batadv_seq_print_text_primary_if_get(struct seq_file *seq);
int batadv_max_header_len(void);
void batadv_skb_set_priority(struct sk_buff *skb, int offset);
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 9af99c39b9fd..854e5ff28a3f 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -33,7 +33,6 @@
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -2074,116 +2073,6 @@ void batadv_mcast_init(struct batadv_priv *bat_priv)
batadv_mcast_start_timer(bat_priv);
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
- * @bat_priv: the bat priv with all the soft interface information
- * @seq: debugfs table seq_file struct
- *
- * Prints our own multicast flags, including a more specific reason why
- * they are set (i.e. the bridge and querier state), to the debugfs
- * table specified via @seq.
- */
-static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
- struct seq_file *seq)
-{
- struct batadv_mcast_mla_flags *mla_flags = &bat_priv->mcast.mla_flags;
- char querier4, querier6, shadowing4, shadowing6;
- bool bridged = mla_flags->bridged;
- u8 flags = mla_flags->tvlv_flags;
-
- if (bridged) {
- querier4 = mla_flags->querier_ipv4.exists ? '.' : '4';
- querier6 = mla_flags->querier_ipv6.exists ? '.' : '6';
- shadowing4 = mla_flags->querier_ipv4.shadowing ? '4' : '.';
- shadowing6 = mla_flags->querier_ipv6.shadowing ? '6' : '.';
- } else {
- querier4 = '?';
- querier6 = '?';
- shadowing4 = '?';
- shadowing6 = '?';
- }
-
- seq_printf(seq, "Multicast flags (own flags: [%c%c%c%s%s])\n",
- (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
- (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
- (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
- !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
- !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
- seq_printf(seq, "* Bridged [U]\t\t\t\t%c\n", bridged ? 'U' : '.');
- seq_printf(seq, "* No IGMP/MLD Querier [4/6]:\t\t%c/%c\n",
- querier4, querier6);
- seq_printf(seq, "* Shadowing IGMP/MLD Querier [4/6]:\t%c/%c\n",
- shadowing4, shadowing6);
- seq_puts(seq, "-------------------------------------------\n");
- seq_printf(seq, " %-10s %s\n", "Originator", "Flags");
-}
-
-/**
- * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
- * @seq: seq file to print on
- * @offset: not used
- *
- * This prints a table of (primary) originators and their corresponding
- * multicast flags, including (in the header) our own.
- *
- * Return: always 0
- */
-int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hard_iface *primary_if;
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct batadv_orig_node *orig_node;
- struct hlist_head *head;
- u8 flags;
- u32 i;
-
- primary_if = batadv_seq_print_text_primary_if_get(seq);
- if (!primary_if)
- return 0;
-
- batadv_mcast_flags_print_header(bat_priv, seq);
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
- &orig_node->capa_initialized))
- continue;
-
- if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
- &orig_node->capabilities)) {
- seq_printf(seq, "%pM -\n", orig_node->orig);
- continue;
- }
-
- flags = orig_node->mcast_flags;
-
- seq_printf(seq, "%pM [%c%c%c%s%s]\n", orig_node->orig,
- (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)
- ? 'U' : '.',
- (flags & BATADV_MCAST_WANT_ALL_IPV4)
- ? '4' : '.',
- (flags & BATADV_MCAST_WANT_ALL_IPV6)
- ? '6' : '.',
- !(flags & BATADV_MCAST_WANT_NO_RTR4)
- ? "R4" : ". ",
- !(flags & BATADV_MCAST_WANT_NO_RTR6)
- ? "R6" : ". ");
- }
- rcu_read_unlock();
- }
-
- batadv_hardif_put(primary_if);
-
- return 0;
-}
-#endif
-
/**
* batadv_mcast_mesh_info_put() - put multicast info into a netlink message
* @msg: buffer for the message
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 3e114bc5ca3b..d61593d02072 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -10,7 +10,6 @@
#include "main.h"
#include <linux/netlink.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
/**
@@ -56,8 +55,6 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
void batadv_mcast_init(struct batadv_priv *bat_priv);
-int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset);
-
int batadv_mcast_mesh_info_put(struct sk_buff *msg,
struct batadv_priv *bat_priv);
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index c7a55647b520..97bcf149633d 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
+#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 61ddd6d709a0..0cec108b7a99 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -11,7 +11,6 @@
#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
-#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
@@ -30,7 +29,6 @@
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -39,7 +37,6 @@
#include <linux/workqueue.h>
#include <uapi/linux/batadv_packet.h>
-#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "originator.h"
@@ -1876,87 +1873,3 @@ void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash, NULL);
batadv_hash_destroy(bat_priv->nc.decoding_hash);
}
-
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_nc_nodes_seq_print_text() - print the nc node information
- * @seq: seq file to print on
- * @offset: not used
- *
- * Return: always 0
- */
-int batadv_nc_nodes_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct batadv_hard_iface *primary_if;
- struct hlist_head *head;
- struct batadv_orig_node *orig_node;
- struct batadv_nc_node *nc_node;
- int i;
-
- primary_if = batadv_seq_print_text_primary_if_get(seq);
- if (!primary_if)
- goto out;
-
- /* Traverse list of originators */
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- /* For each orig_node in this bin */
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- /* no need to print the orig node if it does not have
- * network coding neighbors
- */
- if (list_empty(&orig_node->in_coding_list) &&
- list_empty(&orig_node->out_coding_list))
- continue;
-
- seq_printf(seq, "Node: %pM\n", orig_node->orig);
-
- seq_puts(seq, " Ingoing: ");
- /* For each in_nc_node to this orig_node */
- list_for_each_entry_rcu(nc_node,
- &orig_node->in_coding_list,
- list)
- seq_printf(seq, "%pM ",
- nc_node->addr);
- seq_puts(seq, "\n Outgoing: ");
- /* For each out_nc_node from this orig_node */
- list_for_each_entry_rcu(nc_node,
- &orig_node->out_coding_list,
- list)
- seq_printf(seq, "%pM ",
- nc_node->addr);
- seq_puts(seq, "\n\n");
- }
- rcu_read_unlock();
- }
-
-out:
- if (primary_if)
- batadv_hardif_put(primary_if);
- return 0;
-}
-
-/**
- * batadv_nc_init_debugfs() - create nc folder and related files in debugfs
- * @bat_priv: the bat priv with all the soft interface information
- */
-void batadv_nc_init_debugfs(struct batadv_priv *bat_priv)
-{
- struct dentry *nc_dir;
-
- nc_dir = debugfs_create_dir("nc", bat_priv->debug_dir);
-
- debugfs_create_u8("min_tq", 0644, nc_dir, &bat_priv->nc.min_tq);
-
- debugfs_create_u32("max_fwd_delay", 0644, nc_dir,
- &bat_priv->nc.max_fwd_delay);
-
- debugfs_create_u32("max_buffer_time", 0644, nc_dir,
- &bat_priv->nc.max_buffer_time);
-}
-#endif
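
The removed batadv_nc_init_debugfs() relied on the debugfs simple-attribute
helpers, which expose a plain integer as a read/write file without custom
file_operations. A hedged sketch of that pattern with hypothetical names:

#include <linux/debugfs.h>
#include <linux/types.h>

static u32 example_max_delay;

static void example_init_debugfs(struct dentry *parent)
{
	struct dentry *dir = debugfs_create_dir("example", parent);

	/* read/write decimal view of the variable; no open/read fops needed */
	debugfs_create_u32("max_delay", 0644, dir, &example_max_delay);
}
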
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 334289084127..8fb2c01e7837 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -10,7 +10,6 @@
#include "main.h"
#include <linux/netdevice.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <uapi/linux/batadv_packet.h>
@@ -38,8 +37,6 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
struct sk_buff *skb);
void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
struct sk_buff *skb);
-int batadv_nc_nodes_seq_print_text(struct seq_file *seq, void *offset);
-void batadv_nc_init_debugfs(struct batadv_priv *bat_priv);
#else /* ifdef CONFIG_BATMAN_ADV_NC */
@@ -104,16 +101,6 @@ batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
{
}
-static inline int batadv_nc_nodes_seq_print_text(struct seq_file *seq,
- void *offset)
-{
- return 0;
-}
-
-static inline void batadv_nc_init_debugfs(struct batadv_priv *bat_priv)
-{
-}
-
#endif /* ifdef CONFIG_BATMAN_ADV_NC */
#endif /* _NET_BATMAN_ADV_NETWORK_CODING_H_ */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 805d8969bdfb..77431e59b228 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -20,7 +20,6 @@
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -733,42 +732,6 @@ batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
return batadv_neigh_node_create(orig_node, hard_iface, neigh_addr);
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_hardif_neigh_seq_print_text() - print the single hop neighbour list
- * @seq: neighbour table seq_file struct
- * @offset: not used
- *
- * Return: always 0
- */
-int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hard_iface *primary_if;
-
- primary_if = batadv_seq_print_text_primary_if_get(seq);
- if (!primary_if)
- return 0;
-
- seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
- BATADV_SOURCE_VERSION, primary_if->net_dev->name,
- primary_if->net_dev->dev_addr, net_dev->name,
- bat_priv->algo_ops->name);
-
- batadv_hardif_put(primary_if);
-
- if (!bat_priv->algo_ops->neigh.print) {
- seq_puts(seq,
- "No printing function for this routing protocol\n");
- return 0;
- }
-
- bat_priv->algo_ops->neigh.print(bat_priv, seq);
- return 0;
-}
-#endif
-
/**
* batadv_hardif_neigh_dump() - Dump to netlink the neighbor infos for a
* specific outgoing interface
@@ -1382,90 +1345,6 @@ static void batadv_purge_orig(struct work_struct *work)
msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-
-/**
- * batadv_orig_seq_print_text() - Print the originator table in a seq file
- * @seq: seq file to print on
- * @offset: not used
- *
- * Return: always 0
- */
-int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hard_iface *primary_if;
-
- primary_if = batadv_seq_print_text_primary_if_get(seq);
- if (!primary_if)
- return 0;
-
- seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
- BATADV_SOURCE_VERSION, primary_if->net_dev->name,
- primary_if->net_dev->dev_addr, net_dev->name,
- bat_priv->algo_ops->name);
-
- batadv_hardif_put(primary_if);
-
- if (!bat_priv->algo_ops->orig.print) {
- seq_puts(seq,
- "No printing function for this routing protocol\n");
- return 0;
- }
-
- bat_priv->algo_ops->orig.print(bat_priv, seq, BATADV_IF_DEFAULT);
-
- return 0;
-}
-
-/**
- * batadv_orig_hardif_seq_print_text() - writes originator infos for a specific
- * outgoing interface
- * @seq: debugfs table seq_file struct
- * @offset: not used
- *
- * Return: 0
- */
-int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_hard_iface *hard_iface;
- struct batadv_priv *bat_priv;
-
- hard_iface = batadv_hardif_get_by_netdev(net_dev);
-
- if (!hard_iface || !hard_iface->soft_iface) {
- seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
- goto out;
- }
-
- bat_priv = netdev_priv(hard_iface->soft_iface);
- if (!bat_priv->algo_ops->orig.print) {
- seq_puts(seq,
- "No printing function for this routing protocol\n");
- goto out;
- }
-
- if (hard_iface->if_status != BATADV_IF_ACTIVE) {
- seq_puts(seq, "Interface not active\n");
- goto out;
- }
-
- seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
- BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
- hard_iface->net_dev->dev_addr,
- hard_iface->soft_iface->name, bat_priv->algo_ops->name);
-
- bat_priv->algo_ops->orig.print(bat_priv, seq, hard_iface);
-
-out:
- if (hard_iface)
- batadv_hardif_put(hard_iface);
- return 0;
-}
-#endif
-
/**
* batadv_orig_dump() - Dump to netlink the originator infos for a specific
* outgoing interface
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 7bc01c138b3a..e75d4c4d11f5 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -13,7 +13,6 @@
#include <linux/if_ether.h>
#include <linux/jhash.h>
#include <linux/netlink.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/types.h>
@@ -46,7 +45,6 @@ batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo);
int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb);
-int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset);
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
@@ -56,9 +54,7 @@ batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
struct batadv_hard_iface *if_outgoing);
void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo);
-int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
-int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
unsigned short vid);
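With the seq_file printers removed, the originator and single-hop neighbour tables are reachable only through the batadv generic netlink family (batadv_orig_dump() and batadv_hardif_neigh_dump() above). The following is a minimal userspace sketch of that replacement path, not part of this patch: it assumes libnl-3 and the BATADV_* constants from include/uapi/linux/batman_adv.h, "bat0" is a placeholder, and error handling is elided.

    #include <stdio.h>
    #include <net/if.h>
    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/batman_adv.h>

    /* invoked once per originator entry in the dump */
    static int orig_cb(struct nl_msg *msg, void *arg)
    {
        struct nlattr *attrs[BATADV_ATTR_MAX + 1];

        if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, BATADV_ATTR_MAX, NULL))
            return NL_SKIP;

        if (attrs[BATADV_ATTR_ORIG_ADDRESS]) {
            unsigned char *a = nla_data(attrs[BATADV_ATTR_ORIG_ADDRESS]);

            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   a[0], a[1], a[2], a[3], a[4], a[5]);
        }

        return NL_OK;
    }

    int main(void)
    {
        struct nl_sock *sock = nl_socket_alloc();
        struct nl_msg *msg;
        int family;

        genl_connect(sock);
        family = genl_ctrl_resolve(sock, BATADV_NL_NAME);
        nl_socket_modify_cb(sock, NL_CB_VALID, NL_CB_CUSTOM, orig_cb, NULL);

        msg = nlmsg_alloc();
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
                    NLM_F_DUMP, BATADV_CMD_GET_ORIGINATORS, 1);
        nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, if_nametoindex("bat0"));

        nl_send_auto(sock, msg);
        nlmsg_free(msg);
        nl_recvmsgs_default(sock);
        nl_socket_free(sock);

        return 0;
    }

This is essentially the pattern current batctl uses for its originator table output.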
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 9e5c71e406ff..49cbca4aa428 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -29,7 +29,6 @@
#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "hard-interface.h"
-#include "icmp_socket.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
@@ -227,15 +226,6 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
icmph = (struct batadv_icmp_header *)skb->data;
switch (icmph->msg_type) {
- case BATADV_ECHO_REPLY:
- case BATADV_DESTINATION_UNREACHABLE:
- case BATADV_TTL_EXCEEDED:
- /* receive the packet */
- if (skb_linearize(skb) < 0)
- break;
-
- batadv_socket_receive_packet(icmph, skb->len);
- break;
case BATADV_ECHO_REQUEST:
/* answer echo request (ping) */
primary_if = batadv_primary_if_get_selected(bat_priv);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 82e7ca886605..97118efbe678 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -30,7 +30,6 @@
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/socket.h>
@@ -38,12 +37,12 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
-#include "debugfs.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "hard-interface.h"
@@ -51,7 +50,6 @@
#include "network-coding.h"
#include "originator.h"
#include "send.h"
-#include "sysfs.h"
#include "translation-table.h"
/**
@@ -574,7 +572,6 @@ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
{
struct batadv_softif_vlan *vlan;
- int err;
spin_lock_bh(&bat_priv->softif_vlan_list_lock);
@@ -601,19 +598,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
- /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
- * sleeping behavior of the sysfs functions and the fs_reclaim lock
- */
- err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
- if (err) {
- /* ref for the function */
- batadv_softif_vlan_put(vlan);
-
- /* ref for the list */
- batadv_softif_vlan_put(vlan);
- return err;
- }
-
/* add a new TT local entry. This one will be marked with the NOPURGE
* flag
*/
@@ -641,7 +625,6 @@ static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
vlan->vid, "vlan interface destroyed", false);
- batadv_sysfs_del_vlan(bat_priv, vlan);
batadv_softif_vlan_put(vlan);
}
@@ -661,7 +644,6 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
{
struct batadv_priv *bat_priv = netdev_priv(dev);
struct batadv_softif_vlan *vlan;
- int ret;
/* only 802.1Q vlans are supported.
* batman-adv does not know how to handle other types
@@ -681,17 +663,6 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
if (!vlan)
return batadv_softif_create_vlan(bat_priv, vid);
- /* recreate the sysfs object if it was already destroyed (and it should
- * be since we received a kill_vid() for this vlan)
- */
- if (!vlan->kobj) {
- ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
- if (ret) {
- batadv_softif_vlan_put(vlan);
- return ret;
- }
- }
-
/* add a new TT local entry. This one will be marked with the NOPURGE
* flag. This must be added again, even if the vlan object already
* exists, because the entry was deleted by kill_vid()
@@ -845,22 +816,18 @@ static int batadv_softif_init_late(struct net_device *dev)
batadv_nc_init_bat_priv(bat_priv);
- ret = batadv_algo_select(bat_priv, batadv_routing_algo);
- if (ret < 0)
- goto free_bat_counters;
-
- ret = batadv_debugfs_add_meshif(dev);
- if (ret < 0)
- goto free_bat_counters;
+ if (!bat_priv->algo_ops) {
+ ret = batadv_algo_select(bat_priv, batadv_routing_algo);
+ if (ret < 0)
+ goto free_bat_counters;
+ }
ret = batadv_mesh_init(dev);
if (ret < 0)
- goto unreg_debugfs;
+ goto free_bat_counters;
return 0;
-unreg_debugfs:
- batadv_debugfs_del_meshif(dev);
free_bat_counters:
free_percpu(bat_priv->bat_counters);
bat_priv->bat_counters = NULL;
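The new algo_ops check exists because register_netdevice() runs .ndo_init (this function) after batadv_softif_newlink() below may already have called batadv_algo_select() for IFLA_BATADV_ALGO_NAME; the module-wide batadv_routing_algo default is therefore applied only when no algorithm was picked on the rtnetlink path.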
@@ -914,7 +881,7 @@ static int batadv_softif_slave_del(struct net_device *dev,
if (!hard_iface || hard_iface->soft_iface != dev)
goto out;
- batadv_hardif_disable_interface(hard_iface, BATADV_IF_CLEANUP_KEEP);
+ batadv_hardif_disable_interface(hard_iface);
ret = 0;
out:
@@ -1037,7 +1004,6 @@ static const struct ethtool_ops batadv_ethtool_ops = {
*/
static void batadv_softif_free(struct net_device *dev)
{
- batadv_debugfs_del_meshif(dev);
batadv_mesh_free(dev);
/* some scheduled RCU callbacks need the bat_priv struct to accomplish
@@ -1074,6 +1040,59 @@ static void batadv_softif_init_early(struct net_device *dev)
}
/**
+ * batadv_softif_validate() - validate configuration of new batadv link
+ * @tb: IFLA_* netlink attributes
+ * @data: IFLA_INFO_DATA netlink attributes (enum batadv_ifla_attrs)
+ * @extack: extended ACK report struct
+ *
+ * Return: 0 if successful or error otherwise.
+ */
+static int batadv_softif_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct batadv_algo_ops *algo_ops;
+
+ if (!data)
+ return 0;
+
+ if (data[IFLA_BATADV_ALGO_NAME]) {
+ algo_ops = batadv_algo_get(nla_data(data[IFLA_BATADV_ALGO_NAME]));
+ if (!algo_ops)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * batadv_softif_newlink() - pre-initialize and register new batadv link
+ * @src_net: the applicable net namespace
+ * @dev: network device to register
+ * @tb: IFLA_* netlink attributes
+ * @data: IFLA_INFO_DATA netlink attributes (enum batadv_ifla_attrs)
+ * @extack: extended ACK report struct
+ *
+ * Return: 0 if successful or error otherwise.
+ */
+static int batadv_softif_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ const char *algo_name;
+ int err;
+
+ if (data && data[IFLA_BATADV_ALGO_NAME]) {
+ algo_name = nla_data(data[IFLA_BATADV_ALGO_NAME]);
+ err = batadv_algo_select(bat_priv, algo_name);
+ if (err)
+ return -EINVAL;
+ }
+
+ return register_netdevice(dev);
+}
+
+/**
* batadv_softif_create() - Create and register soft interface
* @net: the applicable net namespace
* @name: name of the new soft interface
@@ -1106,28 +1125,6 @@ struct net_device *batadv_softif_create(struct net *net, const char *name)
}
/**
- * batadv_softif_destroy_sysfs() - deletion of batadv_soft_interface via sysfs
- * @soft_iface: the to-be-removed batman-adv interface
- */
-void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
-{
- struct batadv_priv *bat_priv = netdev_priv(soft_iface);
- struct batadv_softif_vlan *vlan;
-
- ASSERT_RTNL();
-
- /* destroy the "untagged" VLAN */
- vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
- if (vlan) {
- batadv_softif_destroy_vlan(bat_priv, vlan);
- batadv_softif_vlan_put(vlan);
- }
-
- batadv_sysfs_del_meshif(soft_iface);
- unregister_netdevice(soft_iface);
-}
-
-/**
* batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
* netlink
* @soft_iface: the to-be-removed batman-adv interface
@@ -1142,8 +1139,7 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface == soft_iface)
- batadv_hardif_disable_interface(hard_iface,
- BATADV_IF_CLEANUP_KEEP);
+ batadv_hardif_disable_interface(hard_iface);
}
/* destroy the "untagged" VLAN */
@@ -1153,7 +1149,6 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
batadv_softif_vlan_put(vlan);
}
- batadv_sysfs_del_meshif(soft_iface);
unregister_netdevice_queue(soft_iface, head);
}
@@ -1171,9 +1166,17 @@ bool batadv_softif_is_valid(const struct net_device *net_dev)
return false;
}
+static const struct nla_policy batadv_ifla_policy[IFLA_BATADV_MAX + 1] = {
+ [IFLA_BATADV_ALGO_NAME] = { .type = NLA_NUL_STRING },
+};
+
struct rtnl_link_ops batadv_link_ops __read_mostly = {
.kind = "batadv",
.priv_size = sizeof(struct batadv_priv),
.setup = batadv_softif_init_early,
+ .maxtype = IFLA_BATADV_MAX,
+ .policy = batadv_ifla_policy,
+ .validate = batadv_softif_validate,
+ .newlink = batadv_softif_newlink,
.dellink = batadv_softif_destroy_netlink,
};
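Wiring up .validate/.newlink (plus the nla_policy above) means batadv soft interfaces can now be created directly over rtnetlink, with the routing algorithm chosen per interface at creation time. A minimal userspace sketch, not part of this patch: it assumes libmnl and IFLA_BATADV_ALGO_NAME from include/uapi/linux/batman_adv.h, "bat0" and "BATMAN_IV" are placeholders, and error handling is elided.

    #include <time.h>
    #include <sys/socket.h>
    #include <libmnl/libmnl.h>
    #include <linux/if_link.h>
    #include <linux/rtnetlink.h>
    #include <linux/batman_adv.h>

    int main(void)
    {
        char buf[MNL_SOCKET_BUFFER_SIZE];
        struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
        struct nlattr *linkinfo, *data;
        struct ifinfomsg *ifm;
        struct mnl_socket *nl;

        nlh->nlmsg_type = RTM_NEWLINK;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
        nlh->nlmsg_seq = (unsigned int)time(NULL);

        ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
        ifm->ifi_family = AF_UNSPEC;

        mnl_attr_put_strz(nlh, IFLA_IFNAME, "bat0");

        linkinfo = mnl_attr_nest_start(nlh, IFLA_LINKINFO);
        mnl_attr_put_strz(nlh, IFLA_INFO_KIND, "batadv");

        data = mnl_attr_nest_start(nlh, IFLA_INFO_DATA);
        /* checked by batadv_softif_validate(), consumed by _newlink();
         * omitting it keeps the module default algorithm
         */
        mnl_attr_put_strz(nlh, IFLA_BATADV_ALGO_NAME, "BATMAN_IV");
        mnl_attr_nest_end(nlh, data);
        mnl_attr_nest_end(nlh, linkinfo);

        nl = mnl_socket_open(NETLINK_ROUTE);
        mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);
        mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
        mnl_socket_close(nl);

        return 0;
    }

With matching iproute2 support the same operation collapses to something like "ip link add bat0 type batadv"; the exact option syntax for selecting the algorithm depends on the iproute2 version.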
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 534e08d6ad91..74716d9ca4f6 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -20,7 +20,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
struct sk_buff *skb, int hdr_size,
struct batadv_orig_node *orig_node);
struct net_device *batadv_softif_create(struct net *net, const char *name);
-void batadv_softif_destroy_sysfs(struct net_device *soft_iface);
bool batadv_softif_is_valid(const struct net_device *net_dev);
extern struct rtnl_link_ops batadv_link_ops;
int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid);
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
deleted file mode 100644
index 0f962dcd239e..000000000000
--- a/net/batman-adv/sysfs.c
+++ /dev/null
@@ -1,1272 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2010-2020 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- */
-
-#include "sysfs.h"
-#include "main.h"
-
-#include <asm/current.h>
-#include <linux/atomic.h>
-#include <linux/compiler.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gfp.h>
-#include <linux/if.h>
-#include <linux/if_vlan.h>
-#include <linux/kernel.h>
-#include <linux/kobject.h>
-#include <linux/kref.h>
-#include <linux/limits.h>
-#include <linux/netdevice.h>
-#include <linux/printk.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/rtnetlink.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <linux/stringify.h>
-#include <linux/workqueue.h>
-#include <uapi/linux/batadv_packet.h>
-#include <uapi/linux/batman_adv.h>
-
-#include "bridge_loop_avoidance.h"
-#include "distributed-arp-table.h"
-#include "gateway_client.h"
-#include "gateway_common.h"
-#include "hard-interface.h"
-#include "log.h"
-#include "netlink.h"
-#include "network-coding.h"
-#include "soft-interface.h"
-
-/**
- * batadv_sysfs_deprecated() - Log use of deprecated batadv sysfs access
- * @attr: attribute which was accessed
- */
-static void batadv_sysfs_deprecated(struct attribute *attr)
-{
- pr_warn_ratelimited(DEPRECATED "%s (pid %d) Use of sysfs file \"%s\".\nUse batadv genl family instead",
- current->comm, task_pid_nr(current), attr->name);
-}
-
-static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
-{
- struct device *dev = container_of(obj->parent, struct device, kobj);
-
- return to_net_dev(dev);
-}
-
-static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
-{
- struct net_device *net_dev = batadv_kobj_to_netdev(obj);
-
- return netdev_priv(net_dev);
-}
-
-/**
- * batadv_vlan_kobj_to_batpriv() - convert a vlan kobj into the associated batpriv
- * @obj: kobject to convert
- *
- * Return: the associated batadv_priv struct.
- */
-static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
-{
- /* VLAN specific attributes are located in the root sysfs folder if they
- * refer to the untagged VLAN..
- */
- if (!strcmp(BATADV_SYSFS_IF_MESH_SUBDIR, obj->name))
- return batadv_kobj_to_batpriv(obj);
-
- /* ..while the attributes for the tagged vlans are located in
- * the corresponding "vlan%VID" subfolder
- */
- return batadv_kobj_to_batpriv(obj->parent);
-}
-
-/**
- * batadv_kobj_to_vlan() - convert a kobj into the associated softif_vlan struct
- * @bat_priv: the bat priv with all the soft interface information
- * @obj: kobject to convert
- *
- * Return: the associated softif_vlan struct if found, NULL otherwise.
- */
-static struct batadv_softif_vlan *
-batadv_kobj_to_vlan(struct batadv_priv *bat_priv, struct kobject *obj)
-{
- struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
- if (vlan_tmp->kobj != obj)
- continue;
-
- if (!kref_get_unless_zero(&vlan_tmp->refcount))
- continue;
-
- vlan = vlan_tmp;
- break;
- }
- rcu_read_unlock();
-
- return vlan;
-}
-
-/* Use this, if you have customized show and store functions for vlan attrs */
-#define BATADV_ATTR_VLAN(_name, _mode, _show, _store) \
-struct batadv_attribute batadv_attr_vlan_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-}
-
-/* Use this, if you have customized show and store functions */
-#define BATADV_ATTR(_name, _mode, _show, _store) \
-struct batadv_attribute batadv_attr_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-}
-
-#define BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func) \
-ssize_t batadv_store_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff, \
- size_t count) \
-{ \
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
- struct batadv_priv *bat_priv = netdev_priv(net_dev); \
- ssize_t length; \
- \
- batadv_sysfs_deprecated(attr); \
- length = __batadv_store_bool_attr(buff, count, _post_func, attr,\
- &bat_priv->_name, net_dev); \
- \
- batadv_netlink_notify_mesh(bat_priv); \
- \
- return length; \
-}
-
-#define BATADV_ATTR_SIF_SHOW_BOOL(_name) \
-ssize_t batadv_show_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff) \
-{ \
- struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
- \
- batadv_sysfs_deprecated(attr); \
- return sprintf(buff, "%s\n", \
- atomic_read(&bat_priv->_name) == 0 ? \
- "disabled" : "enabled"); \
-} \
-
-/* Use this, if you are going to turn a [name] in the soft-interface
- * (bat_priv) on or off
- */
-#define BATADV_ATTR_SIF_BOOL(_name, _mode, _post_func) \
- static BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func) \
- static BATADV_ATTR_SIF_SHOW_BOOL(_name) \
- static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
- batadv_store_##_name)
-
-#define BATADV_ATTR_SIF_STORE_UINT(_name, _var, _min, _max, _post_func) \
-ssize_t batadv_store_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff, \
- size_t count) \
-{ \
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
- struct batadv_priv *bat_priv = netdev_priv(net_dev); \
- ssize_t length; \
- \
- batadv_sysfs_deprecated(attr); \
- length = __batadv_store_uint_attr(buff, count, _min, _max, \
- _post_func, attr, \
- &bat_priv->_var, net_dev, \
- NULL); \
- \
- batadv_netlink_notify_mesh(bat_priv); \
- \
- return length; \
-}
-
-#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
-ssize_t batadv_show_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff) \
-{ \
- struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
- \
- batadv_sysfs_deprecated(attr); \
- return sprintf(buff, "%i\n", atomic_read(&bat_priv->_var)); \
-} \
-
-/* Use this, if you are going to set [name] in the soft-interface
- * (bat_priv) to an unsigned integer value
- */
-#define BATADV_ATTR_SIF_UINT(_name, _var, _mode, _min, _max, _post_func)\
- static BATADV_ATTR_SIF_STORE_UINT(_name, _var, _min, _max, _post_func)\
- static BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
- static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
- batadv_store_##_name)
-
-#define BATADV_ATTR_VLAN_STORE_BOOL(_name, _post_func) \
-ssize_t batadv_store_vlan_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff, \
- size_t count) \
-{ \
- struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
- struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv, \
- kobj); \
- size_t res = __batadv_store_bool_attr(buff, count, _post_func, \
- attr, &vlan->_name, \
- bat_priv->soft_iface); \
- \
- batadv_sysfs_deprecated(attr); \
- if (vlan->vid) \
- batadv_netlink_notify_vlan(bat_priv, vlan); \
- else \
- batadv_netlink_notify_mesh(bat_priv); \
- \
- batadv_softif_vlan_put(vlan); \
- return res; \
-}
-
-#define BATADV_ATTR_VLAN_SHOW_BOOL(_name) \
-ssize_t batadv_show_vlan_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff) \
-{ \
- struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
- struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv, \
- kobj); \
- size_t res = sprintf(buff, "%s\n", \
- atomic_read(&vlan->_name) == 0 ? \
- "disabled" : "enabled"); \
- \
- batadv_sysfs_deprecated(attr); \
- batadv_softif_vlan_put(vlan); \
- return res; \
-}
-
-/* Use this, if you are going to turn a [name] in the vlan struct on or off */
-#define BATADV_ATTR_VLAN_BOOL(_name, _mode, _post_func) \
- static BATADV_ATTR_VLAN_STORE_BOOL(_name, _post_func) \
- static BATADV_ATTR_VLAN_SHOW_BOOL(_name) \
- static BATADV_ATTR_VLAN(_name, _mode, batadv_show_vlan_##_name, \
- batadv_store_vlan_##_name)
-
-#define BATADV_ATTR_HIF_STORE_UINT(_name, _var, _min, _max, _post_func) \
-ssize_t batadv_store_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff, \
- size_t count) \
-{ \
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
- struct batadv_hard_iface *hard_iface; \
- struct batadv_priv *bat_priv; \
- ssize_t length; \
- \
- batadv_sysfs_deprecated(attr); \
- hard_iface = batadv_hardif_get_by_netdev(net_dev); \
- if (!hard_iface) \
- return 0; \
- \
- length = __batadv_store_uint_attr(buff, count, _min, _max, \
- _post_func, attr, \
- &hard_iface->_var, \
- hard_iface->soft_iface, \
- net_dev); \
- \
- if (hard_iface->soft_iface) { \
- bat_priv = netdev_priv(hard_iface->soft_iface); \
- batadv_netlink_notify_hardif(bat_priv, hard_iface); \
- } \
- \
- batadv_hardif_put(hard_iface); \
- return length; \
-}
-
-#define BATADV_ATTR_HIF_SHOW_UINT(_name, _var) \
-ssize_t batadv_show_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff) \
-{ \
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
- struct batadv_hard_iface *hard_iface; \
- ssize_t length; \
- \
- batadv_sysfs_deprecated(attr); \
- hard_iface = batadv_hardif_get_by_netdev(net_dev); \
- if (!hard_iface) \
- return 0; \
- \
- length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_var)); \
- \
- batadv_hardif_put(hard_iface); \
- return length; \
-}
-
-/* Use this, if you are going to set [name] in hard_iface to an
- * unsigned integer value
- */
-#define BATADV_ATTR_HIF_UINT(_name, _var, _mode, _min, _max, _post_func)\
- static BATADV_ATTR_HIF_STORE_UINT(_name, _var, _min, \
- _max, _post_func) \
- static BATADV_ATTR_HIF_SHOW_UINT(_name, _var) \
- static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
- batadv_store_##_name)
-
-static int batadv_store_bool_attr(char *buff, size_t count,
- struct net_device *net_dev,
- const char *attr_name, atomic_t *attr,
- bool *changed)
-{
- int enabled = -1;
-
- *changed = false;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if ((strncmp(buff, "1", 2) == 0) ||
- (strncmp(buff, "enable", 7) == 0) ||
- (strncmp(buff, "enabled", 8) == 0))
- enabled = 1;
-
- if ((strncmp(buff, "0", 2) == 0) ||
- (strncmp(buff, "disable", 8) == 0) ||
- (strncmp(buff, "disabled", 9) == 0))
- enabled = 0;
-
- if (enabled < 0) {
- batadv_info(net_dev, "%s: Invalid parameter received: %s\n",
- attr_name, buff);
- return -EINVAL;
- }
-
- if (atomic_read(attr) == enabled)
- return count;
-
- batadv_info(net_dev, "%s: Changing from: %s to: %s\n", attr_name,
- atomic_read(attr) == 1 ? "enabled" : "disabled",
- enabled == 1 ? "enabled" : "disabled");
-
- *changed = true;
-
- atomic_set(attr, (unsigned int)enabled);
- return count;
-}
-
-static inline ssize_t
-__batadv_store_bool_attr(char *buff, size_t count,
- void (*post_func)(struct net_device *),
- struct attribute *attr,
- atomic_t *attr_store, struct net_device *net_dev)
-{
- bool changed;
- int ret;
-
- ret = batadv_store_bool_attr(buff, count, net_dev, attr->name,
- attr_store, &changed);
- if (post_func && changed)
- post_func(net_dev);
-
- return ret;
-}
-
-static int batadv_store_uint_attr(const char *buff, size_t count,
- struct net_device *net_dev,
- struct net_device *slave_dev,
- const char *attr_name,
- unsigned int min, unsigned int max,
- atomic_t *attr)
-{
- char ifname[IFNAMSIZ + 3] = "";
- unsigned long uint_val;
- int ret;
-
- ret = kstrtoul(buff, 10, &uint_val);
- if (ret) {
- batadv_info(net_dev, "%s: Invalid parameter received: %s\n",
- attr_name, buff);
- return -EINVAL;
- }
-
- if (uint_val < min) {
- batadv_info(net_dev, "%s: Value is too small: %lu min: %u\n",
- attr_name, uint_val, min);
- return -EINVAL;
- }
-
- if (uint_val > max) {
- batadv_info(net_dev, "%s: Value is too big: %lu max: %u\n",
- attr_name, uint_val, max);
- return -EINVAL;
- }
-
- if (atomic_read(attr) == uint_val)
- return count;
-
- if (slave_dev)
- snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
-
- batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
- attr_name, ifname, atomic_read(attr), uint_val);
-
- atomic_set(attr, uint_val);
- return count;
-}
-
-static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
- int min, int max,
- void (*post_func)(struct net_device *),
- const struct attribute *attr,
- atomic_t *attr_store,
- struct net_device *net_dev,
- struct net_device *slave_dev)
-{
- int ret;
-
- ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
- attr->name, min, max, attr_store);
- if (post_func && ret)
- post_func(net_dev);
-
- return ret;
-}
-
-static ssize_t batadv_show_bat_algo(struct kobject *kobj,
- struct attribute *attr, char *buff)
-{
- struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
-
- batadv_sysfs_deprecated(attr);
- return sprintf(buff, "%s\n", bat_priv->algo_ops->name);
-}
-
-static void batadv_post_gw_reselect(struct net_device *net_dev)
-{
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
-
- batadv_gw_reselect(bat_priv);
-}
-
-static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
- int bytes_written;
-
- batadv_sysfs_deprecated(attr);
-
- /* GW mode is not available if the routing algorithm in use does not
- * implement the GW API
- */
- if (!bat_priv->algo_ops->gw.get_best_gw_node ||
- !bat_priv->algo_ops->gw.is_eligible)
- return -ENOENT;
-
- switch (atomic_read(&bat_priv->gw.mode)) {
- case BATADV_GW_MODE_CLIENT:
- bytes_written = sprintf(buff, "%s\n",
- BATADV_GW_MODE_CLIENT_NAME);
- break;
- case BATADV_GW_MODE_SERVER:
- bytes_written = sprintf(buff, "%s\n",
- BATADV_GW_MODE_SERVER_NAME);
- break;
- default:
- bytes_written = sprintf(buff, "%s\n",
- BATADV_GW_MODE_OFF_NAME);
- break;
- }
-
- return bytes_written;
-}
-
-static ssize_t batadv_store_gw_mode(struct kobject *kobj,
- struct attribute *attr, char *buff,
- size_t count)
-{
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- char *curr_gw_mode_str;
- int gw_mode_tmp = -1;
-
- batadv_sysfs_deprecated(attr);
-
- /* toggling GW mode is allowed only if the routing algorithm in use
- * provides the GW API
- */
- if (!bat_priv->algo_ops->gw.get_best_gw_node ||
- !bat_priv->algo_ops->gw.is_eligible)
- return -EINVAL;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if (strncmp(buff, BATADV_GW_MODE_OFF_NAME,
- strlen(BATADV_GW_MODE_OFF_NAME)) == 0)
- gw_mode_tmp = BATADV_GW_MODE_OFF;
-
- if (strncmp(buff, BATADV_GW_MODE_CLIENT_NAME,
- strlen(BATADV_GW_MODE_CLIENT_NAME)) == 0)
- gw_mode_tmp = BATADV_GW_MODE_CLIENT;
-
- if (strncmp(buff, BATADV_GW_MODE_SERVER_NAME,
- strlen(BATADV_GW_MODE_SERVER_NAME)) == 0)
- gw_mode_tmp = BATADV_GW_MODE_SERVER;
-
- if (gw_mode_tmp < 0) {
- batadv_info(net_dev,
- "Invalid parameter for 'gw mode' setting received: %s\n",
- buff);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->gw.mode) == gw_mode_tmp)
- return count;
-
- switch (atomic_read(&bat_priv->gw.mode)) {
- case BATADV_GW_MODE_CLIENT:
- curr_gw_mode_str = BATADV_GW_MODE_CLIENT_NAME;
- break;
- case BATADV_GW_MODE_SERVER:
- curr_gw_mode_str = BATADV_GW_MODE_SERVER_NAME;
- break;
- default:
- curr_gw_mode_str = BATADV_GW_MODE_OFF_NAME;
- break;
- }
-
- batadv_info(net_dev, "Changing gw mode from: %s to: %s\n",
- curr_gw_mode_str, buff);
-
- /* Invoking batadv_gw_reselect() is not enough to really de-select the
- * current GW. It will only instruct the gateway client code to perform
- * a re-election the next time that this is needed.
- *
- * When gw client mode is being switched off the current GW must be
- * de-selected explicitly otherwise no GW_ADD uevent is thrown on
- * client mode re-activation. This operation is performed in
- * batadv_gw_check_client_stop().
- */
- batadv_gw_reselect(bat_priv);
- /* always call batadv_gw_check_client_stop() before changing the gateway
- * state
- */
- batadv_gw_check_client_stop(bat_priv);
- atomic_set(&bat_priv->gw.mode, (unsigned int)gw_mode_tmp);
- batadv_gw_tvlv_container_update(bat_priv);
-
- batadv_netlink_notify_mesh(bat_priv);
-
- return count;
-}
-
-static ssize_t batadv_show_gw_sel_class(struct kobject *kobj,
- struct attribute *attr, char *buff)
-{
- struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
-
- batadv_sysfs_deprecated(attr);
-
- /* GW selection class is not available if the routing algorithm in use
- * does not implement the GW API
- */
- if (!bat_priv->algo_ops->gw.get_best_gw_node ||
- !bat_priv->algo_ops->gw.is_eligible)
- return -ENOENT;
-
- if (bat_priv->algo_ops->gw.show_sel_class)
- return bat_priv->algo_ops->gw.show_sel_class(bat_priv, buff);
-
- return sprintf(buff, "%i\n", atomic_read(&bat_priv->gw.sel_class));
-}
-
-static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
- struct attribute *attr, char *buff,
- size_t count)
-{
- struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
- ssize_t length;
-
- batadv_sysfs_deprecated(attr);
-
- /* setting the GW selection class is allowed only if the routing
- * algorithm in use implements the GW API
- */
- if (!bat_priv->algo_ops->gw.get_best_gw_node ||
- !bat_priv->algo_ops->gw.is_eligible)
- return -EINVAL;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if (bat_priv->algo_ops->gw.store_sel_class)
- return bat_priv->algo_ops->gw.store_sel_class(bat_priv, buff,
- count);
-
- length = __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
- batadv_post_gw_reselect, attr,
- &bat_priv->gw.sel_class,
- bat_priv->soft_iface, NULL);
-
- batadv_netlink_notify_mesh(bat_priv);
-
- return length;
-}
-
-static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
- struct attribute *attr, char *buff)
-{
- struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
- u32 down, up;
-
- batadv_sysfs_deprecated(attr);
-
- down = atomic_read(&bat_priv->gw.bandwidth_down);
- up = atomic_read(&bat_priv->gw.bandwidth_up);
-
- return sprintf(buff, "%u.%u/%u.%u MBit\n", down / 10,
- down % 10, up / 10, up % 10);
-}
-
-static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
- struct attribute *attr, char *buff,
- size_t count)
-{
- struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
- ssize_t length;
-
- batadv_sysfs_deprecated(attr);
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- length = batadv_gw_bandwidth_set(net_dev, buff, count);
-
- batadv_netlink_notify_mesh(bat_priv);
-
- return length;
-}
-
-/**
- * batadv_show_isolation_mark() - print the current isolation mark/mask
- * @kobj: kobject representing the private mesh sysfs directory
- * @attr: the batman-adv attribute the user is interacting with
- * @buff: the buffer that will contain the data to send back to the user
- *
- * Return: the number of bytes written into 'buff' on success or a negative
- * error code in case of failure
- */
-static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
- struct attribute *attr, char *buff)
-{
- struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
-
- batadv_sysfs_deprecated(attr);
- return sprintf(buff, "%#.8x/%#.8x\n", bat_priv->isolation_mark,
- bat_priv->isolation_mark_mask);
-}
-
-/**
- * batadv_store_isolation_mark() - parse and store the isolation mark/mask
- * entered by the user
- * @kobj: kobject representing the private mesh sysfs directory
- * @attr: the batman-adv attribute the user is interacting with
- * @buff: the buffer containing the user data
- * @count: number of bytes in the buffer
- *
- * Return: 'count' on success or a negative error code in case of failure
- */
-static ssize_t batadv_store_isolation_mark(struct kobject *kobj,
- struct attribute *attr, char *buff,
- size_t count)
-{
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- u32 mark, mask;
- char *mask_ptr;
-
- batadv_sysfs_deprecated(attr);
-
- /* parse the mask if it has been specified, otherwise assume the mask is
- * the biggest possible
- */
- mask = 0xFFFFFFFF;
- mask_ptr = strchr(buff, '/');
- if (mask_ptr) {
- *mask_ptr = '\0';
- mask_ptr++;
-
- /* the mask must be entered in hex base as it is going to be a
- * bitmask and not a prefix length
- */
- if (kstrtou32(mask_ptr, 16, &mask) < 0)
- return -EINVAL;
- }
-
- /* the mark can be entered in any base */
- if (kstrtou32(buff, 0, &mark) < 0)
- return -EINVAL;
-
- bat_priv->isolation_mark_mask = mask;
- /* erase bits not covered by the mask */
- bat_priv->isolation_mark = mark & bat_priv->isolation_mark_mask;
-
- batadv_info(net_dev,
- "New skb mark for extended isolation: %#.8x/%#.8x\n",
- bat_priv->isolation_mark, bat_priv->isolation_mark_mask);
-
- batadv_netlink_notify_mesh(bat_priv);
-
- return count;
-}
-
-BATADV_ATTR_SIF_BOOL(aggregated_ogms, 0644, NULL);
-BATADV_ATTR_SIF_BOOL(bonding, 0644, NULL);
-#ifdef CONFIG_BATMAN_ADV_BLA
-BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, 0644, batadv_bla_status_update);
-#endif
-#ifdef CONFIG_BATMAN_ADV_DAT
-BATADV_ATTR_SIF_BOOL(distributed_arp_table, 0644, batadv_dat_status_update);
-#endif
-BATADV_ATTR_SIF_BOOL(fragmentation, 0644, batadv_update_min_mtu);
-static BATADV_ATTR(routing_algo, 0444, batadv_show_bat_algo, NULL);
-static BATADV_ATTR(gw_mode, 0644, batadv_show_gw_mode, batadv_store_gw_mode);
-BATADV_ATTR_SIF_UINT(orig_interval, orig_interval, 0644, 2 * BATADV_JITTER,
- INT_MAX, NULL);
-BATADV_ATTR_SIF_UINT(hop_penalty, hop_penalty, 0644, 0, BATADV_TQ_MAX_VALUE,
- NULL);
-static BATADV_ATTR(gw_sel_class, 0644, batadv_show_gw_sel_class,
- batadv_store_gw_sel_class);
-static BATADV_ATTR(gw_bandwidth, 0644, batadv_show_gw_bwidth,
- batadv_store_gw_bwidth);
-#ifdef CONFIG_BATMAN_ADV_MCAST
-BATADV_ATTR_SIF_BOOL(multicast_mode, 0644, NULL);
-#endif
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-BATADV_ATTR_SIF_UINT(log_level, log_level, 0644, 0, BATADV_DBG_ALL, NULL);
-#endif
-#ifdef CONFIG_BATMAN_ADV_NC
-BATADV_ATTR_SIF_BOOL(network_coding, 0644, batadv_nc_status_update);
-#endif
-static BATADV_ATTR(isolation_mark, 0644, batadv_show_isolation_mark,
- batadv_store_isolation_mark);
-
-static struct batadv_attribute *batadv_mesh_attrs[] = {
- &batadv_attr_aggregated_ogms,
- &batadv_attr_bonding,
-#ifdef CONFIG_BATMAN_ADV_BLA
- &batadv_attr_bridge_loop_avoidance,
-#endif
-#ifdef CONFIG_BATMAN_ADV_DAT
- &batadv_attr_distributed_arp_table,
-#endif
-#ifdef CONFIG_BATMAN_ADV_MCAST
- &batadv_attr_multicast_mode,
-#endif
- &batadv_attr_fragmentation,
- &batadv_attr_routing_algo,
- &batadv_attr_gw_mode,
- &batadv_attr_orig_interval,
- &batadv_attr_hop_penalty,
- &batadv_attr_gw_sel_class,
- &batadv_attr_gw_bandwidth,
-#ifdef CONFIG_BATMAN_ADV_DEBUG
- &batadv_attr_log_level,
-#endif
-#ifdef CONFIG_BATMAN_ADV_NC
- &batadv_attr_network_coding,
-#endif
- &batadv_attr_isolation_mark,
- NULL,
-};
-
-BATADV_ATTR_VLAN_BOOL(ap_isolation, 0644, NULL);
-
-/* array of vlan specific sysfs attributes */
-static struct batadv_attribute *batadv_vlan_attrs[] = {
- &batadv_attr_vlan_ap_isolation,
- NULL,
-};
-
-/**
- * batadv_sysfs_add_meshif() - Add soft interface specific sysfs entries
- * @dev: netdev struct of the soft interface
- *
- * Return: 0 on success or negative error number in case of failure
- */
-int batadv_sysfs_add_meshif(struct net_device *dev)
-{
- struct kobject *batif_kobject = &dev->dev.kobj;
- struct batadv_priv *bat_priv = netdev_priv(dev);
- struct batadv_attribute **bat_attr;
- int err;
-
- bat_priv->mesh_obj = kobject_create_and_add(BATADV_SYSFS_IF_MESH_SUBDIR,
- batif_kobject);
- if (!bat_priv->mesh_obj) {
- batadv_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
- BATADV_SYSFS_IF_MESH_SUBDIR);
- goto out;
- }
-
- for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr) {
- err = sysfs_create_file(bat_priv->mesh_obj,
- &((*bat_attr)->attr));
- if (err) {
- batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
- dev->name, BATADV_SYSFS_IF_MESH_SUBDIR,
- ((*bat_attr)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-
-rem_attr:
- for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
-
- kobject_uevent(bat_priv->mesh_obj, KOBJ_REMOVE);
- kobject_del(bat_priv->mesh_obj);
- kobject_put(bat_priv->mesh_obj);
- bat_priv->mesh_obj = NULL;
-out:
- return -ENOMEM;
-}
-
-/**
- * batadv_sysfs_del_meshif() - Remove soft interface specific sysfs entries
- * @dev: netdev struct of the soft interface
- */
-void batadv_sysfs_del_meshif(struct net_device *dev)
-{
- struct batadv_priv *bat_priv = netdev_priv(dev);
- struct batadv_attribute **bat_attr;
-
- for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
-
- kobject_uevent(bat_priv->mesh_obj, KOBJ_REMOVE);
- kobject_del(bat_priv->mesh_obj);
- kobject_put(bat_priv->mesh_obj);
- bat_priv->mesh_obj = NULL;
-}
-
-/**
- * batadv_sysfs_add_vlan() - add all the needed sysfs objects for the new vlan
- * @dev: netdev of the mesh interface
- * @vlan: private data of the newly added VLAN interface
- *
- * Return: 0 on success and -ENOMEM if any of the structure allocations fails.
- */
-int batadv_sysfs_add_vlan(struct net_device *dev,
- struct batadv_softif_vlan *vlan)
-{
- char vlan_subdir[sizeof(BATADV_SYSFS_VLAN_SUBDIR_PREFIX) + 5];
- struct batadv_priv *bat_priv = netdev_priv(dev);
- struct batadv_attribute **bat_attr;
- int err;
-
- if (vlan->vid & BATADV_VLAN_HAS_TAG) {
- sprintf(vlan_subdir, BATADV_SYSFS_VLAN_SUBDIR_PREFIX "%hu",
- vlan->vid & VLAN_VID_MASK);
-
- vlan->kobj = kobject_create_and_add(vlan_subdir,
- bat_priv->mesh_obj);
- if (!vlan->kobj) {
- batadv_err(dev, "Can't add sysfs directory: %s/%s\n",
- dev->name, vlan_subdir);
- goto out;
- }
- } else {
- /* the untagged VLAN uses the root folder to store its "VLAN
- * specific attributes"
- */
- vlan->kobj = bat_priv->mesh_obj;
- kobject_get(bat_priv->mesh_obj);
- }
-
- for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr) {
- err = sysfs_create_file(vlan->kobj,
- &((*bat_attr)->attr));
- if (err) {
- batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
- dev->name, vlan_subdir,
- ((*bat_attr)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-
-rem_attr:
- for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
-
- if (vlan->kobj != bat_priv->mesh_obj) {
- kobject_uevent(vlan->kobj, KOBJ_REMOVE);
- kobject_del(vlan->kobj);
- }
- kobject_put(vlan->kobj);
- vlan->kobj = NULL;
-out:
- return -ENOMEM;
-}
-
-/**
- * batadv_sysfs_del_vlan() - remove all the sysfs objects for a given VLAN
- * @bat_priv: the bat priv with all the soft interface information
- * @vlan: the private data of the VLAN to destroy
- */
-void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
- struct batadv_softif_vlan *vlan)
-{
- struct batadv_attribute **bat_attr;
-
- for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
-
- if (vlan->kobj != bat_priv->mesh_obj) {
- kobject_uevent(vlan->kobj, KOBJ_REMOVE);
- kobject_del(vlan->kobj);
- }
- kobject_put(vlan->kobj);
- vlan->kobj = NULL;
-}
-
-static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
- struct attribute *attr, char *buff)
-{
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
- struct batadv_hard_iface *hard_iface;
- ssize_t length;
- const char *ifname;
-
- batadv_sysfs_deprecated(attr);
-
- hard_iface = batadv_hardif_get_by_netdev(net_dev);
- if (!hard_iface)
- return 0;
-
- if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
- ifname = "none";
- else
- ifname = hard_iface->soft_iface->name;
-
- length = sprintf(buff, "%s\n", ifname);
-
- batadv_hardif_put(hard_iface);
-
- return length;
-}
-
-/**
- * batadv_store_mesh_iface_finish() - store new hardif mesh_iface state
- * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
- * @ifname: name of soft-interface to modify
- *
- * Changes the parts of the hard+soft interface which cannot be modified under
- * sysfs lock (to prevent deadlock situations).
- *
- * Return: 0 on success, < 0 on failure
- */
-static int batadv_store_mesh_iface_finish(struct net_device *net_dev,
- char ifname[IFNAMSIZ])
-{
- struct net *net = dev_net(net_dev);
- struct batadv_hard_iface *hard_iface;
- int status_tmp;
- int ret = 0;
-
- ASSERT_RTNL();
-
- hard_iface = batadv_hardif_get_by_netdev(net_dev);
- if (!hard_iface)
- return 0;
-
- if (strncmp(ifname, "none", 4) == 0)
- status_tmp = BATADV_IF_NOT_IN_USE;
- else
- status_tmp = BATADV_IF_I_WANT_YOU;
-
- if (hard_iface->if_status == status_tmp)
- goto out;
-
- if (hard_iface->soft_iface &&
- strncmp(hard_iface->soft_iface->name, ifname, IFNAMSIZ) == 0)
- goto out;
-
- if (status_tmp == BATADV_IF_NOT_IN_USE) {
- batadv_hardif_disable_interface(hard_iface,
- BATADV_IF_CLEANUP_AUTO);
- goto out;
- }
-
- /* if the interface is already in use */
- if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
- batadv_hardif_disable_interface(hard_iface,
- BATADV_IF_CLEANUP_AUTO);
-
- ret = batadv_hardif_enable_interface(hard_iface, net, ifname);
-out:
- batadv_hardif_put(hard_iface);
- return ret;
-}
-
-/**
- * batadv_store_mesh_iface_work() - store new hardif mesh_iface state
- * @work: work queue item
- *
- * Changes the parts of the hard+soft interface which cannot be modified under
- * sysfs lock (to prevent deadlock situations).
- */
-static void batadv_store_mesh_iface_work(struct work_struct *work)
-{
- struct batadv_store_mesh_work *store_work;
- int ret;
-
- store_work = container_of(work, struct batadv_store_mesh_work, work);
-
- rtnl_lock();
- ret = batadv_store_mesh_iface_finish(store_work->net_dev,
- store_work->soft_iface_name);
- rtnl_unlock();
-
- if (ret < 0)
- pr_err("Failed to store new mesh_iface state %s for %s: %d\n",
- store_work->soft_iface_name, store_work->net_dev->name,
- ret);
-
- dev_put(store_work->net_dev);
- kfree(store_work);
-}
-
-static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
- struct attribute *attr, char *buff,
- size_t count)
-{
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
- struct batadv_store_mesh_work *store_work;
-
- batadv_sysfs_deprecated(attr);
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if (strlen(buff) >= IFNAMSIZ) {
- pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
- buff);
- return -EINVAL;
- }
-
- store_work = kmalloc(sizeof(*store_work), GFP_KERNEL);
- if (!store_work)
- return -ENOMEM;
-
- dev_hold(net_dev);
- INIT_WORK(&store_work->work, batadv_store_mesh_iface_work);
- store_work->net_dev = net_dev;
- strscpy(store_work->soft_iface_name, buff,
- sizeof(store_work->soft_iface_name));
-
- queue_work(batadv_event_workqueue, &store_work->work);
-
- return count;
-}
-
-static ssize_t batadv_show_iface_status(struct kobject *kobj,
- struct attribute *attr, char *buff)
-{
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
- struct batadv_hard_iface *hard_iface;
- ssize_t length;
-
- batadv_sysfs_deprecated(attr);
-
- hard_iface = batadv_hardif_get_by_netdev(net_dev);
- if (!hard_iface)
- return 0;
-
- switch (hard_iface->if_status) {
- case BATADV_IF_TO_BE_REMOVED:
- length = sprintf(buff, "disabling\n");
- break;
- case BATADV_IF_INACTIVE:
- length = sprintf(buff, "inactive\n");
- break;
- case BATADV_IF_ACTIVE:
- length = sprintf(buff, "active\n");
- break;
- case BATADV_IF_TO_BE_ACTIVATED:
- length = sprintf(buff, "enabling\n");
- break;
- case BATADV_IF_NOT_IN_USE:
- default:
- length = sprintf(buff, "not in use\n");
- break;
- }
-
- batadv_hardif_put(hard_iface);
-
- return length;
-}
-
-#ifdef CONFIG_BATMAN_ADV_BATMAN_V
-
-/**
- * batadv_store_throughput_override() - parse and store throughput override
- * entered by the user
- * @kobj: kobject representing the private mesh sysfs directory
- * @attr: the batman-adv attribute the user is interacting with
- * @buff: the buffer containing the user data
- * @count: number of bytes in the buffer
- *
- * Return: 'count' on success or a negative error code in case of failure
- */
-static ssize_t batadv_store_throughput_override(struct kobject *kobj,
- struct attribute *attr,
- char *buff, size_t count)
-{
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
- struct batadv_hard_iface *hard_iface;
- struct batadv_priv *bat_priv;
- u32 tp_override;
- u32 old_tp_override;
- bool ret;
-
- batadv_sysfs_deprecated(attr);
-
- hard_iface = batadv_hardif_get_by_netdev(net_dev);
- if (!hard_iface)
- return -EINVAL;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- ret = batadv_parse_throughput(net_dev, buff, "throughput_override",
- &tp_override);
- if (!ret)
- goto out;
-
- old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
- if (old_tp_override == tp_override)
- goto out;
-
- batadv_info(hard_iface->soft_iface,
- "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
- "throughput_override", net_dev->name,
- old_tp_override / 10, old_tp_override % 10,
- tp_override / 10, tp_override % 10);
-
- atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
-
- if (hard_iface->soft_iface) {
- bat_priv = netdev_priv(hard_iface->soft_iface);
- batadv_netlink_notify_hardif(bat_priv, hard_iface);
- }
-
-out:
- batadv_hardif_put(hard_iface);
- return count;
-}
-
-static ssize_t batadv_show_throughput_override(struct kobject *kobj,
- struct attribute *attr,
- char *buff)
-{
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
- struct batadv_hard_iface *hard_iface;
- u32 tp_override;
-
- batadv_sysfs_deprecated(attr);
-
- hard_iface = batadv_hardif_get_by_netdev(net_dev);
- if (!hard_iface)
- return -EINVAL;
-
- tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
-
- batadv_hardif_put(hard_iface);
- return sprintf(buff, "%u.%u MBit\n", tp_override / 10,
- tp_override % 10);
-}
-
-#endif
-
-static BATADV_ATTR(mesh_iface, 0644, batadv_show_mesh_iface,
- batadv_store_mesh_iface);
-static BATADV_ATTR(iface_status, 0444, batadv_show_iface_status, NULL);
-#ifdef CONFIG_BATMAN_ADV_BATMAN_V
-BATADV_ATTR_HIF_UINT(elp_interval, bat_v.elp_interval, 0644,
- 2 * BATADV_JITTER, INT_MAX, NULL);
-static BATADV_ATTR(throughput_override, 0644, batadv_show_throughput_override,
- batadv_store_throughput_override);
-#endif
-
-static struct batadv_attribute *batadv_batman_attrs[] = {
- &batadv_attr_mesh_iface,
- &batadv_attr_iface_status,
-#ifdef CONFIG_BATMAN_ADV_BATMAN_V
- &batadv_attr_elp_interval,
- &batadv_attr_throughput_override,
-#endif
- NULL,
-};
-
-/**
- * batadv_sysfs_add_hardif() - Add hard interface specific sysfs entries
- * @hardif_obj: address where the pointer to the new sysfs folder is stored
- * @dev: netdev struct of the hard interface
- *
- * Return: 0 on success or negative error number in case of failure
- */
-int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
-{
- struct kobject *hardif_kobject = &dev->dev.kobj;
- struct batadv_attribute **bat_attr;
- int err;
-
- *hardif_obj = kobject_create_and_add(BATADV_SYSFS_IF_BAT_SUBDIR,
- hardif_kobject);
-
- if (!*hardif_obj) {
- batadv_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
- BATADV_SYSFS_IF_BAT_SUBDIR);
- goto out;
- }
-
- for (bat_attr = batadv_batman_attrs; *bat_attr; ++bat_attr) {
- err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
- if (err) {
- batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
- dev->name, BATADV_SYSFS_IF_BAT_SUBDIR,
- ((*bat_attr)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-
-rem_attr:
- for (bat_attr = batadv_batman_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
-out:
- return -ENOMEM;
-}
-
-/**
- * batadv_sysfs_del_hardif() - Remove hard interface specific sysfs entries
- * @hardif_obj: address of the pointer storing the batman-adv sysfs folder of
- * the hard interface
- */
-void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
-{
- kobject_uevent(*hardif_obj, KOBJ_REMOVE);
- kobject_del(*hardif_obj);
- kobject_put(*hardif_obj);
- *hardif_obj = NULL;
-}
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
deleted file mode 100644
index d987f8b30a98..000000000000
--- a/net/batman-adv/sysfs.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2010-2020 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- */
-
-#ifndef _NET_BATMAN_ADV_SYSFS_H_
-#define _NET_BATMAN_ADV_SYSFS_H_
-
-#include "main.h"
-
-#include <linux/kobject.h>
-#include <linux/netdevice.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-
-#define BATADV_SYSFS_IF_MESH_SUBDIR "mesh"
-#define BATADV_SYSFS_IF_BAT_SUBDIR "batman_adv"
-/**
- * BATADV_SYSFS_VLAN_SUBDIR_PREFIX - prefix of the subfolder that will be
- * created in the sysfs hierarchy for each VLAN interface. The subfolder will
- * be named "BATADV_SYSFS_VLAN_SUBDIR_PREFIX%vid".
- */
-#define BATADV_SYSFS_VLAN_SUBDIR_PREFIX "vlan"
-
-/**
- * struct batadv_attribute - sysfs export helper for batman-adv attributes
- */
-struct batadv_attribute {
- /** @attr: sysfs attribute file */
- struct attribute attr;
-
- /**
- * @show: function to export the current attribute's content to sysfs
- */
- ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
- char *buf);
-
- /**
- * @store: function to load new value from character buffer and save it
- * in batman-adv attribute
- */
- ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
- char *buf, size_t count);
-};
-
-#ifdef CONFIG_BATMAN_ADV_SYSFS
-
-int batadv_sysfs_add_meshif(struct net_device *dev);
-void batadv_sysfs_del_meshif(struct net_device *dev);
-int batadv_sysfs_add_hardif(struct kobject **hardif_obj,
- struct net_device *dev);
-void batadv_sysfs_del_hardif(struct kobject **hardif_obj);
-int batadv_sysfs_add_vlan(struct net_device *dev,
- struct batadv_softif_vlan *vlan);
-void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
- struct batadv_softif_vlan *vlan);
-
-#else
-
-static inline int batadv_sysfs_add_meshif(struct net_device *dev)
-{
- return 0;
-}
-
-static inline void batadv_sysfs_del_meshif(struct net_device *dev)
-{
-}
-
-static inline int batadv_sysfs_add_hardif(struct kobject **hardif_obj,
- struct net_device *dev)
-{
- return 0;
-}
-
-static inline void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
-{
-}
-
-static inline int batadv_sysfs_add_vlan(struct net_device *dev,
- struct batadv_softif_vlan *vlan)
-{
- return 0;
-}
-
-static inline void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
- struct batadv_softif_vlan *vlan)
-{
-}
-
-#endif
-
-#endif /* _NET_BATMAN_ADV_SYSFS_H_ */
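Nothing exposed by the removed sysfs interface is lost: the mesh, VLAN and hard-interface tunables all have netlink equivalents (BATADV_CMD_SET_MESH / _SET_VLAN / _SET_HARDIF with the matching BATADV_ATTR_* attributes), which the batadv_netlink_notify_*() calls above were already keeping in sync with sysfs writes. As a fragment continuing the earlier libnl sketch (same assumptions), what used to be "echo 2000 > .../mesh/orig_interval" becomes:

    msg = nlmsg_alloc();
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                BATADV_CMD_SET_MESH, 1);
    nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, if_nametoindex("bat0"));
    nla_put_u32(msg, BATADV_ATTR_ORIG_INTERVAL, 2000);  /* milliseconds */
    nl_send_auto(sock, msg);
    nlmsg_free(msg);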
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index db7e3774825b..d4e10005df6c 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -23,6 +23,7 @@
#include <linux/kthread.h>
#include <linux/limits.h>
#include <linux/list.h>
+#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/param.h>
#include <linux/printk.h>
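The new <linux/minmax.h> include reflects the 5.10 split of the min()/max()/min_t() helper family out of <linux/kernel.h>; tp_meter's window arithmetic relies on these helpers. An illustrative fragment only, with hypothetical variable names, not a line from this patch:

    /* type-checked clamp of the congestion window to the advertised one */
    cwnd = min_t(u32, cwnd + increment, awnd);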
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 98a0aaaf0d50..cd09916f97fe 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -30,7 +30,6 @@
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -1062,84 +1061,6 @@ container_register:
kfree(tt_data);
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-
-/**
- * batadv_tt_local_seq_print_text() - Print the local tt table in a seq file
- * @seq: seq file to print on
- * @offset: not used
- *
- * Return: always 0
- */
-int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->tt.local_hash;
- struct batadv_tt_common_entry *tt_common_entry;
- struct batadv_tt_local_entry *tt_local;
- struct batadv_hard_iface *primary_if;
- struct hlist_head *head;
- u32 i;
- int last_seen_secs;
- int last_seen_msecs;
- unsigned long last_seen_jiffies;
- bool no_purge;
- u16 np_flag = BATADV_TT_CLIENT_NOPURGE;
-
- primary_if = batadv_seq_print_text_primary_if_get(seq);
- if (!primary_if)
- goto out;
-
- seq_printf(seq,
- "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
- net_dev->name, (u8)atomic_read(&bat_priv->tt.vn));
- seq_puts(seq,
- " Client VID Flags Last seen (CRC )\n");
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry,
- head, hash_entry) {
- tt_local = container_of(tt_common_entry,
- struct batadv_tt_local_entry,
- common);
- last_seen_jiffies = jiffies - tt_local->last_seen;
- last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
- last_seen_secs = last_seen_msecs / 1000;
- last_seen_msecs = last_seen_msecs % 1000;
-
- no_purge = tt_common_entry->flags & np_flag;
- seq_printf(seq,
- " * %pM %4i [%c%c%c%c%c%c] %3u.%03u (%#.8x)\n",
- tt_common_entry->addr,
- batadv_print_vid(tt_common_entry->vid),
- ((tt_common_entry->flags &
- BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
- no_purge ? 'P' : '.',
- ((tt_common_entry->flags &
- BATADV_TT_CLIENT_NEW) ? 'N' : '.'),
- ((tt_common_entry->flags &
- BATADV_TT_CLIENT_PENDING) ? 'X' : '.'),
- ((tt_common_entry->flags &
- BATADV_TT_CLIENT_WIFI) ? 'W' : '.'),
- ((tt_common_entry->flags &
- BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
- no_purge ? 0 : last_seen_secs,
- no_purge ? 0 : last_seen_msecs,
- tt_local->vlan->tt.crc);
- }
- rcu_read_unlock();
- }
-out:
- if (primary_if)
- batadv_hardif_put(primary_if);
- return 0;
-}
-#endif
-
/**
* batadv_tt_local_dump_entry() - Dump one TT local entry into a message
* @msg: Netlink message to dump into
@@ -1879,139 +1800,6 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv,
return best_entry;
}
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
-/**
- * batadv_tt_global_print_entry() - print all orig nodes who announce the
- * address for this global entry
- * @bat_priv: the bat priv with all the soft interface information
- * @tt_global_entry: global translation table entry to be printed
- * @seq: debugfs table seq_file struct
- *
- * This function assumes the caller holds rcu_read_lock().
- */
-static void
-batadv_tt_global_print_entry(struct batadv_priv *bat_priv,
- struct batadv_tt_global_entry *tt_global_entry,
- struct seq_file *seq)
-{
- struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
- struct batadv_tt_common_entry *tt_common_entry;
- struct batadv_orig_node_vlan *vlan;
- struct hlist_head *head;
- u8 last_ttvn;
- u16 flags;
-
- tt_common_entry = &tt_global_entry->common;
- flags = tt_common_entry->flags;
-
- best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
- if (best_entry) {
- vlan = batadv_orig_node_vlan_get(best_entry->orig_node,
- tt_common_entry->vid);
- if (!vlan) {
- seq_printf(seq,
- " * Cannot retrieve VLAN %d for originator %pM\n",
- batadv_print_vid(tt_common_entry->vid),
- best_entry->orig_node->orig);
- goto print_list;
- }
-
- last_ttvn = atomic_read(&best_entry->orig_node->last_ttvn);
- seq_printf(seq,
- " %c %pM %4i (%3u) via %pM (%3u) (%#.8x) [%c%c%c%c]\n",
- '*', tt_global_entry->common.addr,
- batadv_print_vid(tt_global_entry->common.vid),
- best_entry->ttvn, best_entry->orig_node->orig,
- last_ttvn, vlan->tt.crc,
- ((flags & BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
- ((flags & BATADV_TT_CLIENT_WIFI) ? 'W' : '.'),
- ((flags & BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
- ((flags & BATADV_TT_CLIENT_TEMP) ? 'T' : '.'));
-
- batadv_orig_node_vlan_put(vlan);
- }
-
-print_list:
- head = &tt_global_entry->orig_list;
-
- hlist_for_each_entry_rcu(orig_entry, head, list) {
- if (best_entry == orig_entry)
- continue;
-
- vlan = batadv_orig_node_vlan_get(orig_entry->orig_node,
- tt_common_entry->vid);
- if (!vlan) {
- seq_printf(seq,
- " + Cannot retrieve VLAN %d for originator %pM\n",
- batadv_print_vid(tt_common_entry->vid),
- orig_entry->orig_node->orig);
- continue;
- }
-
- last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
- seq_printf(seq,
- " %c %pM %4d (%3u) via %pM (%3u) (%#.8x) [%c%c%c%c]\n",
- '+', tt_global_entry->common.addr,
- batadv_print_vid(tt_global_entry->common.vid),
- orig_entry->ttvn, orig_entry->orig_node->orig,
- last_ttvn, vlan->tt.crc,
- ((flags & BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
- ((flags & BATADV_TT_CLIENT_WIFI) ? 'W' : '.'),
- ((flags & BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
- ((flags & BATADV_TT_CLIENT_TEMP) ? 'T' : '.'));
-
- batadv_orig_node_vlan_put(vlan);
- }
-}
-
-/**
- * batadv_tt_global_seq_print_text() - Print the global tt table in a seq file
- * @seq: seq file to print on
- * @offset: not used
- *
- * Return: always 0
- */
-int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hashtable *hash = bat_priv->tt.global_hash;
- struct batadv_tt_common_entry *tt_common_entry;
- struct batadv_tt_global_entry *tt_global;
- struct batadv_hard_iface *primary_if;
- struct hlist_head *head;
- u32 i;
-
- primary_if = batadv_seq_print_text_primary_if_get(seq);
- if (!primary_if)
- goto out;
-
- seq_printf(seq,
- "Globally announced TT entries received via the mesh %s\n",
- net_dev->name);
- seq_puts(seq,
- " Client VID (TTVN) Originator (Curr TTVN) (CRC ) Flags\n");
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry,
- head, hash_entry) {
- tt_global = container_of(tt_common_entry,
- struct batadv_tt_global_entry,
- common);
- batadv_tt_global_print_entry(bat_priv, tt_global, seq);
- }
- rcu_read_unlock();
- }
-out:
- if (primary_if)
- batadv_hardif_put(primary_if);
- return 0;
-}
-#endif
-
/**
* batadv_tt_global_dump_subentry() - Dump one TT global sub-entry into a message
* @msg: Netlink message to dump into
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index b24d35b9226a..57192c817229 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -11,7 +11,6 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/types.h>
@@ -21,8 +20,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
u16 batadv_tt_local_remove(struct batadv_priv *bat_priv,
const u8 *addr, unsigned short vid,
const char *message, bool roaming);
-int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
-int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb);
int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb);
void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 965336a3b89d..2f96e96a5ca4 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -21,7 +21,6 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/sched.h> /* for linux/wait.h */
-#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
@@ -187,9 +186,6 @@ struct batadv_hard_iface {
/** @net_dev: pointer to the net_device */
struct net_device *net_dev;
- /** @hardif_obj: kobject of the per interface sysfs "mesh" directory */
- struct kobject *hardif_obj;
-
/** @refcount: number of contexts the object is used */
struct kref refcount;
@@ -222,13 +218,6 @@ struct batadv_hard_iface {
struct batadv_hard_iface_bat_v bat_v;
#endif
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- /**
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- */
- struct dentry *debug_dir;
-#endif
-
/**
* @neigh_list: list of unique single hop neighbors via this interface
*/
@@ -1306,13 +1295,6 @@ struct batadv_priv_nc {
/** @work: work queue callback item for cleanup */
struct delayed_work work;
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- /**
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- */
- struct dentry *debug_dir;
-#endif
-
/**
* @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
*/
@@ -1512,9 +1494,6 @@ struct batadv_softif_vlan {
/** @vid: VLAN identifier */
unsigned short vid;
- /** @kobj: kobject for sysfs vlan subdirectory */
- struct kobject *kobj;
-
/** @ap_isolation: AP isolation state */
atomic_t ap_isolation; /* boolean */
@@ -1667,14 +1646,6 @@ struct batadv_priv {
/** @batman_queue_left: number of remaining OGM packet slots */
atomic_t batman_queue_left;
- /** @mesh_obj: kobject for sysfs mesh subdirectory */
- struct kobject *mesh_obj;
-
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- /** @debug_dir: dentry for debugfs batman-adv subdirectory */
- struct dentry *debug_dir;
-#endif
-
/** @forw_bat_list: list of aggregated OGMs that will be forwarded */
struct hlist_head forw_bat_list;
@@ -2234,11 +2205,6 @@ struct batadv_algo_neigh_ops {
struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2);
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- /** @print: print the single hop neighbor list (optional) */
- void (*print)(struct batadv_priv *priv, struct seq_file *seq);
-#endif
-
/** @dump: dump neighbors to a netlink socket (optional) */
void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *priv,
@@ -2249,12 +2215,6 @@ struct batadv_algo_neigh_ops {
* struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific)
*/
struct batadv_algo_orig_ops {
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- /** @print: print the originator table (optional) */
- void (*print)(struct batadv_priv *priv, struct seq_file *seq,
- struct batadv_hard_iface *hard_iface);
-#endif
-
/** @dump: dump originators to a netlink socket (optional) */
void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *priv,
@@ -2274,10 +2234,6 @@ struct batadv_algo_gw_ops {
*/
ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
size_t count);
-
- /** @show_sel_class: prints the current GW selection class (optional) */
- ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
-
/**
* @get_best_gw_node: select the best GW from the list of available
* nodes (optional)
@@ -2293,11 +2249,6 @@ struct batadv_algo_gw_ops {
struct batadv_orig_node *curr_gw_orig,
struct batadv_orig_node *orig_node);
-#ifdef CONFIG_BATMAN_ADV_DEBUGFS
- /** @print: print the gateway table (optional) */
- void (*print)(struct batadv_priv *bat_priv, struct seq_file *seq);
-#endif
-
/** @dump: dump gateways to a netlink socket (optional) */
void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *priv);
@@ -2456,21 +2407,4 @@ enum batadv_tvlv_handler_flags {
BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
};
-/**
- * struct batadv_store_mesh_work - Work queue item to detach add/del interface
- * from sysfs locks
- */
-struct batadv_store_mesh_work {
- /**
- * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
- */
- struct net_device *net_dev;
-
- /** @soft_iface_name: name of soft-interface to modify */
- char soft_iface_name[IFNAMSIZ];
-
- /** @work: work queue item */
- struct work_struct work;
-};
-
#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index d0c1024bf600..4f1cd8063e72 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -758,6 +758,9 @@ static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
conn = hci_lookup_le_connect(hdev);
+ if (hdev->adv_instance_cnt)
+ hci_req_resume_adv_instances(hdev);
+
if (!status) {
hci_connect_le_scan_cleanup(conn);
goto done;
@@ -1067,10 +1070,11 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
* connections most controllers will refuse to connect if
* advertising is enabled, and for slave role connections we
* anyway have to disable it in order to start directed
- * advertising.
+ * advertising. Any registered advertisements will be
+ * re-enabled after the connection attempt is finished.
*/
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
- __hci_req_disable_advertising(&req);
+ __hci_req_pause_adv_instances(&req);
/* If requested to connect as slave use directed advertising */
if (conn->role == HCI_ROLE_SLAVE) {
@@ -1118,6 +1122,10 @@ create_conn:
err = hci_req_run(&req, create_le_conn_complete);
if (err) {
hci_conn_del(conn);
+
+ if (hdev->adv_instance_cnt)
+ hci_req_resume_adv_instances(hdev);
+
return ERR_PTR(err);
}
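The hunks above bracket the LE connection attempt with an advertising pause: any registered instances are paused before the create-connection request is queued and resumed once the attempt completes or fails. A minimal sketch of that bracketing, assuming hdev->lock is held where the helpers require it (connect_with_adv_paused is a hypothetical wrapper, not a function from this patch):

    /* Sketch: pause adv instances around an LE connect attempt. */
    static int connect_with_adv_paused(struct hci_dev *hdev,
                                       struct hci_request *req)
    {
            int err;

            /* Controllers usually refuse to connect while advertising */
            if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                    __hci_req_pause_adv_instances(req);

            err = hci_req_run(req, create_le_conn_complete);

            /* Resume immediately on failure; the completion handler
             * covers the success path.
             */
            if (err && hdev->adv_instance_cnt)
                    hci_req_resume_adv_instances(hdev);

            return err;
    }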
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 502552d6e9af..9d2c9a1c552f 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -741,6 +741,12 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
}
+ if (hdev->commands[38] & 0x80) {
+ /* Read LE Min/Max Tx Power */
+ hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
+ 0, NULL);
+ }
+
if (hdev->commands[26] & 0x40) {
/* Read LE White List Size */
hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
@@ -763,7 +769,7 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
}
- if (hdev->commands[35] & 0x40) {
+ if (hdev->commands[35] & 0x04) {
__le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
/* Set RPA timeout */
@@ -2951,7 +2957,8 @@ static void adv_instance_rpa_expired(struct work_struct *work)
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
u16 adv_data_len, u8 *adv_data,
u16 scan_rsp_len, u8 *scan_rsp_data,
- u16 timeout, u16 duration)
+ u16 timeout, u16 duration, s8 tx_power,
+ u32 min_interval, u32 max_interval)
{
struct adv_info *adv_instance;
@@ -2979,6 +2986,9 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
adv_instance->flags = flags;
adv_instance->adv_data_len = adv_data_len;
adv_instance->scan_rsp_len = scan_rsp_len;
+ adv_instance->min_interval = min_interval;
+ adv_instance->max_interval = max_interval;
+ adv_instance->tx_power = tx_power;
if (adv_data_len)
memcpy(adv_instance->adv_data, adv_data, adv_data_len);
@@ -2995,8 +3005,6 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
else
adv_instance->duration = duration;
- adv_instance->tx_power = HCI_TX_POWER_INVALID;
-
INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
adv_instance_rpa_expired);
@@ -3006,6 +3014,37 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
}
/* This function requires the caller holds hdev->lock */
+int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
+ u16 adv_data_len, u8 *adv_data,
+ u16 scan_rsp_len, u8 *scan_rsp_data)
+{
+ struct adv_info *adv_instance;
+
+ adv_instance = hci_find_adv_instance(hdev, instance);
+
+ /* If advertisement doesn't exist, we can't modify its data */
+ if (!adv_instance)
+ return -ENOENT;
+
+ if (adv_data_len) {
+ memset(adv_instance->adv_data, 0,
+ sizeof(adv_instance->adv_data));
+ memcpy(adv_instance->adv_data, adv_data, adv_data_len);
+ adv_instance->adv_data_len = adv_data_len;
+ }
+
+ if (scan_rsp_len) {
+ memset(adv_instance->scan_rsp_data, 0,
+ sizeof(adv_instance->scan_rsp_data));
+ memcpy(adv_instance->scan_rsp_data,
+ scan_rsp_data, scan_rsp_len);
+ adv_instance->scan_rsp_len = scan_rsp_len;
+ }
+
+ return 0;
+}
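hci_set_adv_instance_data() deliberately updates only the buffers the caller passes with a non-zero length, so advertising data and scan response data can be replaced independently. A hypothetical caller, assuming hdev->lock is already held:

    /* Illustrative: replace the AD payload of instance 0x01 and leave
     * its scan response untouched (scan_rsp_len == 0 skips that branch).
     */
    static int example_update_adv_payload(struct hci_dev *hdev)
    {
            u8 ad[] = { 0x02, 0x01, 0x06 }; /* Flags AD element */

            return hci_set_adv_instance_data(hdev, 0x01,
                                             sizeof(ad), ad, /* adv data */
                                             0, NULL);       /* keep rsp */
    }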
+
+/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
struct adv_monitor *monitor;
@@ -3592,6 +3631,10 @@ struct hci_dev *hci_alloc_dev(void)
hdev->cur_adv_instance = 0x00;
hdev->adv_instance_timeout = 0;
+ hdev->advmon_allowlist_duration = 300;
+ hdev->advmon_no_filter_duration = 500;
+ hdev->enable_advmon_interleave_scan = 0x00; /* Disabled by default */
+
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
@@ -3623,6 +3666,8 @@ struct hci_dev *hci_alloc_dev(void)
hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
+ hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
+ hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 5e8af2658e44..4626e0289a97 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -494,6 +494,45 @@ static int auto_accept_delay_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
auto_accept_delay_set, "%llu\n");
+static ssize_t force_bredr_smp_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP) ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_bredr_smp_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ err = smp_force_bredr(hdev, enable);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static const struct file_operations force_bredr_smp_fops = {
+ .open = simple_open,
+ .read = force_bredr_smp_read,
+ .write = force_bredr_smp_write,
+ .llseek = default_llseek,
+};
+
static int idle_timeout_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
@@ -589,6 +628,17 @@ void hci_debugfs_create_bredr(struct hci_dev *hdev)
debugfs_create_file("voice_setting", 0444, hdev->debugfs, hdev,
&voice_setting_fops);
+ /* If the controller does not support BR/EDR Secure Connections
+ * feature, then the BR/EDR SMP channel shall not be present.
+ *
+ * To test this with Bluetooth 4.0 controllers, create a debugfs
+ * switch that allows forcing BR/EDR SMP support and accepting
+ * cross-transport pairing on non-AES encrypted connections.
+ */
+ if (!lmp_sc_capable(hdev))
+ debugfs_create_file("force_bredr_smp", 0644, hdev->debugfs,
+ hdev, &force_bredr_smp_fops);
+
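Because the switch is parsed with kstrtobool_from_user(), any of the usual boolean spellings (Y/N, 1/0) work. A hypothetical userspace toggle, assuming the conventional /sys/kernel/debug mount point and controller hci0 (both assumptions, not part of this patch):

    #include <fcntl.h>
    #include <unistd.h>

    /* Illustrative only: force BR/EDR SMP on for hci0. */
    static int force_bredr_smp_on(void)
    {
            int fd = open("/sys/kernel/debug/bluetooth/hci0/force_bredr_smp",
                          O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, "Y", 1) != 1) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }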
if (lmp_ssp_capable(hdev)) {
debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs,
hdev, &ssp_debug_mode_fops);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index f04963914366..67668be3461e 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1202,6 +1202,20 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
+static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->min_le_tx_power = rp->min_le_tx_power;
+ hdev->max_le_tx_power = rp->max_le_tx_power;
+}
+
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 *sent, status = *((__u8 *) skb->data);
@@ -1752,6 +1766,7 @@ static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
}
/* Update adv data as tx power is known now */
hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
+
hci_dev_unlock(hdev);
}
@@ -3581,6 +3596,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_le_set_adv_set_random_addr(hdev, skb);
break;
+ case HCI_OP_LE_READ_TRANSMIT_POWER:
+ hci_cc_le_read_transmit_power(hdev, skb);
+ break;
+
default:
BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
break;
@@ -4936,15 +4955,15 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev,
hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon) {
- hci_dev_unlock(hdev);
- return;
- }
+ if (!hcon)
+ goto unlock;
+
+ if (!hcon->amp_mgr)
+ goto unlock;
if (ev->status) {
hci_conn_del(hcon);
- hci_dev_unlock(hdev);
- return;
+ goto unlock;
}
bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
@@ -4961,6 +4980,7 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev,
amp_physical_cfm(bredr_hcon, hcon);
+unlock:
hci_dev_unlock(hdev);
}
@@ -5868,21 +5888,19 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
u8 num_reports = skb->data[0];
- void *ptr = &skb->data[1];
+ struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
- hci_dev_lock(hdev);
+ if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
+ return;
- while (num_reports--) {
- struct hci_ev_le_direct_adv_info *ev = ptr;
+ hci_dev_lock(hdev);
+ for (; num_reports; num_reports--, ev++)
process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
ev->bdaddr_type, &ev->direct_addr,
ev->direct_addr_type, ev->rssi, NULL, 0,
false);
- ptr += sizeof(*ev);
- }
-
hci_dev_unlock(hdev);
}
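The rewritten handler validates num_reports against the actual skb length before dereferencing a single entry, rather than trusting the count while walking a bare pointer. The guard generalizes to any HCI event that carries a count byte followed by fixed-size records; a sketch:

    /* True when one count byte plus `num` fixed-size records fit in the
     * skb (the shape of the check added above).
     */
    static bool ev_array_fits(const struct sk_buff *skb, size_t rec_size)
    {
            u8 num = skb->data[0];

            return num && skb->len >= num * rec_size + 1;
    }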
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 6f12bab4d2fa..71bffd745472 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -58,7 +58,7 @@ static int req_run(struct hci_request *req, hci_req_complete_t complete,
struct sk_buff *skb;
unsigned long flags;
- BT_DBG("length %u", skb_queue_len(&req->cmd_q));
+ bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
/* If an error occurred during request building, remove all HCI
* commands queued on the HCI request queue.
@@ -102,7 +102,7 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
struct sk_buff *skb)
{
- BT_DBG("%s result 0x%2.2x", hdev->name, result);
+ bt_dev_dbg(hdev, "result 0x%2.2x", result);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
@@ -115,7 +115,7 @@ static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
- BT_DBG("%s err 0x%2.2x", hdev->name, err);
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = err;
@@ -131,7 +131,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
struct sk_buff *skb;
int err = 0;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_req_init(&req, hdev);
@@ -167,7 +167,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
skb = hdev->req_skb;
hdev->req_skb = NULL;
- BT_DBG("%s end: err %d", hdev->name, err);
+ bt_dev_dbg(hdev, "end: err %d", err);
if (err < 0) {
kfree_skb(skb);
@@ -196,7 +196,7 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
struct hci_request req;
int err = 0;
- BT_DBG("%s start", hdev->name);
+ bt_dev_dbg(hdev, "start");
hci_req_init(&req, hdev);
@@ -260,7 +260,7 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
hdev->req_skb = NULL;
hdev->req_status = hdev->req_result = 0;
- BT_DBG("%s end: err %d", hdev->name, err);
+ bt_dev_dbg(hdev, "end: err %d", err);
return err;
}
@@ -300,7 +300,7 @@ struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
if (plen)
skb_put_data(skb, param, plen);
- BT_DBG("skb len %d", skb->len);
+ bt_dev_dbg(hdev, "skb len %d", skb->len);
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
hci_skb_opcode(skb) = opcode;
@@ -315,7 +315,7 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
- BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
+ bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
/* If an error occurred during request building, there is no point in
* queueing the HCI command. We can simply return.
@@ -378,6 +378,53 @@ void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
+static void start_interleave_scan(struct hci_dev *hdev)
+{
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->interleave_scan, 0);
+}
+
+static bool is_interleave_scanning(struct hci_dev *hdev)
+{
+ return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
+}
+
+static void cancel_interleave_scan(struct hci_dev *hdev)
+{
+ bt_dev_dbg(hdev, "cancelling interleave scan");
+
+ cancel_delayed_work_sync(&hdev->interleave_scan);
+
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
+}
+
+/* Return true if an interleave scan was started by this function;
+ * otherwise return false.
+ */
+static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
+{
+ /* If there is at least one ADV monitor and at least one pending LE
+ * connection or device to be scanned for, alternate between an
+ * allowlist scan and an unfiltered scan to save power.
+ */
+ bool use_interleaving = hci_is_adv_monitoring(hdev) &&
+ !(list_empty(&hdev->pend_le_conns) &&
+ list_empty(&hdev->pend_le_reports));
+ bool is_interleaving = is_interleave_scanning(hdev);
+
+ if (use_interleaving && !is_interleaving) {
+ start_interleave_scan(hdev);
+ bt_dev_dbg(hdev, "starting interleave scan");
+ return true;
+ }
+
+ if (!use_interleaving && is_interleaving)
+ cancel_interleave_scan(hdev);
+
+ return false;
+}
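Interleaving is wanted exactly when at least one advertisement monitor exists and there is something to scan for; the helper only acts on the edges of that predicate. As a truth table:

    /* use_interleaving = monitoring && (pend_le_conns || pend_le_reports)
     *
     *   use_interleaving   already interleaving   action
     *   ----------------   --------------------   -------------------
     *   yes                no                     start, return true
     *   yes                yes                    leave running
     *   no                 yes                    cancel
     *   no                 no                     nothing to do
     */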
+
/* This function controls the background scanning based on hdev->pend_le_conns
* list. If there are pending LE connection we start the background scanning,
* otherwise we stop it.
@@ -413,8 +460,8 @@ static void __hci_update_background_scan(struct hci_request *req)
*/
hci_discovery_filter_clear(hdev);
- BT_DBG("%s ADV monitoring is %s", hdev->name,
- hci_is_adv_monitoring(hdev) ? "on" : "off");
+ bt_dev_dbg(hdev, "ADV monitoring is %s",
+ hci_is_adv_monitoring(hdev) ? "on" : "off");
if (list_empty(&hdev->pend_le_conns) &&
list_empty(&hdev->pend_le_reports) &&
@@ -430,7 +477,7 @@ static void __hci_update_background_scan(struct hci_request *req)
hci_req_add_le_scan_disable(req, false);
- BT_DBG("%s stopping background scanning", hdev->name);
+ bt_dev_dbg(hdev, "stopping background scanning");
} else {
/* If there is at least one pending LE connection, we should
* keep the background scan running.
@@ -450,8 +497,7 @@ static void __hci_update_background_scan(struct hci_request *req)
hci_req_add_le_scan_disable(req, false);
hci_req_add_le_passive_scan(req);
-
- BT_DBG("%s starting background scanning", hdev->name);
+ bt_dev_dbg(hdev, "starting background scanning");
}
}
@@ -661,6 +707,9 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
return;
}
+ if (hdev->suspended)
+ set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
+
if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_enable cp;
@@ -698,7 +747,8 @@ static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
- if (use_ll_privacy(req->hdev)) {
+ if (use_ll_privacy(req->hdev) &&
+ hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
@@ -732,7 +782,8 @@ static int add_to_white_list(struct hci_request *req,
return -1;
/* White list can not be used with RPAs */
- if (!allow_rpa && !use_ll_privacy(hdev) &&
+ if (!allow_rpa &&
+ !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
return -1;
}
@@ -750,7 +801,8 @@ static int add_to_white_list(struct hci_request *req,
cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
- if (use_ll_privacy(hdev)) {
+ if (use_ll_privacy(hdev) &&
+ hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(hdev, &params->addr,
@@ -812,7 +864,8 @@ static u8 update_white_list(struct hci_request *req)
}
/* White list can not be used with RPAs */
- if (!allow_rpa && !use_ll_privacy(hdev) &&
+ if (!allow_rpa &&
+ !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
return 0x00;
}
@@ -844,12 +897,17 @@ static u8 update_white_list(struct hci_request *req)
return 0x00;
}
- /* Once the controller offloading of advertisement monitor is in place,
- * the if condition should include the support of MSFT extension
- * support. If suspend is ongoing, whitelist should be the default to
- * prevent waking by random advertisements.
+ /* Use the allowlist unless the following conditions are all true:
+ * - We are not currently suspending
+ * - There is at least one ADV monitor registered
+ * - Interleaved scanning is not currently using the allowlist
+ *
+ * Once controller offloading of advertisement monitors is in place,
+ * the above condition should also check for support of the MSFT
+ * extension.
*/
- if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
+ if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
+ hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
return 0x00;
/* Select filter policy to use white list */
@@ -1002,6 +1060,11 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
&own_addr_type))
return;
+ if (hdev->enable_advmon_interleave_scan &&
+ __hci_update_interleaved_scan(hdev))
+ return;
+
+ bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
/* Adding or removing entries from the white list must
* happen before enabling scanning. The controller does
* not allow white list modification while scanning.
@@ -1040,22 +1103,23 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
own_addr_type, filter_policy, addr_resolv);
}
-static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
+static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
struct adv_info *adv_instance;
/* Instance 0x00 always set local name */
if (instance == 0x00)
- return 1;
+ return true;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
- return 0;
+ return false;
- /* TODO: Take into account the "appearance" and "local-name" flags here.
- * These are currently being ignored as they are not supported.
- */
- return adv_instance->scan_rsp_len;
+ if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
+ adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
+ return true;
+
+ return adv_instance->scan_rsp_len ? true : false;
}
static void hci_req_clear_event_filter(struct hci_request *req)
@@ -1098,6 +1162,11 @@ static void hci_req_set_event_filter(struct hci_request *req)
scan = SCAN_PAGE;
}
+ if (scan)
+ set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
+ else
+ set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
+
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
@@ -1123,9 +1192,9 @@ static void cancel_adv_timeout(struct hci_dev *hdev)
}
/* This function requires the caller holds hdev->lock */
-static void hci_suspend_adv_instances(struct hci_request *req)
+void __hci_req_pause_adv_instances(struct hci_request *req)
{
- bt_dev_dbg(req->hdev, "Suspending advertising instances");
+ bt_dev_dbg(req->hdev, "Pausing advertising instances");
/* Call to disable any advertisements active on the controller.
* This will succeed even if no advertisements are configured.
@@ -1138,7 +1207,7 @@ static void hci_suspend_adv_instances(struct hci_request *req)
}
/* This function requires the caller holds hdev->lock */
-static void hci_resume_adv_instances(struct hci_request *req)
+static void __hci_req_resume_adv_instances(struct hci_request *req)
{
struct adv_info *adv;
@@ -1161,6 +1230,17 @@ static void hci_resume_adv_instances(struct hci_request *req)
}
}
+/* This function requires the caller holds hdev->lock */
+int hci_req_resume_adv_instances(struct hci_dev *hdev)
+{
+ struct hci_request req;
+
+ hci_req_init(&req, hdev);
+ __hci_req_resume_adv_instances(&req);
+
+ return hci_req_run(&req, NULL);
+}
+
static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
@@ -1214,7 +1294,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
/* Pause other advertisements */
if (hdev->adv_instance_cnt)
- hci_suspend_adv_instances(&req);
+ __hci_req_pause_adv_instances(&req);
hdev->advertising_paused = true;
hdev->advertising_old_state = old_state;
@@ -1223,8 +1303,10 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
/* Disable LE passive scan if enabled */
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
+ cancel_interleave_scan(hdev);
hci_req_add_le_scan_disable(&req, false);
+ }
/* Mark task needing completion */
set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
@@ -1279,7 +1361,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
/* Resume other advertisements */
if (hdev->adv_instance_cnt)
- hci_resume_adv_instances(&req);
+ __hci_req_resume_adv_instances(&req);
/* Unpause discovery */
hdev->discovery_paused = false;
@@ -1300,23 +1382,9 @@ done:
wake_up(&hdev->suspend_wait_q);
}
-static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
+static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
- u8 instance = hdev->cur_adv_instance;
- struct adv_info *adv_instance;
-
- /* Instance 0x00 always set local name */
- if (instance == 0x00)
- return 1;
-
- adv_instance = hci_find_adv_instance(hdev, instance);
- if (!adv_instance)
- return 0;
-
- /* TODO: Take into account the "appearance" and "local-name" flags here.
- * These are currently being ignored as they are not supported.
- */
- return adv_instance->scan_rsp_len;
+ return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}
void __hci_req_disable_advertising(struct hci_request *req)
@@ -1428,6 +1496,7 @@ static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
void __hci_req_enable_advertising(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
+ struct adv_info *adv_instance;
struct hci_cp_le_set_adv_param cp;
u8 own_addr_type, enable = 0x01;
bool connectable;
@@ -1435,6 +1504,7 @@ void __hci_req_enable_advertising(struct hci_request *req)
u32 flags;
flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
+ adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
/* If the "connectable" instance flag was not set, then choose between
* ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
@@ -1466,13 +1536,18 @@ void __hci_req_enable_advertising(struct hci_request *req)
memset(&cp, 0, sizeof(cp));
- if (connectable) {
- cp.type = LE_ADV_IND;
-
+ if (adv_instance) {
+ adv_min_interval = adv_instance->min_interval;
+ adv_max_interval = adv_instance->max_interval;
+ } else {
adv_min_interval = hdev->le_adv_min_interval;
adv_max_interval = hdev->le_adv_max_interval;
+ }
+
+ if (connectable) {
+ cp.type = LE_ADV_IND;
} else {
- if (get_cur_adv_instance_scan_rsp_len(hdev))
+ if (adv_cur_instance_is_scannable(hdev))
cp.type = LE_ADV_SCAN_IND;
else
cp.type = LE_ADV_NONCONN_IND;
@@ -1481,9 +1556,6 @@ void __hci_req_enable_advertising(struct hci_request *req)
hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
- } else {
- adv_min_interval = hdev->le_adv_min_interval;
- adv_max_interval = hdev->le_adv_max_interval;
}
}
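With this change the intervals are resolved in a fixed order: per-instance values when an instance is registered, the controller-wide defaults otherwise, and the fast discovery range overriding both for non-connectable advertising while the device is limited-discoverable. A hypothetical condensation of that precedence (the discoverability condition is abridged here):

    static void pick_adv_intervals(struct hci_dev *hdev, struct adv_info *adv,
                                   bool connectable, u16 *min, u16 *max)
    {
            *min = adv ? adv->min_interval : hdev->le_adv_min_interval;
            *max = adv ? adv->max_interval : hdev->le_adv_max_interval;

            /* Non-connectable adv may drop to the fast discovery range */
            if (!connectable &&
                hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                    *min = DISCOV_LE_FAST_ADV_INT_MIN;
                    *max = DISCOV_LE_FAST_ADV_INT_MAX;
            }
    }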
@@ -1591,14 +1663,11 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
memset(&cp, 0, sizeof(cp));
- /* Extended scan response data doesn't allow a response to be
- * set if the instance isn't scannable.
- */
- if (get_adv_instance_scan_rsp_len(hdev, instance))
+ if (instance)
len = create_instance_scan_rsp_data(hdev, instance,
cp.data);
else
- len = 0;
+ len = create_default_scan_rsp_data(hdev, cp.data);
if (hdev->scan_rsp_data_len == len &&
!memcmp(cp.data, hdev->scan_rsp_data, len))
@@ -1811,7 +1880,7 @@ void hci_req_disable_address_resolution(struct hci_dev *hdev)
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
- BT_DBG("%s status %u", hdev->name, status);
+ bt_dev_dbg(hdev, "status %u", status);
}
void hci_req_reenable_advertising(struct hci_dev *hdev)
@@ -1848,7 +1917,7 @@ static void adv_timeout_expire(struct work_struct *work)
struct hci_request req;
u8 instance;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -1871,6 +1940,62 @@ unlock:
hci_dev_unlock(hdev);
}
+static int hci_req_add_le_interleaved_scan(struct hci_request *req,
+ unsigned long opt)
+{
+ struct hci_dev *hdev = req->hdev;
+ int ret = 0;
+
+ hci_dev_lock(hdev);
+
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ hci_req_add_le_scan_disable(req, false);
+ hci_req_add_le_passive_scan(req);
+
+ switch (hdev->interleave_scan_state) {
+ case INTERLEAVE_SCAN_ALLOWLIST:
+ bt_dev_dbg(hdev, "next state: allowlist");
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
+ break;
+ case INTERLEAVE_SCAN_NO_FILTER:
+ bt_dev_dbg(hdev, "next state: no filter");
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
+ break;
+ case INTERLEAVE_SCAN_NONE:
+ bt_dev_err(hdev, "unexpected error");
+ ret = -1;
+ }
+
+ hci_dev_unlock(hdev);
+
+ return ret;
+}
+
+static void interleave_scan_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ interleave_scan.work);
+ u8 status;
+ unsigned long timeout;
+
+ if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
+ timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
+ } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
+ timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
+ } else {
+ bt_dev_err(hdev, "unexpected error");
+ return;
+ }
+
+ hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
+ HCI_CMD_TIMEOUT, &status);
+
+ /* Don't continue interleaving if it was canceled */
+ if (is_interleave_scanning(hdev))
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->interleave_scan, timeout);
+}
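Putting the pieces together: start_interleave_scan() seeds the state machine in the no-filter phase, each run of interleave_scan_work() programs the passive scan for the current phase, flips the state, and re-queues itself with the matching duration, and cancel_interleave_scan() resets the state so the requeue check fails. With the defaults set in hci_alloc_dev() above, the timeline looks roughly like:

    /* t = 0        scan unfiltered,     state -> ALLOWLIST, requeue +500 ms
     * t = 500 ms   scan with allowlist, state -> NO_FILTER, requeue +300 ms
     * t = 800 ms   scan unfiltered,     ... repeats until
     *              cancel_interleave_scan() sets INTERLEAVE_SCAN_NONE
     */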
+
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
bool use_rpa, struct adv_info *adv_instance,
u8 *own_addr_type, bdaddr_t *rand_addr)
@@ -2006,9 +2131,15 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
memset(&cp, 0, sizeof(cp));
- /* In ext adv set param interval is 3 octets */
- hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
- hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
+ if (adv_instance) {
+ hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
+ hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
+ cp.tx_power = adv_instance->tx_power;
+ } else {
+ hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
+ hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
+ cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
+ }
secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
@@ -2017,7 +2148,7 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
else
cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
- } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
+ } else if (adv_instance_is_scannable(hdev, instance)) {
if (secondary_adv)
cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
else
@@ -2031,7 +2162,6 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
cp.own_addr_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
- cp.tx_power = 127;
cp.handle = instance;
if (flags & MGMT_ADV_FLAG_SEC_2M) {
@@ -2332,7 +2462,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
*/
if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
hci_lookup_le_connect(hdev)) {
- BT_DBG("Deferring random address update");
+ bt_dev_dbg(hdev, "Deferring random address update");
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
return;
}
@@ -2557,7 +2687,7 @@ void __hci_req_update_class(struct hci_request *req)
struct hci_dev *hdev = req->hdev;
u8 cod[3];
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
if (!hdev_is_powered(hdev))
return;
@@ -2726,7 +2856,7 @@ void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
if (status)
- BT_DBG("Failed to abort connection: status 0x%2.2x", status);
+ bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
}
int hci_abort_conn(struct hci_conn *conn, u8 reason)
@@ -2789,7 +2919,7 @@ static int bredr_inquiry(struct hci_request *req, unsigned long opt)
const u8 liac[3] = { 0x00, 0x8b, 0x9e };
struct hci_cp_inquiry cp;
- BT_DBG("%s", req->hdev->name);
+ bt_dev_dbg(req->hdev, "");
hci_dev_lock(req->hdev);
hci_inquiry_cache_flush(req->hdev);
@@ -2815,7 +2945,7 @@ static void le_scan_disable_work(struct work_struct *work)
le_scan_disable.work);
u8 status;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return;
@@ -2911,7 +3041,7 @@ static void le_scan_restart_work(struct work_struct *work)
unsigned long timeout, duration, scan_start, now;
u8 status;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
if (status) {
@@ -2965,14 +3095,16 @@ static int active_scan(struct hci_request *req, unsigned long opt)
bool addr_resolv = false;
int err;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
/* If controller is scanning, it means the background scanning is
* running. Thus, we should temporarily stop it in order to set the
* discovery scanning parameters.
*/
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
hci_req_add_le_scan_disable(req, false);
+ cancel_interleave_scan(hdev);
+ }
/* All active scans will be done with either a resolvable private
* address (when privacy feature has been enabled) or non-resolvable
@@ -2993,7 +3125,7 @@ static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
int err;
- BT_DBG("%s", req->hdev->name);
+ bt_dev_dbg(req->hdev, "");
err = active_scan(req, opt);
if (err)
@@ -3006,7 +3138,7 @@ static void start_discovery(struct hci_dev *hdev, u8 *status)
{
unsigned long timeout;
- BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
+ bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
switch (hdev->discovery.type) {
case DISCOV_TYPE_BREDR:
@@ -3054,7 +3186,7 @@ static void start_discovery(struct hci_dev *hdev, u8 *status)
if (*status)
return;
- BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
+ bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
/* When service discovery is used and the controller has a
* strict duplicate filter, it is important to remember the
@@ -3079,7 +3211,7 @@ bool hci_req_stop_discovery(struct hci_request *req)
struct inquiry_entry *e;
bool ret = false;
- BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
+ bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
if (test_bit(HCI_INQUIRY, &hdev->flags))
@@ -3159,7 +3291,7 @@ static void discov_off(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev,
discov_off.work);
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -3298,6 +3430,7 @@ void hci_request_setup(struct hci_dev *hdev)
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
+ INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}
void hci_request_cancel_all(struct hci_dev *hdev)
@@ -3317,4 +3450,6 @@ void hci_request_cancel_all(struct hci_dev *hdev)
cancel_delayed_work_sync(&hdev->adv_instance_expire);
hdev->adv_instance_timeout = 0;
}
+
+ cancel_interleave_scan(hdev);
}
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 6a12e84c66c4..39ee8a18087a 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -71,6 +71,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req);
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
void hci_req_disable_address_resolution(struct hci_dev *hdev);
+void __hci_req_pause_adv_instances(struct hci_request *req);
+int hci_req_resume_adv_instances(struct hci_dev *hdev);
void hci_req_reenable_advertising(struct hci_dev *hdev);
void __hci_req_enable_advertising(struct hci_request *req);
void __hci_req_disable_advertising(struct hci_request *req);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 3b4fa27a44e6..0db48c812662 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1290,7 +1290,7 @@ static int hidp_session_thread(void *arg)
/* cleanup runtime environment */
remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
- remove_wait_queue(sk_sleep(session->intr_sock->sk), &ctrl_wait);
+ remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
wake_up_interruptible(&session->report_queue);
hidp_del_timer(session);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 1ab27b90ddcb..17b87b57a175 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1515,8 +1515,14 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
* that have no key size requirements. Ensure that the link is
* actually encrypted before enforcing a key size.
*/
+ int min_key_size = hcon->hdev->min_enc_key_size;
+
+ /* On FIPS security level, key size must be 16 bytes */
+ if (hcon->sec_level == BT_SECURITY_FIPS)
+ min_key_size = 16;
+
return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
- hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
+ hcon->enc_key_size >= min_key_size);
}
static void l2cap_do_start(struct l2cap_chan *chan)
@@ -3627,7 +3633,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
if (hint)
break;
result = L2CAP_CONF_UNKNOWN;
- *((u8 *) ptr++) = type;
+ l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
break;
}
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 12d7b368b428..fa0f7a4a1d2f 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -40,7 +40,7 @@
#include "msft.h"
#define MGMT_VERSION 1
-#define MGMT_REVISION 18
+#define MGMT_REVISION 19
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -110,7 +110,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_SET_APPEARANCE,
MGMT_OP_SET_BLOCKED_KEYS,
MGMT_OP_SET_WIDEBAND_SPEECH,
- MGMT_OP_READ_SECURITY_INFO,
+ MGMT_OP_READ_CONTROLLER_CAP,
MGMT_OP_READ_EXP_FEATURES_INFO,
MGMT_OP_SET_EXP_FEATURE,
MGMT_OP_READ_DEF_SYSTEM_CONFIG,
@@ -122,6 +122,8 @@ static const u16 mgmt_commands[] = {
MGMT_OP_READ_ADV_MONITOR_FEATURES,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_OP_REMOVE_ADV_MONITOR,
+ MGMT_OP_ADD_EXT_ADV_PARAMS,
+ MGMT_OP_ADD_EXT_ADV_DATA,
};
static const u16 mgmt_events[] = {
@@ -174,7 +176,7 @@ static const u16 mgmt_untrusted_commands[] = {
MGMT_OP_READ_CONFIG_INFO,
MGMT_OP_READ_EXT_INDEX_LIST,
MGMT_OP_READ_EXT_INFO,
- MGMT_OP_READ_SECURITY_INFO,
+ MGMT_OP_READ_CONTROLLER_CAP,
MGMT_OP_READ_EXP_FEATURES_INFO,
MGMT_OP_READ_DEF_SYSTEM_CONFIG,
MGMT_OP_READ_DEF_RUNTIME_CONFIG,
@@ -3387,7 +3389,7 @@ static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
- struct mgmt_rp_get_phy_confguration rp;
+ struct mgmt_rp_get_phy_configuration rp;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -3451,7 +3453,7 @@ unlock:
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
- struct mgmt_cp_set_phy_confguration *cp = data;
+ struct mgmt_cp_set_phy_configuration *cp = data;
struct hci_cp_le_set_default_phy cp_phy;
struct mgmt_pending_cmd *cmd;
struct hci_request req;
@@ -3708,13 +3710,14 @@ unlock:
return err;
}
-static int read_security_info(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 data_len)
+static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
{
- char buf[16];
- struct mgmt_rp_read_security_info *rp = (void *)buf;
- u16 sec_len = 0;
+ char buf[20];
+ struct mgmt_rp_read_controller_cap *rp = (void *)buf;
+ u16 cap_len = 0;
u8 flags = 0;
+ u8 tx_power_range[2];
bt_dev_dbg(hdev, "sock %p", sk);
@@ -3738,23 +3741,37 @@ static int read_security_info(struct sock *sk, struct hci_dev *hdev,
flags |= 0x08; /* Encryption key size enforcement (LE) */
- sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);
+ cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
+ &flags, 1);
/* When the Read Simple Pairing Options command is supported, then
* also max encryption key size information is provided.
*/
if (hdev->commands[41] & 0x08)
- sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
+ cap_len = eir_append_le16(rp->cap, cap_len,
+ MGMT_CAP_MAX_ENC_KEY_SIZE,
hdev->max_enc_key_size);
- sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);
+ cap_len = eir_append_le16(rp->cap, cap_len,
+ MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
+ SMP_MAX_ENC_KEY_SIZE);
+
+ /* Append the min/max LE tx power parameters if we were able to fetch
+ * it from the controller
+ */
+ if (hdev->commands[38] & 0x80) {
+ memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
+ memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
+ cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
+ tx_power_range, 2);
+ }
- rp->sec_len = cpu_to_le16(sec_len);
+ rp->cap_len = cpu_to_le16(cap_len);
hci_dev_unlock(hdev);
- return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
- rp, sizeof(*rp) + sec_len);
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
+ rp, sizeof(*rp) + cap_len);
}
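The reply reuses the EIR encoding: each capability is emitted as a one-byte length (covering type plus payload), a one-byte type, then the payload. Assuming the MGMT_CAP_* constants keep the 0x01-0x03 values of the literals they replace, with MGMT_CAP_LE_TX_PWR presumably 0x04, a fully populated buffer would look like:

    /* Illustrative capability stream:
     *   02 01 <flags>            security flags (1 byte)
     *   03 02 <key size, le16>   max enc key size (if commands[41] & 0x08)
     *   03 03 <key size, le16>   SMP max enc key size
     *   03 04 <min> <max>        LE tx power range (if commands[38] & 0x80)
     */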
#ifdef CONFIG_BT_FEATURE_DEBUG
@@ -7203,6 +7220,10 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev)
flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
flags |= MGMT_ADV_FLAG_APPEARANCE;
flags |= MGMT_ADV_FLAG_LOCAL_NAME;
+ flags |= MGMT_ADV_PARAM_DURATION;
+ flags |= MGMT_ADV_PARAM_TIMEOUT;
+ flags |= MGMT_ADV_PARAM_INTERVALS;
+ flags |= MGMT_ADV_PARAM_TX_POWER;
/* In extended adv, the TX_POWER returned from Set Adv Param
* will always be valid.
@@ -7377,6 +7398,31 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
return true;
}
+static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
+{
+ u32 supported_flags, phy_flags;
+
+ /* The current implementation only supports a subset of the specified
+ * flags. Also need to check mutual exclusiveness of sec flags.
+ */
+ supported_flags = get_supported_adv_flags(hdev);
+ phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
+ if (adv_flags & ~supported_flags ||
+ ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
+ return false;
+
+ return true;
+}
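The secondary-PHY check leans on the classic lowest-set-bit trick: phy_flags & -phy_flags isolates the lowest set bit, so XOR-ing it back out leaves something non-zero exactly when more than one PHY flag was requested. A standalone restatement of the same test:

    /* True when at most one bit of v is set (zero or a power of two);
     * equivalent to (v ^ (v & -v)) == 0 as used above.
     */
    static inline bool at_most_one_bit(u32 v)
    {
            return (v & (v - 1)) == 0;
    }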
+
+static bool adv_busy(struct hci_dev *hdev)
+{
+ return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
+ pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
+ pending_find(MGMT_OP_SET_LE, hdev) ||
+ pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
+ pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
+}
+
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
u16 opcode)
{
@@ -7391,6 +7437,8 @@ static void add_advertising_complete(struct hci_dev *hdev, u8 status,
hci_dev_lock(hdev);
cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
+ if (!cmd)
+ cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
if (!adv_instance->pending)
@@ -7435,7 +7483,6 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
struct mgmt_cp_add_advertising *cp = data;
struct mgmt_rp_add_advertising rp;
u32 flags;
- u32 supported_flags, phy_flags;
u8 status;
u16 timeout, duration;
unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
@@ -7471,13 +7518,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
timeout = __le16_to_cpu(cp->timeout);
duration = __le16_to_cpu(cp->duration);
- /* The current implementation only supports a subset of the specified
- * flags. Also need to check mutual exclusiveness of sec flags.
- */
- supported_flags = get_supported_adv_flags(hdev);
- phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
- if (flags & ~supported_flags ||
- ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
+ if (!requested_adv_flags_are_valid(hdev, flags))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -7489,9 +7530,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
- pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
- pending_find(MGMT_OP_SET_LE, hdev)) {
+ if (adv_busy(hdev)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_BUSY);
goto unlock;
@@ -7509,7 +7548,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
cp->adv_data_len, cp->data,
cp->scan_rsp_len,
cp->data + cp->adv_data_len,
- timeout, duration);
+ timeout, duration,
+ HCI_ADV_TX_POWER_NO_PREFERENCE,
+ hdev->le_adv_min_interval,
+ hdev->le_adv_max_interval);
if (err < 0) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_FAILED);
@@ -7582,6 +7624,338 @@ unlock:
return err;
}
+static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ struct mgmt_pending_cmd *cmd;
+ struct mgmt_cp_add_ext_adv_params *cp;
+ struct mgmt_rp_add_ext_adv_params rp;
+ struct adv_info *adv_instance;
+ u32 flags;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
+ if (!cmd)
+ goto unlock;
+
+ cp = cmd->param;
+ adv_instance = hci_find_adv_instance(hdev, cp->instance);
+ if (!adv_instance)
+ goto unlock;
+
+ rp.instance = cp->instance;
+ rp.tx_power = adv_instance->tx_power;
+
+ /* While we're at it, inform userspace of the available space for this
+ * advertisement, given the flags that will be used.
+ */
+ flags = __le32_to_cpu(cp->flags);
+ rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
+ rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
+
+ if (status) {
+ /* If this advertisement was previously advertising and we
+ * failed to update it, we signal that it has been removed and
+ * delete its structure
+ */
+ if (!adv_instance->pending)
+ mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
+
+ hci_remove_adv_instance(hdev, cp->instance);
+
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(status));
+
+ } else {
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(status), &rp, sizeof(rp));
+ }
+
+unlock:
+ if (cmd)
+ mgmt_pending_remove(cmd);
+
+ hci_dev_unlock(hdev);
+}
+
+static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ struct mgmt_cp_add_ext_adv_params *cp = data;
+ struct mgmt_rp_add_ext_adv_params rp;
+ struct mgmt_pending_cmd *cmd = NULL;
+ struct adv_info *adv_instance;
+ struct hci_request req;
+ u32 flags, min_interval, max_interval;
+ u16 timeout, duration;
+ u8 status;
+ s8 tx_power;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ status = mgmt_le_support(hdev);
+ if (status)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
+ status);
+
+ if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ /* The purpose of breaking add_advertising into two separate MGMT calls
+ * for params and data is to allow more parameters to be added to this
+ * structure in the future. For this reason, we verify that we have the
+ * bare minimum structure we know of when the interface was defined. Any
+ * extra parameters we don't know about will be ignored in this request.
+ */
+ if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ flags = __le32_to_cpu(cp->flags);
+
+ if (!requested_adv_flags_are_valid(hdev, flags))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ /* In new interface, we require that we are powered to register */
+ if (!hdev_is_powered(hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
+ MGMT_STATUS_REJECTED);
+ goto unlock;
+ }
+
+ if (adv_busy(hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ /* Parse defined parameters from request, use defaults otherwise */
+ timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
+ __le16_to_cpu(cp->timeout) : 0;
+
+ duration = (flags & MGMT_ADV_PARAM_DURATION) ?
+ __le16_to_cpu(cp->duration) :
+ hdev->def_multi_adv_rotation_duration;
+
+ min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
+ __le32_to_cpu(cp->min_interval) :
+ hdev->le_adv_min_interval;
+
+ max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
+ __le32_to_cpu(cp->max_interval) :
+ hdev->le_adv_max_interval;
+
+ tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
+ cp->tx_power :
+ HCI_ADV_TX_POWER_NO_PREFERENCE;
+
+ /* Create advertising instance with no advertising or response data */
+ err = hci_add_adv_instance(hdev, cp->instance, flags,
+ 0, NULL, 0, NULL, timeout, duration,
+ tx_power, min_interval, max_interval);
+
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
+ MGMT_STATUS_FAILED);
+ goto unlock;
+ }
+
+ hdev->cur_adv_instance = cp->instance;
+ /* Submit request for advertising params if ext adv available */
+ if (ext_adv_capable(hdev)) {
+ hci_req_init(&req, hdev);
+ adv_instance = hci_find_adv_instance(hdev, cp->instance);
+
+ /* Updating parameters of an active instance will return a
+ * Command Disallowed error, so we must first disable the
+ * instance if it is active.
+ */
+ if (!adv_instance->pending)
+ __hci_req_disable_ext_adv_instance(&req, cp->instance);
+
+ __hci_req_setup_ext_adv_instance(&req, cp->instance);
+
+ err = hci_req_run(&req, add_ext_adv_params_complete);
+
+ if (!err)
+ cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
+ hdev, data, data_len);
+ if (!cmd) {
+ err = -ENOMEM;
+ hci_remove_adv_instance(hdev, cp->instance);
+ goto unlock;
+ }
+
+ } else {
+ rp.instance = cp->instance;
+ rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
+ rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
+ rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_ADD_EXT_ADV_PARAMS,
+ MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct mgmt_cp_add_ext_adv_data *cp = data;
+ struct mgmt_rp_add_ext_adv_data rp;
+ u8 schedule_instance = 0;
+ struct adv_info *next_instance;
+ struct adv_info *adv_instance;
+ int err = 0;
+ struct mgmt_pending_cmd *cmd;
+ struct hci_request req;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ adv_instance = hci_find_adv_instance(hdev, cp->instance);
+
+ if (!adv_instance) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+ /* In new interface, we require that we are powered to register */
+ if (!hdev_is_powered(hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
+ MGMT_STATUS_REJECTED);
+ goto clear_new_instance;
+ }
+
+ if (adv_busy(hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
+ MGMT_STATUS_BUSY);
+ goto clear_new_instance;
+ }
+
+ /* Validate new data */
+ if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
+ cp->adv_data_len, true) ||
+ !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
+ cp->adv_data_len, cp->scan_rsp_len, false)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto clear_new_instance;
+ }
+
+ /* Set the data in the advertising instance */
+ hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
+ cp->data, cp->scan_rsp_len,
+ cp->data + cp->adv_data_len);
+
+ /* We're good to go, update advertising data, parameters, and start
+ * advertising.
+ */
+
+ hci_req_init(&req, hdev);
+
+ hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
+
+ if (ext_adv_capable(hdev)) {
+ __hci_req_update_adv_data(&req, cp->instance);
+ __hci_req_update_scan_rsp_data(&req, cp->instance);
+ __hci_req_enable_ext_advertising(&req, cp->instance);
+
+ } else {
+ /* If using software rotation, determine next instance to use */
+
+ if (hdev->cur_adv_instance == cp->instance) {
+ /* If the currently advertised instance is being changed
+ * then cancel the current advertising and schedule the
+ * next instance. If there is only one instance then the
+ * overridden advertising data will be visible right
+ * away
+ */
+ cancel_adv_timeout(hdev);
+
+ next_instance = hci_get_next_instance(hdev,
+ cp->instance);
+ if (next_instance)
+ schedule_instance = next_instance->instance;
+ } else if (!hdev->adv_instance_timeout) {
+ /* Immediately advertise the new instance if no other
+ * instance is currently being advertised.
+ */
+ schedule_instance = cp->instance;
+ }
+
+ /* If the HCI_ADVERTISING flag is set or there is no instance to
+ * be advertised then we have no HCI communication to make.
+ * Simply return.
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+ !schedule_instance) {
+ if (adv_instance->pending) {
+ mgmt_advertising_added(sk, hdev, cp->instance);
+ adv_instance->pending = false;
+ }
+ rp.instance = cp->instance;
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_ADD_EXT_ADV_DATA,
+ MGMT_STATUS_SUCCESS, &rp,
+ sizeof(rp));
+ goto unlock;
+ }
+
+ err = __hci_req_schedule_adv_instance(&req, schedule_instance,
+ true);
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
+ data_len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto clear_new_instance;
+ }
+
+ if (!err)
+ err = hci_req_run(&req, add_advertising_complete);
+
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
+ MGMT_STATUS_FAILED);
+ mgmt_pending_remove(cmd);
+ goto clear_new_instance;
+ }
+
+ /* We were successful in updating data, so trigger advertising_added
+ * event if this is an instance that wasn't previously advertising. If
+ * a failure occurs in the requests we initiated, we will remove the
+ * instance again in add_advertising_complete
+ */
+ if (adv_instance->pending)
+ mgmt_advertising_added(sk, hdev, cp->instance);
+
+ goto unlock;
+
+clear_new_instance:
+ hci_remove_adv_instance(hdev, cp->instance);
+
+unlock:
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
u16 opcode)
{
@@ -7834,7 +8208,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
HCI_MGMT_VAR_LEN },
{ set_wideband_speech, MGMT_SETTING_SIZE },
- { read_security_info, MGMT_READ_SECURITY_INFO_SIZE,
+ { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
HCI_MGMT_UNTRUSTED },
{ read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
HCI_MGMT_UNTRUSTED |
@@ -7856,6 +8230,10 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
HCI_MGMT_VAR_LEN },
{ remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
+ { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
+ HCI_MGMT_VAR_LEN },
+ { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
+ HCI_MGMT_VAR_LEN },
};
void mgmt_index_added(struct hci_dev *hdev)
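The two handlers added to mgmt_handlers above split extended advertising setup into a parameters phase and a data phase, so userspace can size its payloads against the limits the controller reports. A minimal sketch of the intended call order, assuming a hypothetical mgmt_send() helper that writes one command to the HCI control channel and waits for the matching reply (not a real libbluetooth API):

        struct mgmt_cp_add_ext_adv_params params = {
                .instance = 1,
                .flags = cpu_to_le32(MGMT_ADV_FLAG_CONNECTABLE),
        };
        struct mgmt_rp_add_ext_adv_params rp;

        /* Step 1: register the instance and its parameters. */
        mgmt_send(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev_id,
                  &params, sizeof(params), &rp, sizeof(rp));

        /* Step 2: send MGMT_OP_ADD_EXT_ADV_DATA for the same instance,
         * with adv/scan-rsp payloads sized against rp.max_adv_data_len
         * and rp.max_scan_rsp_len; a failure here makes the kernel drop
         * the pending instance again (see clear_new_instance above).
         */
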
diff --git a/net/bluetooth/mgmt_config.c b/net/bluetooth/mgmt_config.c
index b30b571f8caf..1deb0ca7a929 100644
--- a/net/bluetooth/mgmt_config.c
+++ b/net/bluetooth/mgmt_config.c
@@ -11,74 +11,119 @@
#include "mgmt_util.h"
#include "mgmt_config.h"
-#define HDEV_PARAM_U16(_param_code_, _param_name_) \
-{ \
- { cpu_to_le16(_param_code_), sizeof(__u16) }, \
- { cpu_to_le16(hdev->_param_name_) } \
-}
+#define HDEV_PARAM_U16(_param_name_) \
+ struct { \
+ struct mgmt_tlv entry; \
+ __le16 value; \
+ } __packed _param_name_
-#define HDEV_PARAM_U16_JIFFIES_TO_MSECS(_param_code_, _param_name_) \
-{ \
- { cpu_to_le16(_param_code_), sizeof(__u16) }, \
- { cpu_to_le16(jiffies_to_msecs(hdev->_param_name_)) } \
-}
+#define HDEV_PARAM_U8(_param_name_) \
+ struct { \
+ struct mgmt_tlv entry; \
+ __u8 value; \
+ } __packed _param_name_
+
+#define TLV_SET_U16(_param_code_, _param_name_) \
+ { \
+ { cpu_to_le16(_param_code_), sizeof(__u16) }, \
+ cpu_to_le16(hdev->_param_name_) \
+ }
+
+#define TLV_SET_U8(_param_code_, _param_name_) \
+ { \
+ { cpu_to_le16(_param_code_), sizeof(__u8) }, \
+ hdev->_param_name_ \
+ }
+
+#define TLV_SET_U16_JIFFIES_TO_MSECS(_param_code_, _param_name_) \
+ { \
+ { cpu_to_le16(_param_code_), sizeof(__u16) }, \
+ cpu_to_le16(jiffies_to_msecs(hdev->_param_name_)) \
+ }
int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
- struct {
- struct mgmt_tlv entry;
- union {
- /* This is a simplification for now since all values
- * are 16 bits. In the future, this code may need
- * refactoring to account for variable length values
- * and properly calculate the required buffer size.
- */
- __le16 value;
- };
- } __packed params[] = {
+ int ret;
+ struct mgmt_rp_read_def_system_config {
/* Please see mgmt-api.txt for documentation of these values */
- HDEV_PARAM_U16(0x0000, def_page_scan_type),
- HDEV_PARAM_U16(0x0001, def_page_scan_int),
- HDEV_PARAM_U16(0x0002, def_page_scan_window),
- HDEV_PARAM_U16(0x0003, def_inq_scan_type),
- HDEV_PARAM_U16(0x0004, def_inq_scan_int),
- HDEV_PARAM_U16(0x0005, def_inq_scan_window),
- HDEV_PARAM_U16(0x0006, def_br_lsto),
- HDEV_PARAM_U16(0x0007, def_page_timeout),
- HDEV_PARAM_U16(0x0008, sniff_min_interval),
- HDEV_PARAM_U16(0x0009, sniff_max_interval),
- HDEV_PARAM_U16(0x000a, le_adv_min_interval),
- HDEV_PARAM_U16(0x000b, le_adv_max_interval),
- HDEV_PARAM_U16(0x000c, def_multi_adv_rotation_duration),
- HDEV_PARAM_U16(0x000d, le_scan_interval),
- HDEV_PARAM_U16(0x000e, le_scan_window),
- HDEV_PARAM_U16(0x000f, le_scan_int_suspend),
- HDEV_PARAM_U16(0x0010, le_scan_window_suspend),
- HDEV_PARAM_U16(0x0011, le_scan_int_discovery),
- HDEV_PARAM_U16(0x0012, le_scan_window_discovery),
- HDEV_PARAM_U16(0x0013, le_scan_int_adv_monitor),
- HDEV_PARAM_U16(0x0014, le_scan_window_adv_monitor),
- HDEV_PARAM_U16(0x0015, le_scan_int_connect),
- HDEV_PARAM_U16(0x0016, le_scan_window_connect),
- HDEV_PARAM_U16(0x0017, le_conn_min_interval),
- HDEV_PARAM_U16(0x0018, le_conn_max_interval),
- HDEV_PARAM_U16(0x0019, le_conn_latency),
- HDEV_PARAM_U16(0x001a, le_supv_timeout),
- HDEV_PARAM_U16_JIFFIES_TO_MSECS(0x001b,
- def_le_autoconnect_timeout),
+ HDEV_PARAM_U16(def_page_scan_type);
+ HDEV_PARAM_U16(def_page_scan_int);
+ HDEV_PARAM_U16(def_page_scan_window);
+ HDEV_PARAM_U16(def_inq_scan_type);
+ HDEV_PARAM_U16(def_inq_scan_int);
+ HDEV_PARAM_U16(def_inq_scan_window);
+ HDEV_PARAM_U16(def_br_lsto);
+ HDEV_PARAM_U16(def_page_timeout);
+ HDEV_PARAM_U16(sniff_min_interval);
+ HDEV_PARAM_U16(sniff_max_interval);
+ HDEV_PARAM_U16(le_adv_min_interval);
+ HDEV_PARAM_U16(le_adv_max_interval);
+ HDEV_PARAM_U16(def_multi_adv_rotation_duration);
+ HDEV_PARAM_U16(le_scan_interval);
+ HDEV_PARAM_U16(le_scan_window);
+ HDEV_PARAM_U16(le_scan_int_suspend);
+ HDEV_PARAM_U16(le_scan_window_suspend);
+ HDEV_PARAM_U16(le_scan_int_discovery);
+ HDEV_PARAM_U16(le_scan_window_discovery);
+ HDEV_PARAM_U16(le_scan_int_adv_monitor);
+ HDEV_PARAM_U16(le_scan_window_adv_monitor);
+ HDEV_PARAM_U16(le_scan_int_connect);
+ HDEV_PARAM_U16(le_scan_window_connect);
+ HDEV_PARAM_U16(le_conn_min_interval);
+ HDEV_PARAM_U16(le_conn_max_interval);
+ HDEV_PARAM_U16(le_conn_latency);
+ HDEV_PARAM_U16(le_supv_timeout);
+ HDEV_PARAM_U16(def_le_autoconnect_timeout);
+ HDEV_PARAM_U16(advmon_allowlist_duration);
+ HDEV_PARAM_U16(advmon_no_filter_duration);
+ HDEV_PARAM_U8(enable_advmon_interleave_scan);
+ } __packed rp = {
+ TLV_SET_U16(0x0000, def_page_scan_type),
+ TLV_SET_U16(0x0001, def_page_scan_int),
+ TLV_SET_U16(0x0002, def_page_scan_window),
+ TLV_SET_U16(0x0003, def_inq_scan_type),
+ TLV_SET_U16(0x0004, def_inq_scan_int),
+ TLV_SET_U16(0x0005, def_inq_scan_window),
+ TLV_SET_U16(0x0006, def_br_lsto),
+ TLV_SET_U16(0x0007, def_page_timeout),
+ TLV_SET_U16(0x0008, sniff_min_interval),
+ TLV_SET_U16(0x0009, sniff_max_interval),
+ TLV_SET_U16(0x000a, le_adv_min_interval),
+ TLV_SET_U16(0x000b, le_adv_max_interval),
+ TLV_SET_U16(0x000c, def_multi_adv_rotation_duration),
+ TLV_SET_U16(0x000d, le_scan_interval),
+ TLV_SET_U16(0x000e, le_scan_window),
+ TLV_SET_U16(0x000f, le_scan_int_suspend),
+ TLV_SET_U16(0x0010, le_scan_window_suspend),
+ TLV_SET_U16(0x0011, le_scan_int_discovery),
+ TLV_SET_U16(0x0012, le_scan_window_discovery),
+ TLV_SET_U16(0x0013, le_scan_int_adv_monitor),
+ TLV_SET_U16(0x0014, le_scan_window_adv_monitor),
+ TLV_SET_U16(0x0015, le_scan_int_connect),
+ TLV_SET_U16(0x0016, le_scan_window_connect),
+ TLV_SET_U16(0x0017, le_conn_min_interval),
+ TLV_SET_U16(0x0018, le_conn_max_interval),
+ TLV_SET_U16(0x0019, le_conn_latency),
+ TLV_SET_U16(0x001a, le_supv_timeout),
+ TLV_SET_U16_JIFFIES_TO_MSECS(0x001b,
+ def_le_autoconnect_timeout),
+ TLV_SET_U16(0x001d, advmon_allowlist_duration),
+ TLV_SET_U16(0x001e, advmon_no_filter_duration),
+ TLV_SET_U8(0x001f, enable_advmon_interleave_scan),
};
- struct mgmt_rp_read_def_system_config *rp = (void *)params;
bt_dev_dbg(hdev, "sock %p", sk);
- return mgmt_cmd_complete(sk, hdev->id,
- MGMT_OP_READ_DEF_SYSTEM_CONFIG,
- 0, rp, sizeof(params));
+ ret = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_READ_DEF_SYSTEM_CONFIG,
+ 0, &rp, sizeof(rp));
+ return ret;
}
#define TO_TLV(x) ((struct mgmt_tlv *)(x))
#define TLV_GET_LE16(tlv) le16_to_cpu(*((__le16 *)(TO_TLV(tlv)->value)))
+#define TLV_GET_U8(tlv) (*((__u8 *)(TO_TLV(tlv)->value)))
int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
@@ -95,6 +140,7 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
/* First pass to validate the tlv */
while (buffer_left >= sizeof(struct mgmt_tlv)) {
const u8 len = TO_TLV(buffer)->length;
+ size_t exp_type_len;
const u16 exp_len = sizeof(struct mgmt_tlv) +
len;
const u16 type = le16_to_cpu(TO_TLV(buffer)->type);
@@ -138,20 +184,28 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
case 0x0019:
case 0x001a:
case 0x001b:
- if (len != sizeof(u16)) {
- bt_dev_warn(hdev, "invalid length %d, exp %zu for type %d",
- len, sizeof(u16), type);
-
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_SET_DEF_SYSTEM_CONFIG,
- MGMT_STATUS_INVALID_PARAMS);
- }
+ case 0x001d:
+ case 0x001e:
+ exp_type_len = sizeof(u16);
+ break;
+ case 0x001f:
+ exp_type_len = sizeof(u8);
break;
default:
+ exp_type_len = 0;
bt_dev_warn(hdev, "unsupported parameter %u", type);
break;
}
+ if (exp_type_len && len != exp_type_len) {
+ bt_dev_warn(hdev, "invalid length %d, exp %zu for type %d",
+ len, exp_type_len, type);
+
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_DEF_SYSTEM_CONFIG,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
buffer_left -= exp_len;
buffer += exp_len;
}
@@ -251,6 +305,15 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
hdev->def_le_autoconnect_timeout =
msecs_to_jiffies(TLV_GET_LE16(buffer));
break;
+ case 0x001d:
+ hdev->advmon_allowlist_duration = TLV_GET_LE16(buffer);
+ break;
+ case 0x001e:
+ hdev->advmon_no_filter_duration = TLV_GET_LE16(buffer);
+ break;
+ case 0x001f:
+ hdev->enable_advmon_interleave_scan = TLV_GET_U8(buffer);
+ break;
default:
bt_dev_warn(hdev, "unsupported parameter %u", type);
break;
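Both directions of the system-config interface above use the same flat TLV stream: a 3-byte mgmt_tlv header (__le16 type, __u8 length, value[] following) and then the raw little-endian value. A sketch of how one u16 entry lands on the wire, mirroring the TLV_SET_U16() construct (the 300 ms value is only an example):

        struct {
                struct mgmt_tlv entry;  /* __le16 type, __u8 length */
                __le16 value;
        } __packed tlv = {
                { cpu_to_le16(0x001d), sizeof(__u16) },
                cpu_to_le16(300),       /* advmon_allowlist_duration, ms */
        };
        /* Wire bytes: 1d 00 02 2c 01 */
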
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 79ffcdef0b7a..22a110f37abc 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1003,6 +1003,11 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
case BT_SNDMTU:
case BT_RCVMTU:
+ if (sk->sk_state != BT_CONNECTED) {
+ err = -ENOTCONN;
+ break;
+ }
+
if (put_user(sco_pi(sk)->conn->mtu, (u32 __user *)optval))
err = -EFAULT;
break;
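With the added state check, reading the SCO MTU on a socket that is not yet connected now fails with ENOTCONN instead of dereferencing a NULL conn. A userspace sketch of the visible behaviour (BT_SNDMTU/BT_RCVMTU are the existing SOL_BLUETOOTH options; error handling trimmed):

        uint32_t mtu;
        socklen_t len = sizeof(mtu);

        if (getsockopt(fd, SOL_BLUETOOTH, BT_SNDMTU, &mtu, &len) < 0) {
                /* Before the connection is up this is now -1/ENOTCONN
                 * rather than a kernel oops.
                 */
                perror("BT_SNDMTU");
        }
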
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index bf4bef13d935..c659c464f7ca 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -3353,31 +3353,8 @@ static void smp_del_chan(struct l2cap_chan *chan)
l2cap_chan_put(chan);
}
-static ssize_t force_bredr_smp_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+int smp_force_bredr(struct hci_dev *hdev, bool enable)
{
- struct hci_dev *hdev = file->private_data;
- char buf[3];
-
- buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP) ? 'Y': 'N';
- buf[1] = '\n';
- buf[2] = '\0';
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t force_bredr_smp_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- bool enable;
- int err;
-
- err = kstrtobool_from_user(user_buf, count, &enable);
- if (err)
- return err;
-
if (enable == hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
return -EALREADY;
@@ -3399,16 +3376,9 @@ static ssize_t force_bredr_smp_write(struct file *file,
hci_dev_change_flag(hdev, HCI_FORCE_BREDR_SMP);
- return count;
+ return 0;
}
-static const struct file_operations force_bredr_smp_fops = {
- .open = simple_open,
- .read = force_bredr_smp_read,
- .write = force_bredr_smp_write,
- .llseek = default_llseek,
-};
-
int smp_register(struct hci_dev *hdev)
{
struct l2cap_chan *chan;
@@ -3433,17 +3403,7 @@ int smp_register(struct hci_dev *hdev)
hdev->smp_data = chan;
- /* If the controller does not support BR/EDR Secure Connections
- * feature, then the BR/EDR SMP channel shall not be present.
- *
- * To test this with Bluetooth 4.0 controllers, create a debugfs
- * switch that allows forcing BR/EDR SMP support and accepting
- * cross-transport pairing on non-AES encrypted connections.
- */
if (!lmp_sc_capable(hdev)) {
- debugfs_create_file("force_bredr_smp", 0644, hdev->debugfs,
- hdev, &force_bredr_smp_fops);
-
/* Flag can be already set here (due to power toggle) */
if (!hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
return 0;
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 121edadd5f8d..fc35a8bf358e 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -193,6 +193,8 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa);
int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]);
+int smp_force_bredr(struct hci_dev *hdev, bool enable);
+
int smp_register(struct hci_dev *hdev);
void smp_unregister(struct hci_dev *hdev);
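With the debugfs file gone, the force-BR/EDR-SMP toggle becomes a plain helper that the mgmt experimental-features code can drive. A sketch of the expected call pattern on the caller's side (locals are illustrative; the -EALREADY case comes from the check kept in smp_force_bredr()):

        int err = smp_force_bredr(hdev, true);

        if (err == -EALREADY)
                /* The flag was already in the requested state; callers
                 * can treat this as a no-op or report it as they see fit.
                 */
                err = 0;
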
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 80879196560c..3c8ded7d3e84 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -73,3 +73,14 @@ config BRIDGE_MRP
Say N to exclude this support and reduce the binary size.
If unsure, say N.
+
+config BRIDGE_CFM
+ bool "CFM protocol"
+ depends on BRIDGE
+ help
+ If you say Y here, then the Ethernet bridge will be able to run CFM
+ protocol according to 802.1Q section 12.14
+
+ Say N to exclude this support and reduce the binary size.
+
+ If unsure, say N.
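As the Makefile change below shows, CFM is compiled into the bridge module itself rather than as a separate object, so enabling it is a single config switch:

        CONFIG_BRIDGE=m
        CONFIG_BRIDGE_CFM=y
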
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index ccb394236fbd..4702702a74d3 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -27,3 +27,5 @@ bridge-$(CONFIG_NET_SWITCHDEV) += br_switchdev.o
obj-$(CONFIG_NETFILTER) += netfilter/
bridge-$(CONFIG_BRIDGE_MRP) += br_mrp_switchdev.o br_mrp.o br_mrp_netlink.o
+
+bridge-$(CONFIG_BRIDGE_CFM) += br_cfm.o br_cfm_netlink.o
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 401eeb9142eb..1b169f8e7491 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -43,7 +43,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
if (event == NETDEV_REGISTER) {
/* register of bridge completed, add sysfs entries */
- br_sysfs_addbr(dev);
+ err = br_sysfs_addbr(dev);
+ if (err)
+ return notifier_from_errno(err);
+
return NOTIFY_DONE;
}
}
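br_device_event() can now surface the sysfs failure to the notifier core: notifier_from_errno() encodes a negative errno into the notifier return value, and the registration path recovers it with notifier_to_errno(). A sketch of the round trip:

        int ret = notifier_from_errno(-ENOMEM); /* NOTIFY_STOP_MASK | encoded errno */

        if (notifier_to_errno(ret) == -ENOMEM)
                /* the netdev registration path sees the original -ENOMEM */
                ;
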
diff --git a/net/bridge/br_cfm.c b/net/bridge/br_cfm.c
new file mode 100644
index 000000000000..001064f7583d
--- /dev/null
+++ b/net/bridge/br_cfm.c
@@ -0,0 +1,867 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/cfm_bridge.h>
+#include <uapi/linux/cfm_bridge.h>
+#include "br_private_cfm.h"
+
+static struct br_cfm_mep *br_mep_find(struct net_bridge *br, u32 instance)
+{
+ struct br_cfm_mep *mep;
+
+ hlist_for_each_entry(mep, &br->mep_list, head)
+ if (mep->instance == instance)
+ return mep;
+
+ return NULL;
+}
+
+static struct br_cfm_mep *br_mep_find_ifindex(struct net_bridge *br,
+ u32 ifindex)
+{
+ struct br_cfm_mep *mep;
+
+ hlist_for_each_entry_rcu(mep, &br->mep_list, head,
+ lockdep_rtnl_is_held())
+ if (mep->create.ifindex == ifindex)
+ return mep;
+
+ return NULL;
+}
+
+static struct br_cfm_peer_mep *br_peer_mep_find(struct br_cfm_mep *mep,
+ u32 mepid)
+{
+ struct br_cfm_peer_mep *peer_mep;
+
+ hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head,
+ lockdep_rtnl_is_held())
+ if (peer_mep->mepid == mepid)
+ return peer_mep;
+
+ return NULL;
+}
+
+static struct net_bridge_port *br_mep_get_port(struct net_bridge *br,
+ u32 ifindex)
+{
+ struct net_bridge_port *port;
+
+ list_for_each_entry(port, &br->port_list, list)
+ if (port->dev->ifindex == ifindex)
+ return port;
+
+ return NULL;
+}
+
+/* Calculate the CCM interval in us. */
+static u32 interval_to_us(enum br_cfm_ccm_interval interval)
+{
+ switch (interval) {
+ case BR_CFM_CCM_INTERVAL_NONE:
+ return 0;
+ case BR_CFM_CCM_INTERVAL_3_3_MS:
+ return 3300;
+ case BR_CFM_CCM_INTERVAL_10_MS:
+ return 10 * 1000;
+ case BR_CFM_CCM_INTERVAL_100_MS:
+ return 100 * 1000;
+ case BR_CFM_CCM_INTERVAL_1_SEC:
+ return 1000 * 1000;
+ case BR_CFM_CCM_INTERVAL_10_SEC:
+ return 10 * 1000 * 1000;
+ case BR_CFM_CCM_INTERVAL_1_MIN:
+ return 60 * 1000 * 1000;
+ case BR_CFM_CCM_INTERVAL_10_MIN:
+ return 10 * 60 * 1000 * 1000;
+ }
+ return 0;
+}
+
+/* Convert the interface interval to CCM PDU value. */
+static u32 interval_to_pdu(enum br_cfm_ccm_interval interval)
+{
+ switch (interval) {
+ case BR_CFM_CCM_INTERVAL_NONE:
+ return 0;
+ case BR_CFM_CCM_INTERVAL_3_3_MS:
+ return 1;
+ case BR_CFM_CCM_INTERVAL_10_MS:
+ return 2;
+ case BR_CFM_CCM_INTERVAL_100_MS:
+ return 3;
+ case BR_CFM_CCM_INTERVAL_1_SEC:
+ return 4;
+ case BR_CFM_CCM_INTERVAL_10_SEC:
+ return 5;
+ case BR_CFM_CCM_INTERVAL_1_MIN:
+ return 6;
+ case BR_CFM_CCM_INTERVAL_10_MIN:
+ return 7;
+ }
+ return 0;
+}
+
+/* Convert the CCM PDU value to the interface interval. */
+static u32 pdu_to_interval(u32 value)
+{
+ switch (value) {
+ case 0:
+ return BR_CFM_CCM_INTERVAL_NONE;
+ case 1:
+ return BR_CFM_CCM_INTERVAL_3_3_MS;
+ case 2:
+ return BR_CFM_CCM_INTERVAL_10_MS;
+ case 3:
+ return BR_CFM_CCM_INTERVAL_100_MS;
+ case 4:
+ return BR_CFM_CCM_INTERVAL_1_SEC;
+ case 5:
+ return BR_CFM_CCM_INTERVAL_10_SEC;
+ case 6:
+ return BR_CFM_CCM_INTERVAL_1_MIN;
+ case 7:
+ return BR_CFM_CCM_INTERVAL_10_MIN;
+ }
+ return BR_CFM_CCM_INTERVAL_NONE;
+}
+
+static void ccm_rx_timer_start(struct br_cfm_peer_mep *peer_mep)
+{
+ u32 interval_us;
+
+ interval_us = interval_to_us(peer_mep->mep->cc_config.exp_interval);
+ /* Function ccm_rx_dwork must be called at 1/4
+ * of the configured CC 'expected_interval'
+ * in order to detect a CCM defect after 3.25 intervals.
+ */
+ queue_delayed_work(system_wq, &peer_mep->ccm_rx_dwork,
+ usecs_to_jiffies(interval_us / 4));
+}
+
+static void br_cfm_notify(int event, const struct net_bridge_port *port)
+{
+ u32 filter = RTEXT_FILTER_CFM_STATUS;
+
+ return br_info_notify(event, port->br, NULL, filter);
+}
+
+static void cc_peer_enable(struct br_cfm_peer_mep *peer_mep)
+{
+ memset(&peer_mep->cc_status, 0, sizeof(peer_mep->cc_status));
+ peer_mep->ccm_rx_count_miss = 0;
+
+ ccm_rx_timer_start(peer_mep);
+}
+
+static void cc_peer_disable(struct br_cfm_peer_mep *peer_mep)
+{
+ cancel_delayed_work_sync(&peer_mep->ccm_rx_dwork);
+}
+
+static struct sk_buff *ccm_frame_build(struct br_cfm_mep *mep,
+ const struct br_cfm_cc_ccm_tx_info *const tx_info)
+
+{
+ struct br_cfm_common_hdr *common_hdr;
+ struct net_bridge_port *b_port;
+ struct br_cfm_maid *maid;
+ u8 *itu_reserved, *e_tlv;
+ struct ethhdr *eth_hdr;
+ struct sk_buff *skb;
+ __be32 *status_tlv;
+ __be32 *snumber;
+ __be16 *mepid;
+
+ skb = dev_alloc_skb(CFM_CCM_MAX_FRAME_LENGTH);
+ if (!skb)
+ return NULL;
+
+ rcu_read_lock();
+ b_port = rcu_dereference(mep->b_port);
+ if (!b_port) {
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return NULL;
+ }
+ skb->dev = b_port->dev;
+ rcu_read_unlock();
+ /* The device cannot be deleted until the work_queue functions have
+ * completed. This function is called from ccm_tx_work_expired(),
+ * which is a work_queue function.
+ */
+
+ skb->protocol = htons(ETH_P_CFM);
+ skb->priority = CFM_FRAME_PRIO;
+
+ /* Ethernet header */
+ eth_hdr = skb_put(skb, sizeof(*eth_hdr));
+ ether_addr_copy(eth_hdr->h_dest, tx_info->dmac.addr);
+ ether_addr_copy(eth_hdr->h_source, mep->config.unicast_mac.addr);
+ eth_hdr->h_proto = htons(ETH_P_CFM);
+
+ /* Common CFM Header */
+ common_hdr = skb_put(skb, sizeof(*common_hdr));
+ common_hdr->mdlevel_version = mep->config.mdlevel << 5;
+ common_hdr->opcode = BR_CFM_OPCODE_CCM;
+ common_hdr->flags = (mep->rdi << 7) |
+ interval_to_pdu(mep->cc_config.exp_interval);
+ common_hdr->tlv_offset = CFM_CCM_TLV_OFFSET;
+
+ /* Sequence number */
+ snumber = skb_put(skb, sizeof(*snumber));
+ if (tx_info->seq_no_update) {
+ *snumber = cpu_to_be32(mep->ccm_tx_snumber);
+ mep->ccm_tx_snumber += 1;
+ } else {
+ *snumber = 0;
+ }
+
+ mepid = skb_put(skb, sizeof(*mepid));
+ *mepid = cpu_to_be16((u16)mep->config.mepid);
+
+ maid = skb_put(skb, sizeof(*maid));
+ memcpy(maid->data, mep->cc_config.exp_maid.data, sizeof(maid->data));
+
+ /* ITU reserved (CFM_CCM_ITU_RESERVED_SIZE octets) */
+ itu_reserved = skb_put(skb, CFM_CCM_ITU_RESERVED_SIZE);
+ memset(itu_reserved, 0, CFM_CCM_ITU_RESERVED_SIZE);
+
+ /* General CFM TLV format:
+ * TLV type: one byte
+ * TLV value length: two bytes
+ * TLV value: 'TLV value length' bytes
+ */
+
+ /* Port status TLV. The value length is 1. Total of 4 bytes. */
+ if (tx_info->port_tlv) {
+ status_tlv = skb_put(skb, sizeof(*status_tlv));
+ *status_tlv = cpu_to_be32((CFM_PORT_STATUS_TLV_TYPE << 24) |
+ (1 << 8) | /* Value length */
+ (tx_info->port_tlv_value & 0xFF));
+ }
+
+ /* Interface status TLV. The value length is 1. Total of 4 bytes. */
+ if (tx_info->if_tlv) {
+ status_tlv = skb_put(skb, sizeof(*status_tlv));
+ *status_tlv = cpu_to_be32((CFM_IF_STATUS_TLV_TYPE << 24) |
+ (1 << 8) | /* Value length */
+ (tx_info->if_tlv_value & 0xFF));
+ }
+
+ /* End TLV */
+ e_tlv = skb_put(skb, sizeof(*e_tlv));
+ *e_tlv = CFM_ENDE_TLV_TYPE;
+
+ return skb;
+}
+
+static void ccm_frame_tx(struct sk_buff *skb)
+{
+ skb_reset_network_header(skb);
+ dev_queue_xmit(skb);
+}
+
+/* This function is called at the configured CC 'expected_interval'
+ * in order to drive CCM transmission when enabled.
+ */
+static void ccm_tx_work_expired(struct work_struct *work)
+{
+ struct delayed_work *del_work;
+ struct br_cfm_mep *mep;
+ struct sk_buff *skb;
+ u32 interval_us;
+
+ del_work = to_delayed_work(work);
+ mep = container_of(del_work, struct br_cfm_mep, ccm_tx_dwork);
+
+ if (time_before_eq(mep->ccm_tx_end, jiffies)) {
+ /* Transmission period has ended */
+ mep->cc_ccm_tx_info.period = 0;
+ return;
+ }
+
+ skb = ccm_frame_build(mep, &mep->cc_ccm_tx_info);
+ if (skb)
+ ccm_frame_tx(skb);
+
+ interval_us = interval_to_us(mep->cc_config.exp_interval);
+ queue_delayed_work(system_wq, &mep->ccm_tx_dwork,
+ usecs_to_jiffies(interval_us));
+}
+
+/* This function is called at 1/4 of the configured CC 'expected_interval'
+ * in order to detect a CCM defect after 3.25 intervals.
+ */
+static void ccm_rx_work_expired(struct work_struct *work)
+{
+ struct br_cfm_peer_mep *peer_mep;
+ struct net_bridge_port *b_port;
+ struct delayed_work *del_work;
+
+ del_work = to_delayed_work(work);
+ peer_mep = container_of(del_work, struct br_cfm_peer_mep, ccm_rx_dwork);
+
+ /* After 13 counts (4 * 3.25), 3.25 intervals have expired */
+ if (peer_mep->ccm_rx_count_miss < 13) {
+ /* 3.25 intervals have NOT expired without CCM reception */
+ peer_mep->ccm_rx_count_miss++;
+
+ /* Start timer again */
+ ccm_rx_timer_start(peer_mep);
+ } else {
+ /* 3.25 intervals have expired without CCM reception.
+ * CCM defect detected
+ */
+ peer_mep->cc_status.ccm_defect = true;
+
+ /* Change in CCM defect status - notify */
+ rcu_read_lock();
+ b_port = rcu_dereference(peer_mep->mep->b_port);
+ if (b_port)
+ br_cfm_notify(RTM_NEWLINK, b_port);
+ rcu_read_unlock();
+ }
+}
+
+static u32 ccm_tlv_extract(struct sk_buff *skb, u32 index,
+ struct br_cfm_peer_mep *peer_mep)
+{
+ __be32 *s_tlv;
+ __be32 _s_tlv;
+ u32 h_s_tlv;
+ u8 *e_tlv;
+ u8 _e_tlv;
+
+ e_tlv = skb_header_pointer(skb, index, sizeof(_e_tlv), &_e_tlv);
+ if (!e_tlv)
+ return 0;
+
+ /* TLV is present - get the status TLV */
+ s_tlv = skb_header_pointer(skb,
+ index,
+ sizeof(_s_tlv), &_s_tlv);
+ if (!s_tlv)
+ return 0;
+
+ h_s_tlv = ntohl(*s_tlv);
+ if ((h_s_tlv >> 24) == CFM_IF_STATUS_TLV_TYPE) {
+ /* Interface status TLV */
+ peer_mep->cc_status.tlv_seen = true;
+ peer_mep->cc_status.if_tlv_value = (h_s_tlv & 0xFF);
+ }
+
+ if ((h_s_tlv >> 24) == CFM_PORT_STATUS_TLV_TYPE) {
+ /* Port status TLV */
+ peer_mep->cc_status.tlv_seen = true;
+ peer_mep->cc_status.port_tlv_value = (h_s_tlv & 0xFF);
+ }
+
+ /* The Sender ID TLV is not handled */
+ /* The Organization-Specific TLV is not handled */
+
+ /* Return the length of this TLV.
+ * This is the length of the value field plus 3 bytes for the size of
+ * the type and length fields.
+ */
+ return ((h_s_tlv >> 8) & 0xFFFF) + 3;
+}
+
+/* note: already called with rcu_read_lock */
+static int br_cfm_frame_rx(struct net_bridge_port *port, struct sk_buff *skb)
+{
+ u32 mdlevel, interval, size, index, max;
+ const struct br_cfm_common_hdr *hdr;
+ struct br_cfm_peer_mep *peer_mep;
+ const struct br_cfm_maid *maid;
+ struct br_cfm_common_hdr _hdr;
+ struct br_cfm_maid _maid;
+ struct br_cfm_mep *mep;
+ struct net_bridge *br;
+ __be32 *snumber;
+ __be32 _snumber;
+ __be16 *mepid;
+ __be16 _mepid;
+
+ if (port->state == BR_STATE_DISABLED)
+ return 0;
+
+ hdr = skb_header_pointer(skb, 0, sizeof(_hdr), &_hdr);
+ if (!hdr)
+ return 1;
+
+ br = port->br;
+ mep = br_mep_find_ifindex(br, port->dev->ifindex);
+ if (unlikely(!mep))
+ /* No MEP on this port - must be forwarded */
+ return 0;
+
+ mdlevel = hdr->mdlevel_version >> 5;
+ if (mdlevel > mep->config.mdlevel)
+ /* The level is above this MEP level - must be forwarded */
+ return 0;
+
+ if ((hdr->mdlevel_version & 0x1F) != 0) {
+ /* Invalid version */
+ mep->status.version_unexp_seen = true;
+ return 1;
+ }
+
+ if (mdlevel < mep->config.mdlevel) {
+ /* The level is below this MEP level */
+ mep->status.rx_level_low_seen = true;
+ return 1;
+ }
+
+ if (hdr->opcode == BR_CFM_OPCODE_CCM) {
+ /* CCM PDU received. */
+ /* MA ID is after common header + sequence number + MEP ID */
+ maid = skb_header_pointer(skb,
+ CFM_CCM_PDU_MAID_OFFSET,
+ sizeof(_maid), &_maid);
+ if (!maid)
+ return 1;
+ if (memcmp(maid->data, mep->cc_config.exp_maid.data,
+ sizeof(maid->data)))
+ /* MA ID not as expected */
+ return 1;
+
+ /* MEP ID is after common header + sequence number */
+ mepid = skb_header_pointer(skb,
+ CFM_CCM_PDU_MEPID_OFFSET,
+ sizeof(_mepid), &_mepid);
+ if (!mepid)
+ return 1;
+ peer_mep = br_peer_mep_find(mep, (u32)ntohs(*mepid));
+ if (!peer_mep)
+ return 1;
+
+ /* Interval is in common header flags */
+ interval = hdr->flags & 0x07;
+ if (mep->cc_config.exp_interval != pdu_to_interval(interval))
+ /* Interval not as expected */
+ return 1;
+
+ /* A valid CCM frame is received */
+ if (peer_mep->cc_status.ccm_defect) {
+ peer_mep->cc_status.ccm_defect = false;
+
+ /* Change in CCM defect status - notify */
+ br_cfm_notify(RTM_NEWLINK, port);
+
+ /* Start CCM RX timer */
+ ccm_rx_timer_start(peer_mep);
+ }
+
+ peer_mep->cc_status.seen = true;
+ peer_mep->ccm_rx_count_miss = 0;
+
+ /* RDI is in common header flags */
+ peer_mep->cc_status.rdi = !!(hdr->flags & 0x80);
+
+ /* Sequence number is after common header */
+ snumber = skb_header_pointer(skb,
+ CFM_CCM_PDU_SEQNR_OFFSET,
+ sizeof(_snumber), &_snumber);
+ if (!snumber)
+ return 1;
+ if (ntohl(*snumber) != (mep->ccm_rx_snumber + 1))
+ /* Unexpected sequence number */
+ peer_mep->cc_status.seq_unexp_seen = true;
+
+ mep->ccm_rx_snumber = ntohl(*snumber);
+
+ /* The TLVs start after common header + sequence number + MEP ID +
+ * MA ID + ITU reserved
+ */
+ index = CFM_CCM_PDU_TLV_OFFSET;
+ max = 0;
+ do { /* Handle all TLVs */
+ size = ccm_tlv_extract(skb, index, peer_mep);
+ index += size;
+ max += 1;
+ } while (size != 0 && max < 4); /* Max four TLVs possible */
+
+ return 1;
+ }
+
+ mep->status.opcode_unexp_seen = true;
+
+ return 1;
+}
+
+static struct br_frame_type cfm_frame_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_CFM),
+ .frame_handler = br_cfm_frame_rx,
+};
+
+int br_cfm_mep_create(struct net_bridge *br,
+ const u32 instance,
+ struct br_cfm_mep_create *const create,
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge_port *p;
+ struct br_cfm_mep *mep;
+
+ ASSERT_RTNL();
+
+ if (create->domain == BR_CFM_VLAN) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN domain not supported");
+ return -EINVAL;
+ }
+ if (create->domain != BR_CFM_PORT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid domain value");
+ return -EINVAL;
+ }
+ if (create->direction == BR_CFM_MEP_DIRECTION_UP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Up-MEP not supported");
+ return -EINVAL;
+ }
+ if (create->direction != BR_CFM_MEP_DIRECTION_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid direction value");
+ return -EINVAL;
+ }
+ p = br_mep_get_port(br, create->ifindex);
+ if (!p) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port is not related to bridge");
+ return -EINVAL;
+ }
+ mep = br_mep_find(br, instance);
+ if (mep) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MEP instance already exists");
+ return -EEXIST;
+ }
+
+ /* In PORT domain only one instance can be created per port */
+ if (create->domain == BR_CFM_PORT) {
+ mep = br_mep_find_ifindex(br, create->ifindex);
+ if (mep) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one Port MEP on a port allowed");
+ return -EINVAL;
+ }
+ }
+
+ mep = kzalloc(sizeof(*mep), GFP_KERNEL);
+ if (!mep)
+ return -ENOMEM;
+
+ mep->create = *create;
+ mep->instance = instance;
+ rcu_assign_pointer(mep->b_port, p);
+
+ INIT_HLIST_HEAD(&mep->peer_mep_list);
+ INIT_DELAYED_WORK(&mep->ccm_tx_dwork, ccm_tx_work_expired);
+
+ if (hlist_empty(&br->mep_list))
+ br_add_frame(br, &cfm_frame_type);
+
+ hlist_add_tail_rcu(&mep->head, &br->mep_list);
+
+ return 0;
+}
+
+static void mep_delete_implementation(struct net_bridge *br,
+ struct br_cfm_mep *mep)
+{
+ struct br_cfm_peer_mep *peer_mep;
+ struct hlist_node *n_store;
+
+ ASSERT_RTNL();
+
+ /* Empty and free peer MEP list */
+ hlist_for_each_entry_safe(peer_mep, n_store, &mep->peer_mep_list, head) {
+ cancel_delayed_work_sync(&peer_mep->ccm_rx_dwork);
+ hlist_del_rcu(&peer_mep->head);
+ kfree_rcu(peer_mep, rcu);
+ }
+
+ cancel_delayed_work_sync(&mep->ccm_tx_dwork);
+
+ RCU_INIT_POINTER(mep->b_port, NULL);
+ hlist_del_rcu(&mep->head);
+ kfree_rcu(mep, rcu);
+
+ if (hlist_empty(&br->mep_list))
+ br_del_frame(br, &cfm_frame_type);
+}
+
+int br_cfm_mep_delete(struct net_bridge *br,
+ const u32 instance,
+ struct netlink_ext_ack *extack)
+{
+ struct br_cfm_mep *mep;
+
+ ASSERT_RTNL();
+
+ mep = br_mep_find(br, instance);
+ if (!mep) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MEP instance does not exists");
+ return -ENOENT;
+ }
+
+ mep_delete_implementation(br, mep);
+
+ return 0;
+}
+
+int br_cfm_mep_config_set(struct net_bridge *br,
+ const u32 instance,
+ const struct br_cfm_mep_config *const config,
+ struct netlink_ext_ack *extack)
+{
+ struct br_cfm_mep *mep;
+
+ ASSERT_RTNL();
+
+ mep = br_mep_find(br, instance);
+ if (!mep) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MEP instance does not exists");
+ return -ENOENT;
+ }
+
+ mep->config = *config;
+
+ return 0;
+}
+
+int br_cfm_cc_config_set(struct net_bridge *br,
+ const u32 instance,
+ const struct br_cfm_cc_config *const config,
+ struct netlink_ext_ack *extack)
+{
+ struct br_cfm_peer_mep *peer_mep;
+ struct br_cfm_mep *mep;
+
+ ASSERT_RTNL();
+
+ mep = br_mep_find(br, instance);
+ if (!mep) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MEP instance does not exists");
+ return -ENOENT;
+ }
+
+ /* Check for no change in configuration */
+ if (memcmp(config, &mep->cc_config, sizeof(*config)) == 0)
+ return 0;
+
+ if (config->enable && !mep->cc_config.enable)
+ /* CC is enabled */
+ hlist_for_each_entry(peer_mep, &mep->peer_mep_list, head)
+ cc_peer_enable(peer_mep);
+
+ if (!config->enable && mep->cc_config.enable)
+ /* CC is disabled */
+ hlist_for_each_entry(peer_mep, &mep->peer_mep_list, head)
+ cc_peer_disable(peer_mep);
+
+ mep->cc_config = *config;
+ mep->ccm_rx_snumber = 0;
+ mep->ccm_tx_snumber = 1;
+
+ return 0;
+}
+
+int br_cfm_cc_peer_mep_add(struct net_bridge *br, const u32 instance,
+ u32 mepid,
+ struct netlink_ext_ack *extack)
+{
+ struct br_cfm_peer_mep *peer_mep;
+ struct br_cfm_mep *mep;
+
+ ASSERT_RTNL();
+
+ mep = br_mep_find(br, instance);
+ if (!mep) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MEP instance does not exists");
+ return -ENOENT;
+ }
+
+ peer_mep = br_peer_mep_find(mep, mepid);
+ if (peer_mep) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Peer MEP-ID already exists");
+ return -EEXIST;
+ }
+
+ peer_mep = kzalloc(sizeof(*peer_mep), GFP_KERNEL);
+ if (!peer_mep)
+ return -ENOMEM;
+
+ peer_mep->mepid = mepid;
+ peer_mep->mep = mep;
+ INIT_DELAYED_WORK(&peer_mep->ccm_rx_dwork, ccm_rx_work_expired);
+
+ if (mep->cc_config.enable)
+ cc_peer_enable(peer_mep);
+
+ hlist_add_tail_rcu(&peer_mep->head, &mep->peer_mep_list);
+
+ return 0;
+}
+
+int br_cfm_cc_peer_mep_remove(struct net_bridge *br, const u32 instance,
+ u32 mepid,
+ struct netlink_ext_ack *extack)
+{
+ struct br_cfm_peer_mep *peer_mep;
+ struct br_cfm_mep *mep;
+
+ ASSERT_RTNL();
+
+ mep = br_mep_find(br, instance);
+ if (!mep) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MEP instance does not exists");
+ return -ENOENT;
+ }
+
+ peer_mep = br_peer_mep_find(mep, mepid);
+ if (!peer_mep) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Peer MEP-ID does not exists");
+ return -ENOENT;
+ }
+
+ cc_peer_disable(peer_mep);
+
+ hlist_del_rcu(&peer_mep->head);
+ kfree_rcu(peer_mep, rcu);
+
+ return 0;
+}
+
+int br_cfm_cc_rdi_set(struct net_bridge *br, const u32 instance,
+ const bool rdi, struct netlink_ext_ack *extack)
+{
+ struct br_cfm_mep *mep;
+
+ ASSERT_RTNL();
+
+ mep = br_mep_find(br, instance);
+ if (!mep) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MEP instance does not exists");
+ return -ENOENT;
+ }
+
+ mep->rdi = rdi;
+
+ return 0;
+}
+
+int br_cfm_cc_ccm_tx(struct net_bridge *br, const u32 instance,
+ const struct br_cfm_cc_ccm_tx_info *const tx_info,
+ struct netlink_ext_ack *extack)
+{
+ struct br_cfm_mep *mep;
+
+ ASSERT_RTNL();
+
+ mep = br_mep_find(br, instance);
+ if (!mep) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MEP instance does not exists");
+ return -ENOENT;
+ }
+
+ if (memcmp(tx_info, &mep->cc_ccm_tx_info, sizeof(*tx_info)) == 0) {
+ /* No change in tx_info. */
+ if (mep->cc_ccm_tx_info.period == 0)
+ /* Transmission is not enabled - just return */
+ return 0;
+
+ /* Transmission is ongoing, the end time is recalculated */
+ mep->ccm_tx_end = jiffies +
+ usecs_to_jiffies(tx_info->period * 1000000);
+ return 0;
+ }
+
+ if (tx_info->period == 0 && mep->cc_ccm_tx_info.period == 0)
+ /* Some change in info and transmission is not ongoing */
+ goto save;
+
+ if (tx_info->period != 0 && mep->cc_ccm_tx_info.period != 0) {
+ /* Some change in info and transmission is ongoing
+ * The end time is recalculated
+ */
+ mep->ccm_tx_end = jiffies +
+ usecs_to_jiffies(tx_info->period * 1000000);
+
+ goto save;
+ }
+
+ if (tx_info->period == 0 && mep->cc_ccm_tx_info.period != 0) {
+ cancel_delayed_work_sync(&mep->ccm_tx_dwork);
+ goto save;
+ }
+
+ /* Start delayed work to transmit CCM frames. It is done with zero delay
+ * to send the first frame immediately
+ */
+ mep->ccm_tx_end = jiffies + usecs_to_jiffies(tx_info->period * 1000000);
+ queue_delayed_work(system_wq, &mep->ccm_tx_dwork, 0);
+
+save:
+ mep->cc_ccm_tx_info = *tx_info;
+
+ return 0;
+}
+
+int br_cfm_mep_count(struct net_bridge *br, u32 *count)
+{
+ struct br_cfm_mep *mep;
+
+ *count = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mep, &br->mep_list, head)
+ *count += 1;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+int br_cfm_peer_mep_count(struct net_bridge *br, u32 *count)
+{
+ struct br_cfm_peer_mep *peer_mep;
+ struct br_cfm_mep *mep;
+
+ *count = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mep, &br->mep_list, head)
+ hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head)
+ *count += 1;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+bool br_cfm_created(struct net_bridge *br)
+{
+ return !hlist_empty(&br->mep_list);
+}
+
+/* Deletes the CFM instances on a specific bridge port
+ */
+void br_cfm_port_del(struct net_bridge *br, struct net_bridge_port *port)
+{
+ struct hlist_node *n_store;
+ struct br_cfm_mep *mep;
+
+ ASSERT_RTNL();
+
+ hlist_for_each_entry_safe(mep, n_store, &br->mep_list, head)
+ if (mep->create.ifindex == port->dev->ifindex)
+ mep_delete_implementation(br, mep);
+}
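The loss-detection arithmetic used throughout br_cfm.c is worth making concrete: ccm_rx_dwork runs at a quarter of the expected interval and declares a defect after 13 missed polls, i.e. 13 * (interval / 4) = 3.25 intervals without a CCM frame. Worked through for the 100 ms interval:

        u32 interval_us = interval_to_us(BR_CFM_CCM_INTERVAL_100_MS); /* 100000 */
        u32 poll_us = interval_us / 4;   /* 25000 us per ccm_rx_dwork run */
        u32 defect_us = 13 * poll_us;    /* 325000 us = 3.25 intervals */
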
diff --git a/net/bridge/br_cfm_netlink.c b/net/bridge/br_cfm_netlink.c
new file mode 100644
index 000000000000..5c4c369f8536
--- /dev/null
+++ b/net/bridge/br_cfm_netlink.c
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <net/genetlink.h>
+
+#include "br_private.h"
+#include "br_private_cfm.h"
+
+static const struct nla_policy
+br_cfm_mep_create_policy[IFLA_BRIDGE_CFM_MEP_CREATE_MAX + 1] = {
+ [IFLA_BRIDGE_CFM_MEP_CREATE_UNSPEC] = { .type = NLA_REJECT },
+ [IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy
+br_cfm_mep_delete_policy[IFLA_BRIDGE_CFM_MEP_DELETE_MAX + 1] = {
+ [IFLA_BRIDGE_CFM_MEP_DELETE_UNSPEC] = { .type = NLA_REJECT },
+ [IFLA_BRIDGE_CFM_MEP_DELETE_INSTANCE] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy
+br_cfm_mep_config_policy[IFLA_BRIDGE_CFM_MEP_CONFIG_MAX + 1] = {
+ [IFLA_BRIDGE_CFM_MEP_CONFIG_UNSPEC] = { .type = NLA_REJECT },
+ [IFLA_BRIDGE_CFM_MEP_CONFIG_INSTANCE] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_MEP_CONFIG_UNICAST_MAC] = NLA_POLICY_ETH_ADDR,
+ [IFLA_BRIDGE_CFM_MEP_CONFIG_MDLEVEL] = NLA_POLICY_MAX(NLA_U32, 7),
+ [IFLA_BRIDGE_CFM_MEP_CONFIG_MEPID] = NLA_POLICY_MAX(NLA_U32, 0x1FFF),
+};
+
+static const struct nla_policy
+br_cfm_cc_config_policy[IFLA_BRIDGE_CFM_CC_CONFIG_MAX + 1] = {
+ [IFLA_BRIDGE_CFM_CC_CONFIG_UNSPEC] = { .type = NLA_REJECT },
+ [IFLA_BRIDGE_CFM_CC_CONFIG_INSTANCE] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_CC_CONFIG_ENABLE] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_CC_CONFIG_EXP_INTERVAL] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_CC_CONFIG_EXP_MAID] = {
+ .type = NLA_BINARY, .len = CFM_MAID_LENGTH },
+};
+
+static const struct nla_policy
+br_cfm_cc_peer_mep_policy[IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX + 1] = {
+ [IFLA_BRIDGE_CFM_CC_PEER_MEP_UNSPEC] = { .type = NLA_REJECT },
+ [IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_CC_PEER_MEPID] = NLA_POLICY_MAX(NLA_U32, 0x1FFF),
+};
+
+static const struct nla_policy
+br_cfm_cc_rdi_policy[IFLA_BRIDGE_CFM_CC_RDI_MAX + 1] = {
+ [IFLA_BRIDGE_CFM_CC_RDI_UNSPEC] = { .type = NLA_REJECT },
+ [IFLA_BRIDGE_CFM_CC_RDI_INSTANCE] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_CC_RDI_RDI] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy
+br_cfm_cc_ccm_tx_policy[IFLA_BRIDGE_CFM_CC_CCM_TX_MAX + 1] = {
+ [IFLA_BRIDGE_CFM_CC_CCM_TX_UNSPEC] = { .type = NLA_REJECT },
+ [IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC] = NLA_POLICY_ETH_ADDR,
+ [IFLA_BRIDGE_CFM_CC_CCM_TX_SEQ_NO_UPDATE] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_CC_CCM_TX_PERIOD] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV_VALUE] = { .type = NLA_U8 },
+ [IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV] = { .type = NLA_U32 },
+ [IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV_VALUE] = { .type = NLA_U8 },
+};
+
+static const struct nla_policy
+br_cfm_policy[IFLA_BRIDGE_CFM_MAX + 1] = {
+ [IFLA_BRIDGE_CFM_UNSPEC] = { .type = NLA_REJECT },
+ [IFLA_BRIDGE_CFM_MEP_CREATE] =
+ NLA_POLICY_NESTED(br_cfm_mep_create_policy),
+ [IFLA_BRIDGE_CFM_MEP_DELETE] =
+ NLA_POLICY_NESTED(br_cfm_mep_delete_policy),
+ [IFLA_BRIDGE_CFM_MEP_CONFIG] =
+ NLA_POLICY_NESTED(br_cfm_mep_config_policy),
+ [IFLA_BRIDGE_CFM_CC_CONFIG] =
+ NLA_POLICY_NESTED(br_cfm_cc_config_policy),
+ [IFLA_BRIDGE_CFM_CC_PEER_MEP_ADD] =
+ NLA_POLICY_NESTED(br_cfm_cc_peer_mep_policy),
+ [IFLA_BRIDGE_CFM_CC_PEER_MEP_REMOVE] =
+ NLA_POLICY_NESTED(br_cfm_cc_peer_mep_policy),
+ [IFLA_BRIDGE_CFM_CC_RDI] =
+ NLA_POLICY_NESTED(br_cfm_cc_rdi_policy),
+ [IFLA_BRIDGE_CFM_CC_CCM_TX] =
+ NLA_POLICY_NESTED(br_cfm_cc_ccm_tx_policy),
+};
+
+static int br_mep_create_parse(struct net_bridge *br, struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[IFLA_BRIDGE_CFM_MEP_CREATE_MAX + 1];
+ struct br_cfm_mep_create create;
+ u32 instance;
+ int err;
+
+ err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_MEP_CREATE_MAX, attr,
+ br_cfm_mep_create_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing DOMAIN attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing DIRECTION attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing IFINDEX attribute");
+ return -EINVAL;
+ }
+
+ memset(&create, 0, sizeof(create));
+
+ instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE]);
+ create.domain = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN]);
+ create.direction = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION]);
+ create.ifindex = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX]);
+
+ return br_cfm_mep_create(br, instance, &create, extack);
+}
+
+static int br_mep_delete_parse(struct net_bridge *br, struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[IFLA_BRIDGE_CFM_MEP_DELETE_MAX + 1];
+ u32 instance;
+ int err;
+
+ err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_MEP_DELETE_MAX, attr,
+ br_cfm_mep_delete_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[IFLA_BRIDGE_CFM_MEP_DELETE_INSTANCE]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Missing INSTANCE attribute");
+ return -EINVAL;
+ }
+
+ instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_DELETE_INSTANCE]);
+
+ return br_cfm_mep_delete(br, instance, extack);
+}
+
+static int br_mep_config_parse(struct net_bridge *br, struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[IFLA_BRIDGE_CFM_MEP_CONFIG_MAX + 1];
+ struct br_cfm_mep_config config;
+ u32 instance;
+ int err;
+
+ err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_MEP_CONFIG_MAX, attr,
+ br_cfm_mep_config_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[IFLA_BRIDGE_CFM_MEP_CONFIG_INSTANCE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_MEP_CONFIG_UNICAST_MAC]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing UNICAST_MAC attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_MEP_CONFIG_MDLEVEL]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing MDLEVEL attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_MEP_CONFIG_MEPID]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing MEPID attribute");
+ return -EINVAL;
+ }
+
+ memset(&config, 0, sizeof(config));
+
+ instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CONFIG_INSTANCE]);
+ nla_memcpy(&config.unicast_mac.addr,
+ tb[IFLA_BRIDGE_CFM_MEP_CONFIG_UNICAST_MAC],
+ sizeof(config.unicast_mac.addr));
+ config.mdlevel = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CONFIG_MDLEVEL]);
+ config.mepid = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CONFIG_MEPID]);
+
+ return br_cfm_mep_config_set(br, instance, &config, extack);
+}
+
+static int br_cc_config_parse(struct net_bridge *br, struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[IFLA_BRIDGE_CFM_CC_CONFIG_MAX + 1];
+ struct br_cfm_cc_config config;
+ u32 instance;
+ int err;
+
+ err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_CC_CONFIG_MAX, attr,
+ br_cfm_cc_config_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[IFLA_BRIDGE_CFM_CC_CONFIG_INSTANCE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_CONFIG_ENABLE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing ENABLE attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_CONFIG_EXP_INTERVAL]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing INTERVAL attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_CONFIG_EXP_MAID]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing MAID attribute");
+ return -EINVAL;
+ }
+
+ memset(&config, 0, sizeof(config));
+
+ instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CONFIG_INSTANCE]);
+ config.enable = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CONFIG_ENABLE]);
+ config.exp_interval = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CONFIG_EXP_INTERVAL]);
+ nla_memcpy(&config.exp_maid.data, tb[IFLA_BRIDGE_CFM_CC_CONFIG_EXP_MAID],
+ sizeof(config.exp_maid.data));
+
+ return br_cfm_cc_config_set(br, instance, &config, extack);
+}
+
+static int br_cc_peer_mep_add_parse(struct net_bridge *br, struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX + 1];
+ u32 instance, peer_mep_id;
+ int err;
+
+ err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX, attr,
+ br_cfm_cc_peer_mep_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_PEER_MEPID]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing PEER_MEP_ID attribute");
+ return -EINVAL;
+ }
+
+ instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE]);
+ peer_mep_id = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_PEER_MEPID]);
+
+ return br_cfm_cc_peer_mep_add(br, instance, peer_mep_id, extack);
+}
+
+static int br_cc_peer_mep_remove_parse(struct net_bridge *br, struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX + 1];
+ u32 instance, peer_mep_id;
+ int err;
+
+ err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_CC_PEER_MEP_MAX, attr,
+ br_cfm_cc_peer_mep_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_PEER_MEPID]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing PEER_MEP_ID attribute");
+ return -EINVAL;
+ }
+
+ instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE]);
+ peer_mep_id = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_PEER_MEPID]);
+
+ return br_cfm_cc_peer_mep_remove(br, instance, peer_mep_id, extack);
+}
+
+static int br_cc_rdi_parse(struct net_bridge *br, struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[IFLA_BRIDGE_CFM_CC_RDI_MAX + 1];
+ u32 instance, rdi;
+ int err;
+
+ err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_CC_RDI_MAX, attr,
+ br_cfm_cc_rdi_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[IFLA_BRIDGE_CFM_CC_RDI_INSTANCE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_RDI_RDI]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing RDI attribute");
+ return -EINVAL;
+ }
+
+ instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_RDI_INSTANCE]);
+ rdi = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_RDI_RDI]);
+
+ return br_cfm_cc_rdi_set(br, instance, rdi, extack);
+}
+
+static int br_cc_ccm_tx_parse(struct net_bridge *br, struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[IFLA_BRIDGE_CFM_CC_CCM_TX_MAX + 1];
+ struct br_cfm_cc_ccm_tx_info tx_info;
+ u32 instance;
+ int err;
+
+ err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_CC_CCM_TX_MAX, attr,
+ br_cfm_cc_ccm_tx_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing INSTANCE attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing DMAC attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_SEQ_NO_UPDATE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing SEQ_NO_UPDATE attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PERIOD]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing PERIOD attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing IF_TLV attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV_VALUE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing IF_TLV_VALUE attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing PORT_TLV attribute");
+ return -EINVAL;
+ }
+ if (!tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV_VALUE]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing PORT_TLV_VALUE attribute");
+ return -EINVAL;
+ }
+
+ memset(&tx_info, 0, sizeof(tx_info));
+
+ instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE]);
+ nla_memcpy(&tx_info.dmac.addr,
+ tb[IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC],
+ sizeof(tx_info.dmac.addr));
+ tx_info.seq_no_update = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_SEQ_NO_UPDATE]);
+ tx_info.period = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PERIOD]);
+ tx_info.if_tlv = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV]);
+ tx_info.if_tlv_value = nla_get_u8(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV_VALUE]);
+ tx_info.port_tlv = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV]);
+ tx_info.port_tlv_value = nla_get_u8(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV_VALUE]);
+
+ return br_cfm_cc_ccm_tx(br, instance, &tx_info, extack);
+}
+
+int br_cfm_parse(struct net_bridge *br, struct net_bridge_port *p,
+ struct nlattr *attr, int cmd, struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[IFLA_BRIDGE_CFM_MAX + 1];
+ int err;
+
+ /* When this function is called for a port then the br pointer is
+ * invalid, therefore set the br to point correctly
+ */
+ if (p)
+ br = p->br;
+
+ err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_MAX, attr,
+ br_cfm_policy, extack);
+ if (err)
+ return err;
+
+ if (tb[IFLA_BRIDGE_CFM_MEP_CREATE]) {
+ err = br_mep_create_parse(br, tb[IFLA_BRIDGE_CFM_MEP_CREATE],
+ extack);
+ if (err)
+ return err;
+ }
+
+ if (tb[IFLA_BRIDGE_CFM_MEP_DELETE]) {
+ err = br_mep_delete_parse(br, tb[IFLA_BRIDGE_CFM_MEP_DELETE],
+ extack);
+ if (err)
+ return err;
+ }
+
+ if (tb[IFLA_BRIDGE_CFM_MEP_CONFIG]) {
+ err = br_mep_config_parse(br, tb[IFLA_BRIDGE_CFM_MEP_CONFIG],
+ extack);
+ if (err)
+ return err;
+ }
+
+ if (tb[IFLA_BRIDGE_CFM_CC_CONFIG]) {
+ err = br_cc_config_parse(br, tb[IFLA_BRIDGE_CFM_CC_CONFIG],
+ extack);
+ if (err)
+ return err;
+ }
+
+ if (tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_ADD]) {
+ err = br_cc_peer_mep_add_parse(br, tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_ADD],
+ extack);
+ if (err)
+ return err;
+ }
+
+ if (tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_REMOVE]) {
+ err = br_cc_peer_mep_remove_parse(br, tb[IFLA_BRIDGE_CFM_CC_PEER_MEP_REMOVE],
+ extack);
+ if (err)
+ return err;
+ }
+
+ if (tb[IFLA_BRIDGE_CFM_CC_RDI]) {
+ err = br_cc_rdi_parse(br, tb[IFLA_BRIDGE_CFM_CC_RDI],
+ extack);
+ if (err)
+ return err;
+ }
+
+ if (tb[IFLA_BRIDGE_CFM_CC_CCM_TX]) {
+ err = br_cc_ccm_tx_parse(br, tb[IFLA_BRIDGE_CFM_CC_CCM_TX],
+ extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int br_cfm_config_fill_info(struct sk_buff *skb, struct net_bridge *br)
+{
+ struct br_cfm_peer_mep *peer_mep;
+ struct br_cfm_mep *mep;
+ struct nlattr *tb;
+
+ hlist_for_each_entry_rcu(mep, &br->mep_list, head) {
+ tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_MEP_CREATE_INFO);
+ if (!tb)
+ goto nla_info_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE,
+ mep->instance))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN,
+ mep->create.domain))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION,
+ mep->create.direction))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX,
+ mep->create.ifindex))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, tb);
+
+ tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_MEP_CONFIG_INFO);
+
+ if (!tb)
+ goto nla_info_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CONFIG_INSTANCE,
+ mep->instance))
+ goto nla_put_failure;
+
+ if (nla_put(skb, IFLA_BRIDGE_CFM_MEP_CONFIG_UNICAST_MAC,
+ sizeof(mep->config.unicast_mac.addr),
+ mep->config.unicast_mac.addr))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CONFIG_MDLEVEL,
+ mep->config.mdlevel))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CONFIG_MEPID,
+ mep->config.mepid))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, tb);
+
+ tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_CC_CONFIG_INFO);
+
+ if (!tb)
+ goto nla_info_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CONFIG_INSTANCE,
+ mep->instance))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CONFIG_ENABLE,
+ mep->cc_config.enable))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CONFIG_EXP_INTERVAL,
+ mep->cc_config.exp_interval))
+ goto nla_put_failure;
+
+ if (nla_put(skb, IFLA_BRIDGE_CFM_CC_CONFIG_EXP_MAID,
+ sizeof(mep->cc_config.exp_maid.data),
+ mep->cc_config.exp_maid.data))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, tb);
+
+ tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_CC_RDI_INFO);
+
+ if (!tb)
+ goto nla_info_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_RDI_INSTANCE,
+ mep->instance))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_RDI_RDI,
+ mep->rdi))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, tb);
+
+ tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_INFO);
+
+ if (!tb)
+ goto nla_info_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE,
+ mep->instance))
+ goto nla_put_failure;
+
+ if (nla_put(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC,
+ sizeof(mep->cc_ccm_tx_info.dmac),
+ mep->cc_ccm_tx_info.dmac.addr))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_SEQ_NO_UPDATE,
+ mep->cc_ccm_tx_info.seq_no_update))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_PERIOD,
+ mep->cc_ccm_tx_info.period))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV,
+ mep->cc_ccm_tx_info.if_tlv))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_IF_TLV_VALUE,
+ mep->cc_ccm_tx_info.if_tlv_value))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV,
+ mep->cc_ccm_tx_info.port_tlv))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BRIDGE_CFM_CC_CCM_TX_PORT_TLV_VALUE,
+ mep->cc_ccm_tx_info.port_tlv_value))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, tb);
+
+ hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head) {
+ tb = nla_nest_start(skb,
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_INFO);
+
+ if (!tb)
+ goto nla_info_failure;
+
+ if (nla_put_u32(skb,
+ IFLA_BRIDGE_CFM_CC_PEER_MEP_INSTANCE,
+ mep->instance))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_PEER_MEPID,
+ peer_mep->mepid))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, tb);
+ }
+ }
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, tb);
+
+nla_info_failure:
+ return -EMSGSIZE;
+}
+
+int br_cfm_status_fill_info(struct sk_buff *skb,
+ struct net_bridge *br,
+ bool getlink)
+{
+ struct br_cfm_peer_mep *peer_mep;
+ struct br_cfm_mep *mep;
+ struct nlattr *tb;
+
+ hlist_for_each_entry_rcu(mep, &br->mep_list, head) {
+ tb = nla_nest_start(skb, IFLA_BRIDGE_CFM_MEP_STATUS_INFO);
+ if (!tb)
+ goto nla_info_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_STATUS_INSTANCE,
+ mep->instance))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb,
+ IFLA_BRIDGE_CFM_MEP_STATUS_OPCODE_UNEXP_SEEN,
+ mep->status.opcode_unexp_seen))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb,
+ IFLA_BRIDGE_CFM_MEP_STATUS_VERSION_UNEXP_SEEN,
+ mep->status.version_unexp_seen))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb,
+ IFLA_BRIDGE_CFM_MEP_STATUS_RX_LEVEL_LOW_SEEN,
+ mep->status.rx_level_low_seen))
+ goto nla_put_failure;
+
+ /* Only clear if this is a GETLINK */
+ if (getlink) {
+ /* Clear all 'seen' indications */
+ mep->status.opcode_unexp_seen = false;
+ mep->status.version_unexp_seen = false;
+ mep->status.rx_level_low_seen = false;
+ }
+
+ nla_nest_end(skb, tb);
+
+ hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head) {
+ tb = nla_nest_start(skb,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_INFO);
+ if (!tb)
+ goto nla_info_failure;
+
+ if (nla_put_u32(skb,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_INSTANCE,
+ mep->instance))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_PEER_MEPID,
+ peer_mep->mepid))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_CCM_DEFECT,
+ peer_mep->cc_status.ccm_defect))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BRIDGE_CFM_CC_PEER_STATUS_RDI,
+ peer_mep->cc_status.rdi))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_PORT_TLV_VALUE,
+ peer_mep->cc_status.port_tlv_value))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_IF_TLV_VALUE,
+ peer_mep->cc_status.if_tlv_value))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEEN,
+ peer_mep->cc_status.seen))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_TLV_SEEN,
+ peer_mep->cc_status.tlv_seen))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb,
+ IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEQ_UNEXP_SEEN,
+ peer_mep->cc_status.seq_unexp_seen))
+ goto nla_put_failure;
+
+ if (getlink) { /* Only clear if this is a GETLINK */
+ /* Clear all 'seen' indications */
+ peer_mep->cc_status.seen = false;
+ peer_mep->cc_status.tlv_seen = false;
+ peer_mep->cc_status.seq_unexp_seen = false;
+ }
+
+ nla_nest_end(skb, tb);
+ }
+ }
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, tb);
+
+nla_info_failure:
+ return -EMSGSIZE;
+}
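The parse functions above expect exactly one level of nesting per operation under the CFM container attribute. A sketch of how a sender lays out a MEP-create request with the kernel-style nla helpers (the outer IFLA_AF_SPEC plumbing and error checks are omitted; port_ifindex is illustrative):

        struct nlattr *cfm, *create;

        cfm = nla_nest_start(skb, IFLA_BRIDGE_CFM);
        create = nla_nest_start(skb, IFLA_BRIDGE_CFM_MEP_CREATE);
        nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE, 1);
        nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN, BR_CFM_PORT);
        nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION,
                    BR_CFM_MEP_DIRECTION_DOWN);
        nla_put_u32(skb, IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX, port_ifindex);
        nla_nest_end(skb, create);
        nla_nest_end(skb, cfm);
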
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index d3ea9d0779fb..3f2f06b4dd27 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -30,7 +30,6 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_fdb_entry *dst;
struct net_bridge_mdb_entry *mdst;
- struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
const struct nf_br_ops *nf_ops;
u8 state = BR_STATE_FORWARDING;
const unsigned char *dest;
@@ -45,10 +44,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- u64_stats_update_begin(&brstats->syncp);
- brstats->tx_packets++;
- brstats->tx_bytes += skb->len;
- u64_stats_update_end(&brstats->syncp);
+ dev_sw_netstats_tx_add(dev, 1, skb->len);
br_switchdev_frame_unmark(skb);
BR_INPUT_SKB_CB(skb)->brdev = dev;
@@ -93,7 +89,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
mdst = br_mdb_get(br, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
- br_multicast_querier_exists(br, eth_hdr(skb)))
+ br_multicast_querier_exists(br, eth_hdr(skb), mdst))
br_multicast_flood(mdst, skb, false, true);
else
br_flood(br, skb, BR_PKT_MULTICAST, false, true);
@@ -119,26 +115,26 @@ static int br_dev_init(struct net_device *dev)
struct net_bridge *br = netdev_priv(dev);
int err;
- br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!br->stats)
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
return -ENOMEM;
err = br_fdb_hash_init(br);
if (err) {
- free_percpu(br->stats);
+ free_percpu(dev->tstats);
return err;
}
err = br_mdb_hash_init(br);
if (err) {
- free_percpu(br->stats);
+ free_percpu(dev->tstats);
br_fdb_hash_fini(br);
return err;
}
err = br_vlan_init(br);
if (err) {
- free_percpu(br->stats);
+ free_percpu(dev->tstats);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
return err;
@@ -146,7 +142,7 @@ static int br_dev_init(struct net_device *dev)
err = br_multicast_init_stats(br);
if (err) {
- free_percpu(br->stats);
+ free_percpu(dev->tstats);
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
@@ -165,7 +161,7 @@ static void br_dev_uninit(struct net_device *dev)
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
- free_percpu(br->stats);
+ free_percpu(dev->tstats);
}
static int br_dev_open(struct net_device *dev)
@@ -208,15 +204,6 @@ static int br_dev_stop(struct net_device *dev)
return 0;
}
-static void br_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- struct net_bridge *br = netdev_priv(dev);
-
- netdev_stats_to_stats64(stats, &dev->stats);
- dev_fetch_sw_netstats(stats, br->stats);
-}
-
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
struct net_bridge *br = netdev_priv(dev);
@@ -410,7 +397,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_init = br_dev_init,
.ndo_uninit = br_dev_uninit,
.ndo_start_xmit = br_dev_xmit,
- .ndo_get_stats64 = br_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = br_set_mac_address,
.ndo_set_rx_mode = br_dev_set_multicast_list,
.ndo_change_rx_flags = br_dev_change_rx_flags,
@@ -461,8 +448,12 @@ void br_dev_setup(struct net_device *dev)
spin_lock_init(&br->lock);
INIT_LIST_HEAD(&br->port_list);
INIT_HLIST_HEAD(&br->fdb_list);
+ INIT_HLIST_HEAD(&br->frame_type_list);
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
- INIT_LIST_HEAD(&br->mrp_list);
+ INIT_HLIST_HEAD(&br->mrp_list);
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_CFM)
+ INIT_HLIST_HEAD(&br->mep_list);
#endif
spin_lock_init(&br->hash_lock);
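
The br_device.c changes drop the bridge-private pcpu_sw_netstats allocation in favour of the generic dev->tstats plumbing. A hedged sketch of the resulting driver pattern, using a hypothetical "foo" device (the dev_* helpers are the real core APIs this patch switches to):

	static int foo_dev_init(struct net_device *dev)
	{
		dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
		return dev->tstats ? 0 : -ENOMEM;
	}

	static void foo_dev_uninit(struct net_device *dev)
	{
		free_percpu(dev->tstats);
	}

	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* lockless per-cpu accounting; replaces the open-coded
		 * u64_stats_update_begin()/end() sequences removed above
		 */
		dev_sw_netstats_tx_add(dev, 1, skb->len);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_init	 = foo_dev_init,
		.ndo_uninit	 = foo_dev_uninit,
		.ndo_start_xmit	 = foo_xmit,
		/* core helper sums dev->tstats into rtnl_link_stats64 */
		.ndo_get_stats64 = dev_get_tstats64,
	};
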
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a0e9a7937412..f7d2f472ae24 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -334,6 +334,7 @@ static void del_nbp(struct net_bridge_port *p)
spin_unlock_bh(&br->lock);
br_mrp_port_del(br, p);
+ br_cfm_port_del(br, p);
br_ifinfo_notify(RTM_DELLINK, NULL, p);
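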
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 59a318b9f646..8ca1f1bc6d12 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -35,12 +35,8 @@ static int br_pass_frame_up(struct sk_buff *skb)
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
struct net_bridge_vlan_group *vg;
- struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
- u64_stats_update_begin(&brstats->syncp);
- brstats->rx_packets++;
- brstats->rx_bytes += skb->len;
- u64_stats_update_end(&brstats->syncp);
+ dev_sw_netstats_rx_add(brdev, skb->len);
vg = br_vlan_group_rcu(br);
/* Bridge is just like any other port. Make sure the
@@ -134,7 +130,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
case BR_PKT_MULTICAST:
mdst = br_mdb_get(br, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
- br_multicast_querier_exists(br, eth_hdr(skb))) {
+ br_multicast_querier_exists(br, eth_hdr(skb), mdst)) {
if ((mdst && mdst->host_joined) ||
br_multicast_is_router(br)) {
local_rcv = true;
@@ -254,6 +250,21 @@ frame_finish:
return RX_HANDLER_CONSUMED;
}
+/* Return 0 if the frame was not processed, otherwise 1
+ * note: already called with rcu_read_lock
+ */
+static int br_process_frame_type(struct net_bridge_port *p,
+ struct sk_buff *skb)
+{
+ struct br_frame_type *tmp;
+
+ hlist_for_each_entry_rcu(tmp, &p->br->frame_type_list, list)
+ if (unlikely(tmp->type == skb->protocol))
+ return tmp->frame_handler(p, skb);
+
+ return 0;
+}
+
/*
* Return NULL if skb is handled
* note: already called with rcu_read_lock
@@ -343,7 +354,7 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
}
}
- if (unlikely(br_mrp_process(p, skb)))
+ if (unlikely(br_process_frame_type(p, skb)))
return RX_HANDLER_PASS;
forward:
@@ -380,3 +391,19 @@ rx_handler_func_t *br_get_rx_handler(const struct net_device *dev)
return br_handle_frame;
}
+
+void br_add_frame(struct net_bridge *br, struct br_frame_type *ft)
+{
+ hlist_add_head_rcu(&ft->list, &br->frame_type_list);
+}
+
+void br_del_frame(struct net_bridge *br, struct br_frame_type *ft)
+{
+ struct br_frame_type *tmp;
+
+ hlist_for_each_entry(tmp, &br->frame_type_list, list)
+ if (ft == tmp) {
+ hlist_del_rcu(&ft->list);
+ return;
+ }
+}
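
br_add_frame()/br_del_frame() generalize the hook that used to be hard-wired to MRP: any bridge feature can claim an EtherType and have br_process_frame_type() dispatch matching frames to it under RCU. A hedged sketch of a hypothetical consumer (EtherType and list name are illustrative):

	/* returns nonzero if the frame was consumed; 0 lets the
	 * bridge continue normal forwarding
	 */
	static int foo_frame_handler(struct net_bridge_port *port,
				     struct sk_buff *skb)
	{
		/* inspect or consume skb; see br_mrp_process() for
		 * the real MRP user of this hook
		 */
		return 0;
	}

	static struct br_frame_type foo_frame_type __read_mostly = {
		.type		= cpu_to_be16(0x88ff),	/* illustrative */
		.frame_handler	= foo_frame_handler,
	};

	static void foo_enable(struct net_bridge *br)
	{
		br_add_frame(br, &foo_frame_type);	/* RCU hlist insert */
	}

	static void foo_disable(struct net_bridge *br)
	{
		br_del_frame(br, &foo_frame_type);	/* RCU hlist remove */
	}
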
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index e15bab19a012..8846c5bcd075 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -87,6 +87,8 @@ static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
break;
#endif
+ default:
+ ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
}
}
@@ -174,9 +176,11 @@ static int __mdb_fill_info(struct sk_buff *skb,
if (mp->addr.proto == htons(ETH_P_IP))
e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
- if (mp->addr.proto == htons(ETH_P_IPV6))
+ else if (mp->addr.proto == htons(ETH_P_IPV6))
e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
+ else
+ ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
e.addr.proto = mp->addr.proto;
nest_ent = nla_nest_start_noflag(skb,
MDBA_MDB_ENTRY_INFO);
@@ -210,6 +214,8 @@ static int __mdb_fill_info(struct sk_buff *skb,
}
break;
#endif
+ default:
+ ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
}
if (p) {
if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
@@ -562,9 +568,12 @@ void br_mdb_notify(struct net_device *dev,
if (mp->addr.proto == htons(ETH_P_IP))
ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
- else
+ else if (mp->addr.proto == htons(ETH_P_IPV6))
ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif
+ else
+ ether_addr_copy(mdb.addr, mp->addr.dst.mac_addr);
+
mdb.obj.orig_dev = pg->key.port->dev;
switch (type) {
case RTM_NEWMDB:
@@ -693,6 +702,12 @@ static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
return false;
}
#endif
+ } else if (entry->addr.proto == 0) {
+ /* L2 mdb */
+ if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
+ return false;
+ }
} else {
NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
return false;
@@ -831,6 +846,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
struct net_bridge_port_group __rcu **pp;
struct br_ip group, star_group;
unsigned long now = jiffies;
+ unsigned char flags = 0;
u8 filter_mode;
int err;
@@ -849,6 +865,11 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
}
}
+ if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
+ NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
+ return -EINVAL;
+ }
+
mp = br_mdb_ip_get(br, &group);
if (!mp) {
mp = br_multicast_new_group(br, &group);
@@ -884,7 +905,10 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
MCAST_INCLUDE;
- p = br_multicast_new_port_group(port, &group, *pp, entry->state, NULL,
+ if (entry->state == MDB_PERMANENT)
+ flags |= MDB_PG_FLAGS_PERMANENT;
+
+ p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
filter_mode, RTPROT_STATIC);
if (unlikely(!p)) {
NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c
index b36689e6e7cb..cec2c4e4561d 100644
--- a/net/bridge/br_mrp.c
+++ b/net/bridge/br_mrp.c
@@ -6,6 +6,13 @@
static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };
+static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb);
+
+static struct br_frame_type mrp_frame_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_MRP),
+ .frame_handler = br_mrp_process,
+};
+
static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
struct net_bridge_port *s_port,
struct net_bridge_port *port)
@@ -47,8 +54,8 @@ static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
struct br_mrp *res = NULL;
struct br_mrp *mrp;
- list_for_each_entry_rcu(mrp, &br->mrp_list, list,
- lockdep_rtnl_is_held()) {
+ hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
+ lockdep_rtnl_is_held()) {
if (mrp->ring_id == ring_id) {
res = mrp;
break;
@@ -63,8 +70,8 @@ static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
struct br_mrp *res = NULL;
struct br_mrp *mrp;
- list_for_each_entry_rcu(mrp, &br->mrp_list, list,
- lockdep_rtnl_is_held()) {
+ hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
+ lockdep_rtnl_is_held()) {
if (mrp->in_id == in_id) {
res = mrp;
break;
@@ -78,8 +85,8 @@ static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
{
struct br_mrp *mrp;
- list_for_each_entry_rcu(mrp, &br->mrp_list, list,
- lockdep_rtnl_is_held()) {
+ hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
+ lockdep_rtnl_is_held()) {
struct net_bridge_port *p;
p = rtnl_dereference(mrp->p_port);
@@ -104,8 +111,8 @@ static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
struct br_mrp *res = NULL;
struct br_mrp *mrp;
- list_for_each_entry_rcu(mrp, &br->mrp_list, list,
- lockdep_rtnl_is_held()) {
+ hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
+ lockdep_rtnl_is_held()) {
if (rcu_access_pointer(mrp->p_port) == p ||
rcu_access_pointer(mrp->s_port) == p ||
rcu_access_pointer(mrp->i_port) == p) {
@@ -443,8 +450,11 @@ static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
rcu_assign_pointer(mrp->i_port, NULL);
}
- list_del_rcu(&mrp->list);
+ hlist_del_rcu(&mrp->list);
kfree_rcu(mrp, rcu);
+
+ if (hlist_empty(&br->mrp_list))
+ br_del_frame(br, &mrp_frame_type);
}
/* Adds a new MRP instance.
@@ -493,9 +503,12 @@ int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
spin_unlock_bh(&br->lock);
rcu_assign_pointer(mrp->s_port, p);
+ if (hlist_empty(&br->mrp_list))
+ br_add_frame(br, &mrp_frame_type);
+
INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
- list_add_tail_rcu(&mrp->list, &br->mrp_list);
+ hlist_add_tail_rcu(&mrp->list, &br->mrp_list);
err = br_mrp_switchdev_add(br, mrp);
if (err)
@@ -845,7 +858,8 @@ static bool br_mrp_in_frame(struct sk_buff *skb)
if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
- hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP)
+ hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
+ hdr->type == BR_MRP_TLV_HEADER_IN_LINK_STATUS)
return true;
return false;
@@ -1113,9 +1127,9 @@ static int br_mrp_rcv(struct net_bridge_port *p,
goto no_forward;
}
} else {
- /* MIM should forward IntLinkChange and
+ /* MIM should forward IntLinkChange/Status and
* IntTopoChange between ring ports but MIM
- * should not forward IntLinkChange and
+ * should not forward IntLinkChange/Status and
* IntTopoChange if the frame was received at
* the interconnect port
*/
@@ -1142,6 +1156,17 @@ static int br_mrp_rcv(struct net_bridge_port *p,
in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
goto forward;
+ /* MIC should forward IntLinkStatus frames only to the
+ * interconnect port if they were received on a ring port.
+ * If a frame is received on the interconnect port, it
+ * should be forwarded on both ring ports
+ */
+ if (br_mrp_is_ring_port(p_port, s_port, p) &&
+ in_type == BR_MRP_TLV_HEADER_IN_LINK_STATUS) {
+ p_dst = NULL;
+ s_dst = NULL;
+ }
+
/* Should forward the InTopo frames only between the
* ring ports
*/
@@ -1172,20 +1197,18 @@ no_forward:
* normal forwarding.
* note: already called with rcu_read_lock
*/
-int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
+static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
{
/* If there is no MRP instance do normal forwarding */
if (likely(!(p->flags & BR_MRP_AWARE)))
goto out;
- if (unlikely(skb->protocol == htons(ETH_P_MRP)))
- return br_mrp_rcv(p, skb, p->dev);
-
+ return br_mrp_rcv(p, skb, p->dev);
out:
return 0;
}
bool br_mrp_enabled(struct net_bridge *br)
{
- return !list_empty(&br->mrp_list);
+ return !hlist_empty(&br->mrp_list);
}
diff --git a/net/bridge/br_mrp_netlink.c b/net/bridge/br_mrp_netlink.c
index 2a2fdf3500c5..ce6f63c77cc0 100644
--- a/net/bridge/br_mrp_netlink.c
+++ b/net/bridge/br_mrp_netlink.c
@@ -453,7 +453,7 @@ int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br)
if (!mrp_tb)
return -EMSGSIZE;
- list_for_each_entry_rcu(mrp, &br->mrp_list, list) {
+ hlist_for_each_entry_rcu(mrp, &br->mrp_list, list) {
struct net_bridge_port *p;
tb = nla_nest_start_noflag(skb, IFLA_BRIDGE_MRP_INFO);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 54cb82a69056..257ac4e25f6d 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -179,7 +179,8 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
break;
#endif
default:
- return NULL;
+ ip.proto = 0;
+ ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
}
return br_mdb_ip_get_rcu(br, &ip);
@@ -1203,6 +1204,10 @@ void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
if (notify)
br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
}
+
+ if (br_group_is_l2(&mp->addr))
+ return;
+
mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}
@@ -1254,8 +1259,8 @@ __br_multicast_add_group(struct net_bridge *br,
break;
}
- p = br_multicast_new_port_group(port, group, *pp, 0, src, filter_mode,
- RTPROT_KERNEL);
+ p = br_multicast_new_port_group(port, group, *pp, 0, src,
+ filter_mode, RTPROT_KERNEL);
if (unlikely(!p)) {
p = ERR_PTR(-ENOMEM);
goto out;
@@ -3706,7 +3711,7 @@ bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
memset(&eth, 0, sizeof(eth));
eth.h_proto = htons(proto);
- ret = br_multicast_querier_exists(br, &eth);
+ ret = br_multicast_querier_exists(br, &eth, NULL);
unlock:
rcu_read_unlock();
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 92d64abffa87..49700ce0e919 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -16,6 +16,7 @@
#include "br_private.h"
#include "br_private_stp.h"
+#include "br_private_cfm.h"
#include "br_private_tunnel.h"
static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
@@ -93,9 +94,11 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
{
struct net_bridge_vlan_group *vg = NULL;
struct net_bridge_port *p = NULL;
- struct net_bridge *br;
- int num_vlan_infos;
+ struct net_bridge *br = NULL;
+ u32 num_cfm_peer_mep_infos;
+ u32 num_cfm_mep_infos;
size_t vinfo_sz = 0;
+ int num_vlan_infos;
rcu_read_lock();
if (netif_is_bridge_port(dev)) {
@@ -114,6 +117,49 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
/* Each VLAN is returned in bridge_vlan_info along with flags */
vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
+ if (!(filter_mask & RTEXT_FILTER_CFM_STATUS))
+ return vinfo_sz;
+
+ if (!br)
+ return vinfo_sz;
+
+ /* CFM status info must be added */
+ br_cfm_mep_count(br, &num_cfm_mep_infos);
+ br_cfm_peer_mep_count(br, &num_cfm_peer_mep_infos);
+
+ vinfo_sz += nla_total_size(0); /* IFLA_BRIDGE_CFM */
+ /* For each status struct the MEP instance (u32) is added */
+ /* MEP instance (u32) + br_cfm_mep_status */
+ vinfo_sz += num_cfm_mep_infos *
+ /*IFLA_BRIDGE_CFM_MEP_STATUS_INSTANCE */
+ (nla_total_size(sizeof(u32))
+ /* IFLA_BRIDGE_CFM_MEP_STATUS_OPCODE_UNEXP_SEEN */
+ + nla_total_size(sizeof(u32))
+ /* IFLA_BRIDGE_CFM_MEP_STATUS_VERSION_UNEXP_SEEN */
+ + nla_total_size(sizeof(u32))
+ /* IFLA_BRIDGE_CFM_MEP_STATUS_RX_LEVEL_LOW_SEEN */
+ + nla_total_size(sizeof(u32)));
+ /* MEP instance (u32) + br_cfm_cc_peer_status */
+ vinfo_sz += num_cfm_peer_mep_infos *
+ /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_INSTANCE */
+ (nla_total_size(sizeof(u32))
+ /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PEER_MEPID */
+ + nla_total_size(sizeof(u32))
+ /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_CCM_DEFECT */
+ + nla_total_size(sizeof(u32))
+ /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_RDI */
+ + nla_total_size(sizeof(u32))
+ /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PORT_TLV_VALUE */
+ + nla_total_size(sizeof(u8))
+ /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_IF_TLV_VALUE */
+ + nla_total_size(sizeof(u8))
+ /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEEN */
+ + nla_total_size(sizeof(u32))
+ /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_TLV_SEEN */
+ + nla_total_size(sizeof(u32))
+ /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEQ_UNEXP_SEEN */
+ + nla_total_size(sizeof(u32)));
+
return vinfo_sz;
}
@@ -377,7 +423,8 @@ nla_put_failure:
static int br_fill_ifinfo(struct sk_buff *skb,
const struct net_bridge_port *port,
u32 pid, u32 seq, int event, unsigned int flags,
- u32 filter_mask, const struct net_device *dev)
+ u32 filter_mask, const struct net_device *dev,
+ bool getlink)
{
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
struct nlattr *af = NULL;
@@ -426,7 +473,9 @@ static int br_fill_ifinfo(struct sk_buff *skb,
if (filter_mask & (RTEXT_FILTER_BRVLAN |
RTEXT_FILTER_BRVLAN_COMPRESSED |
- RTEXT_FILTER_MRP)) {
+ RTEXT_FILTER_MRP |
+ RTEXT_FILTER_CFM_CONFIG |
+ RTEXT_FILTER_CFM_STATUS)) {
af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
if (!af)
goto nla_put_failure;
@@ -475,6 +524,36 @@ static int br_fill_ifinfo(struct sk_buff *skb,
goto nla_put_failure;
}
+ if (filter_mask & (RTEXT_FILTER_CFM_CONFIG | RTEXT_FILTER_CFM_STATUS)) {
+ struct nlattr *cfm_nest = NULL;
+ int err;
+
+ if (!br_cfm_created(br) || port)
+ goto done;
+
+ cfm_nest = nla_nest_start(skb, IFLA_BRIDGE_CFM);
+ if (!cfm_nest)
+ goto nla_put_failure;
+
+ if (filter_mask & RTEXT_FILTER_CFM_CONFIG) {
+ rcu_read_lock();
+ err = br_cfm_config_fill_info(skb, br);
+ rcu_read_unlock();
+ if (err)
+ goto nla_put_failure;
+ }
+
+ if (filter_mask & RTEXT_FILTER_CFM_STATUS) {
+ rcu_read_lock();
+ err = br_cfm_status_fill_info(skb, br, getlink);
+ rcu_read_unlock();
+ if (err)
+ goto nla_put_failure;
+ }
+
+ nla_nest_end(skb, cfm_nest);
+ }
+
done:
if (af)
nla_nest_end(skb, af);
@@ -486,11 +565,9 @@ nla_put_failure:
return -EMSGSIZE;
}
-/* Notify listeners of a change in bridge or port information */
-void br_ifinfo_notify(int event, const struct net_bridge *br,
- const struct net_bridge_port *port)
+void br_info_notify(int event, const struct net_bridge *br,
+ const struct net_bridge_port *port, u32 filter)
{
- u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
struct net_device *dev;
struct sk_buff *skb;
int err = -ENOBUFS;
@@ -515,7 +592,7 @@ void br_ifinfo_notify(int event, const struct net_bridge *br,
if (skb == NULL)
goto errout;
- err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev);
+ err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev, false);
if (err < 0) {
/* -EMSGSIZE implies BUG in br_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -528,6 +605,15 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
+/* Notify listeners of a change in bridge or port information */
+void br_ifinfo_notify(int event, const struct net_bridge *br,
+ const struct net_bridge_port *port)
+{
+ u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
+
+ return br_info_notify(event, br, port, filter);
+}
+
/*
* Dump information about all ports, in response to GETLINK
*/
@@ -538,11 +624,13 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) &&
- !(filter_mask & RTEXT_FILTER_MRP))
+ !(filter_mask & RTEXT_FILTER_MRP) &&
+ !(filter_mask & RTEXT_FILTER_CFM_CONFIG) &&
+ !(filter_mask & RTEXT_FILTER_CFM_STATUS))
return 0;
return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
- filter_mask, dev);
+ filter_mask, dev, true);
}
static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
@@ -700,6 +788,11 @@ static int br_afspec(struct net_bridge *br,
if (err)
return err;
break;
+ case IFLA_BRIDGE_CFM:
+ err = br_cfm_parse(br, p, attr, cmd, extack);
+ if (err)
+ return err;
+ break;
}
}
@@ -1631,7 +1724,7 @@ static int br_fill_linkxstats(struct sk_buff *skb,
pvid = br_get_pvid(vg);
list_for_each_entry(v, &vg->vlan_list, vlist) {
struct bridge_vlan_xstats vxi;
- struct br_vlan_stats stats;
+ struct pcpu_sw_netstats stats;
if (++vl_idx < *prividx)
continue;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 8424464186a6..d62c6e1af64a 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -89,14 +89,6 @@ struct bridge_mcast_stats {
};
#endif
-struct br_vlan_stats {
- u64 rx_bytes;
- u64 rx_packets;
- u64 tx_bytes;
- u64 tx_packets;
- struct u64_stats_sync syncp;
-};
-
struct br_tunnel_info {
__be64 tunnel_id;
struct metadata_dst *tunnel_dst;
@@ -137,7 +129,7 @@ struct net_bridge_vlan {
u16 flags;
u16 priv_flags;
u8 state;
- struct br_vlan_stats __percpu *stats;
+ struct pcpu_sw_netstats __percpu *stats;
union {
struct net_bridge *br;
struct net_bridge_port *port;
@@ -383,9 +375,8 @@ enum net_bridge_opts {
struct net_bridge {
spinlock_t lock;
spinlock_t hash_lock;
- struct list_head port_list;
+ struct hlist_head frame_type_list;
struct net_device *dev;
- struct pcpu_sw_netstats __percpu *stats;
unsigned long options;
/* These fields are accessed on each packet */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
@@ -395,6 +386,7 @@ struct net_bridge {
#endif
struct rhashtable fdb_hash_tbl;
+ struct list_head port_list;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
union {
struct rtable fake_rtable;
@@ -481,7 +473,10 @@ struct net_bridge {
struct hlist_head fdb_list;
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
- struct list_head mrp_list;
+ struct hlist_head mrp_list;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_CFM)
+ struct hlist_head mep_list;
#endif
};
@@ -755,6 +750,16 @@ int nbp_backup_change(struct net_bridge_port *p, struct net_device *backup_dev);
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
rx_handler_func_t *br_get_rx_handler(const struct net_device *dev);
+struct br_frame_type {
+ __be16 type;
+ int (*frame_handler)(struct net_bridge_port *port,
+ struct sk_buff *skb);
+ struct hlist_node list;
+};
+
+void br_add_frame(struct net_bridge *br, struct br_frame_type *ft);
+void br_del_frame(struct net_bridge *br, struct br_frame_type *ft);
+
static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
{
return rcu_dereference(dev->rx_handler) == br_get_rx_handler(dev);
@@ -842,6 +847,11 @@ void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
struct net_bridge_port_group *sg);
+static inline bool br_group_is_l2(const struct br_ip *group)
+{
+ return group->proto == 0;
+}
+
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -873,7 +883,8 @@ __br_multicast_querier_exists(struct net_bridge *br,
}
static inline bool br_multicast_querier_exists(struct net_bridge *br,
- struct ethhdr *eth)
+ struct ethhdr *eth,
+ const struct net_bridge_mdb_entry *mdb)
{
switch (eth->h_proto) {
case (htons(ETH_P_IP)):
@@ -885,7 +896,7 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
&br->ip6_other_query, true);
#endif
default:
- return false;
+ return !!mdb && br_group_is_l2(&mdb->addr);
}
}
@@ -1003,7 +1014,8 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
}
static inline bool br_multicast_querier_exists(struct net_bridge *br,
- struct ethhdr *eth)
+ struct ethhdr *eth,
+ const struct net_bridge_mdb_entry *mdb)
{
return false;
}
@@ -1082,7 +1094,7 @@ void nbp_vlan_flush(struct net_bridge_port *port);
int nbp_vlan_init(struct net_bridge_port *port, struct netlink_ext_ack *extack);
int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
void br_vlan_get_stats(const struct net_bridge_vlan *v,
- struct br_vlan_stats *stats);
+ struct pcpu_sw_netstats *stats);
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event);
int br_vlan_bridge_event(struct net_device *dev, unsigned long event,
void *ptr);
@@ -1278,7 +1290,7 @@ static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu(
}
static inline void br_vlan_get_stats(const struct net_bridge_vlan *v,
- struct br_vlan_stats *stats)
+ struct pcpu_sw_netstats *stats)
{
}
@@ -1427,7 +1439,6 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p,
struct nlattr *attr, int cmd, struct netlink_ext_ack *extack);
-int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb);
bool br_mrp_enabled(struct net_bridge *br);
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p);
int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br);
@@ -1439,11 +1450,6 @@ static inline int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p,
return -EOPNOTSUPP;
}
-static inline int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
-{
- return 0;
-}
-
static inline bool br_mrp_enabled(struct net_bridge *br)
{
return false;
@@ -1461,12 +1467,67 @@ static inline int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br)
#endif
+/* br_cfm.c */
+#if IS_ENABLED(CONFIG_BRIDGE_CFM)
+int br_cfm_parse(struct net_bridge *br, struct net_bridge_port *p,
+ struct nlattr *attr, int cmd, struct netlink_ext_ack *extack);
+bool br_cfm_created(struct net_bridge *br);
+void br_cfm_port_del(struct net_bridge *br, struct net_bridge_port *p);
+int br_cfm_config_fill_info(struct sk_buff *skb, struct net_bridge *br);
+int br_cfm_status_fill_info(struct sk_buff *skb,
+ struct net_bridge *br,
+ bool getlink);
+int br_cfm_mep_count(struct net_bridge *br, u32 *count);
+int br_cfm_peer_mep_count(struct net_bridge *br, u32 *count);
+#else
+static inline int br_cfm_parse(struct net_bridge *br, struct net_bridge_port *p,
+ struct nlattr *attr, int cmd,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool br_cfm_created(struct net_bridge *br)
+{
+ return false;
+}
+
+static inline void br_cfm_port_del(struct net_bridge *br,
+ struct net_bridge_port *p)
+{
+}
+
+static inline int br_cfm_config_fill_info(struct sk_buff *skb, struct net_bridge *br)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int br_cfm_status_fill_info(struct sk_buff *skb,
+ struct net_bridge *br,
+ bool getlink)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int br_cfm_mep_count(struct net_bridge *br, u32 *count)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int br_cfm_peer_mep_count(struct net_bridge *br, u32 *count)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
/* br_netlink.c */
extern struct rtnl_link_ops br_link_ops;
int br_netlink_init(void);
void br_netlink_fini(void);
void br_ifinfo_notify(int event, const struct net_bridge *br,
const struct net_bridge_port *port);
+void br_info_notify(int event, const struct net_bridge *br,
+ const struct net_bridge_port *port, u32 filter);
int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags,
struct netlink_ext_ack *extack);
int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
diff --git a/net/bridge/br_private_cfm.h b/net/bridge/br_private_cfm.h
new file mode 100644
index 000000000000..a43a5e7fa2c3
--- /dev/null
+++ b/net/bridge/br_private_cfm.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _BR_PRIVATE_CFM_H_
+#define _BR_PRIVATE_CFM_H_
+
+#include "br_private.h"
+#include <uapi/linux/cfm_bridge.h>
+
+struct br_cfm_mep_create {
+ enum br_cfm_domain domain; /* Domain for this MEP */
+ enum br_cfm_mep_direction direction; /* Up or Down MEP direction */
+ u32 ifindex; /* Residence port */
+};
+
+int br_cfm_mep_create(struct net_bridge *br,
+ const u32 instance,
+ struct br_cfm_mep_create *const create,
+ struct netlink_ext_ack *extack);
+
+int br_cfm_mep_delete(struct net_bridge *br,
+ const u32 instance,
+ struct netlink_ext_ack *extack);
+
+struct br_cfm_mep_config {
+ u32 mdlevel;
+ u32 mepid; /* MEPID for this MEP */
+ struct mac_addr unicast_mac; /* The MEP unicast MAC */
+};
+
+int br_cfm_mep_config_set(struct net_bridge *br,
+ const u32 instance,
+ const struct br_cfm_mep_config *const config,
+ struct netlink_ext_ack *extack);
+
+struct br_cfm_maid {
+ u8 data[CFM_MAID_LENGTH];
+};
+
+struct br_cfm_cc_config {
+ /* Expected received CCM PDU MAID. */
+ struct br_cfm_maid exp_maid;
+
+ /* Expected received CCM PDU interval. */
+ /* Transmitting CCM PDU interval when CCM tx is enabled. */
+ enum br_cfm_ccm_interval exp_interval;
+
+ bool enable; /* Enable/disable CCM PDU handling */
+};
+
+int br_cfm_cc_config_set(struct net_bridge *br,
+ const u32 instance,
+ const struct br_cfm_cc_config *const config,
+ struct netlink_ext_ack *extack);
+
+int br_cfm_cc_peer_mep_add(struct net_bridge *br, const u32 instance,
+ u32 peer_mep_id,
+ struct netlink_ext_ack *extack);
+int br_cfm_cc_peer_mep_remove(struct net_bridge *br, const u32 instance,
+ u32 peer_mep_id,
+ struct netlink_ext_ack *extack);
+
+/* Transmitted CCM Remote Defect Indication status set.
+ * This RDI is inserted in transmitted CCM PDUs if CCM transmission is enabled.
+ * See br_cfm_cc_ccm_tx() with interval != BR_CFM_CCM_INTERVAL_NONE
+ */
+int br_cfm_cc_rdi_set(struct net_bridge *br, const u32 instance,
+ const bool rdi, struct netlink_ext_ack *extack);
+
+/* OAM PDU Tx information */
+struct br_cfm_cc_ccm_tx_info {
+ struct mac_addr dmac;
+ /* The CCM will be transmitted for this period in seconds.
+ * Call br_cfm_cc_ccm_tx before timeout to keep transmission alive.
+ * When period is zero any ongoing transmission will be stopped.
+ */
+ u32 period;
+
+ bool seq_no_update; /* Update Tx CCM sequence number */
+ bool if_tlv; /* Insert Interface Status TLV */
+ u8 if_tlv_value; /* Interface Status TLV value */
+ bool port_tlv; /* Insert Port Status TLV */
+ u8 port_tlv_value; /* Port Status TLV value */
+ /* Sender ID TLV ??
+ * Organization-Specific TLV ??
+ */
+};
+
+int br_cfm_cc_ccm_tx(struct net_bridge *br, const u32 instance,
+ const struct br_cfm_cc_ccm_tx_info *const tx_info,
+ struct netlink_ext_ack *extack);
+
+struct br_cfm_mep_status {
+ /* Indications that an OAM PDU has been seen. */
+ bool opcode_unexp_seen; /* RX of OAM PDU with unexpected opcode */
+ bool version_unexp_seen; /* RX of OAM PDU with unexpected version */
+ bool rx_level_low_seen; /* Rx of OAM PDU with level low */
+};
+
+struct br_cfm_cc_peer_status {
+ /* This CCM related status is based on the latest received CCM PDU. */
+ u8 port_tlv_value; /* Port Status TLV value */
+ u8 if_tlv_value; /* Interface Status TLV value */
+
+ /* CCM has not been received for 3.25 intervals */
+ u8 ccm_defect:1;
+
+ /* (RDI == 1) for last received CCM PDU */
+ u8 rdi:1;
+
+ /* Indications that a CCM PDU has been seen. */
+ u8 seen:1; /* CCM PDU received */
+ u8 tlv_seen:1; /* CCM PDU with TLV received */
+ /* CCM PDU with unexpected sequence number received */
+ u8 seq_unexp_seen:1;
+};
+
+struct br_cfm_mep {
+ /* list header of MEP instances */
+ struct hlist_node head;
+ u32 instance;
+ struct br_cfm_mep_create create;
+ struct br_cfm_mep_config config;
+ struct br_cfm_cc_config cc_config;
+ struct br_cfm_cc_ccm_tx_info cc_ccm_tx_info;
+ /* List of multiple peer MEPs */
+ struct hlist_head peer_mep_list;
+ struct net_bridge_port __rcu *b_port;
+ unsigned long ccm_tx_end;
+ struct delayed_work ccm_tx_dwork;
+ u32 ccm_tx_snumber;
+ u32 ccm_rx_snumber;
+ struct br_cfm_mep_status status;
+ bool rdi;
+ struct rcu_head rcu;
+};
+
+struct br_cfm_peer_mep {
+ struct hlist_node head;
+ struct br_cfm_mep *mep;
+ struct delayed_work ccm_rx_dwork;
+ u32 mepid;
+ struct br_cfm_cc_peer_status cc_status;
+ u32 ccm_rx_count_miss;
+ struct rcu_head rcu;
+};
+
+#endif /* _BR_PRIVATE_CFM_H_ */
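
Read together, the new header implies a call sequence: create the MEP, configure it, register peer MEPs, then arm CCM transmission (which must be re-armed before 'period' expires to keep TX alive). A hedged usage sketch with illustrative values; the enum constants are assumed from uapi/linux/cfm_bridge.h and error handling is elided:

	struct br_cfm_mep_create create = {
		.domain    = BR_CFM_PORT,		/* assumed enum value */
		.direction = BR_CFM_MEP_DIRECTION_DOWN,	/* assumed enum value */
		.ifindex   = port_ifindex,		/* residence port */
	};
	struct br_cfm_mep_config config = {
		.mdlevel = 3,
		.mepid   = 10,
	};
	struct br_cfm_cc_ccm_tx_info tx_info = {
		.period        = 10,	/* keep CCM TX alive for 10 s */
		.seq_no_update = true,
	};

	br_cfm_mep_create(br, instance, &create, extack);
	br_cfm_mep_config_set(br, instance, &config, extack);
	br_cfm_cc_peer_mep_add(br, instance, 20 /* peer MEPID */, extack);
	br_cfm_cc_ccm_tx(br, instance, &tx_info, extack);
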
diff --git a/net/bridge/br_private_mrp.h b/net/bridge/br_private_mrp.h
index af0e9eff6549..1883118aae55 100644
--- a/net/bridge/br_private_mrp.h
+++ b/net/bridge/br_private_mrp.h
@@ -8,7 +8,7 @@
struct br_mrp {
/* list of mrp instances */
- struct list_head list;
+ struct hlist_node list;
struct net_bridge_port __rcu *p_port;
struct net_bridge_port __rcu *s_port;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 08c77418c687..701cad646b20 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -272,7 +272,8 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
}
v->brvlan = masterv;
if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
- v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
+ v->stats =
+ netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!v->stats) {
err = -ENOMEM;
goto out_filt;
@@ -423,7 +424,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
struct net_bridge_vlan_group *vg,
struct sk_buff *skb)
{
- struct br_vlan_stats *stats;
+ struct pcpu_sw_netstats *stats;
struct net_bridge_vlan *v;
u16 vid;
@@ -476,7 +477,7 @@ static bool __allowed_ingress(const struct net_bridge *br,
struct sk_buff *skb, u16 *vid,
u8 *state)
{
- struct br_vlan_stats *stats;
+ struct pcpu_sw_netstats *stats;
struct net_bridge_vlan *v;
bool tagged;
@@ -710,7 +711,7 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
if (!vlan)
return -ENOMEM;
- vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
+ vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!vlan->stats) {
kfree(vlan);
return -ENOMEM;
@@ -855,15 +856,25 @@ EXPORT_SYMBOL_GPL(br_vlan_get_proto);
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
+ struct switchdev_attr attr = {
+ .orig_dev = br->dev,
+ .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
+ .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
+ .u.vlan_protocol = ntohs(proto),
+ };
int err = 0;
struct net_bridge_port *p;
struct net_bridge_vlan *vlan;
struct net_bridge_vlan_group *vg;
- __be16 oldproto;
+ __be16 oldproto = br->vlan_proto;
if (br->vlan_proto == proto)
return 0;
+ err = switchdev_port_attr_set(br->dev, &attr);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
/* Add VLANs for the new proto to the device filter. */
list_for_each_entry(p, &br->port_list, list) {
vg = nbp_vlan_group(p);
@@ -874,7 +885,6 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
}
}
- oldproto = br->vlan_proto;
br->vlan_proto = proto;
recalculate_group_addr(br);
@@ -890,6 +900,9 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
return 0;
err_filt:
+ attr.u.vlan_protocol = ntohs(oldproto);
+ switchdev_port_attr_set(br->dev, &attr);
+
list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
vlan_vid_del(p->dev, proto, vlan->vid);
@@ -1264,14 +1277,14 @@ void nbp_vlan_flush(struct net_bridge_port *port)
}
void br_vlan_get_stats(const struct net_bridge_vlan *v,
- struct br_vlan_stats *stats)
+ struct pcpu_sw_netstats *stats)
{
int i;
memset(stats, 0, sizeof(*stats));
for_each_possible_cpu(i) {
u64 rxpackets, rxbytes, txpackets, txbytes;
- struct br_vlan_stats *cpu_stats;
+ struct pcpu_sw_netstats *cpu_stats;
unsigned int start;
cpu_stats = per_cpu_ptr(v->stats, i);
@@ -1587,7 +1600,7 @@ void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
static bool br_vlan_stats_fill(struct sk_buff *skb,
const struct net_bridge_vlan *v)
{
- struct br_vlan_stats stats;
+ struct pcpu_sw_netstats stats;
struct nlattr *nest;
nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
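
br_vlan.c now reuses struct pcpu_sw_netstats for the VLAN counters, so the read side keeps the familiar u64_stats seqcount retry loop. A sketch of the per-CPU fold, mirroring the converted br_vlan_get_stats() (helper names as in this kernel series):

	struct pcpu_sw_netstats *cpu_stats = per_cpu_ptr(v->stats, cpu);
	unsigned int start;
	u64 rxpackets, rxbytes;

	do {
		/* retry if a writer raced with us on this CPU's counters */
		start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
		rxpackets = cpu_stats->rx_packets;
		rxbytes   = cpu_stats->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
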
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 5040fe43f4b4..ac5372121e60 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -17,7 +17,9 @@ config NFT_BRIDGE_META
config NFT_BRIDGE_REJECT
tristate "Netfilter nf_tables bridge reject support"
- depends on NFT_REJECT && NFT_REJECT_IPV4 && NFT_REJECT_IPV6
+ depends on NFT_REJECT
+ depends on NF_REJECT_IPV4
+ depends on NF_REJECT_IPV6
help
Add support to reject packets.
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index deae2c9a0f69..eba0efe64d05 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -39,30 +39,6 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
}
}
-static int nft_bridge_iphdr_validate(struct sk_buff *skb)
-{
- struct iphdr *iph;
- u32 len;
-
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- return 0;
-
- iph = ip_hdr(skb);
- if (iph->ihl < 5 || iph->version != 4)
- return 0;
-
- len = ntohs(iph->tot_len);
- if (skb->len < len)
- return 0;
- else if (len < (iph->ihl*4))
- return 0;
-
- if (!pskb_may_pull(skb, iph->ihl*4))
- return 0;
-
- return 1;
-}
-
/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
* or the bridge port (NF_BRIDGE PREROUTING).
*/
@@ -72,29 +48,11 @@ static void nft_reject_br_send_v4_tcp_reset(struct net *net,
int hook)
{
struct sk_buff *nskb;
- struct iphdr *niph;
- const struct tcphdr *oth;
- struct tcphdr _oth;
- if (!nft_bridge_iphdr_validate(oldskb))
- return;
-
- oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
- if (!oth)
- return;
-
- nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
- LL_MAX_HEADER, GFP_ATOMIC);
+ nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, dev, hook);
if (!nskb)
return;
- skb_reserve(nskb, LL_MAX_HEADER);
- niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
- net->ipv4.sysctl_ip_default_ttl);
- nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
- niph->tot_len = htons(nskb->len);
- ip_send_check(niph);
-
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
@@ -106,139 +64,32 @@ static void nft_reject_br_send_v4_unreach(struct net *net,
int hook, u8 code)
{
struct sk_buff *nskb;
- struct iphdr *niph;
- struct icmphdr *icmph;
- unsigned int len;
- __wsum csum;
- u8 proto;
-
- if (!nft_bridge_iphdr_validate(oldskb))
- return;
-
- /* IP header checks: fragment. */
- if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
- return;
-
- /* RFC says return as much as we can without exceeding 576 bytes. */
- len = min_t(unsigned int, 536, oldskb->len);
-
- if (!pskb_may_pull(oldskb, len))
- return;
-
- if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
- return;
-
- proto = ip_hdr(oldskb)->protocol;
-
- if (!skb_csum_unnecessary(oldskb) &&
- nf_reject_verify_csum(proto) &&
- nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
- return;
- nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
- LL_MAX_HEADER + len, GFP_ATOMIC);
+ nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code);
if (!nskb)
return;
- skb_reserve(nskb, LL_MAX_HEADER);
- niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
- net->ipv4.sysctl_ip_default_ttl);
-
- skb_reset_transport_header(nskb);
- icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
- icmph->type = ICMP_DEST_UNREACH;
- icmph->code = code;
-
- skb_put_data(nskb, skb_network_header(oldskb), len);
-
- csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
- icmph->checksum = csum_fold(csum);
-
- niph->tot_len = htons(nskb->len);
- ip_send_check(niph);
-
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
}
-static int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
-{
- struct ipv6hdr *hdr;
- u32 pkt_len;
-
- if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
- return 0;
-
- hdr = ipv6_hdr(skb);
- if (hdr->version != 6)
- return 0;
-
- pkt_len = ntohs(hdr->payload_len);
- if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
- return 0;
-
- return 1;
-}
-
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
- const struct tcphdr *oth;
- struct tcphdr _oth;
- unsigned int otcplen;
- struct ipv6hdr *nip6h;
- if (!nft_bridge_ip6hdr_validate(oldskb))
- return;
-
- oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
- if (!oth)
- return;
-
- nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
- LL_MAX_HEADER, GFP_ATOMIC);
+ nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, dev, hook);
if (!nskb)
return;
- skb_reserve(nskb, LL_MAX_HEADER);
- nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
- net->ipv6.devconf_all->hop_limit);
- nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
- nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
-
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
}
-static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
-{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- int thoff;
- __be16 fo;
- u8 proto = ip6h->nexthdr;
-
- if (skb_csum_unnecessary(skb))
- return true;
-
- if (ip6h->payload_len &&
- pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
- return false;
-
- ip6h = ipv6_hdr(skb);
- thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
- if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
- return false;
-
- if (!nf_reject_verify_csum(proto))
- return true;
-
- return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
-}
static void nft_reject_br_send_v6_unreach(struct net *net,
struct sk_buff *oldskb,
@@ -246,49 +97,11 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
int hook, u8 code)
{
struct sk_buff *nskb;
- struct ipv6hdr *nip6h;
- struct icmp6hdr *icmp6h;
- unsigned int len;
-
- if (!nft_bridge_ip6hdr_validate(oldskb))
- return;
- /* Include "As much of invoking packet as possible without the ICMPv6
- * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
- */
- len = min_t(unsigned int, 1220, oldskb->len);
-
- if (!pskb_may_pull(oldskb, len))
- return;
-
- if (!reject6_br_csum_ok(oldskb, hook))
- return;
-
- nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
- LL_MAX_HEADER + len, GFP_ATOMIC);
+ nskb = nf_reject_skb_v6_unreach(net, oldskb, dev, hook, code);
if (!nskb)
return;
- skb_reserve(nskb, LL_MAX_HEADER);
- nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
- net->ipv6.devconf_all->hop_limit);
-
- skb_reset_transport_header(nskb);
- icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
- icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
- icmp6h->icmp6_code = code;
-
- skb_put_data(nskb, skb_network_header(oldskb), len);
- nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
-
- icmp6h->icmp6_cksum =
- csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
- nskb->len - sizeof(struct ipv6hdr),
- IPPROTO_ICMPV6,
- csum_partial(icmp6h,
- nskb->len - sizeof(struct ipv6hdr),
- 0));
-
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
@@ -364,69 +177,13 @@ static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
(1 << NF_BR_LOCAL_IN));
}
-static int nft_reject_bridge_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_reject *priv = nft_expr_priv(expr);
- int icmp_code;
-
- if (tb[NFTA_REJECT_TYPE] == NULL)
- return -EINVAL;
-
- priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
- switch (priv->type) {
- case NFT_REJECT_ICMP_UNREACH:
- case NFT_REJECT_ICMPX_UNREACH:
- if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
- return -EINVAL;
-
- icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
- if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
- icmp_code > NFT_REJECT_ICMPX_MAX)
- return -EINVAL;
-
- priv->icmp_code = icmp_code;
- break;
- case NFT_REJECT_TCP_RST:
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int nft_reject_bridge_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_reject *priv = nft_expr_priv(expr);
-
- if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
- goto nla_put_failure;
-
- switch (priv->type) {
- case NFT_REJECT_ICMP_UNREACH:
- case NFT_REJECT_ICMPX_UNREACH:
- if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
- goto nla_put_failure;
- break;
- default:
- break;
- }
-
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
static struct nft_expr_type nft_reject_bridge_type;
static const struct nft_expr_ops nft_reject_bridge_ops = {
.type = &nft_reject_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_bridge_eval,
- .init = nft_reject_bridge_init,
- .dump = nft_reject_bridge_dump,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
.validate = nft_reject_bridge_validate,
};
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 4c343b43067f..837bb8af0ec3 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -891,7 +891,7 @@ static __init int can_init(void)
int err;
/* check for correct padding to be able to use the structs similarly */
- BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
+ BUILD_BUG_ON(offsetof(struct can_frame, len) !=
offsetof(struct canfd_frame, len) ||
offsetof(struct can_frame, data) !=
offsetof(struct canfd_frame, data));
diff --git a/net/can/gw.c b/net/can/gw.c
index 6b790b6ff8d2..8598d9da0e5f 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -199,6 +199,68 @@ static void mod_set_fddata(struct canfd_frame *cf, struct cf_mod *mod)
memcpy(cf->data, mod->modframe.set.data, CANFD_MAX_DLEN);
}
+/* retrieve valid CC DLC value and store it into 'len' */
+static void mod_retrieve_ccdlc(struct canfd_frame *cf)
+{
+ struct can_frame *ccf = (struct can_frame *)cf;
+
+ /* len8_dlc is only valid if len == CAN_MAX_DLEN */
+ if (ccf->len != CAN_MAX_DLEN)
+ return;
+
+ /* do we have a valid len8_dlc value from 9 .. 15 ? */
+ if (ccf->len8_dlc > CAN_MAX_DLEN && ccf->len8_dlc <= CAN_MAX_RAW_DLC)
+ ccf->len = ccf->len8_dlc;
+}
+
+/* convert valid CC DLC value in 'len' into struct can_frame elements */
+static void mod_store_ccdlc(struct canfd_frame *cf)
+{
+ struct can_frame *ccf = (struct can_frame *)cf;
+
+ /* clear potential leftovers */
+ ccf->len8_dlc = 0;
+
+ /* plain data length 0 .. 8 - that was easy */
+ if (ccf->len <= CAN_MAX_DLEN)
+ return;
+
+ /* potentially broken values are caught in can_can_gw_rcv() */
+ if (ccf->len > CAN_MAX_RAW_DLC)
+ return;
+
+ /* we have a valid dlc value from 9 .. 15 in ccf->len */
+ ccf->len8_dlc = ccf->len;
+ ccf->len = CAN_MAX_DLEN;
+}
+
+static void mod_and_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
+{
+ mod_retrieve_ccdlc(cf);
+ mod_and_len(cf, mod);
+ mod_store_ccdlc(cf);
+}
+
+static void mod_or_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
+{
+ mod_retrieve_ccdlc(cf);
+ mod_or_len(cf, mod);
+ mod_store_ccdlc(cf);
+}
+
+static void mod_xor_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
+{
+ mod_retrieve_ccdlc(cf);
+ mod_xor_len(cf, mod);
+ mod_store_ccdlc(cf);
+}
+
+static void mod_set_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
+{
+ mod_set_len(cf, mod);
+ mod_store_ccdlc(cf);
+}
+
static void canframecpy(struct canfd_frame *dst, struct can_frame *src)
{
/* Copy the struct members separately to ensure that no uninitialized
@@ -207,7 +269,7 @@ static void canframecpy(struct canfd_frame *dst, struct can_frame *src)
*/
dst->can_id = src->can_id;
- dst->len = src->can_dlc;
+ dst->len = src->len;
*(u64 *)dst->data = *(u64 *)src->data;
}
@@ -842,8 +904,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_and_id;
- if (mb.modtype & CGW_MOD_LEN)
- mod->modfunc[modidx++] = mod_and_len;
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_and_ccdlc;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_and_data;
@@ -858,8 +920,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_or_id;
- if (mb.modtype & CGW_MOD_LEN)
- mod->modfunc[modidx++] = mod_or_len;
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_or_ccdlc;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_or_data;
@@ -874,8 +936,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_xor_id;
- if (mb.modtype & CGW_MOD_LEN)
- mod->modfunc[modidx++] = mod_xor_len;
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_xor_ccdlc;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_xor_data;
@@ -890,8 +952,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_set_id;
- if (mb.modtype & CGW_MOD_LEN)
- mod->modfunc[modidx++] = mod_set_len;
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_set_ccdlc;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_set_data;
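
The *_ccdlc() wrappers make CGW_MOD_DLC modifications operate on the raw classic-CAN DLC: a len8_dlc of 9..15 is unfolded into len, the modification runs, then the result is folded back. A worked round trip with illustrative values, mirroring the static helpers above:

	struct can_frame cf = {
		.len      = CAN_MAX_DLEN,	/* 8 data bytes on the wire */
		.len8_dlc = 12,			/* raw DLC 9..15 carried alongside */
	};

	mod_retrieve_ccdlc((struct canfd_frame *)&cf);
	/* cf.len is now 12: the modification sees the raw DLC */

	cf.len &= 0x0f;			/* e.g. a CGW_MOD_DLC AND step */

	mod_store_ccdlc((struct canfd_frame *)&cf);
	/* cf.len is back to 8, cf.len8_dlc holds 12 */
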
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 26bdc3c20b7e..7839c3b9e5be 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -865,6 +865,14 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (!size || size > MAX_MSG_LENGTH)
return -EINVAL;
+ /* take care of a potential SF_DL ESC offset for TX_DL > 8 */
+ off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
+
+ /* does the given data fit into a single frame for SF_BROADCAST? */
+ if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
+ (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off))
+ return -EINVAL;
+
err = memcpy_from_msg(so->tx.buf, msg, size);
if (err < 0)
return err;
@@ -891,9 +899,6 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
cf = (struct canfd_frame *)skb->data;
skb_put(skb, so->ll.mtu);
- /* take care of a potential SF_DL ESC offset for TX_DL > 8 */
- off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
-
/* check for single frame transmission depending on TX_DL */
if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) {
/* The message size generally fits into a SingleFrame - good.
@@ -1016,7 +1021,7 @@ static int isotp_release(struct socket *sock)
hrtimer_cancel(&so->rxtimer);
/* remove current filters & unregister */
- if (so->bound) {
+ if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) {
if (so->ifindex) {
struct net_device *dev;
@@ -1052,15 +1057,25 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
struct net_device *dev;
int err = 0;
int notify_enetdown = 0;
+ int do_rx_reg = 1;
if (len < CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp))
return -EINVAL;
- if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id)
- return -EADDRNOTAVAIL;
+ /* do not register frame reception for functional addressing */
+ if (so->opt.flags & CAN_ISOTP_SF_BROADCAST)
+ do_rx_reg = 0;
+
+ /* do not validate rx address for functional addressing */
+ if (do_rx_reg) {
+ if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id)
+ return -EADDRNOTAVAIL;
+
+ if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
+ return -EADDRNOTAVAIL;
+ }
- if ((addr->can_addr.tp.rx_id | addr->can_addr.tp.tx_id) &
- (CAN_ERR_FLAG | CAN_RTR_FLAG))
+ if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
return -EADDRNOTAVAIL;
if (!addr->can_ifindex)
@@ -1093,13 +1108,14 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
ifindex = dev->ifindex;
- can_rx_register(net, dev, addr->can_addr.tp.rx_id,
- SINGLE_MASK(addr->can_addr.tp.rx_id), isotp_rcv, sk,
- "isotp", sk);
+ if (do_rx_reg)
+ can_rx_register(net, dev, addr->can_addr.tp.rx_id,
+ SINGLE_MASK(addr->can_addr.tp.rx_id),
+ isotp_rcv, sk, "isotp", sk);
dev_put(dev);
- if (so->bound) {
+ if (so->bound && do_rx_reg) {
/* unregister old filter */
if (so->ifindex) {
dev = dev_get_by_index(net, so->ifindex);
@@ -1302,7 +1318,7 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
case NETDEV_UNREGISTER:
lock_sock(sk);
/* remove current filters & unregister */
- if (so->bound)
+ if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST)))
can_rx_unregister(dev_net(dev), dev, so->rxid,
SINGLE_MASK(so->rxid),
isotp_rcv, sk);
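
With CAN_ISOTP_SF_BROADCAST the socket never registers an RX filter and only accepts payloads that fit one SingleFrame, which is the check hoisted to the top of isotp_sendmsg(). The resulting payload ceiling, as a hedged sketch (SF_PCI_SZ4 is the private 1-byte SF PCI size from isotp.c; ae is 1 with extended addressing, else 0):

	static unsigned int isotp_sf_max_payload(unsigned int tx_dl,
						 unsigned int ae)
	{
		/* the SF_DL escape byte is only needed when TX_DL
		 * exceeds the classic CAN maximum of 8 bytes
		 */
		unsigned int off = (tx_dl > CAN_MAX_DLEN) ? 1 : 0;

		return tx_dl - SF_PCI_SZ4 - ae - off;
	}

	/* classic CAN, normal addressing:   8 - 1 - 0 - 0 = 7 bytes
	 * CAN FD TX_DL=64, normal address: 64 - 1 - 0 - 1 = 62 bytes
	 */
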
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index 137054bff9ec..bb914d8b4216 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -62,7 +62,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
skb_pull(skb, J1939_CAN_HDR);
/* fix length, set to dlc, with 8 maximum */
- skb_trim(skb, min_t(uint8_t, cf->can_dlc, 8));
+ skb_trim(skb, min_t(uint8_t, cf->len, 8));
/* set addr */
skcb = j1939_skb_to_cb(skb);
@@ -335,7 +335,7 @@ int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
canid |= skcb->addr.da << 8;
cf->can_id = canid;
- cf->can_dlc = dlc;
+ cf->len = dlc;
return can_send(skb, 1);
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index c907f0dc7f87..4edd033e899c 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
@@ -15,20 +16,8 @@
DEFINE_BPF_STORAGE_CACHE(sk_cache);
-static int omem_charge(struct sock *sk, unsigned int size)
-{
- /* same check as in sock_kmalloc() */
- if (size <= sysctl_optmem_max &&
- atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
- atomic_add(size, &sk->sk_omem_alloc);
- return 0;
- }
-
- return -ENOMEM;
-}
-
static struct bpf_local_storage_data *
-sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
+bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
struct bpf_local_storage *sk_storage;
struct bpf_local_storage_map *smap;
@@ -41,11 +30,11 @@ sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}
-static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
+static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
struct bpf_local_storage_data *sdata;
- sdata = sk_storage_lookup(sk, map, false);
+ sdata = bpf_sk_storage_lookup(sk, map, false);
if (!sdata)
return -ENOENT;
@@ -94,7 +83,7 @@ void bpf_sk_storage_free(struct sock *sk)
kfree_rcu(sk_storage, rcu);
}
-static void sk_storage_map_free(struct bpf_map *map)
+static void bpf_sk_storage_map_free(struct bpf_map *map)
{
struct bpf_local_storage_map *smap;
@@ -103,7 +92,7 @@ static void sk_storage_map_free(struct bpf_map *map)
bpf_local_storage_map_free(smap);
}
-static struct bpf_map *sk_storage_map_alloc(union bpf_attr *attr)
+static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
struct bpf_local_storage_map *smap;
@@ -130,7 +119,7 @@ static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
fd = *(int *)key;
sock = sockfd_lookup(fd, &err);
if (sock) {
- sdata = sk_storage_lookup(sock->sk, map, true);
+ sdata = bpf_sk_storage_lookup(sock->sk, map, true);
sockfd_put(sock);
return sdata ? sdata->data : NULL;
}
@@ -166,7 +155,7 @@ static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
fd = *(int *)key;
sock = sockfd_lookup(fd, &err);
if (sock) {
- err = sk_storage_delete(sock->sk, map);
+ err = bpf_sk_storage_del(sock->sk, map);
sockfd_put(sock);
return err;
}
@@ -272,7 +261,7 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
return (unsigned long)NULL;
- sdata = sk_storage_lookup(sk, map, true);
+ sdata = bpf_sk_storage_lookup(sk, map, true);
if (sdata)
return (unsigned long)sdata->data;
@@ -305,7 +294,7 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
if (refcount_inc_not_zero(&sk->sk_refcnt)) {
int err;
- err = sk_storage_delete(sk, map);
+ err = bpf_sk_storage_del(sk, map);
sock_put(sk);
return err;
}
@@ -313,14 +302,23 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
return -ENOENT;
}
-static int sk_storage_charge(struct bpf_local_storage_map *smap,
- void *owner, u32 size)
+static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
+ void *owner, u32 size)
{
- return omem_charge(owner, size);
+ struct sock *sk = (struct sock *)owner;
+
+ /* same check as in sock_kmalloc() */
+ if (size <= sysctl_optmem_max &&
+ atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+ atomic_add(size, &sk->sk_omem_alloc);
+ return 0;
+ }
+
+ return -ENOMEM;
}
-static void sk_storage_uncharge(struct bpf_local_storage_map *smap,
- void *owner, u32 size)
+static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
+ void *owner, u32 size)
{
struct sock *sk = owner;
@@ -328,7 +326,7 @@ static void sk_storage_uncharge(struct bpf_local_storage_map *smap,
}
static struct bpf_local_storage __rcu **
-sk_storage_ptr(void *owner)
+bpf_sk_storage_ptr(void *owner)
{
struct sock *sk = owner;
@@ -339,8 +337,8 @@ static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = bpf_local_storage_map_alloc_check,
- .map_alloc = sk_storage_map_alloc,
- .map_free = sk_storage_map_free,
+ .map_alloc = bpf_sk_storage_map_alloc,
+ .map_free = bpf_sk_storage_map_free,
.map_get_next_key = notsupp_get_next_key,
.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
.map_update_elem = bpf_fd_sk_storage_update_elem,
@@ -348,9 +346,9 @@ const struct bpf_map_ops sk_storage_map_ops = {
.map_check_btf = bpf_local_storage_map_check_btf,
.map_btf_name = "bpf_local_storage_map",
.map_btf_id = &sk_storage_map_btf_id,
- .map_local_storage_charge = sk_storage_charge,
- .map_local_storage_uncharge = sk_storage_uncharge,
- .map_owner_storage_ptr = sk_storage_ptr,
+ .map_local_storage_charge = bpf_sk_storage_charge,
+ .map_local_storage_uncharge = bpf_sk_storage_uncharge,
+ .map_owner_storage_ptr = bpf_sk_storage_ptr,
};
const struct bpf_func_proto bpf_sk_storage_get_proto = {
@@ -381,6 +379,80 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};
+static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
+{
+ const struct btf *btf_vmlinux;
+ const struct btf_type *t;
+ const char *tname;
+ u32 btf_id;
+
+ if (prog->aux->dst_prog)
+ return false;
+
+ /* Ensure the tracing program is not tracing
+ * any bpf_sk_storage*() function while also
+ * using the bpf_sk_storage_(get|delete) helpers.
+ */
+ switch (prog->expected_attach_type) {
+ case BPF_TRACE_ITER:
+ case BPF_TRACE_RAW_TP:
+ /* bpf_sk_storage has no trace point */
+ return true;
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ btf_vmlinux = bpf_get_btf_vmlinux();
+ btf_id = prog->aux->attach_btf_id;
+ t = btf_type_by_id(btf_vmlinux, btf_id);
+ tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+ return !!strncmp(tname, "bpf_sk_storage",
+ strlen("bpf_sk_storage"));
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
+ void *, value, u64, flags)
+{
+ if (in_irq() || in_nmi())
+ return (unsigned long)NULL;
+
+ return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
+}
+
+BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
+ struct sock *, sk)
+{
+ if (in_irq() || in_nmi())
+ return -EPERM;
+
+ return ____bpf_sk_storage_delete(map, sk);
+}
+
+const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
+ .func = bpf_sk_storage_get_tracing,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_BTF_ID,
+ .arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+ .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg4_type = ARG_ANYTHING,
+ .allowed = bpf_sk_storage_tracing_allowed,
+};
+
+const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
+ .func = bpf_sk_storage_delete_tracing,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_BTF_ID,
+ .arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+ .allowed = bpf_sk_storage_tracing_allowed,
+};
+
struct bpf_sk_storage_diag {
u32 nr_maps;
struct bpf_map *maps[];
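The two *_tracing protos above expose the existing sk_storage helpers to tracing programs, gated by bpf_sk_storage_tracing_allowed() and the in_irq()/in_nmi() checks. A minimal BPF-side sketch of what this enables follows; the map name, traced function, and counter layout are illustrative assumptions, not part of this patch:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	struct {
		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);
		__type(key, int);
		__type(value, __u64);
	} sk_stg SEC(".maps");

	SEC("fentry/tcp_close")
	int BPF_PROG(count_close, struct sock *sk)
	{
		__u64 *val;

		/* Now callable from fentry/fexit/iter/raw_tp programs
		 * (but not while tracing a bpf_sk_storage*() function,
		 * per the allowed() check above). */
		val = bpf_sk_storage_get(&sk_stg, sk, 0,
					 BPF_SK_STORAGE_GET_F_CREATE);
		if (val)
			__sync_fetch_and_add(val, 1);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";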
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 9fcaa544f11a..81809fa735a7 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -709,7 +709,7 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
EXPORT_SYMBOL(zerocopy_sg_from_iter);
/**
- * skb_copy_and_csum_datagram_iter - Copy datagram to an iovec iterator
+ * skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
* and update a checksum.
* @skb: buffer to copy
* @offset: offset in the buffer to start copying from
diff --git a/net/core/dev.c b/net/core/dev.c
index ac7a7ae4614a..a46334906c94 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1069,19 +1069,6 @@ struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
-struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
-{
- struct net_device *dev;
-
- ASSERT_RTNL();
- for_each_netdev(net, dev)
- if (dev->type == type)
- return dev;
-
- return NULL;
-}
-EXPORT_SYMBOL(__dev_getfirstbyhwtype);
-
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
struct net_device *dev, *ret = NULL;
@@ -3206,7 +3193,7 @@ int skb_checksum_help(struct sk_buff *skb)
if (skb->ip_summed == CHECKSUM_COMPLETE)
goto out_set_summed;
- if (unlikely(skb_shinfo(skb)->gso_size)) {
+ if (unlikely(skb_is_gso(skb))) {
skb_warn_bad_offload(skb);
return -EINVAL;
}
@@ -3495,6 +3482,11 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
if (gso_segs > dev->gso_max_segs)
return features & ~NETIF_F_GSO_MASK;
+ if (!skb_shinfo(skb)->gso_type) {
+ skb_warn_bad_offload(skb);
+ return features & ~NETIF_F_GSO_MASK;
+ }
+
/* Support for GSO partial features requires software
* intervention before we can actually process the packets
* so we need to strip support for any partial features now
@@ -3867,6 +3859,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
return skb;
/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
+ qdisc_skb_cb(skb)->mru = 0;
mini_qdisc_bstats_cpu_update(miniq, skb);
switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
@@ -4950,6 +4943,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
}
qdisc_skb_cb(skb)->pkt_len = skb->len;
+ qdisc_skb_cb(skb)->mru = 0;
skb->tc_at_ingress = 1;
mini_qdisc_bstats_cpu_update(miniq, skb);
@@ -6454,7 +6448,8 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
- new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
+ new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
+ NAPIF_STATE_PREFER_BUSY_POLL);
/* If STATE_MISSED was set, leave STATE_SCHED set,
* because we will call napi->poll() one more time.
@@ -6491,10 +6486,30 @@ static struct napi_struct *napi_by_id(unsigned int napi_id)
#if defined(CONFIG_NET_RX_BUSY_POLL)
-#define BUSY_POLL_BUDGET 8
+static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
+{
+ if (!skip_schedule) {
+ gro_normal_list(napi);
+ __napi_schedule(napi);
+ return;
+ }
-static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
+ if (napi->gro_bitmask) {
+ /* flush too old packets
+ * If HZ < 1000, flush all packets.
+ */
+ napi_gro_flush(napi, HZ >= 1000);
+ }
+
+ gro_normal_list(napi);
+ clear_bit(NAPI_STATE_SCHED, &napi->state);
+}
+
+static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
+ u16 budget)
{
+ bool skip_schedule = false;
+ unsigned long timeout;
int rc;
/* Busy polling means there is a high chance device driver hard irq
@@ -6511,29 +6526,33 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
local_bh_disable();
+ if (prefer_busy_poll) {
+ napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
+ timeout = READ_ONCE(napi->dev->gro_flush_timeout);
+ if (napi->defer_hard_irqs_count && timeout) {
+ hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
+ skip_schedule = true;
+ }
+ }
+
/* All we really want here is to re-enable device interrupts.
* Ideally, a new ndo_busy_poll_stop() could avoid another round.
*/
- rc = napi->poll(napi, BUSY_POLL_BUDGET);
+ rc = napi->poll(napi, budget);
/* We can't gro_normal_list() here, because napi->poll() might have
* rearmed the napi (napi_complete_done()) in which case it could
* already be running on another CPU.
*/
- trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
+ trace_napi_poll(napi, rc, budget);
netpoll_poll_unlock(have_poll_lock);
- if (rc == BUSY_POLL_BUDGET) {
- /* As the whole budget was spent, we still own the napi so can
- * safely handle the rx_list.
- */
- gro_normal_list(napi);
- __napi_schedule(napi);
- }
+ if (rc == budget)
+ __busy_poll_stop(napi, skip_schedule);
local_bh_enable();
}
void napi_busy_loop(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
- void *loop_end_arg)
+ void *loop_end_arg, bool prefer_busy_poll, u16 budget)
{
unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
int (*napi_poll)(struct napi_struct *napi, int budget);
@@ -6561,17 +6580,23 @@ restart:
* we avoid dirtying napi->state as much as we can.
*/
if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
- NAPIF_STATE_IN_BUSY_POLL))
+ NAPIF_STATE_IN_BUSY_POLL)) {
+ if (prefer_busy_poll)
+ set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
goto count;
+ }
if (cmpxchg(&napi->state, val,
val | NAPIF_STATE_IN_BUSY_POLL |
- NAPIF_STATE_SCHED) != val)
+ NAPIF_STATE_SCHED) != val) {
+ if (prefer_busy_poll)
+ set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
goto count;
+ }
have_poll_lock = netpoll_poll_lock(napi);
napi_poll = napi->poll;
}
- work = napi_poll(napi, BUSY_POLL_BUDGET);
- trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
+ work = napi_poll(napi, budget);
+ trace_napi_poll(napi, work, budget);
gro_normal_list(napi);
count:
if (work > 0)
@@ -6584,7 +6609,7 @@ count:
if (unlikely(need_resched())) {
if (napi_poll)
- busy_poll_stop(napi, have_poll_lock);
+ busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
preempt_enable();
rcu_read_unlock();
cond_resched();
@@ -6595,7 +6620,7 @@ count:
cpu_relax();
}
if (napi_poll)
- busy_poll_stop(napi, have_poll_lock);
+ busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
preempt_enable();
out:
rcu_read_unlock();
@@ -6646,8 +6671,10 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
* NAPI_STATE_MISSED, since we do not react to a device IRQ.
*/
if (!napi_disable_pending(napi) &&
- !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
+ !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
+ clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
__napi_schedule_irqoff(napi);
+ }
return HRTIMER_NORESTART;
}
@@ -6705,6 +6732,7 @@ void napi_disable(struct napi_struct *n)
hrtimer_cancel(&n->timer);
+ clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);
@@ -6777,6 +6805,19 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
goto out_unlock;
}
+ /* The NAPI context has more processing work, but busy-polling
+ * is preferred. Exit early.
+ */
+ if (napi_prefer_busy_poll(n)) {
+ if (napi_complete_done(n, work)) {
+ /* If timeout is not set, we need to make sure
+ * that the NAPI is re-scheduled.
+ */
+ napi_schedule(n);
+ }
+ goto out_unlock;
+ }
+
if (n->gro_bitmask) {
/* flush too old packets
* If HZ < 1000, flush all packets.
@@ -6915,7 +6956,7 @@ bool netdev_has_upper_dev(struct net_device *dev,
EXPORT_SYMBOL(netdev_has_upper_dev);
/**
- * netdev_has_upper_dev_all - Check if device is linked to an upper device
+ * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
* @dev: device
* @upper_dev: upper device to check
*
@@ -8153,7 +8194,7 @@ EXPORT_SYMBOL(netdev_lower_dev_get_private);
/**
- * netdev_lower_change - Dispatch event about lower device state change
+ * netdev_lower_state_changed - Dispatch event about lower device state change
* @lower_dev: device
* @lower_state_info: state to dispatch
*
@@ -8898,7 +8939,7 @@ static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
return dev->netdev_ops->ndo_bpf;
default:
return NULL;
- };
+ }
}
static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
@@ -9602,6 +9643,11 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
}
}
+ if ((features & NETIF_F_HW_TLS_TX) && !(features & NETIF_F_HW_CSUM)) {
+ netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
+ features &= ~NETIF_F_HW_TLS_TX;
+ }
+
return features;
}
@@ -9777,7 +9823,7 @@ static int netif_alloc_rx_queues(struct net_device *dev)
rx[i].dev = dev;
/* XDP RX-queue setup */
- err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
+ err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
if (err < 0)
goto err_rxq_info;
}
@@ -10380,6 +10426,21 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
}
EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
+/**
+ * dev_get_tstats64 - ndo_get_stats64 implementation
+ * @dev: device to get statistics from
+ * @s: place to store stats
+ *
+ * Populate @s from dev->stats and dev->tstats. Can be used as
+ * ndo_get_stats64() callback.
+ */
+void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
+{
+ netdev_stats_to_stats64(s, &dev->stats);
+ dev_fetch_sw_netstats(s, dev->tstats);
+}
+EXPORT_SYMBOL_GPL(dev_get_tstats64);
+
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
struct netdev_queue *queue = dev_ingress_queue(dev);
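Drivers that already keep per-cpu stats in dev->tstats can now point their ndo_get_stats64 at this helper directly; the DSA conversion later in this series does exactly that. A hypothetical driver sketch (the foo_* ops are assumptions, and dev->tstats must have been allocated, e.g. with netdev_alloc_pcpu_stats(), during setup):

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open	 = foo_open,
		.ndo_stop	 = foo_stop,
		.ndo_start_xmit	 = foo_start_xmit,
		/* Folds dev->stats and per-cpu dev->tstats together,
		 * replacing a hand-rolled ndo_get_stats64. */
		.ndo_get_stats64 = dev_get_tstats64,
	};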
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 205e92e604ef..db8a0ff86f36 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -230,7 +230,7 @@ static int dev_do_ioctl(struct net_device *dev,
struct ifreq *ifr, unsigned int cmd)
{
const struct net_device_ops *ops = dev->netdev_ops;
- int err = -EOPNOTSUPP;
+ int err;
err = dsa_ndo_do_ioctl(dev, ifr, cmd);
if (err == 0 || err != -EOPNOTSUPP)
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 8c5ddffd707d..ee828e4b1007 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -3394,7 +3394,7 @@ out_free_msg:
nlmsg_free(msg);
}
-void devlink_flash_update_begin_notify(struct devlink *devlink)
+static void devlink_flash_update_begin_notify(struct devlink *devlink)
{
struct devlink_flash_notify params = { 0 };
@@ -3402,9 +3402,8 @@ void devlink_flash_update_begin_notify(struct devlink *devlink)
DEVLINK_CMD_FLASH_UPDATE,
&params);
}
-EXPORT_SYMBOL_GPL(devlink_flash_update_begin_notify);
-void devlink_flash_update_end_notify(struct devlink *devlink)
+static void devlink_flash_update_end_notify(struct devlink *devlink)
{
struct devlink_flash_notify params = { 0 };
@@ -3412,7 +3411,6 @@ void devlink_flash_update_end_notify(struct devlink *devlink)
DEVLINK_CMD_FLASH_UPDATE_END,
&params);
}
-EXPORT_SYMBOL_GPL(devlink_flash_update_end_notify);
void devlink_flash_update_status_notify(struct devlink *devlink,
const char *status_msg,
@@ -3453,10 +3451,12 @@ EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify);
static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
struct genl_info *info)
{
- struct nlattr *nla_component, *nla_overwrite_mask;
+ struct nlattr *nla_component, *nla_overwrite_mask, *nla_file_name;
struct devlink_flash_update_params params = {};
struct devlink *devlink = info->user_ptr[0];
+ const char *file_name;
u32 supported_params;
+ int ret;
if (!devlink->ops->flash_update)
return -EOPNOTSUPP;
@@ -3466,8 +3466,6 @@ static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
supported_params = devlink->ops->supported_flash_update_params;
- params.file_name = nla_data(info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME]);
-
nla_component = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT];
if (nla_component) {
if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT)) {
@@ -3491,7 +3489,21 @@ static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
params.overwrite_mask = sections.value & sections.selector;
}
- return devlink->ops->flash_update(devlink, &params, info->extack);
+ nla_file_name = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME];
+ file_name = nla_data(nla_file_name);
+ ret = request_firmware(&params.fw, file_name, devlink->dev);
+ if (ret) {
+ NL_SET_ERR_MSG_ATTR(info->extack, nla_file_name, "failed to locate the requested firmware file");
+ return ret;
+ }
+
+ devlink_flash_update_begin_notify(devlink);
+ ret = devlink->ops->flash_update(devlink, &params, info->extack);
+ devlink_flash_update_end_notify(devlink);
+
+ release_firmware(params.fw);
+
+ return ret;
}
static const struct devlink_param devlink_param_generic[] = {
@@ -6981,7 +6993,6 @@ static int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb,
struct netlink_ext_ack *extack = info->extack;
struct devlink *devlink = info->user_ptr[0];
struct devlink_trap_item *trap_item;
- int err;
if (list_empty(&devlink->trap_list))
return -EOPNOTSUPP;
@@ -6992,11 +7003,7 @@ static int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb,
return -ENOENT;
}
- err = devlink_trap_action_set(devlink, trap_item, info);
- if (err)
- return err;
-
- return 0;
+ return devlink_trap_action_set(devlink, trap_item, info);
}
static struct devlink_trap_group_item *
@@ -9500,6 +9507,7 @@ static const struct devlink_trap devlink_trap_generic[] = {
DEVLINK_TRAP(DCCP_PARSING, DROP),
DEVLINK_TRAP(GTP_PARSING, DROP),
DEVLINK_TRAP(ESP_PARSING, DROP),
+ DEVLINK_TRAP(BLACKHOLE_NEXTHOP, DROP),
};
#define DEVLINK_TRAP_GROUP(_id) \
@@ -10249,12 +10257,18 @@ int devlink_compat_flash_update(struct net_device *dev, const char *file_name)
goto out;
}
- params.file_name = file_name;
+ ret = request_firmware(&params.fw, file_name, devlink->dev);
+ if (ret)
+ goto out;
mutex_lock(&devlink->lock);
+ devlink_flash_update_begin_notify(devlink);
ret = devlink->ops->flash_update(devlink, &params, NULL);
+ devlink_flash_update_end_notify(devlink);
mutex_unlock(&devlink->lock);
+ release_firmware(params.fw);
+
out:
rtnl_lock();
dev_put(dev);
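With request_firmware()/release_firmware() handled by the devlink core, a driver's ->flash_update() op consumes the pre-loaded firmware from params->fw instead of opening a file itself. A hedged driver-side sketch (the foo_* names and write helper are assumptions):

	static int foo_flash_update(struct devlink *devlink,
				    struct devlink_flash_update_params *params,
				    struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = devlink_priv(devlink);

		/* The core has already loaded params->fw and will release
		 * it after this op returns; the begin/end notifications
		 * are likewise sent by the core now. */
		return foo_write_flash(priv, params->fw->data,
				       params->fw->size, extack);
	}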
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 7bcfb16854cb..cd80ffed6d26 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -563,7 +563,7 @@ static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_device *dev;
nlrule->iifindex = -1;
- nla_strlcpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
+ nla_strscpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
dev = __dev_get_by_name(net, nlrule->iifname);
if (dev)
nlrule->iifindex = dev->ifindex;
@@ -573,7 +573,7 @@ static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_device *dev;
nlrule->oifindex = -1;
- nla_strlcpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
+ nla_strscpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
dev = __dev_get_by_name(net, nlrule->oifname);
if (dev)
nlrule->oifindex = dev->ifindex;
diff --git a/net/core/filter.c b/net/core/filter.c
index 2ca5eecebacf..255aeee72402 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4910,6 +4910,9 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
tp->notsent_lowat = val;
sk->sk_write_space(sk);
break;
+ case TCP_WINDOW_CLAMP:
+ ret = tcp_set_window_clamp(sk, val);
+ break;
default:
ret = -EINVAL;
}
@@ -6995,6 +6998,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_storage_delete_proto;
case BPF_FUNC_setsockopt:
switch (prog->expected_attach_type) {
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
case BPF_CGROUP_INET4_CONNECT:
case BPF_CGROUP_INET6_CONNECT:
return &bpf_sock_addr_setsockopt_proto;
@@ -7003,6 +7008,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
}
case BPF_FUNC_getsockopt:
switch (prog->expected_attach_type) {
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
case BPF_CGROUP_INET4_CONNECT:
case BPF_CGROUP_INET6_CONNECT:
return &bpf_sock_addr_getsockopt_proto;
@@ -10406,6 +10413,24 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
};
+BPF_CALL_1(bpf_sock_from_file, struct file *, file)
+{
+ return (unsigned long)sock_from_file(file);
+}
+
+BTF_ID_LIST(bpf_sock_from_file_btf_ids)
+BTF_ID(struct, socket)
+BTF_ID(struct, file)
+
+const struct bpf_func_proto bpf_sock_from_file_proto = {
+ .func = bpf_sock_from_file,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
+ .ret_btf_id = &bpf_sock_from_file_btf_ids[0],
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_sock_from_file_btf_ids[1],
+};
+
static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id)
{
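Together, the BPF_CGROUP_INET{4,6}_BIND cases and the new TCP_WINDOW_CLAMP branch let a bind-time cgroup program clamp the receive window before the socket goes live. A hypothetical program sketch (the clamp value is an arbitrary example):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	#ifndef SOL_TCP
	#define SOL_TCP			6
	#endif
	#ifndef TCP_WINDOW_CLAMP
	#define TCP_WINDOW_CLAMP	10
	#endif

	SEC("cgroup/bind4")
	int clamp_on_bind(struct bpf_sock_addr *ctx)
	{
		int clamp = 65535;	/* arbitrary example value */

		/* Permitted for the INET4/INET6 bind attach types as of
		 * this change; routed to tcp_set_window_clamp(). */
		bpf_setsockopt(ctx, SOL_TCP, TCP_WINDOW_CLAMP,
			       &clamp, sizeof(clamp));
		return 1;	/* allow the bind() to proceed */
	}

	char LICENSE[] SEC("license") = "GPL";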
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index e21950a2c897..6f1adba6695f 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -48,7 +48,7 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
memset(flow_dissector, 0, sizeof(*flow_dissector));
for (i = 0; i < key_count; i++, key++) {
- /* User should make sure that every key target offset is withing
+ /* User should make sure that every key target offset is within
* boundaries of unsigned short.
*/
BUG_ON(key->offset > USHRT_MAX);
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 41b24cd31562..b49c57d35a88 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -68,9 +68,8 @@ struct update_classid_context {
static int update_classid_sock(const void *v, struct file *file, unsigned n)
{
- int err;
struct update_classid_context *ctx = (void *)v;
- struct socket *sock = sock_from_file(file, &err);
+ struct socket *sock = sock_from_file(file);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 9bd4cab7d510..99a431c56f23 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -220,8 +220,7 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
static int update_netprio(const void *v, struct file *file, unsigned n)
{
- int err;
- struct socket *sock = sock_from_file(file, &err);
+ struct socket *sock = sock_from_file(file);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ef98372facf6..f3c690b8c8e3 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -11,6 +11,8 @@
#include <linux/device.h>
#include <net/page_pool.h>
+#include <net/xdp.h>
+
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
@@ -362,8 +364,9 @@ static bool pool_page_reusable(struct page_pool *pool, struct page *page)
* If the page refcnt != 1, then the page will be returned to memory
* subsystem.
*/
-void page_pool_put_page(struct page_pool *pool, struct page *page,
- unsigned int dma_sync_size, bool allow_direct)
+static __always_inline struct page *
+__page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct)
{
/* This allocator is optimized for the XDP mode that uses
* one-frame-per-page, but has fallbacks that act like the
@@ -379,15 +382,12 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
page_pool_dma_sync_for_device(pool, page,
dma_sync_size);
- if (allow_direct && in_serving_softirq())
- if (page_pool_recycle_in_cache(page, pool))
- return;
+ if (allow_direct && in_serving_softirq() &&
+ page_pool_recycle_in_cache(page, pool))
+ return NULL;
- if (!page_pool_recycle_in_ring(pool, page)) {
- /* Cache full, fallback to free pages */
- page_pool_return_page(pool, page);
- }
- return;
+ /* Page found as candidate for recycling */
+ return page;
}
/* Fallback/non-XDP mode: API user has an elevated refcnt.
*
@@ -405,9 +405,59 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
/* Do not replace this with page_pool_return_page() */
page_pool_release_page(pool, page);
put_page(page);
+
+ return NULL;
+}
+
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct)
+{
+ page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
+ if (page && !page_pool_recycle_in_ring(pool, page)) {
+ /* Cache full, fallback to free pages */
+ page_pool_return_page(pool, page);
+ }
}
EXPORT_SYMBOL(page_pool_put_page);
+/* Caller must not use data area after call, as this function overwrites it */
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ int count)
+{
+ int i, bulk_len = 0;
+
+ for (i = 0; i < count; i++) {
+ struct page *page = virt_to_head_page(data[i]);
+
+ page = __page_pool_put_page(pool, page, -1, false);
+ /* Approved for bulk recycling in ptr_ring cache */
+ if (page)
+ data[bulk_len++] = page;
+ }
+
+ if (unlikely(!bulk_len))
+ return;
+
+ /* Bulk producer into ptr_ring page_pool cache */
+ page_pool_ring_lock(pool);
+ for (i = 0; i < bulk_len; i++) {
+ if (__ptr_ring_produce(&pool->ring, data[i]))
+ break; /* ring full */
+ }
+ page_pool_ring_unlock(pool);
+
+ /* Hopefully all pages were returned into the ptr_ring */
+ if (likely(i == bulk_len))
+ return;
+
+ /* ptr_ring cache full, free remaining pages outside producer lock
+ * since put_page() with refcnt == 1 can be an expensive operation
+ */
+ for (; i < bulk_len; i++)
+ page_pool_return_page(pool, data[i]);
+}
+EXPORT_SYMBOL(page_pool_put_page_bulk);
+
static void page_pool_empty_ring(struct page_pool *pool)
{
struct page *page;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 7d7223691783..bb0596c41b3e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -139,7 +139,7 @@ bool lockdep_rtnl_is_held(void)
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
-static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
+static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
static inline int rtm_msgindex(int msgtype)
{
@@ -157,7 +157,7 @@ static inline int rtm_msgindex(int msgtype)
static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
- struct rtnl_link **tab;
+ struct rtnl_link __rcu **tab;
if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
protocol = PF_UNSPEC;
@@ -166,7 +166,7 @@ static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
if (!tab)
tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
- return tab[msgtype];
+ return rcu_dereference_rtnl(tab[msgtype]);
}
static int rtnl_register_internal(struct module *owner,
@@ -183,7 +183,7 @@ static int rtnl_register_internal(struct module *owner,
msgindex = rtm_msgindex(msgtype);
rtnl_lock();
- tab = rtnl_msg_handlers[protocol];
+ tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
if (tab == NULL) {
tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
if (!tab)
@@ -286,7 +286,8 @@ void rtnl_register(int protocol, int msgtype,
*/
int rtnl_unregister(int protocol, int msgtype)
{
- struct rtnl_link **tab, *link;
+ struct rtnl_link __rcu **tab;
+ struct rtnl_link *link;
int msgindex;
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
@@ -299,7 +300,7 @@ int rtnl_unregister(int protocol, int msgtype)
return -ENOENT;
}
- link = tab[msgindex];
+ link = rtnl_dereference(tab[msgindex]);
rcu_assign_pointer(tab[msgindex], NULL);
rtnl_unlock();
@@ -318,20 +319,21 @@ EXPORT_SYMBOL_GPL(rtnl_unregister);
*/
void rtnl_unregister_all(int protocol)
{
- struct rtnl_link **tab, *link;
+ struct rtnl_link __rcu **tab;
+ struct rtnl_link *link;
int msgindex;
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
rtnl_lock();
- tab = rtnl_msg_handlers[protocol];
+ tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
if (!tab) {
rtnl_unlock();
return;
}
RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
- link = tab[msgindex];
+ link = rtnl_dereference(tab[msgindex]);
if (!link)
continue;
@@ -1939,7 +1941,7 @@ static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla
if (linfo[IFLA_INFO_KIND]) {
char kind[MODULE_NAME_LEN];
- nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
+ nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
ops = rtnl_link_ops_get(kind);
}
@@ -2953,9 +2955,9 @@ static struct net_device *rtnl_dev_get(struct net *net,
if (!ifname) {
ifname = buffer;
if (ifname_attr)
- nla_strlcpy(ifname, ifname_attr, IFNAMSIZ);
+ nla_strscpy(ifname, ifname_attr, IFNAMSIZ);
else if (altifname_attr)
- nla_strlcpy(ifname, altifname_attr, ALTIFNAMSIZ);
+ nla_strscpy(ifname, altifname_attr, ALTIFNAMSIZ);
else
return NULL;
}
@@ -2983,7 +2985,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout;
if (tb[IFLA_IFNAME])
- nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+ nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
ifname[0] = '\0';
@@ -3264,7 +3266,7 @@ replay:
return err;
if (tb[IFLA_IFNAME])
- nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+ nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
ifname[0] = '\0';
@@ -3296,7 +3298,7 @@ replay:
memset(linkinfo, 0, sizeof(linkinfo));
if (linkinfo[IFLA_INFO_KIND]) {
- nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
+ nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
ops = rtnl_link_ops_get(kind);
} else {
kind[0] = '\0';
@@ -3754,7 +3756,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
s_idx = 1;
for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
- struct rtnl_link **tab;
+ struct rtnl_link __rcu **tab;
struct rtnl_link *link;
rtnl_dumpit_func dumpit;
@@ -3768,7 +3770,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
if (!tab)
continue;
- link = tab[type];
+ link = rcu_dereference_rtnl(tab[type]);
if (!link)
continue;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e578544b2cc7..f62cae3f75d8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -249,6 +249,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
fclones->skb2.fclone = SKB_FCLONE_CLONE;
}
+
+ skb_set_kcov_handle(skb, kcov_common_handle());
+
out:
return skb;
nodata:
@@ -282,6 +285,8 @@ static struct sk_buff *__build_skb_around(struct sk_buff *skb,
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
+ skb_set_kcov_handle(skb, kcov_common_handle());
+
return skb;
}
@@ -837,7 +842,7 @@ EXPORT_SYMBOL(consume_skb);
#endif
/**
- * consume_stateless_skb - free an skbuff, assuming it is stateless
+ * __consume_stateless_skb - free an skbuff, assuming it is stateless
* @skb: buffer to free
*
* Alike consume_skb(), but this variant assumes that this is the last
@@ -897,6 +902,8 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
return;
}
+ lockdep_assert_in_softirq();
+
if (!skb_unref(skb))
return;
@@ -2011,6 +2018,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
skb->csum = csum_block_sub(skb->csum,
skb_checksum(skb, len, delta, 0),
len);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
+ int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
+
+ if (offset + sizeof(__sum16) > hdlen)
+ return -EINVAL;
}
return __pskb_trim(skb, len);
}
@@ -5430,7 +5443,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
goto err_free;
skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
+ if (!skb_transport_header_was_set(skb))
+ skb_reset_transport_header(skb);
skb_reset_mac_len(skb);
return skb;
diff --git a/net/core/sock.c b/net/core/sock.c
index 727ea1cc633c..bbcd4b97eddd 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1159,6 +1159,22 @@ set_sndbuf:
sk->sk_ll_usec = val;
}
break;
+ case SO_PREFER_BUSY_POLL:
+ if (valbool && !capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
+ break;
+ case SO_BUSY_POLL_BUDGET:
+ if (val > READ_ONCE(sk->sk_busy_poll_budget) && !capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ } else {
+ if (val < 0 || val > U16_MAX)
+ ret = -EINVAL;
+ else
+ WRITE_ONCE(sk->sk_busy_poll_budget, val);
+ }
+ break;
#endif
case SO_MAX_PACING_RATE:
@@ -1523,6 +1539,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
case SO_BUSY_POLL:
v.val = sk->sk_ll_usec;
break;
+ case SO_PREFER_BUSY_POLL:
+ v.val = READ_ONCE(sk->sk_prefer_busy_poll);
+ break;
#endif
case SO_MAX_PACING_RATE:
@@ -2486,7 +2505,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
}
EXPORT_SYMBOL(sk_page_frag_refill);
-static void __lock_sock(struct sock *sk)
+void __lock_sock(struct sock *sk)
__releases(&sk->sk_lock.slock)
__acquires(&sk->sk_lock.slock)
{
@@ -2808,14 +2827,8 @@ EXPORT_SYMBOL(sock_no_mmap);
void __receive_sock(struct file *file)
{
struct socket *sock;
- int error;
- /*
- * The resulting value of "error" is ignored here since we only
- * need to take action when the file is a socket and testing
- * "sock" for NULL is sufficient.
- */
- sock = sock_from_file(file, &error);
+ sock = sock_from_file(file);
if (sock) {
sock_update_netprioidx(&sock->sk->sk_cgrp_data);
sock_update_classid(&sock->sk->sk_cgrp_data);
@@ -3078,7 +3091,7 @@ EXPORT_SYMBOL(release_sock);
*
* sk_lock.slock unlocked, owned = 1, BH enabled
*/
-bool lock_sock_fast(struct sock *sk)
+bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
{
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
@@ -3096,6 +3109,7 @@ bool lock_sock_fast(struct sock *sk)
* The sk_lock has mutex_lock() semantics here:
*/
mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+ __acquire(&sk->sk_lock.slock);
local_bh_enable();
return true;
}
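Both knobs are ordinary socket options, so userspace opts in per socket. A hedged usage sketch (option values 69/70 match the uapi additions in this series, but older libc headers will not define them yet; the budget value is illustrative):

	#include <sys/socket.h>

	#ifndef SO_PREFER_BUSY_POLL
	#define SO_PREFER_BUSY_POLL	69
	#endif
	#ifndef SO_BUSY_POLL_BUDGET
	#define SO_BUSY_POLL_BUDGET	70
	#endif

	static int enable_prefer_busy_poll(int fd)
	{
		int one = 1;
		int budget = 64;	/* illustrative, must fit in u16 */

		/* Enabling the preference, or raising the budget above
		 * its current value, requires CAP_NET_ADMIN. */
		if (setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
			       &one, sizeof(one)) < 0)
			return -1;
		return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
				  &budget, sizeof(budget));
	}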
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index ddc899e83313..64b5ec14ff50 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -27,8 +27,6 @@ struct bpf_stab {
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
struct bpf_stab *stab;
- u64 cost;
- int err;
if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM);
@@ -39,29 +37,22 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
- stab = kzalloc(sizeof(*stab), GFP_USER);
+ stab = kzalloc(sizeof(*stab), GFP_USER | __GFP_ACCOUNT);
if (!stab)
return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&stab->map, attr);
raw_spin_lock_init(&stab->lock);
- /* Make sure page count doesn't overflow. */
- cost = (u64) stab->map.max_entries * sizeof(struct sock *);
- err = bpf_map_charge_init(&stab->map.memory, cost);
- if (err)
- goto free_stab;
-
stab->sks = bpf_map_area_alloc(stab->map.max_entries *
sizeof(struct sock *),
stab->map.numa_node);
- if (stab->sks)
- return &stab->map;
- err = -ENOMEM;
- bpf_map_charge_finish(&stab->map.memory);
-free_stab:
- kfree(stab);
- return ERR_PTR(err);
+ if (!stab->sks) {
+ kfree(stab);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return &stab->map;
}
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
@@ -975,8 +966,9 @@ static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
}
}
- new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
- htab->map.numa_node);
+ new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
+ GFP_ATOMIC | __GFP_NOWARN,
+ htab->map.numa_node);
if (!new) {
atomic_dec(&htab->count);
return ERR_PTR(-ENOMEM);
@@ -1103,7 +1095,6 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
struct bpf_shtab *htab;
int i, err;
- u64 cost;
if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM);
@@ -1116,7 +1107,7 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
if (attr->key_size > MAX_BPF_STACK)
return ERR_PTR(-E2BIG);
- htab = kzalloc(sizeof(*htab), GFP_USER);
+ htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT);
if (!htab)
return ERR_PTR(-ENOMEM);
@@ -1131,21 +1122,10 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
goto free_htab;
}
- cost = (u64) htab->buckets_num * sizeof(struct bpf_shtab_bucket) +
- (u64) htab->elem_size * htab->map.max_entries;
- if (cost >= U32_MAX - PAGE_SIZE) {
- err = -EINVAL;
- goto free_htab;
- }
- err = bpf_map_charge_init(&htab->map.memory, cost);
- if (err)
- goto free_htab;
-
htab->buckets = bpf_map_area_alloc(htab->buckets_num *
sizeof(struct bpf_shtab_bucket),
htab->map.numa_node);
if (!htab->buckets) {
- bpf_map_charge_finish(&htab->map.memory);
err = -ENOMEM;
goto free_htab;
}
diff --git a/net/core/xdp.c b/net/core/xdp.c
index d900cebc0acd..3a8c9ab4ecbe 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -158,7 +158,7 @@ static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
- struct net_device *dev, u32 queue_index)
+ struct net_device *dev, u32 queue_index, unsigned int napi_id)
{
if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
WARN(1, "Driver promised not to register this");
@@ -179,6 +179,7 @@ int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
xdp_rxq_info_init(xdp_rxq);
xdp_rxq->dev = dev;
xdp_rxq->queue_index = queue_index;
+ xdp_rxq->napi_id = napi_id;
xdp_rxq->reg_state = REG_STATE_REGISTERED;
return 0;
@@ -383,6 +384,60 @@ void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
+/* XDP bulk APIs introduce a defer/flush mechanism to return
+ * pages belonging to the same xdp_mem_allocator object
+ * (identified via the mem.id field) in bulk to optimize
+ * I-cache and D-cache.
+ * The bulk queue size is set to 16 to be aligned to how
+ * XDP_REDIRECT bulking works. The bulk is flushed when
+ * it is full or when mem.id changes.
+ * xdp_frame_bulk is usually stored/allocated on the function
+ * call-stack to avoid locking penalties.
+ */
+void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
+{
+ struct xdp_mem_allocator *xa = bq->xa;
+
+ if (unlikely(!xa || !bq->count))
+ return;
+
+ page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
+ /* bq->xa is not cleared, to save a lookup if mem.id is the same in the next bulk */
+ bq->count = 0;
+}
+EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);
+
+/* Must be called with rcu_read_lock held */
+void xdp_return_frame_bulk(struct xdp_frame *xdpf,
+ struct xdp_frame_bulk *bq)
+{
+ struct xdp_mem_info *mem = &xdpf->mem;
+ struct xdp_mem_allocator *xa;
+
+ if (mem->type != MEM_TYPE_PAGE_POOL) {
+ __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
+ return;
+ }
+
+ xa = bq->xa;
+ if (unlikely(!xa)) {
+ xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
+ bq->count = 0;
+ bq->xa = xa;
+ }
+
+ if (bq->count == XDP_BULK_QUEUE_SIZE)
+ xdp_flush_frame_bulk(bq);
+
+ if (unlikely(mem->id != xa->mem.id)) {
+ xdp_flush_frame_bulk(bq);
+ bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
+ }
+
+ bq->q[bq->count++] = xdpf->data;
+}
+EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
+
void xdp_return_buff(struct xdp_buff *xdp)
{
__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
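A driver TX-completion path would keep an xdp_frame_bulk on its stack and flush once at the end, under rcu_read_lock() as xdp_return_frame_bulk() requires. A hedged sketch (the frame-array shape and foo_* naming are assumptions):

	static void foo_xdp_complete(struct xdp_frame **frames, int n)
	{
		struct xdp_frame_bulk bq;
		int i;

		xdp_frame_bulk_init(&bq);

		rcu_read_lock();	/* required by xdp_return_frame_bulk() */
		for (i = 0; i < n; i++)
			xdp_return_frame_bulk(frames[i], &bq);
		/* Flush whatever is still queued for this mem.id. */
		xdp_flush_frame_bulk(&bq);
		rcu_read_unlock();
	}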
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 16014ad19406..084e159a12ba 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1827,6 +1827,8 @@ static int dcb_app_add(const struct dcb_app *app, int ifindex)
/**
* dcb_getapp - retrieve the DCBX application user priority
+ * @dev: network interface
+ * @app: application to get user priority of
*
* On success returns a non-zero 802.1p user priority bitmap
* otherwise returns 0 as the invalid user priority bitmap to
@@ -1849,6 +1851,8 @@ EXPORT_SYMBOL(dcb_getapp);
/**
* dcb_setapp - add CEE dcb application data to app list
+ * @dev: network interface
+ * @new: application data to add
*
* Priority 0 is an invalid priority in CEE spec. This routine
* removes applications from the app list if the priority is
@@ -1890,6 +1894,8 @@ EXPORT_SYMBOL(dcb_setapp);
/**
* dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
+ * @dev: network interface
+ * @app: where to store the retrieved application data
*
* Helper routine which on success returns a non-zero 802.1Qaz user
* priority bitmap otherwise returns 0 to indicate the dcb_app was
@@ -1912,6 +1918,8 @@ EXPORT_SYMBOL(dcb_ieee_getapp_mask);
/**
* dcb_ieee_setapp - add IEEE dcb application data to app list
+ * @dev: network interface
+ * @new: application data to add
*
* This adds Application data to the list. Multiple application
* entries may exists for the same selector and protocol as long
@@ -1946,6 +1954,8 @@ EXPORT_SYMBOL(dcb_ieee_setapp);
/**
* dcb_ieee_delapp - delete IEEE dcb application data from list
+ * @dev: network interface
+ * @del: application data to delete
*
* This removes a matching APP data from the APP list
*/
@@ -1975,7 +1985,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
}
EXPORT_SYMBOL(dcb_ieee_delapp);
-/**
+/*
* dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from
* priorities to the DSCP values assigned to that priority. Initialize p_map
* such that each map element holds a bit mask of DSCP values configured for
@@ -2004,7 +2014,7 @@ void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
}
EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);
-/**
+/*
* dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from
* DSCP values to the priorities assigned to that DSCP value. Initialize p_map
* such that each map element holds a bit mask of priorities configured for a
@@ -2031,7 +2041,7 @@ dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
}
EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
-/**
+/*
* Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet
* type, with valid PID values >= 1536. A special meaning is then assigned to
* protocol value of 0: "default priority. For use when priority is not
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 8f3dd3b1d2d0..c4bbac99740d 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -242,6 +242,8 @@ static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
/**
* dccp_ackvec_input - Register incoming packet in the buffer
+ * @av: Ack Vector to register packet to
+ * @skb: Packet to register
*/
void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
{
@@ -273,6 +275,9 @@ void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
/**
* dccp_ackvec_clear_state - Perform house-keeping / garbage-collection
+ * @av: Ack Vector record to clean
+ * @ackno: last Ack Vector which has been acknowledged
+ *
* This routine is called when the peer acknowledges the receipt of Ack Vectors
* up to and including @ackno. While based on section A.3 of RFC 4340, here
* are additional precautions to prevent corrupted buffer state. In particular,
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 1e9bb121ba72..6beac5d348e2 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -76,7 +76,7 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
return err;
}
-static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
+static __printf(3, 4) struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
{
struct kmem_cache *slab;
va_list args;
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 3da1f77bd039..4d9823d6dced 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -181,6 +181,9 @@ MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");
/**
* ccid2_update_used_window - Track how much of cwnd is actually used
+ * @hc: socket to update window
+ * @new_wnd: new window values to add into the filter
+ *
* This is done in addition to CWV. The sender needs to have an idea of how many
* packets may be in flight, to set the local Sequence Window value accordingly
* (RFC 4340, 7.5.2). The CWV mechanism is exploited to keep track of the
@@ -349,6 +352,8 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
/**
* ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
+ * @sk: socket to perform estimator on
+ *
* This code is almost identical with TCP's tcp_rtt_estimator(), since
* - it has a higher sampling frequency (recommended by RFC 1323),
* - the RTO does not collapse into RTT due to RTTVAR going towards zero,
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index b9ee1a4a8955..ca8670f78ac6 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -79,6 +79,8 @@ static inline u64 rfc3390_initial_rate(struct sock *sk)
/**
* ccid3_update_send_interval - Calculate new t_ipi = s / X_inst
+ * @hc: socket to have the send interval updated
+ *
* This respects the granularity of X_inst (64 * bytes/second).
*/
static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
@@ -99,6 +101,7 @@ static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
/**
* ccid3_hc_tx_update_x - Update allowed sending rate X
+ * @sk: socket to be updated
* @stamp: most recent time if available - can be left NULL.
*
* This function tracks draft rfc3448bis, check there for latest details.
@@ -151,6 +154,7 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
/**
* ccid3_hc_tx_update_s - Track the mean packet size `s'
+ * @hc: socket to be updated
* @len: DCCP packet payload size in bytes
*
* cf. RFC 4342, 5.3 and RFC 3448, 4.1
@@ -259,6 +263,7 @@ out:
/**
* ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets
+ * @sk: socket to send packet from
* @skb: next packet candidate to send on @sk
*
* This function uses the convention of ccid_packet_dequeue_eval() and
@@ -655,6 +660,7 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
/**
* ccid3_first_li - Implements [RFC 5348, 6.3.1]
+ * @sk: socket to calculate loss interval for
*
* Determine the length of the first loss interval via inverse lookup.
* Assume that X_recv can be computed by the throughput equation
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 67abad695e66..da95319842bb 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -79,6 +79,9 @@ static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
/**
* tfrc_lh_update_i_mean - Update the `open' loss interval I_0
+ * @lh: histogram to update
+ * @skb: received packet triggering the loss interval update
+ *
* For recomputing p: returns `true' if p > p_prev <=> 1/p < 1/p_prev
*/
u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index af08e2df7108..0cdda3c66fb5 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -385,6 +385,9 @@ static inline struct tfrc_rx_hist_entry *
/**
* tfrc_rx_hist_sample_rtt - Sample RTT from timestamp / CCVal
+ * @h: receive histogram
+ * @skb: packet containing timestamp.
+ *
* Based on ideas presented in RFC 4342, 8.1. Returns 0 if it was not able
* to compute a sample with given data - calling function should check this.
*/
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 788dd629c420..305f56804832 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -996,6 +996,8 @@ int dccp_feat_finalise_settings(struct dccp_sock *dp)
/**
* dccp_feat_server_ccid_dependencies - Resolve CCID-dependent features
+ * @dreq: server socket to resolve
+ *
* It is the server which resolves the dependencies once the CCID has been
* fully negotiated. If no CCID has been negotiated, it uses the default CCID.
*/
@@ -1033,6 +1035,10 @@ static int dccp_feat_preflist_match(u8 *servlist, u8 slen, u8 *clilist, u8 clen)
/**
* dccp_feat_prefer - Move preferred entry to the start of array
+ * @preferred_value: entry to move to start of array
+ * @array: array of preferred entries
+ * @array_len: size of the array
+ *
* Reorder the @array_len elements in @array so that @preferred_value comes
* first. Returns >0 to indicate that @preferred_value does occur in @array.
*/
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 50e6d5699bb2..b8a24734385e 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -143,6 +143,8 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
/**
* dccp_determine_ccmps - Find out about CCID-specific packet-size limits
+ * @dp: socket to find packet size limits of
+ *
* We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
* since the RX CCID is restricted to feedback packets (Acks), which are small
* in comparison with the data traffic. A value of 0 means "no current CCMPS".
@@ -236,6 +238,8 @@ static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
/**
* dccp_xmit_packet - Send data packet under control of CCID
+ * @sk: socket to send data packet on
+ *
* Transmits next-queued payload and informs CCID to account for the packet.
*/
static void dccp_xmit_packet(struct sock *sk)
@@ -296,6 +300,9 @@ static void dccp_xmit_packet(struct sock *sk)
/**
* dccp_flush_write_queue - Drain queue at end of connection
+ * @sk: socket to be drained
+ * @time_budget: time allowed to drain the queue
+ *
* Since dccp_sendmsg queues packets without waiting for them to be sent, it may
* happen that the TX queue is not empty at the end of a connection. We give the
* HC-sender CCID a grace period of up to @time_budget jiffies. If this function
@@ -367,6 +374,8 @@ void dccp_write_xmit(struct sock *sk)
/**
* dccp_retransmit_skb - Retransmit Request, Close, or CloseReq packets
+ * @sk: socket to perform retransmit on
+ *
* There are only four retransmittable packet types in DCCP:
* - Request in client-REQUEST state (sec. 8.1.1),
* - CloseReq in server-CLOSEREQ state (sec. 8.3),
diff --git a/net/dccp/qpolicy.c b/net/dccp/qpolicy.c
index db2448c33a62..5ba204ec0aca 100644
--- a/net/dccp/qpolicy.c
+++ b/net/dccp/qpolicy.c
@@ -65,14 +65,16 @@ static bool qpolicy_prio_full(struct sock *sk)
* @push: add a new @skb to the write queue
* @full: indicates that no more packets will be admitted
* @top: peeks at whatever the queueing policy defines as its `top'
+ * @params: parameter passed to policy operation
*/
-static struct dccp_qpolicy_operations {
+struct dccp_qpolicy_operations {
void (*push) (struct sock *sk, struct sk_buff *skb);
bool (*full) (struct sock *sk);
struct sk_buff* (*top) (struct sock *sk);
__be32 params;
+};
-} qpol_table[DCCPQ_POLICY_MAX] = {
+static struct dccp_qpolicy_operations qpol_table[DCCPQ_POLICY_MAX] = {
[DCCPQ_POLICY_SIMPLE] = {
.push = qpolicy_simple_push,
.full = qpolicy_simple_full,
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index a934d2932373..db768f223ef7 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -215,13 +215,14 @@ out:
/**
* dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface
- * @data: Socket to act on
+ * @t: pointer to the tasklet associated with this handler
*
* See the comments above %ccid_dequeueing_decision for supported modes.
*/
-static void dccp_write_xmitlet(unsigned long data)
+static void dccp_write_xmitlet(struct tasklet_struct *t)
{
- struct sock *sk = (struct sock *)data;
+ struct dccp_sock *dp = from_tasklet(dp, t, dccps_xmitlet);
+ struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;
bh_lock_sock(sk);
if (sock_owned_by_user(sk))
@@ -235,16 +236,15 @@ static void dccp_write_xmitlet(unsigned long data)
static void dccp_write_xmit_timer(struct timer_list *t)
{
struct dccp_sock *dp = from_timer(dp, t, dccps_xmit_timer);
- struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;
- dccp_write_xmitlet((unsigned long)sk);
+ dccp_write_xmitlet(&dp->dccps_xmitlet);
}
void dccp_init_xmit_timers(struct sock *sk)
{
struct dccp_sock *dp = dccp_sk(sk);
- tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
+ tasklet_setup(&dp->dccps_xmitlet, dccp_write_xmitlet);
timer_setup(&dp->dccps_xmit_timer, dccp_write_xmit_timer, 0);
inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
&dccp_keepalive_timer);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 15d42353f1a3..d1c50a48614b 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -658,7 +658,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
ifa->ifa_dev = dn_db;
if (tb[IFA_LABEL])
- nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
+ nla_strscpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
else
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 1f9b9b11008c..dfecd7b22fd7 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -56,20 +56,31 @@ config NET_DSA_TAG_BRCM_PREPEND
Broadcom switches which places the tag before the Ethernet header
(prepended).
+config NET_DSA_TAG_HELLCREEK
+ tristate "Tag driver for Hirschmann Hellcreek TSN switches"
+ help
+ Say Y or M if you want to enable support for tagging frames
+ for the Hirschmann Hellcreek TSN switches.
+
config NET_DSA_TAG_GSWIP
tristate "Tag driver for Lantiq / Intel GSWIP switches"
help
Say Y or M if you want to enable support for tagging frames for the
Lantiq / Intel GSWIP switches.
+config NET_DSA_TAG_DSA_COMMON
+ tristate
+
config NET_DSA_TAG_DSA
tristate "Tag driver for Marvell switches using DSA headers"
+ select NET_DSA_TAG_DSA_COMMON
help
Say Y or M if you want to enable support for tagging frames for the
Marvell switches which use DSA headers.
config NET_DSA_TAG_EDSA
tristate "Tag driver for Marvell switches using EtherType DSA headers"
+ select NET_DSA_TAG_DSA_COMMON
help
Say Y or M if you want to enable support for tagging frames for the
Marvell switches which use EtherType DSA headers.
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 4f47b2025ff5..0fb2b75a7ae3 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -7,9 +7,9 @@ dsa_core-y += dsa.o dsa2.o master.o port.o slave.o switch.o
obj-$(CONFIG_NET_DSA_TAG_8021Q) += tag_8021q.o
obj-$(CONFIG_NET_DSA_TAG_AR9331) += tag_ar9331.o
obj-$(CONFIG_NET_DSA_TAG_BRCM_COMMON) += tag_brcm.o
-obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
-obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
+obj-$(CONFIG_NET_DSA_TAG_DSA_COMMON) += tag_dsa.o
obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
+obj-$(CONFIG_NET_DSA_TAG_HELLCREEK) += tag_hellcreek.o
obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 2131bf2b3a67..a1b1dc8a4d87 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -201,7 +201,6 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct sk_buff *nskb = NULL;
- struct pcpu_sw_netstats *s;
struct dsa_slave_priv *p;
if (unlikely(!cpu_dp)) {
@@ -234,11 +233,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
skb = nskb;
}
- s = this_cpu_ptr(p->stats64);
- u64_stats_update_begin(&s->syncp);
- s->rx_packets++;
- s->rx_bytes += skb->len;
- u64_stats_update_end(&s->syncp);
+ dev_sw_netstats_rx_add(skb->dev, skb->len);
if (dsa_skb_defer_rx_timestamp(p, skb))
return 0;
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 12998bf04e55..7c96aae9062c 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -78,8 +78,6 @@ struct dsa_slave_priv {
struct sk_buff * (*xmit)(struct sk_buff *skb,
struct net_device *dev);
- struct pcpu_sw_netstats __percpu *stats64;
-
struct gro_cells gcells;
/* DSA port data, such as switch, port index, etc. */
diff --git a/net/dsa/master.c b/net/dsa/master.c
index c91de041a91d..5a0f6fec4271 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -308,14 +308,15 @@ static struct lock_class_key dsa_master_addr_list_lock_key;
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
+ int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead;
int ret;
rtnl_lock();
- ret = dev_set_mtu(dev, ETH_DATA_LEN + cpu_dp->tag_ops->overhead);
+ ret = dev_set_mtu(dev, mtu);
rtnl_unlock();
if (ret)
- netdev_warn(dev, "error %d setting MTU to include DSA overhead\n",
- ret);
+ netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
+ ret, mtu);
/* If we use a tagging format that doesn't have an ethertype
* field, make sure that all packets from this point on get
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 3bc5ca40c9fb..4a0498bf6c65 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -522,10 +522,10 @@ static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
if (!clone)
return;
- DSA_SKB_CB(skb)->clone = clone;
-
- if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
+ if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type)) {
+ DSA_SKB_CB(skb)->clone = clone;
return;
+ }
kfree_skb(clone);
}
@@ -548,17 +548,36 @@ netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
+static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
+{
+ int needed_headroom = dev->needed_headroom;
+ int needed_tailroom = dev->needed_tailroom;
+
+ /* For tail taggers, we need to pad short frames ourselves, to ensure
+ * that the tail tag does not fail at its role of being at the end of
+ * the packet, once the master interface pads the frame. Account for
+ * that pad length here, and pad later.
+ */
+ if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
+ needed_tailroom += ETH_ZLEN - skb->len;
+ /* skb_headroom() returns unsigned int... */
+ needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
+
+ if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
+ /* No reallocation needed, yay! */
+ return 0;
+
+ return pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+}
+
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct pcpu_sw_netstats *s;
struct sk_buff *nskb;
- s = this_cpu_ptr(p->stats64);
- u64_stats_update_begin(&s->syncp);
- s->tx_packets++;
- s->tx_bytes += skb->len;
- u64_stats_update_end(&s->syncp);
+ dev_sw_netstats_tx_add(dev, 1, skb->len);
DSA_SKB_CB(skb)->clone = NULL;
@@ -567,6 +586,17 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
*/
dsa_skb_tx_timestamp(p, skb);
+ if (dsa_realloc_skb(skb, dev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* needed_tailroom should still be 'warm' in the cache line from
+ * dsa_realloc_skb(), which has also ensured that padding is safe.
+ */
+ if (dev->needed_tailroom)
+ eth_skb_pad(skb);
+
/* Transmit function may have to reallocate the original SKB,
* in which case it must have freed it. Only free it here on error.
*/
@@ -679,7 +709,6 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
uint64_t *data)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = dp->ds;
struct pcpu_sw_netstats *s;
unsigned int start;
@@ -688,7 +717,7 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
for_each_possible_cpu(i) {
u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
- s = per_cpu_ptr(p->stats64, i);
+ s = per_cpu_ptr(dev->tstats, i);
do {
start = u64_stats_fetch_begin_irq(&s->syncp);
tx_packets = s->tx_packets;
@@ -1217,15 +1246,6 @@ static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}
-static void dsa_slave_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
-
- netdev_stats_to_stats64(stats, &dev->stats);
- dev_fetch_sw_netstats(stats, p->stats64);
-}
-
static int dsa_slave_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
@@ -1601,7 +1621,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
#endif
.ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
.ndo_setup_tc = dsa_slave_setup_tc,
- .ndo_get_stats64 = dsa_slave_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
.ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
@@ -1791,6 +1811,16 @@ int dsa_slave_create(struct dsa_port *port)
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
if (ds->ops->port_max_mtu)
slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
+ if (cpu_dp->tag_ops->tail_tag)
+ slave_dev->needed_tailroom = cpu_dp->tag_ops->overhead;
+ else
+ slave_dev->needed_headroom = cpu_dp->tag_ops->overhead;
+ /* Try to save one extra realloc later in the TX path (in the master)
+ * by also inheriting the master's needed headroom and tailroom.
+ * The 8021q driver also does this.
+ */
+ slave_dev->needed_headroom += master->needed_headroom;
+ slave_dev->needed_tailroom += master->needed_tailroom;
SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
@@ -1801,8 +1831,8 @@ int dsa_slave_create(struct dsa_port *port)
slave_dev->vlan_features = master->vlan_features;
p = netdev_priv(slave_dev);
- p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!p->stats64) {
+ slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!slave_dev->tstats) {
free_netdev(slave_dev);
return -ENOMEM;
}
@@ -1820,8 +1850,8 @@ int dsa_slave_create(struct dsa_port *port)
ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
rtnl_unlock();
if (ret && ret != -EOPNOTSUPP)
- dev_warn(ds->dev, "nonfatal error %d setting MTU on port %d\n",
- ret, port->index);
+ dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
+ ret, ETH_DATA_LEN, port->index);
netif_carrier_off(slave_dev);
@@ -1864,7 +1894,7 @@ out_phy:
out_gcells:
gro_cells_destroy(&p->gcells);
out_free:
- free_percpu(p->stats64);
+ free_percpu(slave_dev->tstats);
free_netdev(slave_dev);
port->slave = NULL;
return ret;
@@ -1886,7 +1916,7 @@ void dsa_slave_destroy(struct net_device *slave_dev)
dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
phylink_destroy(dp->pl);
gro_cells_destroy(&p->gcells);
- free_percpu(p->stats64);
+ free_percpu(slave_dev->tstats);
free_netdev(slave_dev);
}
@@ -1987,10 +2017,22 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
switch (event) {
case NETDEV_PRECHANGEUPPER: {
struct netdev_notifier_changeupper_info *info = ptr;
+ struct dsa_switch *ds;
+ struct dsa_port *dp;
+ int err;
if (!dsa_slave_dev_check(dev))
return dsa_prevent_bridging_8021q_upper(dev, ptr);
+ dp = dsa_slave_to_port(dev);
+ ds = dp->ds;
+
+ if (ds->ops->port_prechangeupper) {
+ err = ds->ops->port_prechangeupper(ds, dp->index, info);
+ if (err)
+ return notifier_from_errno(err);
+ }
+
if (is_vlan_dev(info->upper_dev))
return dsa_slave_check_8021q_upper(dev, ptr);
break;
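
dsa_realloc_skb() centralizes what each tagger previously did for itself: one headroom/tailroom/clone check per packet, after which tag drivers may add their tag unconditionally (hence the skb_cow_head() removals in the tag_* hunks below). A minimal sketch of a header tagger under the new contract, using a hypothetical EXAMPLE_HDR_LEN:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#define EXAMPLE_HDR_LEN	4	/* hypothetical tag length */

static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	u8 *hdr;

	/* No skb_cow_head() needed: dsa_slave_xmit() already ran
	 * dsa_realloc_skb(), which expanded the skb if the headroom
	 * advertised via dev->needed_headroom was not available.
	 */
	skb_push(skb, EXAMPLE_HDR_LEN);
	memmove(skb->data, skb->data + EXAMPLE_HDR_LEN, 2 * ETH_ALEN);

	/* Fill in the tag between the MAC addresses and the ethertype. */
	hdr = skb->data + 2 * ETH_ALEN;
	memset(hdr, 0, EXAMPLE_HDR_LEN);

	return skb;
}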
diff --git a/net/dsa/tag_ar9331.c b/net/dsa/tag_ar9331.c
index 55b00694cdba..002cf7f952e2 100644
--- a/net/dsa/tag_ar9331.c
+++ b/net/dsa/tag_ar9331.c
@@ -31,9 +31,6 @@ static struct sk_buff *ar9331_tag_xmit(struct sk_buff *skb,
__le16 *phdr;
u16 hdr;
- if (skb_cow_head(skb, AR9331_HDR_LEN) < 0)
- return NULL;
-
phdr = skb_push(skb, AR9331_HDR_LEN);
hdr = FIELD_PREP(AR9331_HDR_VERSION_MASK, AR9331_HDR_VERSION);
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index ad72dff8d524..e934dace3922 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -66,9 +66,6 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
u16 queue = skb_get_queue_mapping(skb);
u8 *brcm_tag;
- if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
- return NULL;
-
/* The Ethernet switch we are interfaced with needs packets to be at
* least 64 bytes (including FCS) otherwise they will be discarded when
* they enter the switch port logic. When Broadcom tags are enabled, we
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 0b756fae68a5..112c7c6dd568 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -1,7 +1,48 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * net/dsa/tag_dsa.c - (Non-ethertype) DSA tagging
+ * Regular and Ethertype DSA tagging
* Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * Regular DSA
+ * -----------
+ *
+ * For untagged (in 802.1Q terms) packets, the switch will splice in
+ * the tag between the SA and the ethertype of the original
+ * packet. Tagged frames will instead have their outermost .1Q tag
+ * converted to a DSA tag. It expects the same layout when receiving
+ * packets from the CPU.
+ *
+ * Example:
+ *
+ * .----.----.----.---------
+ * Pu: | DA | SA | ET | Payload ...
+ * '----'----'----'---------
+ * 6 6 2 N
+ * .----.----.--------.-----.----.---------
+ * Pt: | DA | SA | 0x8100 | TCI | ET | Payload ...
+ * '----'----'--------'-----'----'---------
+ * 6 6 2 2 2 N
+ * .----.----.-----.----.---------
+ * Pd: | DA | SA | DSA | ET | Payload ...
+ * '----'----'-----'----'---------
+ * 6 6 4 2 N
+ *
+ * Whether a packet is received untagged (Pu) or tagged (Pt), it
+ * will have the same layout (Pd) when it is sent to the CPU. This
+ * is done by ignoring 802.3, replacing the ethertype field
+ * with more metadata, among which is a bit to signal if the original
+ * packet was tagged or not.
+ *
+ * Ethertype DSA
+ * -------------
+ * Uses the exact same tag format as regular DSA, but also includes a
+ * proper ethertype field (which the mv88e6xxx driver sets to
+ * ETH_P_EDSA/0xdada) followed by two zero bytes:
+ *
+ * .----.----.--------.--------.-----.----.---------
+ * | DA | SA | 0xdada | 0x0000 | DSA | ET | Payload ...
+ * '----'----'--------'--------'-----'----'---------
+ * 6 6 2 2 4 2 N
*/
#include <linux/etherdevice.h>
@@ -12,46 +53,104 @@
#define DSA_HLEN 4
-static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
+/**
+ * enum dsa_cmd - DSA Command
+ * @DSA_CMD_TO_CPU: Set on packets that were trapped or mirrored to
+ * the CPU port. This is needed to implement control protocols,
+ * e.g. STP and LLDP, that must not allow those control packets to
+ * be switched according to the normal rules.
+ * @DSA_CMD_FROM_CPU: Used by the CPU to send a packet to a specific
+ * port, ignoring all the barriers that the switch normally
+ * enforces (VLANs, STP port states etc.). No source address
+ * learning takes place. "sudo send packet"
+ * @DSA_CMD_TO_SNIFFER: Set on the copies of packets that matched some
+ * user configured ingress or egress monitor criteria. These are
+ * forwarded by the switch tree to the user configured ingress or
+ * egress monitor port, which can be set to the CPU port or a
+ * regular port. If the destination is a regular port, the tag
+ * will be removed before egressing the port. If the destination
+ * is the CPU port, the tag will not be removed.
+ * @DSA_CMD_FORWARD: This tag is used on all bulk traffic passing
+ * through the switch tree, including the flows that are directed
+ * towards the CPU. Its device/port tuple encodes the original
+ * source port on which the packet ingressed. It can also be used
+ * on transmit by the CPU to defer the forwarding decision to the
+ * hardware, based on the current config of PVT/VTU/ATU
+ * etc. Source address learning takes place if enabled on the
+ * receiving DSA/CPU port.
+ */
+enum dsa_cmd {
+ DSA_CMD_TO_CPU = 0,
+ DSA_CMD_FROM_CPU = 1,
+ DSA_CMD_TO_SNIFFER = 2,
+ DSA_CMD_FORWARD = 3
+};
+
+/**
+ * enum dsa_code - TO_CPU Code
+ *
+ * @DSA_CODE_MGMT_TRAP: DA was classified as a management
+ * address. Typical examples include STP BPDUs and LLDP.
+ * @DSA_CODE_FRAME2REG: Response to a "remote management" request.
+ * @DSA_CODE_IGMP_MLD_TRAP: IGMP/MLD signaling.
+ * @DSA_CODE_POLICY_TRAP: Frame matched some policy configuration on
+ * the device. Typical examples are matching on DA/SA/VID and DHCP
+ * snooping.
+ * @DSA_CODE_ARP_MIRROR: The name says it all really.
+ * @DSA_CODE_POLICY_MIRROR: Same as @DSA_CODE_POLICY_TRAP, but the
+ * particular policy was set to trigger a mirror instead of a
+ * trap.
+ * @DSA_CODE_RESERVED_6: Unused on all devices up to at least 6393X.
+ * @DSA_CODE_RESERVED_7: Unused on all devices up to at least 6393X.
+ *
+ * A 3-bit code is used to relay why a particular frame was sent to
+ * the CPU. We only use this to determine if the packet was mirrored
+ * or trapped, i.e. whether the packet has been forwarded by hardware
+ * or not.
+ *
+ * This is the superset of all possible codes. Any particular device
+ * may only implement a subset.
+ */
+enum dsa_code {
+ DSA_CODE_MGMT_TRAP = 0,
+ DSA_CODE_FRAME2REG = 1,
+ DSA_CODE_IGMP_MLD_TRAP = 2,
+ DSA_CODE_POLICY_TRAP = 3,
+ DSA_CODE_ARP_MIRROR = 4,
+ DSA_CODE_POLICY_MIRROR = 5,
+ DSA_CODE_RESERVED_6 = 6,
+ DSA_CODE_RESERVED_7 = 7
+};
+
+static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
+ u8 extra)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
u8 *dsa_header;
- /*
- * Convert the outermost 802.1q tag to a DSA tag for tagged
- * packets, or insert a DSA tag between the addresses and
- * the ethertype field for untagged packets.
- */
if (skb->protocol == htons(ETH_P_8021Q)) {
- if (skb_cow_head(skb, 0) < 0)
- return NULL;
+ if (extra) {
+ skb_push(skb, extra);
+ memmove(skb->data, skb->data + extra, 2 * ETH_ALEN);
+ }
- /*
- * Construct tagged FROM_CPU DSA tag from 802.1q tag.
- */
- dsa_header = skb->data + 2 * ETH_ALEN;
- dsa_header[0] = 0x60 | dp->ds->index;
+ /* Construct tagged FROM_CPU DSA tag from 802.1Q tag. */
+ dsa_header = skb->data + 2 * ETH_ALEN + extra;
+ dsa_header[0] = (DSA_CMD_FROM_CPU << 6) | 0x20 | dp->ds->index;
dsa_header[1] = dp->index << 3;
- /*
- * Move CFI field from byte 2 to byte 1.
- */
+ /* Move CFI field from byte 2 to byte 1. */
if (dsa_header[2] & 0x10) {
dsa_header[1] |= 0x01;
dsa_header[2] &= ~0x10;
}
} else {
- if (skb_cow_head(skb, DSA_HLEN) < 0)
- return NULL;
- skb_push(skb, DSA_HLEN);
+ skb_push(skb, DSA_HLEN + extra);
+ memmove(skb->data, skb->data + DSA_HLEN + extra, 2 * ETH_ALEN);
- memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
-
- /*
- * Construct untagged FROM_CPU DSA tag.
- */
- dsa_header = skb->data + 2 * ETH_ALEN;
- dsa_header[0] = 0x40 | dp->ds->index;
+ /* Construct untagged FROM_CPU DSA tag. */
+ dsa_header = skb->data + 2 * ETH_ALEN + extra;
+ dsa_header[0] = (DSA_CMD_FROM_CPU << 6) | dp->ds->index;
dsa_header[1] = dp->index << 3;
dsa_header[2] = 0x00;
dsa_header[3] = 0x00;
@@ -60,30 +159,60 @@ static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
return skb;
}
-static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt)
+static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
+ u8 extra)
{
+ int source_device, source_port;
+ enum dsa_code code;
+ enum dsa_cmd cmd;
u8 *dsa_header;
- int source_device;
- int source_port;
- if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
- return NULL;
-
- /*
- * The ethertype field is part of the DSA header.
- */
+ /* The ethertype field is part of the DSA header. */
dsa_header = skb->data - 2;
- /*
- * Check that frame type is either TO_CPU or FORWARD.
- */
- if ((dsa_header[0] & 0xc0) != 0x00 && (dsa_header[0] & 0xc0) != 0xc0)
+ cmd = dsa_header[0] >> 6;
+ switch (cmd) {
+ case DSA_CMD_FORWARD:
+ skb->offload_fwd_mark = 1;
+ break;
+
+ case DSA_CMD_TO_CPU:
+ code = (dsa_header[1] & 0x6) | ((dsa_header[2] >> 4) & 1);
+
+ switch (code) {
+ case DSA_CODE_FRAME2REG:
+ /* Remote management is not implemented yet,
+ * drop.
+ */
+ return NULL;
+ case DSA_CODE_ARP_MIRROR:
+ case DSA_CODE_POLICY_MIRROR:
+ /* Mark mirrored packets to notify any upper
+ * device (like a bridge) that forwarding has
+ * already been done by hardware.
+ */
+ skb->offload_fwd_mark = 1;
+ break;
+ case DSA_CODE_MGMT_TRAP:
+ case DSA_CODE_IGMP_MLD_TRAP:
+ case DSA_CODE_POLICY_TRAP:
+ /* Traps have, by definition, not been
+ * forwarded by hardware, so don't mark them.
+ */
+ break;
+ default:
+ /* Reserved code, this could be anything. Drop
+ * seems like the safest option.
+ */
+ return NULL;
+ }
+
+ break;
+
+ default:
return NULL;
+ }
- /*
- * Determine source device and port.
- */
source_device = dsa_header[0] & 0x1f;
source_port = (dsa_header[1] >> 3) & 0x1f;
@@ -91,16 +220,15 @@ static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb->dev)
return NULL;
- /*
- * Convert the DSA header to an 802.1q header if the 'tagged'
- * bit in the DSA header is set. If the 'tagged' bit is clear,
- * delete the DSA header entirely.
+ /* If the 'tagged' bit is set, convert the DSA tag to an 802.1Q
+ * tag, and delete the ethertype (extra) if applicable. If the
+ * 'tagged' bit is cleared, delete the DSA tag, and the ethertype
+ * if applicable.
*/
if (dsa_header[0] & 0x20) {
u8 new_header[4];
- /*
- * Insert 802.1q ethertype and copy the VLAN-related
+ /* Insert 802.1Q ethertype and copy the VLAN-related
* fields, but clear the bit that will hold CFI (since
* DSA uses that bit location for another purpose).
*/
@@ -109,16 +237,13 @@ static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
new_header[2] = dsa_header[2] & ~0x10;
new_header[3] = dsa_header[3];
- /*
- * Move CFI bit from its place in the DSA header to
- * its 802.1q-designated place.
+ /* Move CFI bit from its place in the DSA header to
+ * its 802.1Q-designated place.
*/
if (dsa_header[1] & 0x01)
new_header[2] |= 0x10;
- /*
- * Update packet checksum if skb is CHECKSUM_COMPLETE.
- */
+ /* Update packet checksum if skb is CHECKSUM_COMPLETE. */
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__wsum c = skb->csum;
c = csum_add(c, csum_partial(new_header + 2, 2, 0));
@@ -127,30 +252,101 @@ static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
}
memcpy(dsa_header, new_header, DSA_HLEN);
+
+ if (extra)
+ memmove(skb->data - ETH_HLEN,
+ skb->data - ETH_HLEN - extra,
+ 2 * ETH_ALEN);
} else {
- /*
- * Remove DSA tag and update checksum.
- */
skb_pull_rcsum(skb, DSA_HLEN);
memmove(skb->data - ETH_HLEN,
- skb->data - ETH_HLEN - DSA_HLEN,
+ skb->data - ETH_HLEN - DSA_HLEN - extra,
2 * ETH_ALEN);
}
- skb->offload_fwd_mark = 1;
-
return skb;
}
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
+
+static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ return dsa_xmit_ll(skb, dev, 0);
+}
+
+static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
+ return NULL;
+
+ return dsa_rcv_ll(skb, dev, 0);
+}
+
static const struct dsa_device_ops dsa_netdev_ops = {
- .name = "dsa",
- .proto = DSA_TAG_PROTO_DSA,
- .xmit = dsa_xmit,
- .rcv = dsa_rcv,
+ .name = "dsa",
+ .proto = DSA_TAG_PROTO_DSA,
+ .xmit = dsa_xmit,
+ .rcv = dsa_rcv,
.overhead = DSA_HLEN,
};
-MODULE_LICENSE("GPL");
+DSA_TAG_DRIVER(dsa_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_DSA);
+#endif /* CONFIG_NET_DSA_TAG_DSA */
+
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
+
+#define EDSA_HLEN 8
+
+static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u8 *edsa_header;
-module_dsa_tag_driver(dsa_netdev_ops);
+ skb = dsa_xmit_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
+ if (!skb)
+ return NULL;
+
+ edsa_header = skb->data + 2 * ETH_ALEN;
+ edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
+ edsa_header[1] = ETH_P_EDSA & 0xff;
+ edsa_header[2] = 0x00;
+ edsa_header[3] = 0x00;
+ return skb;
+}
+
+static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
+ return NULL;
+
+ skb_pull_rcsum(skb, EDSA_HLEN - DSA_HLEN);
+
+ return dsa_rcv_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
+}
+
+static const struct dsa_device_ops edsa_netdev_ops = {
+ .name = "edsa",
+ .proto = DSA_TAG_PROTO_EDSA,
+ .xmit = edsa_xmit,
+ .rcv = edsa_rcv,
+ .overhead = EDSA_HLEN,
+};
+
+DSA_TAG_DRIVER(edsa_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA);
+#endif /* CONFIG_NET_DSA_TAG_EDSA */
+
+static struct dsa_tag_driver *dsa_tag_drivers[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
+ &DSA_TAG_DRIVER_NAME(dsa_netdev_ops),
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
+ &DSA_TAG_DRIVER_NAME(edsa_netdev_ops),
+#endif
+};
+
+module_dsa_tag_drivers(dsa_tag_drivers);
+
+MODULE_LICENSE("GPL");
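
The bit layout used by dsa_rcv_ll() above maps directly onto the diagrams at the top of the file. A hypothetical helper gathering the decoded fields in one place, with the masks copied from the receive path:

#include <linux/types.h>

struct example_dsa_fields {
	u8 cmd;		/* enum dsa_cmd, bits 7:6 of byte 0 */
	bool tagged;	/* original frame carried a .1Q tag */
	u8 source_dev;	/* switch index in the tree */
	u8 source_port;
	u8 code;	/* enum dsa_code, TO_CPU frames only */
};

static struct example_dsa_fields example_dsa_decode(const u8 *h)
{
	struct example_dsa_fields f = {
		.cmd		= h[0] >> 6,
		.tagged		= h[0] & 0x20,
		.source_dev	= h[0] & 0x1f,
		.source_port	= (h[1] >> 3) & 0x1f,
		.code		= (h[1] & 0x6) | ((h[2] >> 4) & 1),
	};

	return f;
}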
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
deleted file mode 100644
index 120614240319..000000000000
--- a/net/dsa/tag_edsa.c
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * net/dsa/tag_edsa.c - Ethertype DSA tagging
- * Copyright (c) 2008-2009 Marvell Semiconductor
- */
-
-#include <linux/etherdevice.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include "dsa_priv.h"
-
-#define DSA_HLEN 4
-#define EDSA_HLEN 8
-
-#define FRAME_TYPE_TO_CPU 0x00
-#define FRAME_TYPE_FORWARD 0x03
-
-#define TO_CPU_CODE_MGMT_TRAP 0x00
-#define TO_CPU_CODE_FRAME2REG 0x01
-#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02
-#define TO_CPU_CODE_POLICY_TRAP 0x03
-#define TO_CPU_CODE_ARP_MIRROR 0x04
-#define TO_CPU_CODE_POLICY_MIRROR 0x05
-
-static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
- u8 *edsa_header;
-
- /*
- * Convert the outermost 802.1q tag to a DSA tag and prepend
- * a DSA ethertype field if the packet is tagged, or insert
- * a DSA ethertype plus DSA tag between the addresses and the
- * current ethertype field if the packet is untagged.
- */
- if (skb->protocol == htons(ETH_P_8021Q)) {
- if (skb_cow_head(skb, DSA_HLEN) < 0)
- return NULL;
- skb_push(skb, DSA_HLEN);
-
- memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
-
- /*
- * Construct tagged FROM_CPU DSA tag from 802.1q tag.
- */
- edsa_header = skb->data + 2 * ETH_ALEN;
- edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
- edsa_header[1] = ETH_P_EDSA & 0xff;
- edsa_header[2] = 0x00;
- edsa_header[3] = 0x00;
- edsa_header[4] = 0x60 | dp->ds->index;
- edsa_header[5] = dp->index << 3;
-
- /*
- * Move CFI field from byte 6 to byte 5.
- */
- if (edsa_header[6] & 0x10) {
- edsa_header[5] |= 0x01;
- edsa_header[6] &= ~0x10;
- }
- } else {
- if (skb_cow_head(skb, EDSA_HLEN) < 0)
- return NULL;
- skb_push(skb, EDSA_HLEN);
-
- memmove(skb->data, skb->data + EDSA_HLEN, 2 * ETH_ALEN);
-
- /*
- * Construct untagged FROM_CPU DSA tag.
- */
- edsa_header = skb->data + 2 * ETH_ALEN;
- edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
- edsa_header[1] = ETH_P_EDSA & 0xff;
- edsa_header[2] = 0x00;
- edsa_header[3] = 0x00;
- edsa_header[4] = 0x40 | dp->ds->index;
- edsa_header[5] = dp->index << 3;
- edsa_header[6] = 0x00;
- edsa_header[7] = 0x00;
- }
-
- return skb;
-}
-
-static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt)
-{
- u8 *edsa_header;
- int frame_type;
- int code;
- int source_device;
- int source_port;
-
- if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
- return NULL;
-
- /*
- * Skip the two null bytes after the ethertype.
- */
- edsa_header = skb->data + 2;
-
- /*
- * Check that frame type is either TO_CPU or FORWARD.
- */
- frame_type = edsa_header[0] >> 6;
-
- switch (frame_type) {
- case FRAME_TYPE_TO_CPU:
- code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1);
-
- /*
- * Mark the frame to never egress on any port of the same switch
- * unless it's a trapped IGMP/MLD packet, in which case the
- * bridge might want to forward it.
- */
- if (code != TO_CPU_CODE_IGMP_MLD_TRAP)
- skb->offload_fwd_mark = 1;
-
- break;
-
- case FRAME_TYPE_FORWARD:
- skb->offload_fwd_mark = 1;
- break;
-
- default:
- return NULL;
- }
-
- /*
- * Determine source device and port.
- */
- source_device = edsa_header[0] & 0x1f;
- source_port = (edsa_header[1] >> 3) & 0x1f;
-
- skb->dev = dsa_master_find_slave(dev, source_device, source_port);
- if (!skb->dev)
- return NULL;
-
- /*
- * If the 'tagged' bit is set, convert the DSA tag to a 802.1q
- * tag and delete the ethertype part. If the 'tagged' bit is
- * clear, delete the ethertype and the DSA tag parts.
- */
- if (edsa_header[0] & 0x20) {
- u8 new_header[4];
-
- /*
- * Insert 802.1q ethertype and copy the VLAN-related
- * fields, but clear the bit that will hold CFI (since
- * DSA uses that bit location for another purpose).
- */
- new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
- new_header[1] = ETH_P_8021Q & 0xff;
- new_header[2] = edsa_header[2] & ~0x10;
- new_header[3] = edsa_header[3];
-
- /*
- * Move CFI bit from its place in the DSA header to
- * its 802.1q-designated place.
- */
- if (edsa_header[1] & 0x01)
- new_header[2] |= 0x10;
-
- skb_pull_rcsum(skb, DSA_HLEN);
-
- /*
- * Update packet checksum if skb is CHECKSUM_COMPLETE.
- */
- if (skb->ip_summed == CHECKSUM_COMPLETE) {
- __wsum c = skb->csum;
- c = csum_add(c, csum_partial(new_header + 2, 2, 0));
- c = csum_sub(c, csum_partial(edsa_header + 2, 2, 0));
- skb->csum = c;
- }
-
- memcpy(edsa_header, new_header, DSA_HLEN);
-
- memmove(skb->data - ETH_HLEN,
- skb->data - ETH_HLEN - DSA_HLEN,
- 2 * ETH_ALEN);
- } else {
- /*
- * Remove DSA tag and update checksum.
- */
- skb_pull_rcsum(skb, EDSA_HLEN);
- memmove(skb->data - ETH_HLEN,
- skb->data - ETH_HLEN - EDSA_HLEN,
- 2 * ETH_ALEN);
- }
-
- return skb;
-}
-
-static const struct dsa_device_ops edsa_netdev_ops = {
- .name = "edsa",
- .proto = DSA_TAG_PROTO_EDSA,
- .xmit = edsa_xmit,
- .rcv = edsa_rcv,
- .overhead = EDSA_HLEN,
-};
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA);
-
-module_dsa_tag_driver(edsa_netdev_ops);
diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c
index 408d4af390a0..2f5bd5e338ab 100644
--- a/net/dsa/tag_gswip.c
+++ b/net/dsa/tag_gswip.c
@@ -60,13 +60,8 @@ static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
- int err;
u8 *gswip_tag;
- err = skb_cow_head(skb, GSWIP_TX_HEADER_LEN);
- if (err)
- return NULL;
-
skb_push(skb, GSWIP_TX_HEADER_LEN);
gswip_tag = skb->data;
diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
new file mode 100644
index 000000000000..a09805c8e1ab
--- /dev/null
+++ b/net/dsa/tag_hellcreek.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * net/dsa/tag_hellcreek.c - Hirschmann Hellcreek switch tag format handling
+ *
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ *
+ * Based on tag_ksz.c.
+ */
+
+#include <linux/skbuff.h>
+#include <net/dsa.h>
+
+#include "dsa_priv.h"
+
+#define HELLCREEK_TAG_LEN 1
+
+static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ u8 *tag;
+
+ /* Tag encoding */
+ tag = skb_put(skb, HELLCREEK_TAG_LEN);
+ *tag = BIT(dp->index);
+
+ return skb;
+}
+
+static struct sk_buff *hellcreek_rcv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt)
+{
+ /* Tag decoding */
+ u8 *tag = skb_tail_pointer(skb) - HELLCREEK_TAG_LEN;
+ unsigned int port = tag[0] & 0x03;
+
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev) {
+ netdev_warn(dev, "Failed to get source port: %d\n", port);
+ return NULL;
+ }
+
+ pskb_trim_rcsum(skb, skb->len - HELLCREEK_TAG_LEN);
+
+ skb->offload_fwd_mark = true;
+
+ return skb;
+}
+
+static const struct dsa_device_ops hellcreek_netdev_ops = {
+ .name = "hellcreek",
+ .proto = DSA_TAG_PROTO_HELLCREEK,
+ .xmit = hellcreek_xmit,
+ .rcv = hellcreek_rcv,
+ .overhead = HELLCREEK_TAG_LEN,
+ .tail_tag = true,
+};
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_HELLCREEK);
+
+module_dsa_tag_driver(hellcreek_netdev_ops);
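
Note the unconditional skb_put(): the driver declares tail_tag with a one-byte overhead and relies on the core changes in slave.c above to guarantee both the tailroom and the minimum frame length before its xmit runs. A sketch of the invariants a tail tagger may now assume; these checks are hypothetical, for illustration only:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static void example_tail_tag_invariants(struct sk_buff *skb,
					struct net_device *dev)
{
	/* dsa_realloc_skb() ensured the tailroom requested through
	 * dev->needed_tailroom is present ...
	 */
	WARN_ON(skb_tailroom(skb) < dev->needed_tailroom);

	/* ... and eth_skb_pad() in dsa_slave_xmit() padded the frame
	 * to the minimum size, so the appended tag stays at the very
	 * end even after the master pads the frame.
	 */
	WARN_ON(skb->len < ETH_ZLEN);
}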
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 0a5aa982c60d..4820dbcedfa2 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -14,46 +14,6 @@
#define KSZ_EGRESS_TAG_LEN 1
#define KSZ_INGRESS_TAG_LEN 1
-static struct sk_buff *ksz_common_xmit(struct sk_buff *skb,
- struct net_device *dev, int len)
-{
- struct sk_buff *nskb;
- int padlen;
-
- padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
-
- if (skb_tailroom(skb) >= padlen + len) {
- /* Let dsa_slave_xmit() free skb */
- if (__skb_put_padto(skb, skb->len + padlen, false))
- return NULL;
-
- nskb = skb;
- } else {
- nskb = alloc_skb(NET_IP_ALIGN + skb->len +
- padlen + len, GFP_ATOMIC);
- if (!nskb)
- return NULL;
- skb_reserve(nskb, NET_IP_ALIGN);
-
- skb_reset_mac_header(nskb);
- skb_set_network_header(nskb,
- skb_network_header(skb) - skb->head);
- skb_set_transport_header(nskb,
- skb_transport_header(skb) - skb->head);
- skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
-
- /* Let skb_put_padto() free nskb, and let dsa_slave_xmit() free
- * skb
- */
- if (skb_put_padto(nskb, nskb->len + padlen))
- return NULL;
-
- consume_skb(skb);
- }
-
- return nskb;
-}
-
static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
struct net_device *dev,
unsigned int port, unsigned int len)
@@ -90,23 +50,18 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct sk_buff *nskb;
u8 *tag;
u8 *addr;
- nskb = ksz_common_xmit(skb, dev, KSZ_INGRESS_TAG_LEN);
- if (!nskb)
- return NULL;
-
/* Tag encoding */
- tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
- addr = skb_mac_header(nskb);
+ tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
+ addr = skb_mac_header(skb);
*tag = 1 << dp->index;
if (is_link_local_ether_addr(addr))
*tag |= KSZ8795_TAIL_TAG_OVERRIDE;
- return nskb;
+ return skb;
}
static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -156,18 +111,13 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct sk_buff *nskb;
__be16 *tag;
u8 *addr;
u16 val;
- nskb = ksz_common_xmit(skb, dev, KSZ9477_INGRESS_TAG_LEN);
- if (!nskb)
- return NULL;
-
/* Tag encoding */
- tag = skb_put(nskb, KSZ9477_INGRESS_TAG_LEN);
- addr = skb_mac_header(nskb);
+ tag = skb_put(skb, KSZ9477_INGRESS_TAG_LEN);
+ addr = skb_mac_header(skb);
val = BIT(dp->index);
@@ -176,7 +126,7 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
*tag = cpu_to_be16(val);
- return nskb;
+ return skb;
}
static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -213,24 +163,19 @@ static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct sk_buff *nskb;
u8 *addr;
u8 *tag;
- nskb = ksz_common_xmit(skb, dev, KSZ_INGRESS_TAG_LEN);
- if (!nskb)
- return NULL;
-
/* Tag encoding */
- tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
- addr = skb_mac_header(nskb);
+ tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
+ addr = skb_mac_header(skb);
*tag = BIT(dp->index);
if (is_link_local_ether_addr(addr))
*tag |= KSZ9893_TAIL_TAG_OVERRIDE;
- return nskb;
+ return skb;
}
static const struct dsa_device_ops ksz9893_netdev_ops = {
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
index ccfb6f641bbf..aa1318dccaf0 100644
--- a/net/dsa/tag_lan9303.c
+++ b/net/dsa/tag_lan9303.c
@@ -58,15 +58,6 @@ static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
__be16 *lan9303_tag;
u16 tag;
- /* insert a special VLAN tag between the MAC addresses
- * and the current ethertype field.
- */
- if (skb_cow_head(skb, LAN9303_TAG_LEN) < 0) {
- dev_dbg(&dev->dev,
- "Cannot make room for the special tag. Dropping packet\n");
- return NULL;
- }
-
/* provide 'LAN9303_TAG_LEN' bytes additional space */
skb_push(skb, LAN9303_TAG_LEN);
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index 4cdd9cf428fb..38dcdded74c0 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -34,9 +34,6 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
* table with VID.
*/
if (!skb_vlan_tagged(skb)) {
- if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
- return NULL;
-
skb_push(skb, MTK_HDR_LEN);
memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
is_vlan_skb = false;
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index 3b468aca5c53..16a1afd5b8e1 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -143,13 +143,6 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct ocelot_port *ocelot_port;
u8 *prefix, *injection;
u64 qos_class, rew_op;
- int err;
-
- err = skb_cow_head(skb, OCELOT_TOTAL_TAG_LEN);
- if (unlikely(err < 0)) {
- netdev_err(netdev, "Cannot make room for tag.\n");
- return NULL;
- }
ocelot_port = ocelot->ports[dp->index];
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index 1b9e8507112b..88181b52f480 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -34,9 +34,6 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
__be16 *phdr;
u16 hdr;
- if (skb_cow_head(skb, QCA_HDR_LEN) < 0)
- return NULL;
-
skb_push(skb, QCA_HDR_LEN);
memmove(skb->data, skb->data + QCA_HDR_LEN, 2 * ETH_ALEN);
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 3a1cc24a4f0a..5b97ede56a0f 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -13,42 +13,15 @@
static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct sk_buff *nskb;
- int padlen;
u8 *trailer;
- /*
- * We have to make sure that the trailer ends up as the very
- * last 4 bytes of the packet. This means that we have to pad
- * the packet to the minimum ethernet frame size, if necessary,
- * before adding the trailer.
- */
- padlen = 0;
- if (skb->len < 60)
- padlen = 60 - skb->len;
-
- nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
- if (!nskb)
- return NULL;
- skb_reserve(nskb, NET_IP_ALIGN);
-
- skb_reset_mac_header(nskb);
- skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
- skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
- skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
- consume_skb(skb);
-
- if (padlen) {
- skb_put_zero(nskb, padlen);
- }
-
- trailer = skb_put(nskb, 4);
+ trailer = skb_put(skb, 4);
trailer[0] = 0x80;
trailer[1] = 1 << dp->index;
trailer[2] = 0x10;
trailer[3] = 0x00;
- return nskb;
+ return skb;
}
static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index dac65180c4ef..4106373180c6 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -272,7 +272,7 @@ void eth_header_cache_update(struct hh_cache *hh,
EXPORT_SYMBOL(eth_header_cache_update);
/**
- * eth_header_parser_protocol - extract protocol from L2 header
+ * eth_header_parse_protocol - extract protocol from L2 header
* @skb: packet to extract protocol from
*/
__be16 eth_header_parse_protocol(const struct sk_buff *skb)
@@ -523,8 +523,8 @@ int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr)
EXPORT_SYMBOL(eth_platform_get_mac_address);
/**
- * Obtain the MAC address from an nvmem cell named 'mac-address' associated
- * with given device.
+ * nvmem_get_mac_address - Obtain the MAC address from an nvmem cell named
+ * 'mac-address' associated with given device.
*
* @dev: Device with which the mac-address cell is associated.
* @addrbuf: Buffer to which the MAC address will be copied on success.
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index ec2cd7aab5ad..771688e1b0da 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -2433,7 +2433,7 @@ static int noinline_for_stack ethtool_set_per_queue(struct net_device *dev,
return ethtool_set_per_queue_coalesce(dev, useraddr, &per_queue_opt);
default:
return -EOPNOTSUPP;
- };
+ }
}
static int ethtool_phy_tunable_valid(const struct ethtool_tunable *tuna)
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 6d091e419d3e..9c640d670ffe 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -149,7 +149,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
char name[IFNAMSIZ + 1];
- nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
+ nla_strscpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
sizeof(name));
dev = dev_get_by_name(&init_net, name);
} else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index b7260c8cef2e..b94fa8eb831b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -450,7 +450,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* BPF prog is run before any checks are done so that if the prog
* changes context in a wrong way it will be caught.
*/
- err = BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr);
+ err = BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr);
if (err)
return err;
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index 618954f82764..d520e61649c8 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -95,6 +95,7 @@ static bool bpf_tcp_ca_is_valid_access(int off, int size,
}
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
+ const struct btf *btf,
const struct btf_type *t, int off,
int size, enum bpf_access_type atype,
u32 *next_btf_id)
@@ -102,7 +103,7 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
size_t end;
if (atype == BPF_READ)
- return btf_struct_access(log, t, off, size, atype, next_btf_id);
+ return btf_struct_access(log, btf, t, off, size, atype, next_btf_id);
if (t != tcp_sock_type) {
bpf_log(log, "only read is supported\n");
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 123a6d39438f..75f67994fc85 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -650,8 +650,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
struct in_device *in_dev;
struct ifaddrmsg *ifm;
struct in_ifaddr *ifa;
-
- int err = -EINVAL;
+ int err;
ASSERT_RTNL();
@@ -881,7 +880,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
if (tb[IFA_LABEL])
- nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
+ nla_strscpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
else
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 1f75dc686b6b..b5400cec4f69 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -973,7 +973,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
char tmp[TCP_CA_NAME_MAX];
bool ecn_ca = false;
- nla_strlcpy(tmp, nla, sizeof(tmp));
+ nla_strscpy(tmp, nla, sizeof(tmp));
val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
} else {
if (nla_len(nla) != sizeof(u32))
@@ -1641,9 +1641,8 @@ int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
break;
}
- *flags |= (nhc->nhc_flags & RTNH_F_ONLINK);
- if (nhc->nhc_flags & RTNH_F_OFFLOAD)
- *flags |= RTNH_F_OFFLOAD;
+ *flags |= (nhc->nhc_flags &
+ (RTNH_F_ONLINK | RTNH_F_OFFLOAD | RTNH_F_TRAP));
if (!skip_oif && nhc->nhc_dev &&
nla_put_u32(skb, RTA_OIF, nhc->nhc_dev->ifindex))
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index ffc5332f1390..28117c05dc35 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2100,15 +2100,6 @@ static void __fib_info_notify_update(struct net *net, struct fib_table *tb,
rtmsg_fib(RTM_NEWROUTE, htonl(n->key), fa,
KEYLENGTH - fa->fa_slen, tb->tb_id,
info, NLM_F_REPLACE);
-
- /* call_fib_entry_notifiers will be removed when
- * in-kernel notifier is implemented and supported
- * for nexthop objects
- */
- call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
- n->key,
- KEYLENGTH - fa->fa_slen, fa,
- NULL);
}
}
}
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 10d31733297d..05cd198d7a6b 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -145,12 +145,16 @@ static void inet_frags_free_cb(void *ptr, void *arg)
inet_frag_destroy(fq);
}
-static void fqdir_work_fn(struct work_struct *work)
+static LLIST_HEAD(fqdir_free_list);
+
+static void fqdir_free_fn(struct work_struct *work)
{
- struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);
- struct inet_frags *f = fqdir->f;
+ struct llist_node *kill_list;
+ struct fqdir *fqdir, *tmp;
+ struct inet_frags *f;
- rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);
+ /* Atomically snapshot the list of fqdirs to free */
+ kill_list = llist_del_all(&fqdir_free_list);
/* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
* have completed, since they need to dereference fqdir.
@@ -158,10 +162,25 @@ static void fqdir_work_fn(struct work_struct *work)
*/
rcu_barrier();
- if (refcount_dec_and_test(&f->refcnt))
- complete(&f->completion);
+ llist_for_each_entry_safe(fqdir, tmp, kill_list, free_list) {
+ f = fqdir->f;
+ if (refcount_dec_and_test(&f->refcnt))
+ complete(&f->completion);
- kfree(fqdir);
+ kfree(fqdir);
+ }
+}
+
+static DECLARE_WORK(fqdir_free_work, fqdir_free_fn);
+
+static void fqdir_work_fn(struct work_struct *work)
+{
+ struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);
+
+ rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);
+
+ if (llist_add(&fqdir->free_list, &fqdir_free_list))
+ queue_work(system_wq, &fqdir_free_work);
}
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
@@ -184,10 +203,22 @@ int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
}
EXPORT_SYMBOL(fqdir_init);
+static struct workqueue_struct *inet_frag_wq;
+
+static int __init inet_frag_wq_init(void)
+{
+ inet_frag_wq = create_workqueue("inet_frag_wq");
+ if (!inet_frag_wq)
+ panic("Could not create inet frag workq");
+ return 0;
+}
+
+pure_initcall(inet_frag_wq_init);
+
void fqdir_exit(struct fqdir *fqdir)
{
INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
- queue_work(system_wq, &fqdir->destroy_work);
+ queue_work(inet_frag_wq, &fqdir->destroy_work);
}
EXPORT_SYMBOL(fqdir_exit);
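
The conversion uses a common lock-free batching idiom: llist_add() returns true only when the node was added to an empty list, so just the first producer schedules the work item, and the worker drains everything with a single llist_del_all(). One rcu_barrier() then covers the whole batch instead of one per fqdir. A generic sketch of the pattern, with a hypothetical item type:

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_item {
	struct llist_node node;
	/* payload ... */
};

static LLIST_HEAD(example_free_list);

static void example_free_fn(struct work_struct *work)
{
	struct llist_node *batch = llist_del_all(&example_free_list);
	struct example_item *item, *tmp;

	/* Any batch-wide synchronization (e.g. rcu_barrier()) goes
	 * here, amortized over every item queued since the last run.
	 */
	llist_for_each_entry_safe(item, tmp, batch, node)
		kfree(item);
}

static DECLARE_WORK(example_free_work, example_free_fn);

static void example_queue_free(struct example_item *item)
{
	/* llist_add() returns true if the list was previously empty,
	 * so the work is queued exactly once per batch.
	 */
	if (llist_add(&item->node, &example_free_list))
		queue_work(system_wq, &example_free_work);
}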
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index e70291748889..a68bf4c6fe9b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -920,7 +920,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
.ndo_start_xmit = ipgre_xmit,
.ndo_do_ioctl = ip_tunnel_ioctl,
.ndo_change_mtu = ip_tunnel_change_mtu,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_tunnel_ctl = ipgre_tunnel_ctl,
};
@@ -1275,7 +1275,7 @@ static const struct net_device_ops gre_tap_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ip_tunnel_change_mtu,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_fill_metadata_dst = gre_fill_metadata_dst,
};
@@ -1308,7 +1308,7 @@ static const struct net_device_ops erspan_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ip_tunnel_change_mtu,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_fill_metadata_dst = gre_fill_metadata_dst,
};
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index e25be2d01a7a..7ca338fbe8ba 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -429,15 +429,6 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
}
EXPORT_SYMBOL(skb_tunnel_check_pmtu);
-/* Often modified stats are per cpu, other are shared (netdev->stats) */
-void ip_tunnel_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *tot)
-{
- netdev_stats_to_stats64(tot, &dev->stats);
- dev_fetch_sw_netstats(tot, dev->tstats);
-}
-EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
-
static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
[LWTUNNEL_IP_UNSPEC] = { .strict_start_type = LWTUNNEL_IP_OPTS },
[LWTUNNEL_IP_ID] = { .type = NLA_U64 },
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index b957cbee2cf7..abc171e79d3e 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -404,7 +404,7 @@ static const struct net_device_ops vti_netdev_ops = {
.ndo_start_xmit = vti_tunnel_xmit,
.ndo_do_ioctl = ip_tunnel_ioctl,
.ndo_change_mtu = ip_tunnel_change_mtu,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_tunnel_ctl = vti_tunnel_ctl,
};
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 561f15b5a944..3cd13e1bc6a7 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1441,7 +1441,7 @@ static int __init ip_auto_config(void)
int retries = CONF_OPEN_RETRIES;
#endif
int err;
- unsigned int i;
+ unsigned int i, count;
/* Initialise all name servers and NTP servers to NONE (but only if the
* "ip=" or "nfsaddrs=" kernel command line parameters weren't decoded,
@@ -1575,7 +1575,7 @@ static int __init ip_auto_config(void)
if (ic_dev_mtu)
pr_cont(", mtu=%d", ic_dev_mtu);
/* Name servers (if any): */
- for (i = 0; i < CONF_NAMESERVERS_MAX; i++) {
+ for (i = 0, count = 0; i < CONF_NAMESERVERS_MAX; i++) {
if (ic_nameservers[i] != NONE) {
if (i == 0)
pr_info(" nameserver%u=%pI4",
@@ -1583,12 +1583,14 @@ static int __init ip_auto_config(void)
else
pr_cont(", nameserver%u=%pI4",
i, &ic_nameservers[i]);
+
+ count++;
}
- if (i + 1 == CONF_NAMESERVERS_MAX)
+ if ((i + 1 == CONF_NAMESERVERS_MAX) && count > 0)
pr_cont("\n");
}
/* NTP servers (if any): */
- for (i = 0; i < CONF_NTP_SERVERS_MAX; i++) {
+ for (i = 0, count = 0; i < CONF_NTP_SERVERS_MAX; i++) {
if (ic_ntp_servers[i] != NONE) {
if (i == 0)
pr_info(" ntpserver%u=%pI4",
@@ -1596,8 +1598,10 @@ static int __init ip_auto_config(void)
else
pr_cont(", ntpserver%u=%pI4",
i, &ic_ntp_servers[i]);
+
+ count++;
}
- if (i + 1 == CONF_NTP_SERVERS_MAX)
+ if ((i + 1 == CONF_NTP_SERVERS_MAX) && count > 0)
pr_cont("\n");
}
#endif /* !SILENT */
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 75d35e76bec2..d5bfa087c23a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -347,7 +347,7 @@ static const struct net_device_ops ipip_netdev_ops = {
.ndo_start_xmit = ipip_tunnel_xmit,
.ndo_do_ioctl = ip_tunnel_ioctl,
.ndo_change_mtu = ip_tunnel_change_mtu,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_tunnel_ctl = ipip_tunnel_ctl,
};
diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
index 3205d5f7c8c9..25ea6ac44db9 100644
--- a/net/ipv4/metrics.c
+++ b/net/ipv4/metrics.c
@@ -31,7 +31,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
if (type == RTAX_CC_ALGO) {
char tmp[TCP_CA_NAME_MAX];
- nla_strlcpy(tmp, nla, sizeof(tmp));
+ nla_strscpy(tmp, nla, sizeof(tmp));
val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
if (val == TCP_CA_UNSPEC) {
NL_SET_ERR_MSG(extack, "Unknown tcp congestion algorithm");
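
Beyond the rename, the two helpers differ in their return value: nla_strlcpy() returned the source attribute's length (strlcpy-style), while nla_strscpy() returns the number of bytes copied, or -E2BIG on truncation, making overflow checks direct. A sketch of a caller acting on truncation; the use of IFLA_IFNAME here is a hypothetical example:

#include <linux/if_link.h>
#include <net/netlink.h>

static int example_parse_ifname(struct nlattr **tb, char *name, size_t len)
{
	ssize_t ret;

	/* nla_strscpy() NUL-terminates and copies at most len - 1
	 * bytes; unlike nla_strlcpy(), truncation is reported as
	 * -E2BIG rather than via a returned source length that the
	 * caller would have to compare against len itself.
	 */
	ret = nla_strscpy(name, tb[IFLA_IFNAME], len);
	if (ret == -E2BIG)
		return -EINVAL;

	return 0;
}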
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index e16b98ee6266..4b8840734762 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -56,7 +56,8 @@ reject_tg(struct sk_buff *skb, const struct xt_action_param *par)
nf_send_unreach(skb, ICMP_PKT_FILTERED, hook);
break;
case IPT_TCP_RESET:
- nf_send_reset(xt_net(par), skb, hook);
+ nf_send_reset(xt_net(par), par->state->sk, skb, hook);
+ break;
case IPT_ICMP_ECHOREPLY:
/* Doesn't happen. */
break;
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 93b07739807b..4eed5afca392 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -12,6 +12,128 @@
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
+static int nf_reject_iphdr_validate(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ u32 len;
+
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return 0;
+
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ return 0;
+
+ len = ntohs(iph->tot_len);
+ if (skb->len < len)
+ return 0;
+ else if (len < (iph->ihl*4))
+ return 0;
+
+ if (!pskb_may_pull(skb, iph->ihl*4))
+ return 0;
+
+ return 1;
+}
+
+struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook)
+{
+ const struct tcphdr *oth;
+ struct sk_buff *nskb;
+ struct iphdr *niph;
+ struct tcphdr _oth;
+
+ if (!nf_reject_iphdr_validate(oldskb))
+ return NULL;
+
+ oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
+ if (!oth)
+ return NULL;
+
+ nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+ LL_MAX_HEADER, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ nskb->dev = (struct net_device *)dev;
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+ niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+ net->ipv4.sysctl_ip_default_ttl);
+ nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+ niph->tot_len = htons(nskb->len);
+ ip_send_check(niph);
+
+ return nskb;
+}
+EXPORT_SYMBOL_GPL(nf_reject_skb_v4_tcp_reset);
+
+struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code)
+{
+ struct sk_buff *nskb;
+ struct iphdr *niph;
+ struct icmphdr *icmph;
+ unsigned int len;
+ __wsum csum;
+ u8 proto;
+
+ if (!nf_reject_iphdr_validate(oldskb))
+ return NULL;
+
+ /* IP header checks: fragment. */
+ if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
+ return NULL;
+
+ /* RFC says return as much as we can without exceeding 576 bytes. */
+ len = min_t(unsigned int, 536, oldskb->len);
+
+ if (!pskb_may_pull(oldskb, len))
+ return NULL;
+
+ if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
+ return NULL;
+
+ proto = ip_hdr(oldskb)->protocol;
+
+ if (!skb_csum_unnecessary(oldskb) &&
+ nf_reject_verify_csum(proto) &&
+ nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
+ return NULL;
+
+ nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
+ LL_MAX_HEADER + len, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ nskb->dev = (struct net_device *)dev;
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+ niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
+ net->ipv4.sysctl_ip_default_ttl);
+
+ skb_reset_transport_header(nskb);
+ icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
+ icmph->type = ICMP_DEST_UNREACH;
+ icmph->code = code;
+
+ skb_put_data(nskb, skb_network_header(oldskb), len);
+
+ csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
+ icmph->checksum = csum_fold(csum);
+
+ niph->tot_len = htons(nskb->len);
+ ip_send_check(niph);
+
+ return nskb;
+}
+EXPORT_SYMBOL_GPL(nf_reject_skb_v4_unreach);
+
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *_oth, int hook)
{
@@ -112,7 +234,8 @@ static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
}
/* Send RST reply */
-void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
+void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ int hook)
{
struct net_device *br_indev __maybe_unused;
struct sk_buff *nskb;
@@ -124,7 +247,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
if (!oth)
return;
- if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(oldskb))
+ if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+ nf_reject_fill_skb_dst(oldskb) < 0)
return;
if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -144,8 +268,7 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
ip4_dst_hoplimit(skb_dst(nskb)));
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
-
- if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC))
+ if (ip_route_me_harder(net, sk, nskb, RTN_UNSPEC))
goto free_nskb;
niph = ip_hdr(nskb);
@@ -193,7 +316,8 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
if (iph->frag_off & htons(IP_OFFSET))
return;
- if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(skb_in))
+ if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+ nf_reject_fill_skb_dst(skb_in) < 0)
return;
if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
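
Unlike nf_send_reset()/nf_send_unreach(), the new nf_reject_skb_v4_*() helpers only construct and return the reply skb, leaving routing and transmission to the caller; that is what makes them usable from hooks (such as the netdev family) where the normal output path is unavailable. A hedged sketch of a caller follows; the dev_queue_xmit() step is an assumption for illustration, since the helpers themselves do not transmit:

#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <net/netfilter/ipv4/nf_reject.h>

/* Hypothetical caller: answer a packet seen at a netdev-layer hook
 * with an ICMP port-unreachable, transmitting the reply directly on
 * the ingress device.
 */
static void example_reject_unreach(struct net *net, struct sk_buff *skb,
				   const struct net_device *dev, int hook)
{
	struct sk_buff *nskb;

	nskb = nf_reject_skb_v4_unreach(net, skb, dev, hook,
					ICMP_PORT_UNREACH);
	if (!nskb)
		return;

	dev_queue_xmit(nskb);
}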
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index e408f813f5d8..ff437e4ed6db 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -27,7 +27,8 @@ static void nft_reject_ipv4_eval(const struct nft_expr *expr,
nf_send_unreach(pkt->skb, priv->icmp_code, nft_hook(pkt));
break;
case NFT_REJECT_TCP_RST:
- nf_send_reset(nft_net(pkt), pkt->skb, nft_hook(pkt));
+ nf_send_reset(nft_net(pkt), pkt->xt.state->sk, pkt->skb,
+ nft_hook(pkt));
break;
default:
break;
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 0dc43ad28eb9..5e1b22d4f939 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -36,14 +36,145 @@ static const struct nla_policy rtm_nh_policy[NHA_MAX + 1] = {
[NHA_FDB] = { .type = NLA_FLAG },
};
+static bool nexthop_notifiers_is_empty(struct net *net)
+{
+ return !net->nexthop.notifier_chain.head;
+}
+
+static void
+__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
+ const struct nexthop *nh)
+{
+ struct nh_info *nhi = rtnl_dereference(nh->nh_info);
+
+ nh_info->dev = nhi->fib_nhc.nhc_dev;
+ nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
+ if (nh_info->gw_family == AF_INET)
+ nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
+ else if (nh_info->gw_family == AF_INET6)
+ nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;
+
+ nh_info->is_reject = nhi->reject_nh;
+ nh_info->is_fdb = nhi->fdb_nh;
+ nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
+}
+
+static int nh_notifier_single_info_init(struct nh_notifier_info *info,
+ const struct nexthop *nh)
+{
+ info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
+ if (!info->nh)
+ return -ENOMEM;
+
+ __nh_notifier_single_info_init(info->nh, nh);
+
+ return 0;
+}
+
+static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
+{
+ kfree(info->nh);
+}
+
+static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
+ const struct nexthop *nh)
+{
+ struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ u16 num_nh = nhg->num_nh;
+ int i;
+
+ info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
+ GFP_KERNEL);
+ if (!info->nh_grp)
+ return -ENOMEM;
+
+ info->nh_grp->num_nh = num_nh;
+ info->nh_grp->is_fdb = nhg->fdb_nh;
+
+ for (i = 0; i < num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+ info->nh_grp->nh_entries[i].id = nhge->nh->id;
+ info->nh_grp->nh_entries[i].weight = nhge->weight;
+ __nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
+ nhge->nh);
+ }
+
+ return 0;
+}
+
+static void nh_notifier_grp_info_fini(struct nh_notifier_info *info)
+{
+ kfree(info->nh_grp);
+}
+
+static int nh_notifier_info_init(struct nh_notifier_info *info,
+ const struct nexthop *nh)
+{
+ info->id = nh->id;
+ info->is_grp = nh->is_group;
+
+ if (info->is_grp)
+ return nh_notifier_grp_info_init(info, nh);
+ else
+ return nh_notifier_single_info_init(info, nh);
+}
+
+static void nh_notifier_info_fini(struct nh_notifier_info *info)
+{
+ if (info->is_grp)
+ nh_notifier_grp_info_fini(info);
+ else
+ nh_notifier_single_info_fini(info);
+}
+
static int call_nexthop_notifiers(struct net *net,
enum nexthop_event_type event_type,
- struct nexthop *nh)
+ struct nexthop *nh,
+ struct netlink_ext_ack *extack)
{
+ struct nh_notifier_info info = {
+ .net = net,
+ .extack = extack,
+ };
int err;
+ ASSERT_RTNL();
+
+ if (nexthop_notifiers_is_empty(net))
+ return 0;
+
+ err = nh_notifier_info_init(&info, nh);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
+ return err;
+ }
+
err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
- event_type, nh);
+ event_type, &info);
+ nh_notifier_info_fini(&info);
+
+ return notifier_to_errno(err);
+}
+
+static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
+ enum nexthop_event_type event_type,
+ struct nexthop *nh,
+ struct netlink_ext_ack *extack)
+{
+ struct nh_notifier_info info = {
+ .net = net,
+ .extack = extack,
+ };
+ int err;
+
+ err = nh_notifier_info_init(&info, nh);
+ if (err)
+ return err;
+
+ err = nb->notifier_call(nb, event_type, &info);
+ nh_notifier_info_fini(&info);
+
return notifier_to_errno(err);
}
@@ -782,9 +913,10 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
{
struct nh_grp_entry *nhges, *new_nhges;
struct nexthop *nhp = nhge->nh_parent;
+ struct netlink_ext_ack extack;
struct nexthop *nh = nhge->nh;
struct nh_group *nhg, *newg;
- int i, j;
+ int i, j, err;
WARN_ON(!nh);
@@ -832,6 +964,10 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
list_del(&nhge->nh_list);
nexthop_put(nhge->nh);
+ err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp, &extack);
+ if (err)
+ pr_err("%s\n", extack._msg);
+
if (nlinfo)
nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
}
@@ -907,7 +1043,7 @@ static void __remove_nexthop(struct net *net, struct nexthop *nh,
static void remove_nexthop(struct net *net, struct nexthop *nh,
struct nl_info *nlinfo)
{
- call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh);
+ call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
/* remove from the tree */
rb_erase(&nh->rb_node, &net->nexthop.rb_root);
@@ -940,13 +1076,17 @@ static int replace_nexthop_grp(struct net *net, struct nexthop *old,
struct netlink_ext_ack *extack)
{
struct nh_group *oldg, *newg;
- int i;
+ int i, err;
if (!new->is_group) {
NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
return -EINVAL;
}
+ err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
+ if (err)
+ return err;
+
oldg = rtnl_dereference(old->nh_grp);
newg = rtnl_dereference(new->nh_grp);
@@ -985,31 +1125,54 @@ static int replace_nexthop_single(struct net *net, struct nexthop *old,
struct nexthop *new,
struct netlink_ext_ack *extack)
{
+ u8 old_protocol, old_nh_flags;
struct nh_info *oldi, *newi;
+ struct nh_grp_entry *nhge;
+ int err;
if (new->is_group) {
NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
return -EINVAL;
}
+ err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
+ if (err)
+ return err;
+
+ /* Hardware flags were set on 'old' as 'new' is not in the red-black
+ * tree. Therefore, inherit the flags from 'old' to 'new'.
+ */
+ new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
+
oldi = rtnl_dereference(old->nh_info);
newi = rtnl_dereference(new->nh_info);
newi->nh_parent = old;
oldi->nh_parent = new;
+ old_protocol = old->protocol;
+ old_nh_flags = old->nh_flags;
+
old->protocol = new->protocol;
old->nh_flags = new->nh_flags;
rcu_assign_pointer(old->nh_info, newi);
rcu_assign_pointer(new->nh_info, oldi);
+ /* Send a replace notification for all the groups using the nexthop. */
+ list_for_each_entry(nhge, &old->grp_list, nh_list) {
+ struct nexthop *nhp = nhge->nh_parent;
+
+ err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
+ extack);
+ if (err)
+ goto err_notify;
+ }
+
/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
* update IPv4 indication in all the groups using the nexthop.
*/
if (oldi->family == AF_INET && newi->family == AF_INET6) {
- struct nh_grp_entry *nhge;
-
list_for_each_entry(nhge, &old->grp_list, nh_list) {
struct nexthop *nhp = nhge->nh_parent;
struct nh_group *nhg;
@@ -1020,6 +1183,21 @@ static int replace_nexthop_single(struct net *net, struct nexthop *old,
}
return 0;
+
+err_notify:
+ rcu_assign_pointer(new->nh_info, newi);
+ rcu_assign_pointer(old->nh_info, oldi);
+ old->nh_flags = old_nh_flags;
+ old->protocol = old_protocol;
+ oldi->nh_parent = old;
+ newi->nh_parent = new;
+ list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
+ struct nexthop *nhp = nhge->nh_parent;
+
+ call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp, extack);
+ }
+ call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
+ return err;
}
static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
@@ -1168,7 +1346,11 @@ static int insert_nexthop(struct net *net, struct nexthop *new_nh,
rb_link_node_rcu(&new_nh->rb_node, parent, pp);
rb_insert_color(&new_nh->rb_node, root);
- rc = 0;
+
+ rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
+ if (rc)
+ rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
+
out:
if (!rc) {
nh_base_seq_inc(net);
@@ -1957,10 +2139,40 @@ static struct notifier_block nh_netdev_notifier = {
.notifier_call = nh_netdev_event,
};
-int register_nexthop_notifier(struct net *net, struct notifier_block *nb)
+static int nexthops_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
+{
+ struct rb_root *root = &net->nexthop.rb_root;
+ struct rb_node *node;
+ int err = 0;
+
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ struct nexthop *nh;
+
+ nh = rb_entry(node, struct nexthop, rb_node);
+ err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh,
+ extack);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
- return blocking_notifier_chain_register(&net->nexthop.notifier_chain,
- nb);
+ int err;
+
+ rtnl_lock();
+ err = nexthops_dump(net, nb, extack);
+ if (err)
+ goto unlock;
+ err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
+ nb);
+unlock:
+ rtnl_unlock();
+ return err;
}
EXPORT_SYMBOL(register_nexthop_notifier);
@@ -1971,6 +2183,27 @@ int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_nexthop_notifier);
+void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
+{
+ struct nexthop *nexthop;
+
+ rcu_read_lock();
+
+ nexthop = nexthop_find_by_id(net, id);
+ if (!nexthop)
+ goto out;
+
+ nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
+ if (offload)
+ nexthop->nh_flags |= RTNH_F_OFFLOAD;
+ if (trap)
+ nexthop->nh_flags |= RTNH_F_TRAP;
+
+out:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(nexthop_set_hw_flags);
+
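
A hedged usage sketch of the new export: after programming nexthop 'id' into its hardware, a driver reflects the result back so it shows up in 'ip nexthop' output. The call sites are hypothetical; the signature is as above:

	/* offload succeeded, packets are forwarded in hardware */
	nexthop_set_hw_flags(net, id, true, false);

	/* or: installed as a trap entry, packets go to the CPU */
	nexthop_set_hw_flags(net, id, false, true);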
static void __net_exit nexthop_net_exit(struct net *net)
{
rtnl_lock();
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8d5e1695b9aa..63cd370ea29d 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -167,6 +167,7 @@ static const struct snmp_mib snmp4_udp_list[] = {
SNMP_MIB_ITEM("SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_ITEM("InCsumErrors", UDP_MIB_CSUMERRORS),
SNMP_MIB_ITEM("IgnoredMulti", UDP_MIB_IGNOREDMULTI),
+ SNMP_MIB_ITEM("MemErrors", UDP_MIB_MEMERRORS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 9f43abeac3a8..e26652ff7059 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1741,7 +1741,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
flags |= RTCF_LOCAL;
rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
- IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
+ IN_DEV_ORCONF(in_dev, NOPOLICY), false);
if (!rth)
return -ENOBUFS;
@@ -1857,8 +1857,8 @@ static int __mkroute_input(struct sk_buff *skb,
}
rth = rt_dst_alloc(out_dev->dev, 0, res->type,
- IN_DEV_CONF_GET(in_dev, NOPOLICY),
- IN_DEV_CONF_GET(out_dev, NOXFRM));
+ IN_DEV_ORCONF(in_dev, NOPOLICY),
+ IN_DEV_ORCONF(out_dev, NOXFRM));
if (!rth) {
err = -ENOBUFS;
goto cleanup;
@@ -2227,7 +2227,7 @@ local_input:
rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
flags | RTCF_LOCAL, res->type,
- IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
+ IN_DEV_ORCONF(in_dev, NOPOLICY), false);
if (!rth)
goto e_nobufs;
@@ -2450,8 +2450,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
add:
rth = rt_dst_alloc(dev_out, flags, type,
- IN_DEV_CONF_GET(in_dev, NOPOLICY),
- IN_DEV_CONF_GET(in_dev, NOXFRM));
+ IN_DEV_ORCONF(in_dev, NOPOLICY),
+ IN_DEV_ORCONF(in_dev, NOXFRM));
if (!rth)
return ERR_PTR(-ENOBUFS);
@@ -2872,6 +2872,9 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
if (rt->dst.dev &&
nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
goto nla_put_failure;
+ if (rt->dst.lwtstate &&
+ lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
+ goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
if (rt->dst.tclassid &&
nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b2bc3d7fe9e8..ed42d2193c5c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -954,7 +954,7 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
* importantly be able to generate EPOLLOUT for Edge Trigger epoll()
* users.
*/
-static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
+void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
{
if (skb && !skb->len) {
tcp_unlink_write_queue(skb, sk);
@@ -964,6 +964,68 @@ static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
}
}
+struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
+ struct page *page, int offset, size_t *size)
+{
+ struct sk_buff *skb = tcp_write_queue_tail(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ bool can_coalesce;
+ int copy, i;
+
+ if (!skb || (copy = size_goal - skb->len) <= 0 ||
+ !tcp_skb_can_collapse_to(skb)) {
+new_segment:
+ if (!sk_stream_memory_free(sk))
+ return NULL;
+
+ skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
+ tcp_rtx_and_write_queues_empty(sk));
+ if (!skb)
+ return NULL;
+
+#ifdef CONFIG_TLS_DEVICE
+ skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
+ skb_entail(sk, skb);
+ copy = size_goal;
+ }
+
+ if (copy > *size)
+ copy = *size;
+
+ i = skb_shinfo(skb)->nr_frags;
+ can_coalesce = skb_can_coalesce(skb, i, page, offset);
+ if (!can_coalesce && i >= sysctl_max_skb_frags) {
+ tcp_mark_push(tp, skb);
+ goto new_segment;
+ }
+ if (!sk_wmem_schedule(sk, copy))
+ return NULL;
+
+ if (can_coalesce) {
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
+ } else {
+ get_page(page);
+ skb_fill_page_desc(skb, i, page, offset, copy);
+ }
+
+ if (!(flags & MSG_NO_SHARED_FRAGS))
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+
+ skb->len += copy;
+ skb->data_len += copy;
+ skb->truesize += copy;
+ sk_wmem_queued_add(sk, copy);
+ sk_mem_charge(sk, copy);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
+ TCP_SKB_CB(skb)->end_seq += copy;
+ tcp_skb_pcount_set(skb, 0);
+
+ *size = copy;
+ return skb;
+}
+
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
{
@@ -999,60 +1061,13 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
goto out_err;
while (size > 0) {
- struct sk_buff *skb = tcp_write_queue_tail(sk);
- int copy, i;
- bool can_coalesce;
-
- if (!skb || (copy = size_goal - skb->len) <= 0 ||
- !tcp_skb_can_collapse_to(skb)) {
-new_segment:
- if (!sk_stream_memory_free(sk))
- goto wait_for_space;
-
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
- tcp_rtx_and_write_queues_empty(sk));
- if (!skb)
- goto wait_for_space;
-
-#ifdef CONFIG_TLS_DEVICE
- skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
-#endif
- skb_entail(sk, skb);
- copy = size_goal;
- }
-
- if (copy > size)
- copy = size;
+ struct sk_buff *skb;
+ size_t copy = size;
- i = skb_shinfo(skb)->nr_frags;
- can_coalesce = skb_can_coalesce(skb, i, page, offset);
- if (!can_coalesce && i >= sysctl_max_skb_frags) {
- tcp_mark_push(tp, skb);
- goto new_segment;
- }
- if (!sk_wmem_schedule(sk, copy))
+ skb = tcp_build_frag(sk, size_goal, flags, page, offset, &copy);
+ if (!skb)
goto wait_for_space;
- if (can_coalesce) {
- skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
- } else {
- get_page(page);
- skb_fill_page_desc(skb, i, page, offset, copy);
- }
-
- if (!(flags & MSG_NO_SHARED_FRAGS))
- skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
-
- skb->len += copy;
- skb->data_len += copy;
- skb->truesize += copy;
- sk_wmem_queued_add(sk, copy);
- sk_mem_charge(sk, copy);
- skb->ip_summed = CHECKSUM_PARTIAL;
- WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
- TCP_SKB_CB(skb)->end_seq += copy;
- tcp_skb_pcount_set(skb, 0);
-
if (!copied)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
@@ -1743,52 +1758,272 @@ int tcp_mmap(struct file *file, struct socket *sock,
}
EXPORT_SYMBOL(tcp_mmap);
+static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
+ u32 *offset_frag)
+{
+ skb_frag_t *frag;
+
+ offset_skb -= skb_headlen(skb);
+ if ((int)offset_skb < 0 || skb_has_frag_list(skb))
+ return NULL;
+
+ frag = skb_shinfo(skb)->frags;
+ while (offset_skb) {
+ if (skb_frag_size(frag) > offset_skb) {
+ *offset_frag = offset_skb;
+ return frag;
+ }
+ offset_skb -= skb_frag_size(frag);
+ ++frag;
+ }
+ *offset_frag = 0;
+ return frag;
+}
+
+static bool can_map_frag(const skb_frag_t *frag)
+{
+ return skb_frag_size(frag) == PAGE_SIZE && !skb_frag_off(frag);
+}
+
+static int find_next_mappable_frag(const skb_frag_t *frag,
+ int remaining_in_skb)
+{
+ int offset = 0;
+
+ if (likely(can_map_frag(frag)))
+ return 0;
+
+ while (offset < remaining_in_skb && !can_map_frag(frag)) {
+ offset += skb_frag_size(frag);
+ ++frag;
+ }
+ return offset;
+}
+
+static void tcp_zerocopy_set_hint_for_skb(struct sock *sk,
+ struct tcp_zerocopy_receive *zc,
+ struct sk_buff *skb, u32 offset)
+{
+ u32 frag_offset, partial_frag_remainder = 0;
+ int mappable_offset;
+ skb_frag_t *frag;
+
+ /* worst case: skip to next skb. try to improve on this case below */
+ zc->recv_skip_hint = skb->len - offset;
+
+ /* Find the frag containing this offset (and how far into that frag) */
+ frag = skb_advance_to_frag(skb, offset, &frag_offset);
+ if (!frag)
+ return;
+
+ if (frag_offset) {
+ struct skb_shared_info *info = skb_shinfo(skb);
+
+ /* We read part of the last frag, must recvmsg() rest of skb. */
+ if (frag == &info->frags[info->nr_frags - 1])
+ return;
+
+ /* Else, we must at least read the remainder in this frag. */
+ partial_frag_remainder = skb_frag_size(frag) - frag_offset;
+ zc->recv_skip_hint -= partial_frag_remainder;
+ ++frag;
+ }
+
+ /* partial_frag_remainder: If part way through a frag, must read rest.
+ * mappable_offset: Bytes till next mappable frag, *not* counting bytes
+ * in partial_frag_remainder.
+ */
+ mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint);
+ zc->recv_skip_hint = mappable_offset + partial_frag_remainder;
+}
+
+static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags,
+ struct scm_timestamping_internal *tss,
+ int *cmsg_flags);
+static int receive_fallback_to_copy(struct sock *sk,
+ struct tcp_zerocopy_receive *zc, int inq)
+{
+ unsigned long copy_address = (unsigned long)zc->copybuf_address;
+ struct scm_timestamping_internal tss_unused;
+ int err, cmsg_flags_unused;
+ struct msghdr msg = {};
+ struct iovec iov;
+
+ zc->length = 0;
+ zc->recv_skip_hint = 0;
+
+ if (copy_address != zc->copybuf_address)
+ return -EINVAL;
+
+ err = import_single_range(READ, (void __user *)copy_address,
+ inq, &iov, &msg.msg_iter);
+ if (err)
+ return err;
+
+ err = tcp_recvmsg_locked(sk, &msg, inq, /*nonblock=*/1, /*flags=*/0,
+ &tss_unused, &cmsg_flags_unused);
+ if (err < 0)
+ return err;
+
+ zc->copybuf_len = err;
+ if (likely(zc->copybuf_len)) {
+ struct sk_buff *skb;
+ u32 offset;
+
+ skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset);
+ if (skb)
+ tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset);
+ }
+ return 0;
+}
+
+static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
+ struct sk_buff *skb, u32 copylen,
+ u32 *offset, u32 *seq)
+{
+ unsigned long copy_address = (unsigned long)zc->copybuf_address;
+ struct msghdr msg = {};
+ struct iovec iov;
+ int err;
+
+ if (copy_address != zc->copybuf_address)
+ return -EINVAL;
+
+ err = import_single_range(READ, (void __user *)copy_address,
+ copylen, &iov, &msg.msg_iter);
+ if (err)
+ return err;
+ err = skb_copy_datagram_msg(skb, *offset, &msg, copylen);
+ if (err)
+ return err;
+ zc->recv_skip_hint -= copylen;
+ *offset += copylen;
+ *seq += copylen;
+ return (__s32)copylen;
+}
+
+static int tcp_zerocopy_handle_leftover_data(struct tcp_zerocopy_receive *zc,
+ struct sock *sk,
+ struct sk_buff *skb,
+ u32 *seq,
+ s32 copybuf_len)
+{
+ u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint);
+
+ if (!copylen)
+ return 0;
+ /* skb is null if inq < PAGE_SIZE. */
+ if (skb)
+ offset = *seq - TCP_SKB_CB(skb)->seq;
+ else
+ skb = tcp_recv_skb(sk, *seq, &offset);
+
+ zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset,
+ seq);
+ return zc->copybuf_len < 0 ? 0 : copylen;
+}
+
+static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
+ struct page **pending_pages,
+ unsigned long pages_remaining,
+ unsigned long *address,
+ u32 *length,
+ u32 *seq,
+ struct tcp_zerocopy_receive *zc,
+ u32 total_bytes_to_map,
+ int err)
+{
+ /* At least one page did not map. Try zapping if we skipped earlier. */
+ if (err == -EBUSY &&
+ zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) {
+ u32 maybe_zap_len;
+
+ maybe_zap_len = total_bytes_to_map - /* All bytes to map */
+ *length + /* Mapped or pending */
+ (pages_remaining * PAGE_SIZE); /* Failed map. */
+ zap_page_range(vma, *address, maybe_zap_len);
+ err = 0;
+ }
+
+ if (!err) {
+ unsigned long leftover_pages = pages_remaining;
+ int bytes_mapped;
+
+ /* We called zap_page_range, try to reinsert. */
+ err = vm_insert_pages(vma, *address,
+ pending_pages,
+ &pages_remaining);
+ bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining);
+ *seq += bytes_mapped;
+ *address += bytes_mapped;
+ }
+ if (err) {
+ /* Either we were unable to zap, OR we zapped, retried an
+ * insert, and still had an issue. Either way, pages_remaining
+ * is the number of pages we were unable to map, and we unroll
+ * some state we speculatively touched before.
+ */
+ const int bytes_not_mapped = PAGE_SIZE * pages_remaining;
+
+ *length -= bytes_not_mapped;
+ zc->recv_skip_hint += bytes_not_mapped;
+ }
+ return err;
+}
+
static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma,
struct page **pages,
- unsigned long pages_to_map,
- unsigned long *insert_addr,
- u32 *length_with_pending,
+ unsigned int pages_to_map,
+ unsigned long *address,
+ u32 *length,
u32 *seq,
- struct tcp_zerocopy_receive *zc)
+ struct tcp_zerocopy_receive *zc,
+ u32 total_bytes_to_map)
{
unsigned long pages_remaining = pages_to_map;
- int bytes_mapped;
- int ret;
+ unsigned int pages_mapped;
+ unsigned int bytes_mapped;
+ int err;
- ret = vm_insert_pages(vma, *insert_addr, pages, &pages_remaining);
- bytes_mapped = PAGE_SIZE * (pages_to_map - pages_remaining);
+ err = vm_insert_pages(vma, *address, pages, &pages_remaining);
+ pages_mapped = pages_to_map - (unsigned int)pages_remaining;
+ bytes_mapped = PAGE_SIZE * pages_mapped;
/* Even if vm_insert_pages fails, it may have partially succeeded in
* mapping (some but not all of the pages).
*/
*seq += bytes_mapped;
- *insert_addr += bytes_mapped;
- if (ret) {
- /* But if vm_insert_pages did fail, we have to unroll some state
- * we speculatively touched before.
- */
- const int bytes_not_mapped = PAGE_SIZE * pages_remaining;
- *length_with_pending -= bytes_not_mapped;
- zc->recv_skip_hint += bytes_not_mapped;
- }
- return ret;
+ *address += bytes_mapped;
+
+ if (likely(!err))
+ return 0;
+
+ /* Error: maybe zap and retry + rollback state for failed inserts. */
+ return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped,
+ pages_remaining, address, length, seq, zc, total_bytes_to_map,
+ err);
}
+#define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32
static int tcp_zerocopy_receive(struct sock *sk,
struct tcp_zerocopy_receive *zc)
{
+ u32 length = 0, offset, vma_len, avail_len, copylen = 0;
unsigned long address = (unsigned long)zc->address;
- u32 length = 0, seq, offset, zap_len;
- #define PAGE_BATCH_SIZE 8
- struct page *pages[PAGE_BATCH_SIZE];
+ struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE];
+ s32 copybuf_len = zc->copybuf_len;
+ struct tcp_sock *tp = tcp_sk(sk);
const skb_frag_t *frags = NULL;
+ unsigned int pages_to_map = 0;
struct vm_area_struct *vma;
struct sk_buff *skb = NULL;
- unsigned long pg_idx = 0;
- unsigned long curr_addr;
- struct tcp_sock *tp;
- int inq;
+ u32 seq = tp->copied_seq;
+ u32 total_bytes_to_map;
+ int inq = tcp_inq(sk);
int ret;
+ zc->copybuf_len = 0;
+
if (address & (PAGE_SIZE - 1) || address != zc->address)
return -EINVAL;
@@ -1797,7 +2032,16 @@ static int tcp_zerocopy_receive(struct sock *sk,
sock_rps_record_flow(sk);
- tp = tcp_sk(sk);
+ if (inq && inq <= copybuf_len)
+ return receive_fallback_to_copy(sk, zc, inq);
+
+ if (inq < PAGE_SIZE) {
+ zc->length = 0;
+ zc->recv_skip_hint = inq;
+ if (!inq && sock_flag(sk, SOCK_DONE))
+ return -EIO;
+ return 0;
+ }
mmap_read_lock(current->mm);
@@ -1806,33 +2050,26 @@ static int tcp_zerocopy_receive(struct sock *sk,
mmap_read_unlock(current->mm);
return -EINVAL;
}
- zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
-
- seq = tp->copied_seq;
- inq = tcp_inq(sk);
- zc->length = min_t(u32, zc->length, inq);
- zap_len = zc->length & ~(PAGE_SIZE - 1);
- if (zap_len) {
- zap_page_range(vma, address, zap_len);
+ vma_len = min_t(unsigned long, zc->length, vma->vm_end - address);
+ avail_len = min_t(u32, vma_len, inq);
+ total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
+ if (total_bytes_to_map) {
+ if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
+ zap_page_range(vma, address, total_bytes_to_map);
+ zc->length = total_bytes_to_map;
zc->recv_skip_hint = 0;
} else {
- zc->recv_skip_hint = zc->length;
+ zc->length = avail_len;
+ zc->recv_skip_hint = avail_len;
}
ret = 0;
- curr_addr = address;
while (length + PAGE_SIZE <= zc->length) {
+ int mappable_offset;
+ struct page *page;
+
if (zc->recv_skip_hint < PAGE_SIZE) {
- /* If we're here, finish the current batch. */
- if (pg_idx) {
- ret = tcp_zerocopy_vm_insert_batch(vma, pages,
- pg_idx,
- &curr_addr,
- &length,
- &seq, zc);
- if (ret)
- goto out;
- pg_idx = 0;
- }
+ u32 offset_frag;
+
if (skb) {
if (zc->recv_skip_hint > 0)
break;
@@ -1842,56 +2079,57 @@ static int tcp_zerocopy_receive(struct sock *sk,
skb = tcp_recv_skb(sk, seq, &offset);
}
zc->recv_skip_hint = skb->len - offset;
- offset -= skb_headlen(skb);
- if ((int)offset < 0 || skb_has_frag_list(skb))
+ frags = skb_advance_to_frag(skb, offset, &offset_frag);
+ if (!frags || offset_frag)
break;
- frags = skb_shinfo(skb)->frags;
- while (offset) {
- if (skb_frag_size(frags) > offset)
- goto out;
- offset -= skb_frag_size(frags);
- frags++;
- }
}
- if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) {
- int remaining = zc->recv_skip_hint;
- while (remaining && (skb_frag_size(frags) != PAGE_SIZE ||
- skb_frag_off(frags))) {
- remaining -= skb_frag_size(frags);
- frags++;
- }
- zc->recv_skip_hint -= remaining;
+ mappable_offset = find_next_mappable_frag(frags,
+ zc->recv_skip_hint);
+ if (mappable_offset) {
+ zc->recv_skip_hint = mappable_offset;
break;
}
- pages[pg_idx] = skb_frag_page(frags);
- pg_idx++;
+ page = skb_frag_page(frags);
+ prefetchw(page);
+ pages[pages_to_map++] = page;
length += PAGE_SIZE;
zc->recv_skip_hint -= PAGE_SIZE;
frags++;
- if (pg_idx == PAGE_BATCH_SIZE) {
- ret = tcp_zerocopy_vm_insert_batch(vma, pages, pg_idx,
- &curr_addr, &length,
- &seq, zc);
+ if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE ||
+ zc->recv_skip_hint < PAGE_SIZE) {
+ /* Either full batch, or we're about to go to next skb
+ * (and we cannot unroll failed ops across skbs).
+ */
+ ret = tcp_zerocopy_vm_insert_batch(vma, pages,
+ pages_to_map,
+ &address, &length,
+ &seq, zc,
+ total_bytes_to_map);
if (ret)
goto out;
- pg_idx = 0;
+ pages_to_map = 0;
}
}
- if (pg_idx) {
- ret = tcp_zerocopy_vm_insert_batch(vma, pages, pg_idx,
- &curr_addr, &length, &seq,
- zc);
+ if (pages_to_map) {
+ ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map,
+ &address, &length, &seq,
+ zc, total_bytes_to_map);
}
out:
mmap_read_unlock(current->mm);
- if (length) {
+ /* Try to copy straggler data. */
+ if (!ret)
+ copylen = tcp_zerocopy_handle_leftover_data(zc, sk, skb, &seq,
+ copybuf_len);
+
+ if (length + copylen) {
WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
/* Clean up data we have read: This will do ACK frames. */
tcp_recv_skb(sk, seq, &offset);
- tcp_cleanup_rbuf(sk, length);
+ tcp_cleanup_rbuf(sk, length + copylen);
ret = 0;
if (length == zc->length)
zc->recv_skip_hint = 0;
@@ -2013,36 +2251,28 @@ static int tcp_inq_hint(struct sock *sk)
* Probably, code can be easily improved even more.
*/
-int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
- int flags, int *addr_len)
+static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags,
+ struct scm_timestamping_internal *tss,
+ int *cmsg_flags)
{
struct tcp_sock *tp = tcp_sk(sk);
int copied = 0;
u32 peek_seq;
u32 *seq;
unsigned long used;
- int err, inq;
+ int err;
int target; /* Read at least this many bytes */
long timeo;
struct sk_buff *skb, *last;
u32 urg_hole = 0;
- struct scm_timestamping_internal tss;
- int cmsg_flags;
-
- if (unlikely(flags & MSG_ERRQUEUE))
- return inet_recv_error(sk, msg, len, addr_len);
-
- if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
- (sk->sk_state == TCP_ESTABLISHED))
- sk_busy_loop(sk, nonblock);
-
- lock_sock(sk);
err = -ENOTCONN;
if (sk->sk_state == TCP_LISTEN)
goto out;
- cmsg_flags = tp->recvmsg_inq ? 1 : 0;
+ if (tp->recvmsg_inq)
+ *cmsg_flags = 1;
timeo = sock_rcvtimeo(sk, nonblock);
/* Urgent data needs to be handled specially. */
@@ -2222,8 +2452,8 @@ skip_copy:
}
if (TCP_SKB_CB(skb)->has_rxtstamp) {
- tcp_update_recv_tstamps(skb, &tss);
- cmsg_flags |= 2;
+ tcp_update_recv_tstamps(skb, tss);
+ *cmsg_flags |= 2;
}
if (used + offset < skb->len)
@@ -2249,22 +2479,9 @@ found_fin_ok:
/* Clean up data we have read: This will do ACK frames. */
tcp_cleanup_rbuf(sk, copied);
-
- release_sock(sk);
-
- if (cmsg_flags) {
- if (cmsg_flags & 2)
- tcp_recv_timestamp(msg, sk, &tss);
- if (cmsg_flags & 1) {
- inq = tcp_inq_hint(sk);
- put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
- }
- }
-
return copied;
out:
- release_sock(sk);
return err;
recv_urg:
@@ -2275,6 +2492,36 @@ recv_sndq:
err = tcp_peek_sndq(sk, msg, len);
goto out;
}
+
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+ int flags, int *addr_len)
+{
+ int cmsg_flags = 0, ret, inq;
+ struct scm_timestamping_internal tss;
+
+ if (unlikely(flags & MSG_ERRQUEUE))
+ return inet_recv_error(sk, msg, len, addr_len);
+
+ if (sk_can_busy_loop(sk) &&
+ skb_queue_empty_lockless(&sk->sk_receive_queue) &&
+ sk->sk_state == TCP_ESTABLISHED)
+ sk_busy_loop(sk, nonblock);
+
+ lock_sock(sk);
+ ret = tcp_recvmsg_locked(sk, msg, len, nonblock, flags, &tss,
+ &cmsg_flags);
+ release_sock(sk);
+
+ if (cmsg_flags && ret >= 0) {
+ if (cmsg_flags & 2)
+ tcp_recv_timestamp(msg, sk, &tss);
+ if (cmsg_flags & 1) {
+ inq = tcp_inq_hint(sk);
+ put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
+ }
+ }
+ return ret;
+}
EXPORT_SYMBOL(tcp_recvmsg);
void tcp_set_state(struct sock *sk, int state)
@@ -2405,13 +2652,12 @@ bool tcp_check_oom(struct sock *sk, int shift)
return too_many_orphans || out_of_socket_memory;
}
-void tcp_close(struct sock *sk, long timeout)
+void __tcp_close(struct sock *sk, long timeout)
{
struct sk_buff *skb;
int data_was_unread = 0;
int state;
- lock_sock(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
if (sk->sk_state == TCP_LISTEN) {
@@ -2575,6 +2821,12 @@ adjudge_to_death:
out:
bh_unlock_sock(sk);
local_bh_enable();
+}
+
+void tcp_close(struct sock *sk, long timeout)
+{
+ lock_sock(sk);
+ __tcp_close(sk, timeout);
release_sock(sk);
sock_put(sk);
}
@@ -3022,6 +3274,21 @@ int tcp_sock_set_keepcnt(struct sock *sk, int val)
}
EXPORT_SYMBOL(tcp_sock_set_keepcnt);
+int tcp_set_window_clamp(struct sock *sk, int val)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!val) {
+ if (sk->sk_state != TCP_CLOSE)
+ return -EINVAL;
+ tp->window_clamp = 0;
+ } else {
+ tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
+ SOCK_MIN_RCVBUF / 2 : val;
+ }
+ return 0;
+}
+
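
The extracted helper gives in-kernel users the same semantics as the TCP_WINDOW_CLAMP socket option without a uaccess round trip. A minimal hedged sketch (the caller is invented; like the setsockopt path below, it assumes the socket lock is held):

	lock_sock(sk);
	err = tcp_set_window_clamp(sk, 64 * 1024);
	release_sock(sk);

Values below SOCK_MIN_RCVBUF / 2 are silently raised to that floor, and clearing the clamp (val == 0) only succeeds while the socket is in TCP_CLOSE.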
/*
* Socket option code for TCP.
*/
@@ -3235,15 +3502,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
break;
case TCP_WINDOW_CLAMP:
- if (!val) {
- if (sk->sk_state != TCP_CLOSE) {
- err = -EINVAL;
- break;
- }
- tp->window_clamp = 0;
- } else
- tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
- SOCK_MIN_RCVBUF / 2 : val;
+ err = tcp_set_window_clamp(sk, val);
break;
case TCP_QUICKACK:
@@ -3823,7 +4082,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
}
#ifdef CONFIG_MMU
case TCP_ZEROCOPY_RECEIVE: {
- struct tcp_zerocopy_receive zc;
+ struct tcp_zerocopy_receive zc = {};
int err;
if (get_user(len, optlen))
@@ -3840,7 +4099,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
lock_sock(sk);
err = tcp_zerocopy_receive(sk, &zc);
release_sock(sk);
- if (len == sizeof(zc))
+ if (len >= offsetofend(struct tcp_zerocopy_receive, err))
goto zerocopy_rcv_sk_err;
switch (len) {
case offsetofend(struct tcp_zerocopy_receive, err):
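
Taken together, a hypothetical user-space caller of the extended zero-copy receive interface now looks roughly like this; 'fd', 'map', 'map_len', 'copybuf' and 'copybuf_len' are invented names for a connected socket, a page-aligned mmap()ed region and a scratch buffer:

	struct tcp_zerocopy_receive zc = {
		.address	 = (__u64)(unsigned long)map,
		.length		 = map_len,
		.copybuf_address = (__u64)(unsigned long)copybuf,
		.copybuf_len	 = copybuf_len,
		.flags		 = TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT,
	};
	socklen_t zc_len = sizeof(zc);

	if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
		       &zc, &zc_len) == 0) {
		/* zc.length bytes are mapped at 'map', zc.copybuf_len
		 * bytes were copied into 'copybuf', and the remaining
		 * zc.recv_skip_hint bytes should be read via recvmsg().
		 */
	}

Small payloads (inq <= copybuf_len) skip the mapping entirely via receive_fallback_to_copy(), which is what makes zero copy a performance win for much smaller messages.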
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ef4bdb038a4b..c7e16b0ed791 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2547,7 +2547,7 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
* 1) If the packets in flight is larger than ssthresh, PRR spreads the
* cwnd reductions across a full RTT.
* 2) Otherwise PRR uses packet conservation to send as much as delivered.
- * But when the retransmits are acked without further losses, PRR
+ * But when SND_UNA is acked without further losses,
* slow starts cwnd up to ssthresh to speed up the recovery.
*/
static void tcp_init_cwnd_reduction(struct sock *sk)
@@ -2564,7 +2564,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
tcp_ecn_queue_cwr(tp);
}
-void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag)
+void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
int sndcnt = 0;
@@ -2578,8 +2578,7 @@ void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag)
u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
tp->prior_cwnd - 1;
sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
- } else if ((flag & (FLAG_RETRANS_DATA_ACKED | FLAG_LOST_RETRANS)) ==
- FLAG_RETRANS_DATA_ACKED) {
+ } else if (flag & FLAG_SND_UNA_ADVANCED && !newly_lost) {
sndcnt = min_t(int, delta,
max_t(int, tp->prr_delivered - tp->prr_out,
newly_acked_sacked) + 1);
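
A worked example of the proportional branch above, with invented numbers: snd_ssthresh = 7, prr_delivered = 5, prior_cwnd = 10 and prr_out = 3 give

	dividend = 7 * 5 + 10 - 1 = 44
	sndcnt   = 44 / 10 - 3    = 4 - 3 = 1

The '+ prior_cwnd - 1' term makes the division round up, so over a full RTT roughly ssthresh/prior_cwnd (here 70%) of delivered packets are echoed back as new transmissions, shrinking cwnd toward ssthresh.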
@@ -2690,7 +2689,22 @@ void tcp_simple_retransmit(struct sock *sk)
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- unsigned int mss = tcp_current_mss(sk);
+ int mss;
+
+ /* A fastopen SYN request is stored as two separate packets within
+ * the retransmit queue, this is done by tcp_send_syn_data().
+ * As a result simply checking the MSS of the frames in the queue
+ * will not work for the SYN packet.
+ *
+ * Us being here is an indication of a path MTU issue so we can
+ * assume that the fastopen SYN was lost and just mark all the
+ * frames in the retransmit queue as lost. We will use an MSS of
+ * -1 to mark all frames as lost, otherwise compute the current MSS.
+ */
+ if (tp->syn_data && sk->sk_state == TCP_SYN_SENT)
+ mss = -1;
+ else
+ mss = tcp_current_mss(sk);
skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
if (tcp_skb_seglen(skb) > mss)
@@ -3420,7 +3434,7 @@ static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
if (tcp_in_cwnd_reduction(sk)) {
/* Reduce cwnd if state mandates */
- tcp_cwnd_reduction(sk, acked_sacked, flag);
+ tcp_cwnd_reduction(sk, acked_sacked, rs->losses, flag);
} else if (tcp_may_raise_cwnd(sk, flag)) {
/* Advance cwnd if state allows */
tcp_cong_avoid(sk, ack, acked_sacked);
@@ -4219,10 +4233,13 @@ static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
}
/* When we get a reset we do this. */
-void tcp_reset(struct sock *sk)
+void tcp_reset(struct sock *sk, struct sk_buff *skb)
{
trace_tcp_receive_reset(sk);
+ if (sk_is_mptcp(sk))
+ mptcp_incoming_options(sk, skb);
+
/* We want the right error as BSD sees it (and indeed as we do). */
switch (sk->sk_state) {
case TCP_SYN_SENT:
@@ -5605,7 +5622,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
&tp->last_oow_ack_time))
tcp_send_dupack(sk, skb);
} else if (tcp_reset_check(sk, skb)) {
- tcp_reset(sk);
+ tcp_reset(sk, skb);
}
goto discard;
}
@@ -5641,7 +5658,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
}
if (rst_seq_match)
- tcp_reset(sk);
+ tcp_reset(sk, skb);
else {
/* Disable TFO if RST is out-of-order
* and no data has been received
@@ -6078,7 +6095,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
*/
if (th->rst) {
- tcp_reset(sk);
+ tcp_reset(sk, skb);
goto discard;
}
@@ -6520,7 +6537,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
- tcp_reset(sk);
+ tcp_reset(sk, skb);
return 1;
}
}
@@ -6801,18 +6818,13 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
/* Note: tcp_v6_init_req() might override ir_iif for link locals */
inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
- af_ops->init_req(req, sk, skb);
-
- if (security_inet_conn_request(sk, skb, req))
+ dst = af_ops->route_req(sk, skb, &fl, req);
+ if (!dst)
goto drop_and_free;
if (tmp_opt.tstamp_ok)
tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
- dst = af_ops->route_req(sk, &fl, req);
- if (!dst)
- goto drop_and_free;
-
if (!want_cookie && !isn) {
/* Kill the following clause, if you dislike this way. */
if (!net->ipv4.sysctl_tcp_syncookies &&
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 595dcc3afac5..58207c7769d0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1445,9 +1445,15 @@ static void tcp_v4_init_req(struct request_sock *req,
}
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
+ struct sk_buff *skb,
struct flowi *fl,
- const struct request_sock *req)
+ struct request_sock *req)
{
+ tcp_v4_init_req(req, sk, skb);
+
+ if (security_inet_conn_request(sk, skb, req))
+ return NULL;
+
return inet_csk_route_req(sk, &fl->u.ip4, req);
}
@@ -1467,7 +1473,6 @@ const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
.req_md5_lookup = tcp_v4_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
#endif
- .init_req = tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
.cookie_init_seq = cookie_v4_init_sequence,
#endif
@@ -2759,6 +2764,20 @@ void tcp4_proc_exit(void)
}
#endif /* CONFIG_PROC_FS */
+/* @wake is one when sk_stream_write_space() calls us.
+ * This sends EPOLLOUT only if notsent_bytes is half the limit.
+ * This mimics the strategy used in sock_def_write_space().
+ */
+bool tcp_stream_memory_free(const struct sock *sk, int wake)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ u32 notsent_bytes = READ_ONCE(tp->write_seq) -
+ READ_ONCE(tp->snd_nxt);
+
+ return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
+}
+EXPORT_SYMBOL(tcp_stream_memory_free);
+
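
To make the '<< wake' trick concrete (numbers invented): with tcp_notsent_lowat(tp) == 16384,

	wake == 0:  notsent_bytes      < 16384  -> stream is writable
	wake == 1:  notsent_bytes << 1 < 16384  -> notsent_bytes < 8192

so EPOLLOUT wakeups from sk_stream_write_space() only fire once the unsent backlog has fallen to half the limit, mirroring sock_def_write_space().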
struct proto tcp_prot = {
.name = "TCP",
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 8c643a4ffad1..e6459537d4d2 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -89,6 +89,7 @@ struct lp {
/**
* tcp_lp_init
+ * @sk: socket to initialize congestion control algorithm for
*
* Init all required variables.
* Clone the handling from Vegas module implementation.
@@ -111,6 +112,7 @@ static void tcp_lp_init(struct sock *sk)
/**
* tcp_lp_cong_avoid
+ * @sk: socket to avoid congesting
*
* Implementation of cong_avoid.
* Will only call newReno CA when away from inference.
@@ -126,6 +128,7 @@ static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
/**
* tcp_lp_remote_hz_estimator
+ * @sk: socket which needs an estimate for the remote HZs
*
* Estimate remote HZ.
* We keep on updating the estimated value, where original TCP-LP
@@ -176,6 +179,7 @@ static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
/**
* tcp_lp_owd_calculator
+ * @sk: socket to calculate one way delay for
*
* Calculate one way delay (in relative format).
* Original implement OWD as minus of remote time difference to local time
@@ -210,6 +214,8 @@ static u32 tcp_lp_owd_calculator(struct sock *sk)
/**
* tcp_lp_rtt_sample
+ * @sk: socket to add a rtt sample to
+ * @rtt: round trip time, which is ignored!
*
* Implementation of rtt_sample.
* Will take the following action,
@@ -254,6 +260,7 @@ static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
/**
* tcp_lp_pkts_acked
+ * @sk: socket requiring congestion avoidance calculations
*
* Implementation of pkts_acked.
* Deal with active drop under Early Congestion Indication.
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 495dda2449fe..0055ae0a3bf8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -801,7 +801,7 @@ embryonic_reset:
req->rsk_ops->send_reset(sk, skb);
} else if (fastopen) { /* received a valid RST pkt */
reqsk_fastopen_remove(sk, req, true);
- tcp_reset(sk);
+ tcp_reset(sk, skb);
}
if (!fastopen) {
inet_csk_reqsk_queue_drop(sk, req);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 99011768c264..f322e798a351 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -445,11 +445,12 @@ struct tcp_out_options {
struct mptcp_out_options mptcp;
};
-static void mptcp_options_write(__be32 *ptr, struct tcp_out_options *opts)
+static void mptcp_options_write(__be32 *ptr, const struct tcp_sock *tp,
+ struct tcp_out_options *opts)
{
#if IS_ENABLED(CONFIG_MPTCP)
if (unlikely(OPTION_MPTCP & opts->options))
- mptcp_write_options(ptr, &opts->mptcp);
+ mptcp_write_options(ptr, tp, &opts->mptcp);
#endif
}
@@ -701,7 +702,7 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
smc_options_write(ptr, &options);
- mptcp_options_write(ptr, opts);
+ mptcp_options_write(ptr, tp, opts);
}
static void smc_set_option(const struct tcp_sock *tp,
@@ -1038,9 +1039,9 @@ static void tcp_tsq_handler(struct sock *sk)
* transferring tsq->head because tcp_wfree() might
* interrupt us (non NAPI drivers)
*/
-static void tcp_tasklet_func(unsigned long data)
+static void tcp_tasklet_func(struct tasklet_struct *t)
{
- struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
+ struct tsq_tasklet *tsq = from_tasklet(tsq, t, tasklet);
LIST_HEAD(list);
unsigned long flags;
struct list_head *q, *n;
@@ -1125,9 +1126,7 @@ void __init tcp_tasklet_init(void)
struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
INIT_LIST_HEAD(&tsq->head);
- tasklet_init(&tsq->tasklet,
- tcp_tasklet_func,
- (unsigned long)tsq);
+ tasklet_setup(&tsq->tasklet, tcp_tasklet_func);
}
}
@@ -1348,7 +1347,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
}
}
- tcp_options_write((__be32 *)(th + 1), tp, &opts);
skb_shinfo(skb)->gso_type = sk->sk_gso_type;
if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
th->window = htons(tcp_select_window(sk));
@@ -1359,6 +1357,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
*/
th->window = htons(min(tp->rcv_wnd, 65535U));
}
+
+ tcp_options_write((__be32 *)(th + 1), tp, &opts);
+
#ifdef CONFIG_TCP_MD5SIG
/* Calculate the MD5 hash, as we have all we need now */
if (md5) {
@@ -1569,6 +1570,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
if (!buff)
return -ENOMEM; /* We'll just try again later. */
skb_copy_decrypted(buff, skb);
+ mptcp_skb_ext_copy(buff, skb);
sk_wmem_queued_add(sk, buff->truesize);
sk_mem_charge(sk, buff->truesize);
@@ -2124,6 +2126,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
if (unlikely(!buff))
return -ENOMEM;
skb_copy_decrypted(buff, skb);
+ mptcp_skb_ext_copy(buff, skb);
sk_wmem_queued_add(sk, buff->truesize);
sk_mem_charge(sk, buff->truesize);
@@ -2394,6 +2397,7 @@ static int tcp_mtu_probe(struct sock *sk)
skb = tcp_send_head(sk);
skb_copy_decrypted(nskb, skb);
+ mptcp_skb_ext_copy(nskb, skb);
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index f65a3ddd0d58..177307a3081f 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -153,6 +153,7 @@ void tcp_rack_reo_timeout(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 timeout, prior_inflight;
+ u32 lost = tp->lost;
prior_inflight = tcp_packets_in_flight(tp);
tcp_rack_detect_loss(sk, &timeout);
@@ -160,7 +161,7 @@ void tcp_rack_reo_timeout(struct sock *sk)
if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
tcp_enter_recovery(sk, false);
if (!inet_csk(sk)->icsk_ca_ops->cong_control)
- tcp_cwnd_reduction(sk, 1, 0);
+ tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
}
tcp_xmit_retransmit_queue(sk);
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9eeebd4a0054..dece195f212c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -541,7 +541,7 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
inet_sdif(skb), udptable, skb);
}
-struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport)
{
const struct iphdr *iph = ip_hdr(skb);
@@ -550,7 +550,6 @@ struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
iph->daddr, dport, inet_iif(skb),
inet_sdif(skb), &udp_table, NULL);
}
-EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
/* Must be called under rcu_read_lock().
* Does increment socket refcount.
@@ -702,7 +701,7 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
iph->saddr, uh->source, skb->dev->ifindex,
inet_sdif(skb), udptable, NULL);
- if (!sk) {
+ if (!sk || udp_sk(sk)->encap_type) {
/* No socket for error: try tunnels before discarding */
sk = ERR_PTR(-ENOENT);
if (static_branch_unlikely(&udp_encap_needed_key)) {
@@ -874,7 +873,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct udphdr *uh;
- int err = 0;
+ int err;
int is_udplite = IS_UDPLITE(sk);
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
@@ -2038,6 +2037,9 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (rc == -ENOMEM)
UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
+ else
+ UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS,
+ is_udplite);
UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
trace_udp_fail_queue_rcv_skb(rc, sk);
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 1dbece34496e..b2cee9a307d4 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -30,7 +30,7 @@ static int udp_dump_one(struct udp_table *tbl,
const struct inet_diag_req_v2 *req)
{
struct sk_buff *in_skb = cb->skb;
- int err = -EINVAL;
+ int err;
struct sock *sk = NULL;
struct sk_buff *rep;
struct net *net = sock_net(in_skb->sk);
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index c62805cd3131..ff39e94781bf 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -49,6 +49,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
__skb_pull(skb, tnl_hlen);
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb_inner_network_offset(skb));
+ skb_set_transport_header(skb, skb_inner_transport_offset(skb));
skb->mac_len = skb_inner_network_offset(skb);
skb->protocol = new_protocol;
@@ -67,6 +68,8 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
(NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
features &= skb->dev->hw_enc_features;
+ /* CRC checksum can't be handled by HW when it's a UDP tunneling packet. */
+ features &= ~NETIF_F_SCTP_CRC;
/* The only checksum offload we care about from here on out is the
* outer one so strip the existing checksum feature flags and
@@ -564,8 +567,8 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
{
__be16 newlen = htons(skb->len - nhoff);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
- int err = -ENOSYS;
struct sock *sk;
+ int err;
uh->len = newlen;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8b6eb384bac7..eff2cacd5209 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1997,6 +1997,7 @@ EXPORT_SYMBOL(ipv6_chk_prefix);
* ipv6_dev_find - find the first device with a given source address.
* @net: the net namespace
* @addr: the source address
+ * @dev: used to find the L3 domain of interest
*
* The caller should be protected by RCU, or RTNL.
*/
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e648fbebb167..a7e3d170af51 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -451,7 +451,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* BPF prog is run before any checks are done so that if the prog
* changes context in a wrong way it will be caught.
*/
- err = BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr);
+ err = BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr);
if (err)
return err;
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 78f766019b7e..51184a70ac7e 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -423,7 +423,7 @@ static void calipso_doi_free_rcu(struct rcu_head *entry)
/**
* calipso_doi_remove - Remove an existing DOI from the CALIPSO protocol engine
* @doi: the DOI value
- * @audit_secid: the LSM secid to use in the audit message
+ * @audit_info: NetLabel audit information
*
* Description:
* Removes a DOI definition from the CALIPSO engine. The NetLabel routines will
@@ -1226,7 +1226,7 @@ static int calipso_req_setattr(struct request_sock *req,
/**
* calipso_req_delattr - Delete the CALIPSO option from a request socket
- * @reg: the request socket
+ * @req: the request socket
*
* Description:
* Removes the CALIPSO option from a request socket, if present.
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 374105e4394f..6126f8bf94b3 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -906,11 +906,6 @@ void ipv6_exthdrs_exit(void)
/*
* Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
*/
-static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
-{
- return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
-}
-
static inline struct net *ipv6_skb_net(struct sk_buff *skb)
{
return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index cf6e1380b527..c3bc89b6b1a1 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1401,7 +1401,7 @@ static const struct net_device_ops ip6gre_netdev_ops = {
.ndo_start_xmit = ip6gre_tunnel_xmit,
.ndo_do_ioctl = ip6gre_tunnel_ioctl,
.ndo_change_mtu = ip6_tnl_change_mtu,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip6_tnl_get_iflink,
};
@@ -1838,7 +1838,7 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ip6_tnl_change_mtu,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip6_tnl_get_iflink,
};
@@ -1906,7 +1906,7 @@ static const struct net_device_ops ip6erspan_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ip6_tnl_change_mtu,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip6_tnl_get_iflink,
};
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 648db3fe508f..a7950baa05e5 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -94,36 +94,6 @@ static inline int ip6_tnl_mpls_supported(void)
return IS_ENABLED(CONFIG_MPLS);
}
-static struct net_device_stats *ip6_get_stats(struct net_device *dev)
-{
- struct pcpu_sw_netstats tmp, sum = { 0 };
- int i;
-
- for_each_possible_cpu(i) {
- unsigned int start;
- const struct pcpu_sw_netstats *tstats =
- per_cpu_ptr(dev->tstats, i);
-
- do {
- start = u64_stats_fetch_begin_irq(&tstats->syncp);
- tmp.rx_packets = tstats->rx_packets;
- tmp.rx_bytes = tstats->rx_bytes;
- tmp.tx_packets = tstats->tx_packets;
- tmp.tx_bytes = tstats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
-
- sum.rx_packets += tmp.rx_packets;
- sum.rx_bytes += tmp.rx_bytes;
- sum.tx_packets += tmp.tx_packets;
- sum.tx_bytes += tmp.tx_bytes;
- }
- dev->stats.rx_packets = sum.rx_packets;
- dev->stats.rx_bytes = sum.rx_bytes;
- dev->stats.tx_packets = sum.tx_packets;
- dev->stats.tx_bytes = sum.tx_bytes;
- return &dev->stats;
-}
-
#define for_each_ip6_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
@@ -204,6 +174,7 @@ ip6_tnl_lookup(struct net *net, int link,
/**
* ip6_tnl_bucket - get head of list matching given tunnel parameters
+ * @ip6n: the private data for ip6_tnl in the netns
* @p: parameters containing tunnel end-points
*
* Description:
@@ -230,6 +201,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
/**
* ip6_tnl_link - add tunnel to hash table
+ * @ip6n: the private data for ip6_tnl in the netns
* @t: tunnel to be added
**/
@@ -246,6 +218,7 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
/**
* ip6_tnl_unlink - remove tunnel from hash table
+ * @ip6n: the private data for ip6_tnl in the netns
* @t: tunnel to be removed
**/
@@ -417,6 +390,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
/**
* parse_tlv_tnl_enc_lim - handle encapsulation limit option
* @skb: received socket buffer
+ * @raw: the ICMPv6 error message data
*
* Return:
* 0 if none was found,
@@ -485,14 +459,9 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
-/**
- * ip6_tnl_err - tunnel error handler
- *
- * Description:
- * ip6_tnl_err() should handle errors in the tunnel according
- * to the specifications in RFC 2473.
- **/
-
+/* ip6_tnl_err() should handle errors in the tunnel according to the
+ * specifications in RFC 2473.
+ */
static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
u8 *type, u8 *code, int *msg, __u32 *info, int offset)
@@ -1835,7 +1804,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
.ndo_start_xmit = ip6_tnl_start_xmit,
.ndo_do_ioctl = ip6_tnl_ioctl,
.ndo_change_mtu = ip6_tnl_change_mtu,
- .ndo_get_stats = ip6_get_stats,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip6_tnl_get_iflink,
};
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 5f9c4fdc120d..0225fd694192 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -125,6 +125,7 @@ vti6_tnl_lookup(struct net *net, const struct in6_addr *remote,
/**
* vti6_tnl_bucket - get head of list matching given tunnel parameters
+ * @ip6n: the private data for ip6_vti in the netns
* @p: parameters containing tunnel end-points
*
* Description:
@@ -889,7 +890,7 @@ static const struct net_device_ops vti6_netdev_ops = {
.ndo_uninit = vti6_dev_uninit,
.ndo_start_xmit = vti6_tnl_xmit,
.ndo_do_ioctl = vti6_ioctl,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip6_tnl_get_iflink,
};
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 43a894bf9a1b..a6804a7e34c1 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -1148,7 +1148,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
if (sk->sk_type != SOCK_STREAM)
return -ENOPROTOOPT;
- msg.msg_control = optval;
+ msg.msg_control_user = optval;
msg.msg_controllen = len;
msg.msg_flags = flags;
msg.msg_control_is_user = true;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 8cd2782a31e4..6c8604390266 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -548,7 +548,7 @@ done:
}
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
- struct sockaddr_storage *p)
+ struct sockaddr_storage __user *p)
{
int err, i, count, copycount;
const struct in6_addr *group;
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 3ac5485049f0..a35019d2e480 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -61,7 +61,7 @@ reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
/* Do nothing */
break;
case IP6T_TCP_RESET:
- nf_send_reset6(net, skb, xt_hooknum(par));
+ nf_send_reset6(net, par->state->sk, skb, xt_hooknum(par));
break;
case IP6T_ICMP6_POLICY_FAIL:
nf_send_unreach6(net, skb, ICMPV6_POLICY_FAIL, xt_hooknum(par));
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 4aef6baaa55e..570d1d76c44d 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -12,6 +12,140 @@
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
+static bool nf_reject_v6_csum_ok(struct sk_buff *skb, int hook)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ int thoff;
+ __be16 fo;
+ u8 proto = ip6h->nexthdr;
+
+ if (skb_csum_unnecessary(skb))
+ return true;
+
+ if (ip6h->payload_len &&
+ pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
+ return false;
+
+ ip6h = ipv6_hdr(skb);
+ thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
+ if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
+ return false;
+
+ if (!nf_reject_verify_csum(proto))
+ return true;
+
+ return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
+}
+
+static int nf_reject_ip6hdr_validate(struct sk_buff *skb)
+{
+ struct ipv6hdr *hdr;
+ u32 pkt_len;
+
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return 0;
+
+ hdr = ipv6_hdr(skb);
+ if (hdr->version != 6)
+ return 0;
+
+ pkt_len = ntohs(hdr->payload_len);
+ if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
+ return 0;
+
+ return 1;
+}
+
+struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook)
+{
+ struct sk_buff *nskb;
+ const struct tcphdr *oth;
+ struct tcphdr _oth;
+ unsigned int otcplen;
+ struct ipv6hdr *nip6h;
+
+ if (!nf_reject_ip6hdr_validate(oldskb))
+ return NULL;
+
+ oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
+ if (!oth)
+ return NULL;
+
+ nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
+ LL_MAX_HEADER, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ nskb->dev = (struct net_device *)dev;
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+ nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+ net->ipv6.devconf_all->hop_limit);
+ nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
+ nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+ return nskb;
+}
+EXPORT_SYMBOL_GPL(nf_reject_skb_v6_tcp_reset);
+
+struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code)
+{
+ struct sk_buff *nskb;
+ struct ipv6hdr *nip6h;
+ struct icmp6hdr *icmp6h;
+ unsigned int len;
+
+ if (!nf_reject_ip6hdr_validate(oldskb))
+ return NULL;
+
+ /* Include "As much of invoking packet as possible without the ICMPv6
+ * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
+ */
+ len = min_t(unsigned int, 1220, oldskb->len);
+
+ if (!pskb_may_pull(oldskb, len))
+ return NULL;
+
+ if (!nf_reject_v6_csum_ok(oldskb, hook))
+ return NULL;
+
+ nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
+ LL_MAX_HEADER + len, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ nskb->dev = (struct net_device *)dev;
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+ nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
+ net->ipv6.devconf_all->hop_limit);
+
+ skb_reset_transport_header(nskb);
+ icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
+ icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
+ icmp6h->icmp6_code = code;
+
+ skb_put_data(nskb, skb_network_header(oldskb), len);
+ nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+ icmp6h->icmp6_cksum =
+ csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
+ nskb->len - sizeof(struct ipv6hdr),
+ IPPROTO_ICMPV6,
+ csum_partial(icmp6h,
+ nskb->len - sizeof(struct ipv6hdr),
+ 0));
+
+ return nskb;
+}
+EXPORT_SYMBOL_GPL(nf_reject_skb_v6_unreach);
+
const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *otcph,
unsigned int *otcplen, int hook)
@@ -141,7 +275,8 @@ static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in)
return 0;
}
-void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ int hook)
{
struct net_device *br_indev __maybe_unused;
struct sk_buff *nskb;
@@ -170,7 +305,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
- if (hook == NF_INET_PRE_ROUTING) {
+ if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
if (!dst)
return;
@@ -233,7 +368,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
dev_queue_xmit(nskb);
} else
#endif
- ip6_local_out(net, nskb->sk, nskb);
+ ip6_local_out(net, sk, nskb);
}
EXPORT_SYMBOL_GPL(nf_send_reset6);
@@ -268,7 +403,8 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
skb_in->dev = net->loopback_dev;
- if (hooknum == NF_INET_PRE_ROUTING && nf_reject6_fill_skb_dst(skb_in))
+ if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
+ nf_reject6_fill_skb_dst(skb_in) < 0)
return;
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
index c1098a1968e1..7969d1f3018d 100644
--- a/net/ipv6/netfilter/nft_reject_ipv6.c
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -28,7 +28,8 @@ static void nft_reject_ipv6_eval(const struct nft_expr *expr,
nft_hook(pkt));
break;
case NFT_REJECT_TCP_RST:
- nf_send_reset6(nft_net(pkt), pkt->skb, nft_hook(pkt));
+ nf_send_reset6(nft_net(pkt), pkt->xt.state->sk, pkt->skb,
+ nft_hook(pkt));
break;
default:
break;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index bbff3e02e302..d6306aa46bb1 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -126,6 +126,7 @@ static const struct snmp_mib snmp6_udp6_list[] = {
SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_ITEM("Udp6InCsumErrors", UDP_MIB_CSUMERRORS),
SNMP_MIB_ITEM("Udp6IgnoredMulti", UDP_MIB_IGNOREDMULTI),
+ SNMP_MIB_ITEM("Udp6MemErrors", UDP_MIB_MEMERRORS),
SNMP_MIB_SENTINEL
};
@@ -137,6 +138,7 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
SNMP_MIB_ITEM("UdpLite6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_ITEM("UdpLite6InCsumErrors", UDP_MIB_CSUMERRORS),
+ SNMP_MIB_ITEM("UdpLite6MemErrors", UDP_MIB_MEMERRORS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7e0ce7af8234..188e114b29b4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5558,6 +5558,10 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
goto nla_put_failure;
+
+ if (dst->lwtstate &&
+ lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
+ goto nla_put_failure;
} else if (rt->fib6_nsiblings) {
struct fib6_info *sibling, *next_sibling;
struct nlattr *mp;
@@ -6039,11 +6043,6 @@ void fib6_rt_update(struct net *net, struct fib6_info *rt,
struct sk_buff *skb;
int err = -ENOBUFS;
- /* call_fib6_entry_notifiers will be removed when in-kernel notifier
- * is implemented and supported for nexthop objects
- */
- call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);
-
skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
if (!skb)
goto errout;
diff --git a/net/ipv6/rpl.c b/net/ipv6/rpl.c
index 307f336b5353..488aec9e1a74 100644
--- a/net/ipv6/rpl.c
+++ b/net/ipv6/rpl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Authors:
* (C) 2020 Alexander Aring <alex.aring@gmail.com>
*/
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index 5fdf3ebb953f..ff691d9f4a04 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Authors:
* (C) 2020 Alexander Aring <alex.aring@gmail.com>
*/
@@ -190,18 +190,13 @@ static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt)
{
struct dst_entry *dst = skb_dst(skb);
struct rpl_iptunnel_encap *tinfo;
- int err = 0;
if (skb->protocol != htons(ETH_P_IPV6))
return -EINVAL;
tinfo = rpl_encap_lwtunnel(dst->lwtstate);
- err = rpl_do_srh_inline(skb, rlwt, tinfo->srh);
- if (err)
- return err;
-
- return 0;
+ return rpl_do_srh_inline(skb, rlwt, tinfo->srh);
}
static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index eba23279912d..b07f7c1c82a4 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -33,11 +33,35 @@
struct seg6_local_lwt;
+/* callbacks used for customizing the creation and destruction of a behavior */
+struct seg6_local_lwtunnel_ops {
+ int (*build_state)(struct seg6_local_lwt *slwt, const void *cfg,
+ struct netlink_ext_ack *extack);
+ void (*destroy_state)(struct seg6_local_lwt *slwt);
+};
+
struct seg6_action_desc {
int action;
unsigned long attrs;
+
+	/* The optattrs field specifies all the optional attributes supported
+	 * by a specific behavior.
+	 * This means that if one of these attributes is not provided in the
+	 * netlink message during behavior creation, no error is returned to
+	 * userspace.
+	 *
+	 * Each attribute can be of only one of two (mutually exclusive)
+	 * types: 1) required or 2) optional.
+	 * Every user MUST obey this rule! If you set an attribute as
+	 * required, the same attribute CANNOT be set as optional, and vice
+	 * versa.
+ */
+ unsigned long optattrs;
+
int (*input)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
int static_headroom;
+
+ struct seg6_local_lwtunnel_ops slwt_ops;
};
struct bpf_lwt_prog {
@@ -45,6 +69,28 @@ struct bpf_lwt_prog {
char *name;
};
+enum seg6_end_dt_mode {
+ DT_INVALID_MODE = -EINVAL,
+ DT_LEGACY_MODE = 0,
+ DT_VRF_MODE = 1,
+};
+
+struct seg6_end_dt_info {
+ enum seg6_end_dt_mode mode;
+
+ struct net *net;
+	/* VRF device associated with the routing table used by the SRv6
+ * End.DT4/DT6 behavior for routing IPv4/IPv6 packets.
+ */
+ int vrf_ifindex;
+ int vrf_table;
+
+ /* tunneled packet proto and family (IPv4 or IPv6) */
+ __be16 proto;
+ u16 family;
+ int hdrlen;
+};
+
struct seg6_local_lwt {
int action;
struct ipv6_sr_hdr *srh;
@@ -54,9 +100,16 @@ struct seg6_local_lwt {
int iif;
int oif;
struct bpf_lwt_prog bpf;
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ struct seg6_end_dt_info dt_info;
+#endif
int headroom;
struct seg6_action_desc *desc;
+	/* unlike the required attrs, we have to track the optional attributes
+	 * that have actually been parsed.
+ */
+ unsigned long parsed_optattrs;
};
static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt)
@@ -401,6 +454,248 @@ drop:
return -EINVAL;
}
+#ifdef CONFIG_NET_L3_MASTER_DEV
+static struct net *fib6_config_get_net(const struct fib6_config *fib6_cfg)
+{
+ const struct nl_info *nli = &fib6_cfg->fc_nlinfo;
+
+ return nli->nl_net;
+}
+
+static int __seg6_end_dt_vrf_build(struct seg6_local_lwt *slwt, const void *cfg,
+ u16 family, struct netlink_ext_ack *extack)
+{
+ struct seg6_end_dt_info *info = &slwt->dt_info;
+ int vrf_ifindex;
+ struct net *net;
+
+ net = fib6_config_get_net(cfg);
+
+ /* note that vrf_table was already set by parse_nla_vrftable() */
+ vrf_ifindex = l3mdev_ifindex_lookup_by_table_id(L3MDEV_TYPE_VRF, net,
+ info->vrf_table);
+ if (vrf_ifindex < 0) {
+ if (vrf_ifindex == -EPERM) {
+ NL_SET_ERR_MSG(extack,
+ "Strict mode for VRF is disabled");
+ } else if (vrf_ifindex == -ENODEV) {
+ NL_SET_ERR_MSG(extack,
+ "Table has no associated VRF device");
+ } else {
+ pr_debug("seg6local: SRv6 End.DT* creation error=%d\n",
+ vrf_ifindex);
+ }
+
+ return vrf_ifindex;
+ }
+
+ info->net = net;
+ info->vrf_ifindex = vrf_ifindex;
+
+ switch (family) {
+ case AF_INET:
+ info->proto = htons(ETH_P_IP);
+ info->hdrlen = sizeof(struct iphdr);
+ break;
+ case AF_INET6:
+ info->proto = htons(ETH_P_IPV6);
+ info->hdrlen = sizeof(struct ipv6hdr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ info->family = family;
+ info->mode = DT_VRF_MODE;
+
+ return 0;
+}
+
+/* The SRv6 End.DT4/DT6 behavior extracts the inner (IPv4/IPv6) packet and
+ * routes the IPv4/IPv6 packet by looking at the configured routing table.
+ *
+ * In the SRv6 End.DT4/DT6 use case, we can receive traffic (IPv6+Segment
+ * Routing Header packets) from several interfaces and the outer IPv6
+ * destination address (DA) is used for retrieving the specific instance of the
+ * End.DT4/DT6 behavior that should process the packets.
+ *
+ * However, the inner IPv4/IPv6 packet is not really bound to any receiving
+ * interface and thus the End.DT4/DT6 sets the VRF (associated with the
+ * corresponding routing table) as the *receiving* interface.
+ * In other words, the End.DT4/DT6 processes a packet as if it had been
+ * received directly by the VRF (and not by one of its slave devices, if any).
+ * In this way, the VRF interface is used for routing the IPv4/IPv6 packet
+ * according to the routing table configured by the End.DT4/DT6 instance.
+ *
+ * This design provides some useful features, such as:
+ * 1) the statistics on rx packets;
+ * 2) the possibility to install a packet sniffer on the receiving interface
+ * (the VRF one) for looking at the incoming packets;
+ * 3) the possibility to leverage the netfilter prerouting hook for the inner
+ * IPv4 packet.
+ *
+ * This function returns:
+ * - the sk_buff* when the VRF rcv handler has processed the packet correctly;
+ * - NULL when the skb is consumed by the VRF rcv handler;
+ * - a pointer which encodes a negative error number in case of error.
+ * Note that in this case, the function takes care of freeing the skb.
+ */
+static struct sk_buff *end_dt_vrf_rcv(struct sk_buff *skb, u16 family,
+ struct net_device *dev)
+{
+ /* based on l3mdev_ip_rcv; we are only interested in the master */
+ if (unlikely(!netif_is_l3_master(dev) && !netif_has_l3_rx_handler(dev)))
+ goto drop;
+
+ if (unlikely(!dev->l3mdev_ops->l3mdev_l3_rcv))
+ goto drop;
+
+	/* the decapsulated IPv4/IPv6 packet does not come with any MAC header
+	 * info. We must unset the mac header to allow the VRF device to
+	 * rebuild it, just in case there is a sniffer attached to the device.
+ */
+ skb_unset_mac_header(skb);
+
+ skb = dev->l3mdev_ops->l3mdev_l3_rcv(dev, skb, family);
+ if (!skb)
+ /* the skb buffer was consumed by the handler */
+ return NULL;
+
+ /* when a packet is received by a VRF or by one of its slaves, the
+	 * master device reference is stored in the skb.
+ */
+ if (unlikely(skb->dev != dev || skb->skb_iif != dev->ifindex))
+ goto drop;
+
+ return skb;
+
+drop:
+ kfree_skb(skb);
+ return ERR_PTR(-EINVAL);
+}
+
+static struct net_device *end_dt_get_vrf_rcu(struct sk_buff *skb,
+ struct seg6_end_dt_info *info)
+{
+ int vrf_ifindex = info->vrf_ifindex;
+ struct net *net = info->net;
+
+ if (unlikely(vrf_ifindex < 0))
+ goto error;
+
+ if (unlikely(!net_eq(dev_net(skb->dev), net)))
+ goto error;
+
+ return dev_get_by_index_rcu(net, vrf_ifindex);
+
+error:
+ return NULL;
+}
+
+static struct sk_buff *end_dt_vrf_core(struct sk_buff *skb,
+ struct seg6_local_lwt *slwt)
+{
+ struct seg6_end_dt_info *info = &slwt->dt_info;
+ struct net_device *vrf;
+
+ vrf = end_dt_get_vrf_rcu(skb, info);
+ if (unlikely(!vrf))
+ goto drop;
+
+ skb->protocol = info->proto;
+
+ skb_dst_drop(skb);
+
+ skb_set_transport_header(skb, info->hdrlen);
+
+ return end_dt_vrf_rcv(skb, info->family, vrf);
+
+drop:
+ kfree_skb(skb);
+ return ERR_PTR(-EINVAL);
+}
+
+static int input_action_end_dt4(struct sk_buff *skb,
+ struct seg6_local_lwt *slwt)
+{
+ struct iphdr *iph;
+ int err;
+
+ if (!decap_and_validate(skb, IPPROTO_IPIP))
+ goto drop;
+
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ goto drop;
+
+ skb = end_dt_vrf_core(skb, slwt);
+ if (!skb)
+ /* packet has been processed and consumed by the VRF */
+ return 0;
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ iph = ip_hdr(skb);
+
+ err = ip_route_input(skb, iph->daddr, iph->saddr, 0, skb->dev);
+ if (unlikely(err))
+ goto drop;
+
+ return dst_input(skb);
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
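+
+/* DT4 datapath recap: decap_and_validate() strips the outer IPv6/SRH
+ * (expecting an IPPROTO_IPIP payload), end_dt_vrf_core() re-injects the
+ * inner IPv4 packet through the VRF rcv path, and ip_route_input() then
+ * routes it with the VRF as the receiving interface.
+ */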
+
+static int seg6_end_dt4_build(struct seg6_local_lwt *slwt, const void *cfg,
+ struct netlink_ext_ack *extack)
+{
+ return __seg6_end_dt_vrf_build(slwt, cfg, AF_INET, extack);
+}
+
+static enum
+seg6_end_dt_mode seg6_end_dt6_parse_mode(struct seg6_local_lwt *slwt)
+{
+ unsigned long parsed_optattrs = slwt->parsed_optattrs;
+ bool legacy, vrfmode;
+
+ legacy = !!(parsed_optattrs & (1 << SEG6_LOCAL_TABLE));
+ vrfmode = !!(parsed_optattrs & (1 << SEG6_LOCAL_VRFTABLE));
+
+ if (!(legacy ^ vrfmode))
+ /* both are absent or present: invalid DT6 mode */
+ return DT_INVALID_MODE;
+
+ return legacy ? DT_LEGACY_MODE : DT_VRF_MODE;
+}
+
+static enum seg6_end_dt_mode seg6_end_dt6_get_mode(struct seg6_local_lwt *slwt)
+{
+ struct seg6_end_dt_info *info = &slwt->dt_info;
+
+ return info->mode;
+}
+
+static int seg6_end_dt6_build(struct seg6_local_lwt *slwt, const void *cfg,
+ struct netlink_ext_ack *extack)
+{
+ enum seg6_end_dt_mode mode = seg6_end_dt6_parse_mode(slwt);
+ struct seg6_end_dt_info *info = &slwt->dt_info;
+
+ switch (mode) {
+ case DT_LEGACY_MODE:
+ info->mode = DT_LEGACY_MODE;
+ return 0;
+ case DT_VRF_MODE:
+ return __seg6_end_dt_vrf_build(slwt, cfg, AF_INET6, extack);
+ default:
+ NL_SET_ERR_MSG(extack, "table or vrftable must be specified");
+ return -EINVAL;
+ }
+}
+#endif
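+
+/* For reference, and assuming the matching iproute2 keywords for the
+ * SEG6_LOCAL_TABLE / SEG6_LOCAL_VRFTABLE attributes, the two DT6 modes
+ * handled above would be configured roughly as:
+ *
+ *   # legacy mode: plain lookup in table 100
+ *   ip -6 route add 2001:db8::100/128 encap seg6local action End.DT6 \
+ *	table 100 dev eth0
+ *
+ *   # VRF mode: lookup through the VRF bound to table 100
+ *   ip -6 route add 2001:db8::200/128 encap seg6local action End.DT6 \
+ *	vrftable 100 dev vrf100
+ *
+ * Supplying both 'table' and 'vrftable' (or neither) yields DT_INVALID_MODE
+ * and hence -EINVAL from seg6_end_dt6_build().
+ */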
+
static int input_action_end_dt6(struct sk_buff *skb,
struct seg6_local_lwt *slwt)
{
@@ -410,6 +705,28 @@ static int input_action_end_dt6(struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto drop;
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ if (seg6_end_dt6_get_mode(slwt) == DT_LEGACY_MODE)
+ goto legacy_mode;
+
+ /* DT6_VRF_MODE */
+ skb = end_dt_vrf_core(skb, slwt);
+ if (!skb)
+ /* packet has been processed and consumed by the VRF */
+ return 0;
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* note: this time we do not need to specify the table because the VRF
+ * takes care of selecting the correct table.
+ */
+ seg6_lookup_any_nexthop(skb, NULL, 0, true);
+
+ return dst_input(skb);
+
+legacy_mode:
+#endif
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
seg6_lookup_any_nexthop(skb, NULL, slwt->table, true);
@@ -590,8 +907,27 @@ static struct seg6_action_desc seg6_action_table[] = {
.input = input_action_end_dx4,
},
{
+ .action = SEG6_LOCAL_ACTION_END_DT4,
+ .attrs = (1 << SEG6_LOCAL_VRFTABLE),
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ .input = input_action_end_dt4,
+ .slwt_ops = {
+ .build_state = seg6_end_dt4_build,
+ },
+#endif
+ },
+ {
.action = SEG6_LOCAL_ACTION_END_DT6,
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ .attrs = 0,
+ .optattrs = (1 << SEG6_LOCAL_TABLE) |
+ (1 << SEG6_LOCAL_VRFTABLE),
+ .slwt_ops = {
+ .build_state = seg6_end_dt6_build,
+ },
+#else
.attrs = (1 << SEG6_LOCAL_TABLE),
+#endif
.input = input_action_end_dt6,
},
{
@@ -649,6 +985,7 @@ static const struct nla_policy seg6_local_policy[SEG6_LOCAL_MAX + 1] = {
[SEG6_LOCAL_ACTION] = { .type = NLA_U32 },
[SEG6_LOCAL_SRH] = { .type = NLA_BINARY },
[SEG6_LOCAL_TABLE] = { .type = NLA_U32 },
+ [SEG6_LOCAL_VRFTABLE] = { .type = NLA_U32 },
[SEG6_LOCAL_NH4] = { .type = NLA_BINARY,
.len = sizeof(struct in_addr) },
[SEG6_LOCAL_NH6] = { .type = NLA_BINARY,
@@ -710,6 +1047,11 @@ static int cmp_nla_srh(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
return memcmp(a->srh, b->srh, len);
}
+static void destroy_attr_srh(struct seg6_local_lwt *slwt)
+{
+ kfree(slwt->srh);
+}
+
static int parse_nla_table(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
slwt->table = nla_get_u32(attrs[SEG6_LOCAL_TABLE]);
@@ -733,6 +1075,53 @@ static int cmp_nla_table(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
return 0;
}
+static struct
+seg6_end_dt_info *seg6_possible_end_dt_info(struct seg6_local_lwt *slwt)
+{
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ return &slwt->dt_info;
+#else
+ return ERR_PTR(-EOPNOTSUPP);
+#endif
+}
+
+static int parse_nla_vrftable(struct nlattr **attrs,
+ struct seg6_local_lwt *slwt)
+{
+ struct seg6_end_dt_info *info = seg6_possible_end_dt_info(slwt);
+
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->vrf_table = nla_get_u32(attrs[SEG6_LOCAL_VRFTABLE]);
+
+ return 0;
+}
+
+static int put_nla_vrftable(struct sk_buff *skb, struct seg6_local_lwt *slwt)
+{
+ struct seg6_end_dt_info *info = seg6_possible_end_dt_info(slwt);
+
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ if (nla_put_u32(skb, SEG6_LOCAL_VRFTABLE, info->vrf_table))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int cmp_nla_vrftable(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
+{
+ struct seg6_end_dt_info *info_a = seg6_possible_end_dt_info(a);
+ struct seg6_end_dt_info *info_b = seg6_possible_end_dt_info(b);
+
+ if (info_a->vrf_table != info_b->vrf_table)
+ return 1;
+
+ return 0;
+}
+
static int parse_nla_nh4(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
memcpy(&slwt->nh4, nla_data(attrs[SEG6_LOCAL_NH4]),
@@ -901,16 +1290,30 @@ static int cmp_nla_bpf(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
return strcmp(a->bpf.name, b->bpf.name);
}
+static void destroy_attr_bpf(struct seg6_local_lwt *slwt)
+{
+ kfree(slwt->bpf.name);
+ if (slwt->bpf.prog)
+ bpf_prog_put(slwt->bpf.prog);
+}
+
struct seg6_action_param {
int (*parse)(struct nlattr **attrs, struct seg6_local_lwt *slwt);
int (*put)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
int (*cmp)(struct seg6_local_lwt *a, struct seg6_local_lwt *b);
+
+ /* optional destroy() callback useful for releasing resources which
+ * have been previously acquired in the corresponding parse()
+ * function.
+ */
+ void (*destroy)(struct seg6_local_lwt *slwt);
};
static struct seg6_action_param seg6_action_params[SEG6_LOCAL_MAX + 1] = {
[SEG6_LOCAL_SRH] = { .parse = parse_nla_srh,
.put = put_nla_srh,
- .cmp = cmp_nla_srh },
+ .cmp = cmp_nla_srh,
+ .destroy = destroy_attr_srh },
[SEG6_LOCAL_TABLE] = { .parse = parse_nla_table,
.put = put_nla_table,
@@ -934,14 +1337,130 @@ static struct seg6_action_param seg6_action_params[SEG6_LOCAL_MAX + 1] = {
[SEG6_LOCAL_BPF] = { .parse = parse_nla_bpf,
.put = put_nla_bpf,
- .cmp = cmp_nla_bpf },
+ .cmp = cmp_nla_bpf,
+ .destroy = destroy_attr_bpf },
+
+ [SEG6_LOCAL_VRFTABLE] = { .parse = parse_nla_vrftable,
+ .put = put_nla_vrftable,
+ .cmp = cmp_nla_vrftable },
};
+/* call the destroy() callback (if available) for each set attribute in
+ * @parsed_attrs, starting from the first attribute up to the @max_parsed
+ * (excluded) attribute.
+ */
+static void __destroy_attrs(unsigned long parsed_attrs, int max_parsed,
+ struct seg6_local_lwt *slwt)
+{
+ struct seg6_action_param *param;
+ int i;
+
+ /* Every required seg6local attribute is identified by an ID which is
+	 * encoded as a flag (i.e., 1 << ID) in the 'attrs' bitmask;
+ *
+ * We scan the 'parsed_attrs' bitmask, starting from the first attribute
+ * up to the @max_parsed (excluded) attribute.
+ * For each set attribute, we retrieve the corresponding destroy()
+ * callback. If the callback is not available, then we skip to the next
+ * attribute; otherwise, we call the destroy() callback.
+ */
+ for (i = 0; i < max_parsed; ++i) {
+ if (!(parsed_attrs & (1 << i)))
+ continue;
+
+ param = &seg6_action_params[i];
+
+ if (param->destroy)
+ param->destroy(slwt);
+ }
+}
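+
+/* Worked example (hypothetical values): with
+ *	parsed_attrs = (1 << SEG6_LOCAL_SRH) | (1 << SEG6_LOCAL_BPF)
+ * and max_parsed = SEG6_LOCAL_MAX + 1, the loop above calls
+ * destroy_attr_srh() and then destroy_attr_bpf(), in attribute-ID order;
+ * set attributes without a destroy() callback (e.g. SEG6_LOCAL_TABLE) are
+ * simply skipped.
+ */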
+
+/* release all the resources that may have been acquired during parsing
+ * operations.
+ */
+static void destroy_attrs(struct seg6_local_lwt *slwt)
+{
+ unsigned long attrs = slwt->desc->attrs | slwt->parsed_optattrs;
+
+ __destroy_attrs(attrs, SEG6_LOCAL_MAX + 1, slwt);
+}
+
+static int parse_nla_optional_attrs(struct nlattr **attrs,
+ struct seg6_local_lwt *slwt)
+{
+ struct seg6_action_desc *desc = slwt->desc;
+ unsigned long parsed_optattrs = 0;
+ struct seg6_action_param *param;
+ int err, i;
+
+ for (i = 0; i < SEG6_LOCAL_MAX + 1; ++i) {
+ if (!(desc->optattrs & (1 << i)) || !attrs[i])
+ continue;
+
+		/* once here, the i-th attribute is provided by
+		 * userspace AND it is also marked as optional.
+ */
+ param = &seg6_action_params[i];
+
+ err = param->parse(attrs, slwt);
+ if (err < 0)
+ goto parse_optattrs_err;
+
+ /* current attribute has been correctly parsed */
+ parsed_optattrs |= (1 << i);
+ }
+
+	/* store in the tunnel state all the optional attributes successfully
+	 * parsed.
+ */
+ slwt->parsed_optattrs = parsed_optattrs;
+
+ return 0;
+
+parse_optattrs_err:
+ __destroy_attrs(parsed_optattrs, i, slwt);
+
+ return err;
+}
+
+/* call the custom constructor of the behavior during its initialization phase
+ * and after all its attributes have been parsed successfully.
+ */
+static int
+seg6_local_lwtunnel_build_state(struct seg6_local_lwt *slwt, const void *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct seg6_action_desc *desc = slwt->desc;
+ struct seg6_local_lwtunnel_ops *ops;
+
+ ops = &desc->slwt_ops;
+ if (!ops->build_state)
+ return 0;
+
+ return ops->build_state(slwt, cfg, extack);
+}
+
+/* call the custom destructor of the behavior, which is invoked just before
+ * the tunnel is destroyed.
+ */
+static void seg6_local_lwtunnel_destroy_state(struct seg6_local_lwt *slwt)
+{
+ struct seg6_action_desc *desc = slwt->desc;
+ struct seg6_local_lwtunnel_ops *ops;
+
+ ops = &desc->slwt_ops;
+ if (!ops->destroy_state)
+ return;
+
+ ops->destroy_state(slwt);
+}
+
static int parse_nla_action(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
struct seg6_action_param *param;
struct seg6_action_desc *desc;
+ unsigned long invalid_attrs;
int i, err;
desc = __get_action_desc(slwt->action);
@@ -954,6 +1473,26 @@ static int parse_nla_action(struct nlattr **attrs, struct seg6_local_lwt *slwt)
slwt->desc = desc;
slwt->headroom += desc->static_headroom;
+	/* Forcing the desc->optattrs *set* and the desc->attrs *set* to be
+	 * disjoint allows us to release the resources acquired by optional
+	 * attributes and by required attributes independently of each other,
+	 * without any interference.
+	 * In other words, we are sure that we do not release any of the
+	 * acquired resources twice.
+ *
+ * Note that if an attribute is configured both as required and as
+ * optional, it means that the user has messed something up in the
+ * seg6_action_table. Therefore, this check is required for SRv6
+ * behaviors to work properly.
+ */
+ invalid_attrs = desc->attrs & desc->optattrs;
+ if (invalid_attrs) {
+ WARN_ONCE(1,
+ "An attribute cannot be both required AND optional");
+ return -EINVAL;
+ }
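+
+	/* For instance (a hypothetical misconfiguration), an entry declaring
+	 *	.attrs    = (1 << SEG6_LOCAL_VRFTABLE),
+	 *	.optattrs = (1 << SEG6_LOCAL_VRFTABLE),
+	 * in seg6_action_table would make invalid_attrs non-zero and be
+	 * rejected right here.
+	 */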
+
+ /* parse the required attributes */
for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
if (desc->attrs & (1 << i)) {
if (!attrs[i])
@@ -963,11 +1502,24 @@ static int parse_nla_action(struct nlattr **attrs, struct seg6_local_lwt *slwt)
err = param->parse(attrs, slwt);
if (err < 0)
- return err;
+ goto parse_attrs_err;
}
}
+ /* parse the optional attributes, if any */
+ err = parse_nla_optional_attrs(attrs, slwt);
+ if (err < 0)
+ goto parse_attrs_err;
+
return 0;
+
+parse_attrs_err:
+	/* release any resources that may have been acquired during the first
+	 * i-1 parse() operations.
+ */
+ __destroy_attrs(desc->attrs, i, slwt);
+
+ return err;
}
static int seg6_local_build_state(struct net *net, struct nlattr *nla,
@@ -1003,6 +1555,10 @@ static int seg6_local_build_state(struct net *net, struct nlattr *nla,
if (err < 0)
goto out_free;
+ err = seg6_local_lwtunnel_build_state(slwt, cfg, extack);
+ if (err < 0)
+ goto out_destroy_attrs;
+
newts->type = LWTUNNEL_ENCAP_SEG6_LOCAL;
newts->flags = LWTUNNEL_STATE_INPUT_REDIRECT;
newts->headroom = slwt->headroom;
@@ -1011,8 +1567,9 @@ static int seg6_local_build_state(struct net *net, struct nlattr *nla,
return 0;
+out_destroy_attrs:
+ destroy_attrs(slwt);
out_free:
- kfree(slwt->srh);
kfree(newts);
return err;
}
@@ -1021,12 +1578,9 @@ static void seg6_local_destroy_state(struct lwtunnel_state *lwt)
{
struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
- kfree(slwt->srh);
+ seg6_local_lwtunnel_destroy_state(slwt);
- if (slwt->desc->attrs & (1 << SEG6_LOCAL_BPF)) {
- kfree(slwt->bpf.name);
- bpf_prog_put(slwt->bpf.prog);
- }
+ destroy_attrs(slwt);
return;
}
@@ -1036,13 +1590,16 @@ static int seg6_local_fill_encap(struct sk_buff *skb,
{
struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
struct seg6_action_param *param;
+ unsigned long attrs;
int i, err;
if (nla_put_u32(skb, SEG6_LOCAL_ACTION, slwt->action))
return -EMSGSIZE;
+ attrs = slwt->desc->attrs | slwt->parsed_optattrs;
+
for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
- if (slwt->desc->attrs & (1 << i)) {
+ if (attrs & (1 << i)) {
param = &seg6_action_params[i];
err = param->put(skb, slwt);
if (err < 0)
@@ -1061,7 +1618,7 @@ static int seg6_local_get_encap_size(struct lwtunnel_state *lwt)
nlsize = nla_total_size(4); /* action */
- attrs = slwt->desc->attrs;
+ attrs = slwt->desc->attrs | slwt->parsed_optattrs;
if (attrs & (1 << SEG6_LOCAL_SRH))
nlsize += nla_total_size((slwt->srh->hdrlen + 1) << 3);
@@ -1086,6 +1643,9 @@ static int seg6_local_get_encap_size(struct lwtunnel_state *lwt)
nla_total_size(MAX_PROG_NAME) +
nla_total_size(4);
+ if (attrs & (1 << SEG6_LOCAL_VRFTABLE))
+ nlsize += nla_total_size(4);
+
return nlsize;
}
@@ -1094,6 +1654,7 @@ static int seg6_local_cmp_encap(struct lwtunnel_state *a,
{
struct seg6_local_lwt *slwt_a, *slwt_b;
struct seg6_action_param *param;
+ unsigned long attrs_a, attrs_b;
int i;
slwt_a = seg6_local_lwtunnel(a);
@@ -1102,11 +1663,14 @@ static int seg6_local_cmp_encap(struct lwtunnel_state *a,
if (slwt_a->action != slwt_b->action)
return 1;
- if (slwt_a->desc->attrs != slwt_b->desc->attrs)
+ attrs_a = slwt_a->desc->attrs | slwt_a->parsed_optattrs;
+ attrs_b = slwt_b->desc->attrs | slwt_b->parsed_optattrs;
+
+ if (attrs_a != attrs_b)
return 1;
for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
- if (slwt_a->desc->attrs & (1 << i)) {
+ if (attrs_a & (1 << i)) {
param = &seg6_action_params[i];
if (param->cmp(slwt_a, slwt_b))
return 1;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 5e7983cb6154..2da0ee703779 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1395,7 +1395,7 @@ static const struct net_device_ops ipip6_netdev_ops = {
.ndo_uninit = ipip6_tunnel_uninit,
.ndo_start_xmit = sit_tunnel_xmit,
.ndo_do_ioctl = ipip6_tunnel_ioctl,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_tunnel_ctl = ipip6_tunnel_ctl,
};
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 991dc36f95ff..e254569a3005 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -829,9 +829,15 @@ static void tcp_v6_init_req(struct request_sock *req,
}
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
+ struct sk_buff *skb,
struct flowi *fl,
- const struct request_sock *req)
+ struct request_sock *req)
{
+ tcp_v6_init_req(req, sk, skb);
+
+ if (security_inet_conn_request(sk, skb, req))
+ return NULL;
+
return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
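
/* Folding tcp_v6_init_req() and the security_inet_conn_request() LSM hook
 * into ->route_req() lets a request vetoed by the LSM bail out (returning
 * NULL) before any dst is looked up; the separate ->init_req callback is
 * dropped from tcp_request_sock_ipv6_ops in the next hunk accordingly.
 */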
@@ -852,7 +858,6 @@ const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
.req_md5_lookup = tcp_v6_md5_lookup,
.calc_md5_hash = tcp_v6_md5_hash_skb,
#endif
- .init_req = tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
.cookie_init_seq = cookie_v6_init_sequence,
#endif
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 29d9691359b9..9008f5796ad4 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -276,7 +276,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
inet6_sdif(skb), udptable, skb);
}
-struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
@@ -285,7 +285,6 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
&iph->daddr, dport, inet6_iif(skb),
inet6_sdif(skb), &udp_table, NULL);
}
-EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
/* Must be called under rcu_read_lock().
* Does increment socket refcount.
@@ -560,7 +559,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
- if (!sk) {
+ if (!sk || udp_sk(sk)->encap_type) {
/* No socket for error: try tunnels before discarding */
sk = ERR_PTR(-ENOENT);
if (static_branch_unlikely(&udpv6_encap_needed_key)) {
@@ -637,6 +636,9 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (rc == -ENOMEM)
UDP6_INC_STATS(sock_net(sk),
UDP_MIB_RCVBUFERRORS, is_udplite);
+ else
+ UDP6_INC_STATS(sock_net(sk),
+ UDP_MIB_MEMERRORS, is_udplite);
UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
return -1;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index f9e888d1b9af..c7bd7b1a04c1 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -28,10 +28,6 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
int tnl_hlen;
int err;
- mss = skb_shinfo(skb)->gso_size;
- if (unlikely(skb->len <= mss))
- goto out;
-
if (skb->encapsulation && skb_shinfo(skb)->gso_type &
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
segs = skb_udp_tunnel_segment(skb, features, true);
@@ -48,6 +44,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
return __udp_gso_segment(skb, features);
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+ goto out;
+
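+	/* Moving this single-segment shortcut below the tunnel and UDP_L4
+	 * handling means it now guards only the software-UFO fallback below;
+	 * presumably that is the intent, since for the paths above gso_size
+	 * describes the inner payload rather than the whole skb.
+	 */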
/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
* do checksum of UDP packets sent as multiple IP fragments.
*/
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index db7d888914fa..882f028992c3 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -587,7 +587,7 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
- struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
char uid[sizeof(sa->siucv_user_id)];
struct sock *sk = sock->sk;
struct iucv_sock *iucv;
@@ -691,7 +691,7 @@ static int iucv_sock_autobind(struct sock *sk)
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
- struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned char user_data[16];
@@ -738,7 +738,7 @@ done:
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
- struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
int err;
@@ -874,7 +874,7 @@ done:
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
int peer)
{
- struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
+ DECLARE_SOCKADDR(struct sockaddr_iucv *, siucv, addr);
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index 864326f150e2..ad7730b68772 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -241,6 +241,7 @@ EXPORT_SYMBOL_GPL(l3mdev_link_scope_lookup);
* L3 master device
* @net: network namespace for device index lookup
* @fl: flow struct
+ * @arg: store the table the rule matched with here
*/
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 3c03f6512c5f..213ea7abc9ab 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -418,14 +418,94 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
return used;
}
+/* Handle device status changes. */
+static int lapb_device_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct lapb_cb *lapb;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return NOTIFY_DONE;
+
+ if (dev->type != ARPHRD_X25)
+ return NOTIFY_DONE;
+
+ lapb = lapb_devtostruct(dev);
+ if (!lapb)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ lapb_dbg(0, "(%p) Interface up: %s\n", dev, dev->name);
+
+ if (netif_carrier_ok(dev)) {
+ lapb_dbg(0, "(%p): Carrier is already up: %s\n", dev,
+ dev->name);
+ if (lapb->mode & LAPB_DCE) {
+ lapb_start_t1timer(lapb);
+ } else {
+ if (lapb->state == LAPB_STATE_0) {
+ lapb->state = LAPB_STATE_1;
+ lapb_establish_data_link(lapb);
+ }
+ }
+ }
+ break;
+ case NETDEV_GOING_DOWN:
+ if (netif_carrier_ok(dev))
+ lapb_disconnect_request(dev);
+ break;
+ case NETDEV_DOWN:
+ lapb_dbg(0, "(%p) Interface down: %s\n", dev, dev->name);
+ lapb_dbg(0, "(%p) S%d -> S0\n", dev, lapb->state);
+ lapb_clear_queues(lapb);
+ lapb->state = LAPB_STATE_0;
+ lapb->n2count = 0;
+ lapb_stop_t1timer(lapb);
+ lapb_stop_t2timer(lapb);
+ break;
+ case NETDEV_CHANGE:
+ if (netif_carrier_ok(dev)) {
+ lapb_dbg(0, "(%p): Carrier detected: %s\n", dev,
+ dev->name);
+ if (lapb->mode & LAPB_DCE) {
+ lapb_start_t1timer(lapb);
+ } else {
+ if (lapb->state == LAPB_STATE_0) {
+ lapb->state = LAPB_STATE_1;
+ lapb_establish_data_link(lapb);
+ }
+ }
+ } else {
+ lapb_dbg(0, "(%p) Carrier lost: %s\n", dev, dev->name);
+ lapb_dbg(0, "(%p) S%d -> S0\n", dev, lapb->state);
+ lapb_clear_queues(lapb);
+ lapb->state = LAPB_STATE_0;
+ lapb->n2count = 0;
+ lapb_stop_t1timer(lapb);
+ lapb_stop_t2timer(lapb);
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lapb_dev_notifier = {
+ .notifier_call = lapb_device_event,
+};
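+
+/* With this notifier registered at module init (below), LAPB reacts on its
+ * own to carrier and administrative state changes of ARPHRD_X25 devices:
+ * link establishment starts on carrier-up (immediately for DTE, via the
+ * T1-driven DM/SABM sequence for DCE) and state is torn down on carrier
+ * loss or interface down.
+ */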
+
static int __init lapb_init(void)
{
- return 0;
+ return register_netdevice_notifier(&lapb_dev_notifier);
}
static void __exit lapb_exit(void)
{
WARN_ON(!list_empty(&lapb_list));
+
+ unregister_netdevice_notifier(&lapb_dev_notifier);
}
MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c
index 8f5b17001a07..baa247fe4ed0 100644
--- a/net/lapb/lapb_timer.c
+++ b/net/lapb/lapb_timer.c
@@ -85,11 +85,18 @@ static void lapb_t1timer_expiry(struct timer_list *t)
switch (lapb->state) {
/*
- * If we are a DCE, keep going DM .. DM .. DM
+ * If we are a DCE, send DM up to N2 times, then switch to
+ * STATE_1 and send SABM(E).
*/
case LAPB_STATE_0:
- if (lapb->mode & LAPB_DCE)
+ if (lapb->mode & LAPB_DCE &&
+ lapb->n2count != lapb->n2) {
+ lapb->n2count++;
lapb_send_control(lapb, LAPB_DM, LAPB_POLLOFF, LAPB_RESPONSE);
+ } else {
+ lapb->state = LAPB_STATE_1;
+ lapb_establish_data_link(lapb);
+ }
break;
/*
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 1144cda2a0fc..912aa9bd5e29 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -909,6 +909,8 @@ static void llc_sk_init(struct sock *sk)
* @net: network namespace
* @family: upper layer protocol family
* @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ * @prot: struct proto associated with this new sock instance
+ * @kern: is this to be a kernel socket?
*
* Allocates a LLC sock and initializes it. Returns the new LLC sock
* or %NULL if there's no memory available for one
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index cd4cf84a7f99..cce28e3b2232 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -250,10 +250,10 @@ static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid,
mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
- capab = (u16)(amsdu << 0); /* bit 0 A-MSDU support */
- capab |= (u16)(policy << 1); /* bit 1 aggregation policy */
- capab |= (u16)(tid << 2); /* bit 5:2 TID number */
- capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */
+ capab = u16_encode_bits(amsdu, IEEE80211_ADDBA_PARAM_AMSDU_MASK);
+ capab |= u16_encode_bits(policy, IEEE80211_ADDBA_PARAM_POLICY_MASK);
+ capab |= u16_encode_bits(tid, IEEE80211_ADDBA_PARAM_TID_MASK);
+ capab |= u16_encode_bits(buf_size, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
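+	/* u16_encode_bits(val, MASK) (from <linux/bitfield.h>) shifts val
+	 * into the field selected by MASK; e.g. encoding tid with
+	 * IEEE80211_ADDBA_PARAM_TID_MASK is equivalent to the old open-coded
+	 * (tid << 2) for bits 5:2, while overflowing constant values are
+	 * caught at build time.
+	 */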
mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab);
mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b37c8a983d88..430a58587538 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -95,10 +95,10 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
mgmt->u.action.u.addba_req.dialog_token = dialog_token;
- capab = (u16)(1 << 0); /* bit 0 A-MSDU support */
- capab |= (u16)(1 << 1); /* bit 1 aggregation policy */
- capab |= (u16)(tid << 2); /* bit 5:2 TID number */
- capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggergation */
+ capab = IEEE80211_ADDBA_PARAM_AMSDU_MASK;
+ capab |= IEEE80211_ADDBA_PARAM_POLICY_MASK;
+ capab |= u16_encode_bits(tid, IEEE80211_ADDBA_PARAM_TID_MASK);
+ capab |= u16_encode_bits(agg_size, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
@@ -950,8 +950,8 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
- tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
- buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
+ tid = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_TID_MASK);
+ buf_size = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);
txq = sta->sta.txq[tid];
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 7276e66ae435..c4c70e30ad7f 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -405,6 +405,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
case WLAN_CIPHER_SUITE_WEP104:
if (WARN_ON_ONCE(fips_enabled))
return -EINVAL;
+ break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_AES_CMAC:
@@ -1121,10 +1122,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sdata->vif.bss_conf.enable_beacon = true;
sdata->vif.bss_conf.allow_p2p_go_ps = sdata->vif.p2p;
sdata->vif.bss_conf.twt_responder = params->twt_responder;
- memcpy(&sdata->vif.bss_conf.he_obss_pd, &params->he_obss_pd,
- sizeof(struct ieee80211_he_obss_pd));
- memcpy(&sdata->vif.bss_conf.he_bss_color, &params->he_bss_color,
- sizeof(struct ieee80211_he_bss_color));
+ sdata->vif.bss_conf.he_obss_pd = params->he_obss_pd;
+ sdata->vif.bss_conf.he_bss_color = params->he_bss_color;
sdata->vif.bss_conf.s1g = params->chandef.chan->band ==
NL80211_BAND_S1GHZ;
@@ -2708,16 +2707,6 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
return 0;
}
-static int ieee80211_set_wds_peer(struct wiphy *wiphy, struct net_device *dev,
- const u8 *addr)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- memcpy(&sdata->u.wds.remote_addr, addr, ETH_ALEN);
-
- return 0;
-}
-
static void ieee80211_rfkill_poll(struct wiphy *wiphy)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
@@ -3307,6 +3296,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
if (cfg80211_get_chandef_type(&params->chandef) !=
cfg80211_get_chandef_type(&sdata->u.ibss.chandef))
return -EINVAL;
+ break;
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -3458,7 +3448,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
IEEE80211_QUEUE_STOP_REASON_CSA);
cfg80211_ch_switch_started_notify(sdata->dev, &sdata->csa_chandef,
- params->count);
+ params->count, params->block_tx);
if (changed) {
ieee80211_bss_info_change_notify(sdata, changed);
@@ -4083,6 +4073,17 @@ static int ieee80211_reset_tid_config(struct wiphy *wiphy,
return ret;
}
+static int ieee80211_set_sar_specs(struct wiphy *wiphy,
+ struct cfg80211_sar_specs *sar)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ if (!local->ops->set_sar_specs)
+ return -EOPNOTSUPP;
+
+ return local->ops->set_sar_specs(&local->hw, sar);
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -4138,7 +4139,6 @@ const struct cfg80211_ops mac80211_config_ops = {
.set_wiphy_params = ieee80211_set_wiphy_params,
.set_tx_power = ieee80211_set_tx_power,
.get_tx_power = ieee80211_get_tx_power,
- .set_wds_peer = ieee80211_set_wds_peer,
.rfkill_poll = ieee80211_rfkill_poll,
CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
CFG80211_TESTMODE_DUMP(ieee80211_testmode_dump)
@@ -4186,4 +4186,5 @@ const struct cfg80211_ops mac80211_config_ops = {
.probe_mesh_link = ieee80211_probe_mesh_link,
.set_tid_config = ieee80211_set_tid_config,
.reset_tid_config = ieee80211_reset_tid_config,
+ .set_sar_specs = ieee80211_set_sar_specs,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 8f48aff74c7b..907bb1f748a1 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -9,6 +9,7 @@
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
+#include "rate.h"
static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
@@ -191,11 +192,13 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
return NULL;
}
-enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
+static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta)
{
- switch (sta->bandwidth) {
+ enum ieee80211_sta_rx_bandwidth width = ieee80211_sta_cap_rx_bw(sta);
+
+ switch (width) {
case IEEE80211_STA_RX_BW_20:
- if (sta->ht_cap.ht_supported)
+ if (sta->sta.ht_cap.ht_supported)
return NL80211_CHAN_WIDTH_20;
else
return NL80211_CHAN_WIDTH_20_NOHT;
@@ -232,7 +235,7 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
!(sta->sdata->bss && sta->sdata->bss == sdata->bss))
continue;
- max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
+ max_bw = max(max_bw, ieee80211_get_sta_bw(sta));
}
rcu_read_unlock();
@@ -275,11 +278,11 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
case NL80211_IFTYPE_NAN:
continue;
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_OCB:
width = vif->bss_conf.chandef.width;
break;
+ case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_UNSPECIFIED:
case NUM_NL80211_IFTYPES:
case NL80211_IFTYPE_MONITOR:
@@ -343,10 +346,42 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_MIN_WIDTH);
}
+static void ieee80211_chan_bw_change(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx)
+{
+ struct sta_info *sta;
+ struct ieee80211_supported_band *sband =
+ local->hw.wiphy->bands[ctx->conf.def.chan->band];
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &local->sta_list,
+ list) {
+ enum ieee80211_sta_rx_bandwidth new_sta_bw;
+
+ if (!ieee80211_sdata_running(sta->sdata))
+ continue;
+
+ if (rcu_access_pointer(sta->sdata->vif.chanctx_conf) !=
+ &ctx->conf)
+ continue;
+
+ new_sta_bw = ieee80211_sta_cur_vht_bw(sta);
+ if (new_sta_bw == sta->sta.bandwidth)
+ continue;
+
+ sta->sta.bandwidth = new_sta_bw;
+ rate_control_rate_update(local, sband, sta,
+ IEEE80211_RC_BW_CHANGED);
+ }
+ rcu_read_unlock();
+}
+
static void ieee80211_change_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
const struct cfg80211_chan_def *chandef)
{
+ enum nl80211_chan_width width;
+
if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
ieee80211_recalc_chanctx_min_def(local, ctx);
return;
@@ -354,7 +389,25 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
+ width = ctx->conf.def.width;
ctx->conf.def = *chandef;
+
+	/* expected to handle only 20/40/80/80+80/160 channel widths */
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_40:
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ if (chandef->width < width)
+ ieee80211_chan_bw_change(local, ctx);
+
drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH);
ieee80211_recalc_chanctx_min_def(local, ctx);
@@ -362,6 +415,9 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
local->_oper_chandef = *chandef;
ieee80211_hw_config(local, 0);
}
+
+ if (chandef->width > width)
+ ieee80211_chan_bw_change(local, ctx);
}
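+
+/* Ordering note for the width update above: on a narrowing change the
+ * stations are clamped to the new bandwidth *before* the driver sees the
+ * smaller chanctx width, and on a widening change only *after* it; this way
+ * a station's bandwidth presumably never exceeds the context width the
+ * driver currently has.
+ */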
static struct ieee80211_chanctx *
@@ -743,7 +799,6 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
continue;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_OCB:
break;
@@ -1052,8 +1107,14 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
if (WARN_ON(!chandef))
return -EINVAL;
+ if (old_ctx->conf.def.width > new_ctx->conf.def.width)
+ ieee80211_chan_bw_change(local, new_ctx);
+
ieee80211_change_chanctx(local, new_ctx, chandef);
+ if (old_ctx->conf.def.width < new_ctx->conf.def.width)
+ ieee80211_chan_bw_change(local, new_ctx);
+
vif_chsw[0].vif = &sdata->vif;
vif_chsw[0].old_ctx = &old_ctx->conf;
vif_chsw[0].new_ctx = &new_ctx->conf;
@@ -1444,6 +1505,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
ieee80211_recalc_smps_chanctx(local, ctx);
ieee80211_recalc_radar_chanctx(local, ctx);
ieee80211_recalc_chanctx_min_def(local, ctx);
+ ieee80211_chan_bw_change(local, ctx);
list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs,
reserved_chanctx_list) {
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 90470392fdaa..48f144f107d5 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -53,7 +53,7 @@ static const struct file_operations name## _ops = { \
DEBUGFS_READONLY_FILE_OPS(name)
#define DEBUGFS_ADD(name) \
- debugfs_create_file(#name, 0400, phyd, local, &name## _ops);
+ debugfs_create_file(#name, 0400, phyd, local, &name## _ops)
#define DEBUGFS_ADD_MODE(name, mode) \
debugfs_create_file(#name, mode, phyd, local, &name## _ops);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 98a713475e0f..f53dec8a3d5c 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -319,7 +319,7 @@ KEY_OPS(key);
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, key->debugfs.dir, \
- key, &key_##name##_ops);
+ key, &key_##name##_ops)
#define DEBUGFS_ADD_W(name) \
debugfs_create_file(#name, 0600, key->debugfs.dir, \
key, &key_##name##_ops);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index fe8a7a87e513..0ad3860852ff 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -574,9 +574,6 @@ static ssize_t ieee80211_if_parse_tsf(
IEEE80211_IF_FILE_RW(tsf);
-/* WDS attributes */
-IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
-
#ifdef CONFIG_MAC80211_MESH
IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC);
@@ -645,7 +642,7 @@ IEEE80211_IF_FILE(dot11MeshConnectedToAuthServer,
#define DEBUGFS_ADD_MODE(name, mode) \
debugfs_create_file(#name, mode, sdata->vif.debugfs_dir, \
- sdata, &name##_ops);
+ sdata, &name##_ops)
#define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400)
@@ -701,11 +698,6 @@ static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD_MODE(tsf, 0600);
}
-static void add_wds_files(struct ieee80211_sub_if_data *sdata)
-{
- DEBUGFS_ADD(peer);
-}
-
#ifdef CONFIG_MAC80211_MESH
static void add_mesh_files(struct ieee80211_sub_if_data *sdata)
@@ -719,7 +711,7 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
struct dentry *dir = debugfs_create_dir("mesh_stats",
sdata->vif.debugfs_dir);
#define MESHSTATS_ADD(name)\
- debugfs_create_file(#name, 0400, dir, sdata, &name##_ops);
+ debugfs_create_file(#name, 0400, dir, sdata, &name##_ops)
MESHSTATS_ADD(fwded_mcast);
MESHSTATS_ADD(fwded_unicast);
@@ -736,7 +728,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
sdata->vif.debugfs_dir);
#define MESHPARAMS_ADD(name) \
- debugfs_create_file(#name, 0600, dir, sdata, &name##_ops);
+ debugfs_create_file(#name, 0600, dir, sdata, &name##_ops)
MESHPARAMS_ADD(dot11MeshMaxRetries);
MESHPARAMS_ADD(dot11MeshRetryTimeout);
@@ -805,9 +797,6 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
case NL80211_IFTYPE_AP_VLAN:
add_vlan_files(sdata);
break;
- case NL80211_IFTYPE_WDS:
- add_wds_files(sdata);
- break;
default:
break;
}
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 829dcad69c2c..eb4bb79d936a 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -274,7 +274,7 @@ static ssize_t sta_aql_read(struct file *file, char __user *userbuf,
"Q limit[low/high]: VO: %u/%u VI: %u/%u BE: %u/%u BK: %u/%u\n",
q_depth[0], q_depth[1], q_depth[2], q_depth[3],
q_limit_l[0], q_limit_h[0], q_limit_l[1], q_limit_h[1],
- q_limit_l[2], q_limit_h[2], q_limit_l[3], q_limit_h[3]),
+ q_limit_l[2], q_limit_h[2], q_limit_l[3], q_limit_h[3]);
rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
@@ -985,7 +985,7 @@ STA_OPS(he_capa);
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, \
- sta->debugfs_dir, sta, &sta_ ##name## _ops);
+ sta->debugfs_dir, sta, &sta_ ##name## _ops)
#define DEBUGFS_ADD_COUNTER(name, field) \
debugfs_create_ulong(#name, 0400, sta->debugfs_dir, &sta->field);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2a21226fb518..8bf9c0e974d6 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -311,11 +311,6 @@ struct ieee80211_if_ap {
bool multicast_to_unicast;
};
-struct ieee80211_if_wds {
- struct sta_info *sta;
- u8 remote_addr[ETH_ALEN];
-};
-
struct ieee80211_if_vlan {
struct list_head list; /* write-protected with RTNL and local->mtx */
@@ -457,7 +452,9 @@ struct ieee80211_if_managed {
unsigned long probe_timeout;
int probe_send_count;
bool nullfunc_failed;
- bool connection_loss;
+ u8 connection_loss:1,
+ driver_disconnect:1,
+ reconnect:1;
struct cfg80211_bss *associated;
struct ieee80211_mgd_auth_data *auth_data;
@@ -985,7 +982,6 @@ struct ieee80211_sub_if_data {
union {
struct ieee80211_if_ap ap;
- struct ieee80211_if_wds wds;
struct ieee80211_if_vlan vlan;
struct ieee80211_if_managed mgd;
struct ieee80211_if_ibss ibss;
@@ -1593,13 +1589,8 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
{
WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START &&
status->flag & RX_FLAG_MACTIME_END);
- if (status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END))
- return true;
- /* can't handle non-legacy preamble yet */
- if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
- status->encoding == RX_ENC_LEGACY)
- return true;
- return false;
+ return !!(status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END |
+ RX_FLAG_MACTIME_PLCP_START));
}
void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata);
@@ -1795,7 +1786,7 @@ static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
/* tx handling */
void ieee80211_clear_tx_pending(struct ieee80211_local *local);
-void ieee80211_tx_pending(unsigned long data);
+void ieee80211_tx_pending(struct tasklet_struct *t);
netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct net_device *dev);
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
@@ -2146,7 +2137,7 @@ void ieee80211_txq_remove_vlan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
void ieee80211_fill_txq_stats(struct cfg80211_txq_stats *txqstats,
struct txq_info *txqi);
-void ieee80211_wake_txqs(unsigned long data);
+void ieee80211_wake_txqs(struct tasklet_struct *t);
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg, u16 status,
const u8 *extra, size_t extra_len, const u8 *bssid,
@@ -2286,7 +2277,6 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
enum ieee80211_chanctx_mode chanmode,
u8 radar_detect);
int ieee80211_max_num_channels(struct ieee80211_local *local);
-enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta);
void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 44154cc596cd..3b9ec4ef81c3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -230,10 +230,6 @@ static inline int identical_mac_addr_allowed(int type1, int type2)
type2 == NL80211_IFTYPE_MONITOR ||
type1 == NL80211_IFTYPE_P2P_DEVICE ||
type2 == NL80211_IFTYPE_P2P_DEVICE ||
- (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
- (type1 == NL80211_IFTYPE_WDS &&
- (type2 == NL80211_IFTYPE_WDS ||
- type2 == NL80211_IFTYPE_AP)) ||
(type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) ||
(type1 == NL80211_IFTYPE_AP_VLAN &&
(type2 == NL80211_IFTYPE_AP ||
@@ -417,15 +413,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
* (because if we remove a STA after ops->remove_interface()
* the driver will have removed the vif info already!)
*
- * In WDS mode a station must exist here and be flushed, for
- * AP_VLANs stations may exist since there's nothing else that
+ * For AP_VLANs stations may exist since there's nothing else that
* would have removed them, but in other modes there shouldn't
* be any stations.
*/
flushed = sta_info_flush(sdata);
- WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
- ((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
- (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1)));
+ WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN && flushed > 0);
/* don't count this interface for allmulti while it is down */
if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
@@ -552,8 +545,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
* When we get here, the interface is marked down.
* Free the remaining keys, if there are any
* (which can happen in AP mode if userspace sets
- * keys before the interface is operating, and maybe
- * also in WDS mode)
+ * keys before the interface is operating)
*
* Force the key freeing to always synchronize_net()
* to wait for the RX path in case it is using this
@@ -1022,16 +1014,11 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
struct net_device *dev = wdev->netdev;
struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
u32 changed = 0;
int res;
u32 hw_reconf_flags = 0;
switch (sdata->vif.type) {
- case NL80211_IFTYPE_WDS:
- if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
- return -ENOLINK;
- break;
case NL80211_IFTYPE_AP_VLAN: {
struct ieee80211_sub_if_data *master;
@@ -1080,6 +1067,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
case NUM_NL80211_IFTYPES:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_WDS:
/* cannot happen */
WARN_ON(1);
break;
@@ -1198,7 +1186,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
case NL80211_IFTYPE_OCB:
netif_carrier_off(dev);
break;
- case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_NAN:
break;
@@ -1220,28 +1207,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
set_bit(SDATA_STATE_RUNNING, &sdata->state);
switch (sdata->vif.type) {
- case NL80211_IFTYPE_WDS:
- /* Create STA entry for the WDS peer */
- sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
- GFP_KERNEL);
- if (!sta) {
- res = -ENOMEM;
- goto err_del_interface;
- }
-
- sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
- sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
- sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
-
- res = sta_info_insert(sta);
- if (res) {
- /* STA has been freed */
- goto err_del_interface;
- }
-
- rate_control_rate_init(sta);
- netif_carrier_on(dev);
- break;
case NL80211_IFTYPE_P2P_DEVICE:
rcu_assign_pointer(local->p2p_sdata, sdata);
break;
@@ -1358,6 +1323,7 @@ static void ieee80211_iface_work(struct work_struct *work)
while ((skb = skb_dequeue(&sdata->skb_queue))) {
struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ kcov_remote_start_common(skb_get_kcov_handle(skb));
if (ieee80211_is_action(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK) {
int len = skb->len;
@@ -1467,6 +1433,7 @@ static void ieee80211_iface_work(struct work_struct *work)
}
kfree_skb(skb);
+ kcov_remote_stop();
}
/* then other type-dependent work */
@@ -1576,9 +1543,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->u.mntr.flags = MONITOR_FLAG_CONTROL |
MONITOR_FLAG_OTHER_BSS;
break;
- case NL80211_IFTYPE_WDS:
- sdata->vif.bss_conf.bssid = NULL;
- break;
case NL80211_IFTYPE_NAN:
idr_init(&sdata->u.nan.function_inst_ids);
spin_lock_init(&sdata->u.nan.func_lock);
@@ -1589,6 +1553,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.bssid = sdata->vif.addr;
break;
case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_WDS:
case NUM_NL80211_IFTYPES:
WARN_ON(1);
break;
@@ -1633,9 +1598,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_OCB:
/*
* Could probably support everything
- * but WDS here (WDS do_open can fail
- * under memory pressure, which this
- * code isn't prepared to handle).
+		 * here.
*/
break;
case NL80211_IFTYPE_P2P_CLIENT:
@@ -1728,7 +1691,6 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
case NL80211_IFTYPE_MONITOR:
/* doesn't matter */
break;
- case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_AP_VLAN:
/* match up with an AP interface */
list_for_each_entry(sdata, &local->interfaces, list) {
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 8c5f829ff6d7..a4817aa4b171 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -1300,3 +1300,52 @@ ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
return &key->conf;
}
EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_add);
+
+void ieee80211_key_mic_failure(struct ieee80211_key_conf *keyconf)
+{
+ struct ieee80211_key *key;
+
+ key = container_of(keyconf, struct ieee80211_key, conf);
+
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ key->u.aes_cmac.icverrors++;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ key->u.aes_gmac.icverrors++;
+ break;
+ default:
+		/* ignore the others for now, we don't keep counters for them */
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ieee80211_key_mic_failure);
+
+void ieee80211_key_replay(struct ieee80211_key_conf *keyconf)
+{
+ struct ieee80211_key *key;
+
+ key = container_of(keyconf, struct ieee80211_key, conf);
+
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ key->u.ccmp.replays++;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ key->u.aes_cmac.replays++;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ key->u.aes_gmac.replays++;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ key->u.gcmp.replays++;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ieee80211_key_replay);
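
Both new exports recover the private struct ieee80211_key from the driver-visible struct ieee80211_key_conf embedded inside it, then bump the matching per-cipher counter. A minimal sketch of that container_of() recovery idiom, with illustrative struct names standing in for the mac80211 ones:

    #include <linux/kernel.h>	/* container_of() */

    struct inner { int dummy; };	/* the embedded, public view */

    struct outer {
    	unsigned int icverrors;		/* private state */
    	struct inner conf;
    };

    /* Given a pointer to the embedded member, recover the enclosing
     * object by subtracting the member's offset within the outer struct.
     */
    static struct outer *outer_from_conf(struct inner *conf)
    {
    	return container_of(conf, struct outer, conf);
    }
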
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 523380aed92e..dee88ec566ad 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -220,9 +220,9 @@ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
BSS_CHANGED_ERP_SLOT;
}
-static void ieee80211_tasklet_handler(unsigned long data)
+static void ieee80211_tasklet_handler(struct tasklet_struct *t)
{
- struct ieee80211_local *local = (struct ieee80211_local *) data;
+ struct ieee80211_local *local = from_tasklet(local, t, tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -733,16 +733,12 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
skb_queue_head_init(&local->pending[i]);
atomic_set(&local->agg_queue_stop[i], 0);
}
- tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
- (unsigned long)local);
+ tasklet_setup(&local->tx_pending_tasklet, ieee80211_tx_pending);
if (ops->wake_tx_queue)
- tasklet_init(&local->wake_txqs_tasklet, ieee80211_wake_txqs,
- (unsigned long)local);
+ tasklet_setup(&local->wake_txqs_tasklet, ieee80211_wake_txqs);
- tasklet_init(&local->tasklet,
- ieee80211_tasklet_handler,
- (unsigned long) local);
+ tasklet_setup(&local->tasklet, ieee80211_tasklet_handler);
skb_queue_head_init(&local->skb_queue);
skb_queue_head_init(&local->skb_queue_unreliable);
@@ -935,14 +931,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
return -EINVAL;
}
} else {
- /*
- * WDS is currently prohibited when channel contexts are used
- * because there's no clear definition of which channel WDS
- * type interfaces use
- */
- if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS))
- return -EINVAL;
-
/* DFS is not supported with multi-channel combinations yet */
for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
const struct ieee80211_iface_combination *comb;
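
These hunks are part of the tree-wide tasklet API modernization: instead of smuggling the context pointer through an unsigned long, the callback now receives the struct tasklet_struct itself and recovers its container with from_tasklet(), a type-safe container_of() wrapper. A before/after sketch with illustrative my_ctx/my_work names:

    #include <linux/interrupt.h>

    struct my_ctx {
    	struct tasklet_struct tasklet;
    	/* ... driver state ... */
    };

    static void my_work(struct tasklet_struct *t)
    {
    	/* recover the enclosing context from the embedded tasklet */
    	struct my_ctx *ctx = from_tasklet(ctx, t, tasklet);

    	(void)ctx;	/* ... process ctx ... */
    }

    static void my_init(struct my_ctx *ctx)
    {
    	/* was: tasklet_init(&ctx->tasklet, fn, (unsigned long)ctx); */
    	tasklet_setup(&ctx->tasklet, my_work);
    }
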
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index ce5825d6f1d1..97095b7c9c64 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -667,6 +667,35 @@ void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
}
}
+static void
+ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,
+ u8 *ie, u8 ie_len)
+{
+ struct ieee80211_supported_band *sband;
+ const u8 *cap;
+ const struct ieee80211_he_operation *he_oper = NULL;
+
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return;
+
+ if (!ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT) ||
+ sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ return;
+
+ sdata->vif.bss_conf.he_support = true;
+
+ cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
+ if (cap && cap[1] >= ieee80211_he_oper_size(&cap[3]))
+ he_oper = (void *)(cap + 3);
+
+ if (he_oper)
+ sdata->vif.bss_conf.he_oper.params =
+ __le32_to_cpu(he_oper->he_oper_params);
+}
+
/**
* ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame
* @hdr: 802.11 frame header
@@ -943,6 +972,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
bcn->tail_len = skb->len;
memcpy(bcn->tail, skb->data, bcn->tail_len);
+ ieee80211_mesh_update_bss_params(sdata, bcn->tail, bcn->tail_len);
bcn->meshconf = (struct ieee80211_meshconf_ie *)
(bcn->tail + ifmsh->meshconf_offset);
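
ieee80211_mesh_update_bss_params() looks up the HE Operation element, which is an "extended" element: the on-air element ID is WLAN_EID_EXTENSION (255) and the real identifier lives in the first payload byte. That layout explains the cap[1] length check and the cap + 3 payload offset above; a commented sketch of what cfg80211_find_ext_ie() hands back:

    /* Extended element as returned by cfg80211_find_ext_ie():
     *
     *   cap[0]   = WLAN_EID_EXTENSION (255)
     *   cap[1]   = length, covering the extension ID byte plus payload
     *   cap[2]   = extension ID (here WLAN_EID_EXT_HE_OPERATION)
     *   cap[3..] = payload (here struct ieee80211_he_operation)
     *
     * Hence "cap[1] >= ieee80211_he_oper_size(&cap[3])" bounds-checks the
     * variable-length HE Operation body before cap + 3 is dereferenced.
     */
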
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 6adfcb9c06dc..0e4d950cf907 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1417,6 +1417,17 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
return;
}
+ if (sdata->vif.bss_conf.chandef.chan->band !=
+ csa_ie.chandef.chan->band) {
+ sdata_info(sdata,
+ "AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
+ ifmgd->associated->bssid,
+ csa_ie.chandef.chan->center_freq,
+ csa_ie.chandef.width, csa_ie.chandef.center_freq1,
+ csa_ie.chandef.center_freq2);
+ goto lock_and_drop_connection;
+ }
+
if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chandef,
IEEE80211_CHAN_DISABLED)) {
sdata_info(sdata,
@@ -1429,9 +1440,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
csa_ie.chandef.width, csa_ie.chandef.center_freq1,
csa_ie.chandef.freq1_offset,
csa_ie.chandef.center_freq2);
- ieee80211_queue_work(&local->hw,
- &ifmgd->csa_connection_drop_work);
- return;
+ goto lock_and_drop_connection;
}
if (cfg80211_chandef_identical(&csa_ie.chandef,
@@ -1493,6 +1502,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
sdata->csa_chandef = csa_ie.chandef;
sdata->csa_block_tx = csa_ie.mode;
ifmgd->csa_ignored_same_chan = false;
+ ifmgd->beacon_crc_valid = false;
if (sdata->csa_block_tx)
ieee80211_stop_vif_queues(local, sdata,
@@ -1500,7 +1510,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&local->mtx);
cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef,
- csa_ie.count);
+ csa_ie.count, csa_ie.mode);
if (local->ops->channel_switch) {
/* use driver's channel switch callback */
@@ -1516,6 +1526,9 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
TU_TO_EXP_TIME((csa_ie.count - 1) *
cbss->beacon_interval));
return;
+ lock_and_drop_connection:
+ mutex_lock(&local->mtx);
+ mutex_lock(&local->chanctx_mtx);
drop_connection:
/*
* This is just so that the disconnect flow will know that
@@ -1560,9 +1573,17 @@ ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata,
chan_increment = 1;
break;
case NL80211_BAND_5GHZ:
- case NL80211_BAND_6GHZ:
chan_increment = 4;
break;
+ case NL80211_BAND_6GHZ:
+ /*
+ * In the 6 GHz band, the "maximum transmit power level"
+ * field in the triplets is reserved, and thus will be
+ * zero and we shouldn't use it to control TX power.
+ * The actual TX power will be given in the transmit
+ * power envelope element instead.
+ */
+ return false;
}
/* find channel */
@@ -2382,6 +2403,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
/* clear bssid only after building the needed mgmt frames */
eth_zero_addr(ifmgd->bssid);
+ sdata->vif.bss_conf.ssid_len = 0;
+
/* remove AP and TDLS peers */
sta_info_flush(sdata);
@@ -2720,7 +2743,7 @@ EXPORT_SYMBOL(ieee80211_ap_probereq_get);
static void ieee80211_report_disconnect(struct ieee80211_sub_if_data *sdata,
const u8 *buf, size_t len, bool tx,
- u16 reason)
+ u16 reason, bool reconnect)
{
struct ieee80211_event event = {
.type = MLME_EVENT,
@@ -2729,7 +2752,7 @@ static void ieee80211_report_disconnect(struct ieee80211_sub_if_data *sdata,
};
if (tx)
- cfg80211_tx_mlme_mgmt(sdata->dev, buf, len);
+ cfg80211_tx_mlme_mgmt(sdata->dev, buf, len, reconnect);
else
cfg80211_rx_mlme_mgmt(sdata->dev, buf, len);
@@ -2751,13 +2774,18 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
tx = !sdata->csa_block_tx;
- /* AP is probably out of range (or not reachable for another reason) so
- * remove the bss struct for that AP.
- */
- cfg80211_unlink_bss(local->hw.wiphy, ifmgd->associated);
+ if (!ifmgd->driver_disconnect) {
+ /*
+ * AP is probably out of range (or not reachable for another
+ * reason) so remove the bss struct for that AP.
+ */
+ cfg80211_unlink_bss(local->hw.wiphy, ifmgd->associated);
+ }
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
- WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
+ ifmgd->driver_disconnect ?
+ WLAN_REASON_DEAUTH_LEAVING :
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
tx, frame_buf);
mutex_lock(&local->mtx);
sdata->vif.csa_active = false;
@@ -2770,7 +2798,9 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&local->mtx);
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
- WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
+ ifmgd->reconnect);
+ ifmgd->reconnect = false;
sdata_unlock(sdata);
}
@@ -2789,6 +2819,13 @@ static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
sdata_info(sdata, "Connection to AP %pM lost\n",
ifmgd->bssid);
__ieee80211_disconnect(sdata);
+ ifmgd->connection_loss = false;
+ } else if (ifmgd->driver_disconnect) {
+ sdata_info(sdata,
+ "Driver requested disconnection from AP %pM\n",
+ ifmgd->bssid);
+ __ieee80211_disconnect(sdata);
+ ifmgd->driver_disconnect = false;
} else {
ieee80211_mgd_probe_ap(sdata, true);
}
@@ -2827,6 +2864,21 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif)
}
EXPORT_SYMBOL(ieee80211_connection_loss);
+void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_hw *hw = &sdata->local->hw;
+
+ trace_api_disconnect(sdata, reconnect);
+
+ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
+ return;
+
+ sdata->u.mgd.driver_disconnect = true;
+ sdata->u.mgd.reconnect = reconnect;
+ ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
+}
+EXPORT_SYMBOL(ieee80211_disconnect);
static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
bool assoc)
@@ -3130,7 +3182,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false,
- reason_code);
+ reason_code, false);
return;
}
@@ -3179,7 +3231,8 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
- ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code);
+ ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code,
+ false);
}
static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
@@ -3199,8 +3252,8 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
*have_higher_than_11mbit = true;
/*
- * Skip HT, VHT and HE BSS membership selectors since they're
- * not rates.
+ * Skip HT, VHT, HE and SAE H2E only BSS membership selectors
+ * since they're not rates.
*
* Note: Even though the membership selector and the basic
* rate flag share the same bit, they are not exactly
@@ -3208,7 +3261,8 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
*/
if (supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) ||
supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY) ||
- supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HE_PHY))
+ supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HE_PHY) ||
+ supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_SAE_H2E))
continue;
for (j = 0; j < sband->n_bitrates; j++) {
@@ -3494,14 +3548,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK);
- bss_conf->multi_sta_back_32bit =
- sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
- IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP;
-
- bss_conf->ack_enabled =
- sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
- IEEE80211_HE_MAC_CAP2_ACK_EN;
-
bss_conf->uora_exists = !!elems->uora_element;
if (elems->uora_element)
bss_conf->uora_ocw_range = elems->uora_element[0];
@@ -4199,7 +4245,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
true, deauth_buf);
ieee80211_report_disconnect(sdata, deauth_buf,
sizeof(deauth_buf), true,
- WLAN_REASON_DEAUTH_LEAVING);
+ WLAN_REASON_DEAUTH_LEAVING,
+ false);
return;
}
@@ -4344,7 +4391,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
tx, frame_buf);
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
- reason);
+ reason, false);
}
static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
@@ -4716,7 +4763,8 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
if (ifmgd->auth_data)
ieee80211_destroy_auth_data(sdata, false);
cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
- IEEE80211_DEAUTH_FRAME_LEN);
+ IEEE80211_DEAUTH_FRAME_LEN,
+ false);
}
/* This is a bit of a hack - we should find a better and more generic
@@ -5430,7 +5478,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
- WLAN_REASON_UNSPECIFIED);
+ WLAN_REASON_UNSPECIFIED,
+ false);
}
sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
@@ -5471,6 +5520,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data;
const struct cfg80211_bss_ies *beacon_ies;
struct ieee80211_supported_band *sband;
+ struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
const u8 *ssidie, *ht_ie, *vht_ie;
int i, err;
bool override = false;
@@ -5488,6 +5538,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
}
memcpy(assoc_data->ssid, ssidie + 2, ssidie[1]);
assoc_data->ssid_len = ssidie[1];
+ memcpy(bss_conf->ssid, assoc_data->ssid, assoc_data->ssid_len);
+ bss_conf->ssid_len = assoc_data->ssid_len;
rcu_read_unlock();
if (ifmgd->associated) {
@@ -5502,7 +5554,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
- WLAN_REASON_UNSPECIFIED);
+ WLAN_REASON_UNSPECIFIED,
+ false);
}
if (ifmgd->auth_data && !ifmgd->auth_data->done) {
@@ -5801,7 +5854,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_destroy_auth_data(sdata, false);
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
- req->reason_code);
+ req->reason_code, false);
return 0;
}
@@ -5821,7 +5874,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_destroy_assoc_data(sdata, false, true);
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
- req->reason_code);
+ req->reason_code, false);
return 0;
}
@@ -5836,7 +5889,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
req->reason_code, tx, frame_buf);
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
- req->reason_code);
+ req->reason_code, false);
return 0;
}
@@ -5869,7 +5922,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
frame_buf);
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
- req->reason_code);
+ req->reason_code, false);
return 0;
}
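
The mlme.c changes add a driver-triggered disconnect path: ieee80211_disconnect() flags the managed interface and defers the actual teardown to the existing beacon_connection_loss_work, optionally asking cfg80211 (and thus userspace) to reconnect afterwards. A minimal sketch of how a driver might use the new export; the error-handler name is illustrative:

    #include <net/mac80211.h>

    /* Hypothetical handler for an unrecoverable firmware error: drop the
     * association cleanly, but hint that a reconnect should be attempted.
     */
    static void drv_handle_fatal_error(struct ieee80211_vif *vif)
    {
    	if (vif->type == NL80211_IFTYPE_STATION)
    		ieee80211_disconnect(vif, true);
    }
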
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 38c45e1dafd8..ae378a41c927 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -150,21 +150,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
case NL80211_IFTYPE_STATION:
ieee80211_mgd_quiesce(sdata);
break;
- case NL80211_IFTYPE_WDS:
- /* tear down aggregation sessions and remove STAs */
- mutex_lock(&local->sta_mtx);
- sta = sdata->u.wds.sta;
- if (sta && sta->uploaded) {
- enum ieee80211_sta_state state;
-
- state = sta->sta_state;
- for (; state > IEEE80211_STA_NOTEXIST; state--)
- WARN_ON(drv_sta_state(local, sta->sdata,
- sta, state,
- state - 1));
- }
- mutex_unlock(&local->sta_mtx);
- break;
default:
break;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1e2e5a406d58..13b9bcc4865d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -32,16 +32,6 @@
#include "wme.h"
#include "rate.h"
-static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
-{
- struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
-
- u64_stats_update_begin(&tstats->syncp);
- tstats->rx_packets++;
- tstats->rx_bytes += len;
- u64_stats_update_end(&tstats->syncp);
-}
-
/*
* monitor mode reception
*
@@ -842,7 +832,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
if (skb) {
skb->dev = sdata->dev;
- ieee80211_rx_stats(skb->dev, skb->len);
+ dev_sw_netstats_rx_add(skb->dev, skb->len);
netif_receive_skb(skb);
}
}
@@ -1477,7 +1467,6 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
if (unlikely((ieee80211_is_data(hdr->frame_control) ||
ieee80211_is_pspoll(hdr->frame_control)) &&
rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
- rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
/*
@@ -1758,7 +1747,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
sta->rx_stats.last_rx = jiffies;
} else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
- is_multicast_ether_addr(hdr->addr1)) {
+ !is_multicast_ether_addr(hdr->addr1)) {
/*
* Mesh beacons will update last_rx if they are found to
* match the current local configuration when processed.
@@ -2560,7 +2549,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
skb = rx->skb;
xmit_skb = NULL;
- ieee80211_rx_stats(dev, skb->len);
+ dev_sw_netstats_rx_add(dev, skb->len);
if (rx->sta) {
/* The seqno index has the same property as needed
@@ -3699,7 +3688,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
}
prev_dev = sdata->dev;
- ieee80211_rx_stats(sdata->dev, skb->len);
+ dev_sw_netstats_rx_add(sdata->dev, skb->len);
}
if (prev_dev) {
@@ -4080,10 +4069,6 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
return false;
return true;
- case NL80211_IFTYPE_WDS:
- if (bssid || !ieee80211_is_data(hdr->frame_control))
- return false;
- return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
case NL80211_IFTYPE_P2P_DEVICE:
return ieee80211_is_public_action(hdr, skb->len) ||
ieee80211_is_probe_req(hdr->frame_control) ||
@@ -4416,7 +4401,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
skb->dev = fast_rx->dev;
- ieee80211_rx_stats(fast_rx->dev, skb->len);
+ dev_sw_netstats_rx_add(fast_rx->dev, skb->len);
/* The seqno index has the same property as needed
* for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
@@ -4742,6 +4727,8 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
status->rx_flags = 0;
+ kcov_remote_start_common(skb_get_kcov_handle(skb));
+
/*
* Frames with failed FCS/PLCP checksum are not returned,
* all other frames are returned without radiotap header
@@ -4749,15 +4736,15 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
* Also, frames with less than 16 bytes are dropped.
*/
skb = ieee80211_rx_monitor(local, skb, rate);
- if (!skb)
- return;
-
- ieee80211_tpt_led_trig_rx(local,
- ((struct ieee80211_hdr *)skb->data)->frame_control,
- skb->len);
+ if (skb) {
+ ieee80211_tpt_led_trig_rx(local,
+ ((struct ieee80211_hdr *)skb->data)->frame_control,
+ skb->len);
- __ieee80211_rx_handle_packet(hw, pubsta, skb, list);
+ __ieee80211_rx_handle_packet(hw, pubsta, skb, list);
+ }
+ kcov_remote_stop();
return;
drop:
kfree_skb(skb);
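
The open-coded pcpu_sw_netstats updates here (and the TX-side ones further below) are replaced by the new common helpers dev_sw_netstats_rx_add() and dev_sw_netstats_tx_add(). A sketch of roughly what the RX helper does, matching the deleted ieee80211_rx_stats() and assuming dev->tstats points at valid per-cpu stats:

    #include <linux/netdevice.h>
    #include <linux/u64_stats_sync.h>

    static inline void sw_netstats_rx_add_sketch(struct net_device *dev, u32 len)
    {
    	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

    	/* the seqcount keeps 64-bit counter reads consistent on 32-bit */
    	u64_stats_update_begin(&tstats->syncp);
    	tstats->rx_packets++;
    	tstats->rx_bytes += len;
    	u64_stats_update_end(&tstats->syncp);
    }
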
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 89723907a094..601322e16957 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016-2017 Intel Deutschland GmbH
-* Copyright (C) 2018 - 2019 Intel Corporation
+* Copyright (C) 2018 - 2020 Intel Corporation
*/
#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -2086,6 +2086,27 @@ TRACE_EVENT(api_connection_loss,
)
);
+TRACE_EVENT(api_disconnect,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata, bool reconnect),
+
+ TP_ARGS(sdata, reconnect),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ __field(int, reconnect)
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ __entry->reconnect = reconnect;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT " reconnect:%d",
+ VIF_PR_ARG, __entry->reconnect
+ )
+);
+
TRACE_EVENT(api_cqm_rssi_notify,
TP_PROTO(struct ieee80211_sub_if_data *sdata,
enum nl80211_cqm_rssi_threshold_event rssi_event,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 56a4d0d20a26..6422da6690f7 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -38,16 +38,6 @@
/* misc utils */
-static inline void ieee80211_tx_stats(struct net_device *dev, u32 len)
-{
- struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
-
- u64_stats_update_begin(&tstats->syncp);
- tstats->tx_packets++;
- tstats->tx_bytes += len;
- u64_stats_update_end(&tstats->syncp);
-}
-
static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
struct sk_buff *skb, int group_addr,
int next_frag_len)
@@ -319,9 +309,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (tx->sdata->vif.type == NL80211_IFTYPE_OCB)
return TX_CONTINUE;
- if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
- return TX_CONTINUE;
-
if (tx->flags & IEEE80211_TX_PS_BUFFERED)
return TX_CONTINUE;
@@ -2113,6 +2100,9 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
info->flags |= IEEE80211_TX_CTL_NO_ACK;
if (txflags & IEEE80211_RADIOTAP_F_TX_NOSEQNO)
info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO;
+ if (txflags & IEEE80211_RADIOTAP_F_TX_ORDER)
+ info->control.flags |=
+ IEEE80211_TX_CTRL_DONT_REORDER;
break;
case IEEE80211_RADIOTAP_RATE:
@@ -2279,11 +2269,13 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
payload[7]);
}
- /*
- * Initialize skb->priority for QoS frames. This is put in the TID field
- * of the frame before passing it to the driver.
+ /* Initialize skb->priority for QoS frames. If the DONT_REORDER flag
+ * is set, stick to the default value for skb->priority to ensure
+ * that frames injected with this flag are not reordered relative
+ * to each other.
*/
- if (ieee80211_is_data_qos(hdr->frame_control)) {
+ if (ieee80211_is_data_qos(hdr->frame_control) &&
+ !(info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER)) {
u8 *p = ieee80211_get_qos_ctl(hdr);
skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
}
@@ -2295,8 +2287,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
* we handle as though they are non-injected frames.
* This code here isn't entirely correct, the local MAC address
* isn't always enough to find the interface to use; for proper
- * VLAN/WDS support we will need a different mechanism (which
- * likely isn't going to be monitor interfaces).
+ * VLAN support we have an nl80211-based mechanism.
*
* This is necessary, for example, for old hostapd versions that
* don't use nl80211-based management TX/RX.
@@ -2307,8 +2298,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
if (!ieee80211_sdata_running(tmp_sdata))
continue;
if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
+ tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
continue;
if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
sdata = tmp_sdata;
@@ -2402,9 +2392,6 @@ int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
}
sta = sta_info_get_bss(sdata, skb->data);
break;
- case NL80211_IFTYPE_WDS:
- sta = sta_info_get(sdata, sdata->u.wds.remote_addr);
- break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
/* determined much later */
@@ -2580,20 +2567,6 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
hdrlen = 24;
band = chanctx_conf->def.chan->band;
break;
- case NL80211_IFTYPE_WDS:
- fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
- /* RA TA DA SA */
- memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
- memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
- memcpy(hdr.addr3, skb->data, ETH_ALEN);
- memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
- hdrlen = 30;
- /*
- * This is the exception! WDS style interfaces are prohibited
- * when channel contexts are in used so this must be valid
- */
- band = local->hw.conf.chandef.chan->band;
- break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
if (!is_multicast_ether_addr(skb->data)) {
@@ -3403,7 +3376,7 @@ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
if (key)
info->control.hw_key = &key->conf;
- ieee80211_tx_stats(skb->dev, skb->len);
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
@@ -4021,7 +3994,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
goto out;
}
- ieee80211_tx_stats(dev, skb->len);
+ dev_sw_netstats_tx_add(dev, 1, skb->len);
ieee80211_xmit(sdata, sta, skb);
}
@@ -4248,7 +4221,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
- ieee80211_tx_stats(dev, skb->len);
+ dev_sw_netstats_tx_add(dev, 1, skb->len);
sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
@@ -4418,9 +4391,10 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
/*
* Transmit all pending packets. Called from tasklet.
*/
-void ieee80211_tx_pending(unsigned long data)
+void ieee80211_tx_pending(struct tasklet_struct *t)
{
- struct ieee80211_local *local = (struct ieee80211_local *)data;
+ struct ieee80211_local *local = from_tasklet(local, t,
+ tx_pending_tasklet);
unsigned long flags;
int i;
bool txok;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 94e624e9439b..8d3ae6b2f95f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -386,9 +386,10 @@ _ieee80211_wake_txqs(struct ieee80211_local *local, unsigned long *flags)
rcu_read_unlock();
}
-void ieee80211_wake_txqs(unsigned long data)
+void ieee80211_wake_txqs(struct tasklet_struct *t)
{
- struct ieee80211_local *local = (struct ieee80211_local *)data;
+ struct ieee80211_local *local = from_tasklet(local, t,
+ wake_txqs_tasklet);
unsigned long flags;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
@@ -2513,7 +2514,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
return res;
}
break;
- case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_P2P_DEVICE:
@@ -2523,6 +2523,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
case NUM_NL80211_IFTYPES:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_WDS:
WARN_ON(1);
break;
}
@@ -3665,6 +3666,7 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
u64 ts = status->mactime;
struct rate_info ri;
u16 rate;
+ u8 n_ltf;
if (WARN_ON(!ieee80211_have_rx_timestamp(status)))
return 0;
@@ -3675,11 +3677,58 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
/* Fill cfg80211 rate info */
switch (status->encoding) {
+ case RX_ENC_HE:
+ ri.flags |= RATE_INFO_FLAGS_HE_MCS;
+ ri.mcs = status->rate_idx;
+ ri.nss = status->nss;
+ ri.he_ru_alloc = status->he_ru;
+ if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
+ ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ /*
+ * See P802.11ax_D6.0, section 27.3.4 for
+ * HE PPDU format.
+ */
+ if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
+ mpdu_offset += 2;
+ ts += 36;
+
+ /*
+ * TODO:
+ * For HE MU PPDU, add the HE-SIG-B.
+ * For HE ER PPDU, add 8us for the HE-SIG-A.
+ * For HE TB PPDU, add 4us for the HE-STF.
+ * Add the HE-LTF durations - variable.
+ */
+ }
+
+ break;
case RX_ENC_HT:
ri.mcs = status->rate_idx;
ri.flags |= RATE_INFO_FLAGS_MCS;
if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ /*
+ * See P802.11REVmd_D3.0, section 19.3.2 for
+ * HT PPDU format.
+ */
+ if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
+ mpdu_offset += 2;
+ if (status->enc_flags & RX_ENC_FLAG_HT_GF)
+ ts += 24;
+ else
+ ts += 32;
+
+ /*
+ * Add Data HT-LTFs per stream
+ * TODO: add Extension HT-LTFs, 4us per LTF
+ */
+ n_ltf = ((ri.mcs >> 3) & 3) + 1;
+ n_ltf = n_ltf == 3 ? 4 : n_ltf;
+ ts += n_ltf * 4;
+ }
+
break;
case RX_ENC_VHT:
ri.flags |= RATE_INFO_FLAGS_VHT_MCS;
@@ -3687,6 +3736,23 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
ri.nss = status->nss;
if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ /*
+ * See P802.11REVmd_D3.0, section 21.3.2 for
+ * VHT PPDU format.
+ */
+ if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
+ mpdu_offset += 2;
+ ts += 36;
+
+ /*
+ * Add VHT-LTFs per stream
+ */
+ n_ltf = (ri.nss != 1) && (ri.nss % 2) ?
+ ri.nss + 1 : ri.nss;
+ ts += 4 * n_ltf;
+ }
+
break;
default:
WARN_ON(1);
@@ -3710,7 +3776,6 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift));
if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
- /* TODO: handle HT/VHT preambles */
if (status->band == NL80211_BAND_5GHZ) {
ts += 20 << shift;
mpdu_offset += 2;
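
Worked example for the LTF terms added above: for HT, the stream count is Nss = ((mcs >> 3) & 3) + 1, and the Data HT-LTF count is 1, 2, 4, 4 for 1-4 streams, which is what the "n_ltf == 3 ? 4 : n_ltf" fixup encodes. An MCS 21 PPDU therefore has Nss = 3, four HT-LTFs, and contributes 4 * 4 = 16 us to the preamble duration. For VHT, the LTF count is Nss rounded up to the next even value except in the single-stream case (1, 2, 4, 4, 6, 6, 8, 8 for 1-8 streams), which is exactly what "(ri.nss != 1) && (ri.nss % 2) ? ri.nss + 1 : ri.nss" computes.
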
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index fb0e3a657d2d..c3ca97373774 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -465,12 +465,18 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
* IEEE80211-2016 specification makes higher bandwidth operation
* possible on the TDLS link if the peers have wider bandwidth
* capability.
+ *
+ * However, in this case, and only if the TDLS peer is authorized,
+ * limit to the tdls_chandef so that the configuration here isn't
+ * wider than what's actually requested on the channel context.
*/
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
- test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
- return bw;
-
- bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+ test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) &&
+ test_sta_flag(sta, WLAN_STA_AUTHORIZED) &&
+ sta->tdls_chandef.chan)
+ bw = min(bw, ieee80211_chan_width_to_rx_bw(sta->tdls_chandef.width));
+ else
+ bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
return bw;
}
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 2fb99325135a..9ea6004abe1b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -118,9 +118,11 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr)
{
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u8 *p;
- if (local->hw.queues < IEEE80211_NUM_ACS)
+ if ((info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER) ||
+ local->hw.queues < IEEE80211_NUM_ACS)
return 0;
if (!ieee80211_is_data(hdr->frame_control)) {
@@ -141,6 +143,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb)
{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mac80211_qos_map *qos_map;
bool qos;
@@ -153,7 +156,7 @@ u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
else
qos = false;
- if (!qos) {
+ if (!qos || (info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER)) {
skb->priority = 0; /* required for correct WPA/11i MIC */
return IEEE80211_AC_BE;
}
@@ -202,9 +205,6 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP:
ra = skb->data;
break;
- case NL80211_IFTYPE_WDS:
- ra = sdata->u.wds.remote_addr;
- break;
case NL80211_IFTYPE_STATION:
/* might be a TDLS station */
sta = sta_info_get(sdata, skb->data);
@@ -249,6 +249,14 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
p = ieee80211_get_qos_ctl(hdr);
+ /* don't overwrite the QoS field of injected frames */
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ /* do take into account Ack policy of injected frames */
+ if (*p & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ return;
+ }
+
/* set up the first byte */
/*
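
Taken together, the IEEE80211_TX_CTRL_DONT_REORDER pieces keep injected frames in their injection order: ieee80211_parse_tx_radiotap() sets the flag from the new radiotap TX-flags bit, ieee80211_monitor_start_xmit() then leaves skb->priority at its default rather than deriving it from the QoS TID, and both queue-selection paths above pin such frames to a single queue (queue 0, i.e. AC_BE), so frames injected back to back can never overtake each other on different hardware queues.
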
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 06ea0f8bfd5c..520cedc594e1 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -20,9 +20,9 @@
#include "ieee802154_i.h"
#include "cfg.h"
-static void ieee802154_tasklet_handler(unsigned long data)
+static void ieee802154_tasklet_handler(struct tasklet_struct *t)
{
- struct ieee802154_local *local = (struct ieee802154_local *)data;
+ struct ieee802154_local *local = from_tasklet(local, t, tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->skb_queue))) {
@@ -91,9 +91,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
INIT_LIST_HEAD(&local->interfaces);
mutex_init(&local->iflist_mtx);
- tasklet_init(&local->tasklet,
- ieee802154_tasklet_handler,
- (unsigned long)local);
+ tasklet_setup(&local->tasklet, ieee802154_tasklet_handler);
skb_queue_head_init(&local->skb_queue);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index f2868a8a50c3..47bab701555f 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -377,6 +377,8 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
if (!pskb_may_pull(skb, sizeof(*hdr)))
goto err;
+ skb_dst_drop(skb);
+
/* Read and decode the label */
hdr = mpls_hdr(skb);
dec = mpls_entry_decode(hdr);
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index 54b888f94009..96ba616f59bf 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -18,6 +18,7 @@ struct mptcp_pernet {
struct ctl_table_header *ctl_table_hdr;
int mptcp_enabled;
+ unsigned int add_addr_timeout;
};
static struct mptcp_pernet *mptcp_get_pernet(struct net *net)
@@ -30,6 +31,11 @@ int mptcp_is_enabled(struct net *net)
return mptcp_get_pernet(net)->mptcp_enabled;
}
+unsigned int mptcp_get_add_addr_timeout(struct net *net)
+{
+ return mptcp_get_pernet(net)->add_addr_timeout;
+}
+
static struct ctl_table mptcp_sysctl_table[] = {
{
.procname = "enabled",
@@ -40,12 +46,19 @@ static struct ctl_table mptcp_sysctl_table[] = {
*/
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "add_addr_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
{}
};
static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
{
pernet->mptcp_enabled = 1;
+ pernet->add_addr_timeout = TCP_RTO_MAX;
}
static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
@@ -61,6 +74,7 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
}
table[0].data = &pernet->mptcp_enabled;
+ table[1].data = &pernet->add_addr_timeout;
hdr = register_net_sysctl(net, MPTCP_SYSCTL_PATH, table);
if (!hdr)
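
The new per-namespace knob is handled by proc_dointvec_jiffies, so the value is stored in jiffies (defaulting to TCP_RTO_MAX, i.e. 120 seconds) but read and written in seconds via net.mptcp.add_addr_timeout. A minimal sketch of the same pernet-sysctl registration pattern, with abbreviated names standing in for the mptcp ones:

    #include <linux/slab.h>
    #include <linux/sysctl.h>
    #include <net/net_namespace.h>

    struct my_pernet { unsigned int timeout; };	/* stored in jiffies */

    static struct ctl_table my_table[] = {
    	{
    		.procname	= "my_timeout",
    		.maxlen		= sizeof(unsigned int),
    		.mode		= 0644,
    		/* user space sees seconds, storage is jiffies */
    		.proc_handler	= proc_dointvec_jiffies,
    	},
    	{}
    };

    static int my_new_table(struct net *net, struct my_pernet *pernet)
    {
    	struct ctl_table_header *hdr;
    	struct ctl_table *table;

    	table = kmemdup(my_table, sizeof(my_table), GFP_KERNEL);
    	if (!table)
    		return -ENOMEM;

    	table[0].data = &pernet->timeout;	/* this namespace's copy */
    	hdr = register_net_sysctl(net, "net/my_proto", table);
    	if (!hdr) {
    		kfree(table);
    		return -ENOMEM;
    	}
    	/* hdr would be kept for unregistering on netns exit */
    	return 0;
    }
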
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index 5f390a97f556..b70ae4ba3000 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -140,7 +140,7 @@ static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
info->mptcpi_flags = flags;
info->mptcpi_token = READ_ONCE(msk->token);
info->mptcpi_write_seq = READ_ONCE(msk->write_seq);
- info->mptcpi_snd_una = atomic64_read(&msk->snd_una);
+ info->mptcpi_snd_una = READ_ONCE(msk->snd_una);
info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq);
unlock_sock_fast(sk, slow);
}
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 90cd52df99a6..c5328f407aab 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -242,7 +242,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->add_addr = 1;
mp_opt->addr_id = *ptr++;
- pr_debug("ADD_ADDR: id=%d, echo=%d", mp_opt->addr_id, mp_opt->echo);
if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
memcpy((u8 *)&mp_opt->addr.s_addr, (u8 *)ptr, 4);
ptr += 4;
@@ -267,6 +266,9 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->ahmac = get_unaligned_be64(ptr);
ptr += 8;
}
+ pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
+ (mp_opt->family == MPTCP_ADDR_IPVERSION_6) ? "6" : "",
+ mp_opt->addr_id, mp_opt->ahmac, mp_opt->echo, mp_opt->port);
break;
case MPTCPOPT_RM_ADDR:
@@ -280,6 +282,16 @@ static void mptcp_parse_option(const struct sk_buff *skb,
pr_debug("RM_ADDR: id=%d", mp_opt->rm_id);
break;
+ case MPTCPOPT_MP_FASTCLOSE:
+ if (opsize != TCPOLEN_MPTCP_FASTCLOSE)
+ break;
+
+ ptr += 2;
+ mp_opt->rcvr_key = get_unaligned_be64(ptr);
+ ptr += 8;
+ mp_opt->fastclose = 1;
+ break;
+
default:
break;
}
@@ -297,6 +309,7 @@ void mptcp_get_options(const struct sk_buff *skb,
mp_opt->mp_join = 0;
mp_opt->add_addr = 0;
mp_opt->ahmac = 0;
+ mp_opt->fastclose = 0;
mp_opt->port = 0;
mp_opt->rm_addr = 0;
mp_opt->dss = 0;
@@ -492,7 +505,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
bool ret = false;
mpext = skb ? mptcp_get_ext(skb) : NULL;
- snd_data_fin_enable = READ_ONCE(msk->snd_data_fin_enable);
+ snd_data_fin_enable = mptcp_data_fin_enabled(msk);
if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) {
unsigned int map_size;
@@ -528,6 +541,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
opts->ext_copy.ack64 = 0;
}
opts->ext_copy.use_ack = 1;
+ WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk));
/* Add kind/length/subtype/flag overhead if mapping is not populated */
if (dss_size == 0)
@@ -573,27 +587,43 @@ static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
}
#endif
-static bool mptcp_established_options_add_addr(struct sock *sk,
+static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *skb,
unsigned int *size,
unsigned int remaining,
struct mptcp_out_options *opts)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ bool drop_other_suboptions = false;
+ unsigned int opt_size = *size;
struct mptcp_addr_info saddr;
bool echo;
+ bool port;
int len;
+ if ((mptcp_pm_should_add_signal_ipv6(msk) ||
+ mptcp_pm_should_add_signal_port(msk)) &&
+ skb && skb_is_tcp_pure_ack(skb)) {
+ pr_debug("drop other suboptions");
+ opts->suboptions = 0;
+ remaining += opt_size;
+ drop_other_suboptions = true;
+ }
+
if (!mptcp_pm_should_add_signal(msk) ||
- !(mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo)))
+ !(mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo, &port)))
return false;
- len = mptcp_add_addr_len(saddr.family, echo);
+ len = mptcp_add_addr_len(saddr.family, echo, port);
if (remaining < len)
return false;
*size = len;
+ if (drop_other_suboptions)
+ *size -= opt_size;
opts->addr_id = saddr.id;
+ if (port)
+ opts->port = ntohs(saddr.port);
if (saddr.family == AF_INET) {
opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
opts->addr = saddr.addr;
@@ -616,7 +646,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk,
}
}
#endif
- pr_debug("addr_id=%d, ahmac=%llu, echo=%d", opts->addr_id, opts->ahmac, echo);
+ pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
+ opts->addr_id, opts->ahmac, echo, opts->port);
return true;
}
@@ -678,7 +709,7 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
*size += opt_size;
remaining -= opt_size;
- if (mptcp_established_options_add_addr(sk, &opt_size, remaining, opts)) {
+ if (mptcp_established_options_add_addr(sk, skb, &opt_size, remaining, opts)) {
*size += opt_size;
remaining -= opt_size;
ret = true;
@@ -759,6 +790,11 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
goto fully_established;
}
+ if (mp_opt->add_addr) {
+ WRITE_ONCE(msk->fully_established, true);
+ return true;
+ }
+
/* If the first established packet does not contain MP_CAPABLE + data
* then fallback to TCP. Fallback scenarios requires a reset for
* MP_JOIN subflows.
@@ -777,7 +813,12 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
mptcp_subflow_fully_established(subflow, mp_opt);
fully_established:
- if (likely(subflow->pm_notified))
+ /* if the subflow is not already linked into the conn_list, we can't
+ * notify the PM: this subflow is still on the listener queue
+ * and the PM possibly acquiring the subflow lock could race with
+ * the listener close
+ */
+ if (likely(subflow->pm_notified) || list_empty(&subflow->node))
return true;
subflow->pm_notified = 1;
@@ -809,31 +850,39 @@ static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
return cur_ack;
}
-static void update_una(struct mptcp_sock *msk,
- struct mptcp_options_received *mp_opt)
+static void ack_update_msk(struct mptcp_sock *msk,
+ struct sock *ssk,
+ struct mptcp_options_received *mp_opt)
{
- u64 new_snd_una, snd_una, old_snd_una = atomic64_read(&msk->snd_una);
- u64 write_seq = READ_ONCE(msk->write_seq);
+ u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt);
+ struct sock *sk = (struct sock *)msk;
+ u64 old_snd_una;
+
+ mptcp_data_lock(sk);
/* avoid ack expansion on update conflict, to reduce the risk of
* wrongly expanding to a future ack sequence number, which is way
* more dangerous than missing an ack
*/
+ old_snd_una = msk->snd_una;
new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
/* ACK for data not even sent yet? Ignore. */
- if (after64(new_snd_una, write_seq))
+ if (after64(new_snd_una, snd_nxt))
new_snd_una = old_snd_una;
- while (after64(new_snd_una, old_snd_una)) {
- snd_una = old_snd_una;
- old_snd_una = atomic64_cmpxchg(&msk->snd_una, snd_una,
- new_snd_una);
- if (old_snd_una == snd_una) {
- mptcp_data_acked((struct sock *)msk);
- break;
- }
+ new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;
+
+ if (after64(new_wnd_end, msk->wnd_end)) {
+ msk->wnd_end = new_wnd_end;
+ __mptcp_wnd_updated(sk, ssk);
+ }
+
+ if (after64(new_snd_una, old_snd_una)) {
+ msk->snd_una = new_snd_una;
+ __mptcp_data_acked(sk);
}
+ mptcp_data_unlock(sk);
}
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit)
@@ -886,13 +935,30 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
struct mptcp_options_received mp_opt;
struct mptcp_ext *mpext;
- if (__mptcp_check_fallback(msk))
+ if (__mptcp_check_fallback(msk)) {
+ /* Keep it simple and unconditionally trigger send data cleanup and
+ * pending queue spooling. We will need to acquire the data lock
+ * for more accurate checks, and once the lock is acquired, such
+ * helpers are cheap.
+ */
+ mptcp_data_lock(subflow->conn);
+ if (mptcp_send_head(subflow->conn))
+ __mptcp_wnd_updated(subflow->conn, sk);
+ __mptcp_data_acked(subflow->conn);
+ mptcp_data_unlock(subflow->conn);
return;
+ }
mptcp_get_options(skb, &mp_opt);
if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
return;
+ if (mp_opt.fastclose &&
+ msk->local_key == mp_opt.rcvr_key) {
+ WRITE_ONCE(msk->rcv_fastclose, true);
+ mptcp_schedule_work((struct sock *)msk);
+ }
+
if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) {
struct mptcp_addr_info addr;
@@ -930,7 +996,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
* monodirectional flows will get stuck
*/
if (mp_opt.use_ack)
- update_una(msk, &mp_opt);
+ ack_update_msk(msk, sk, &mp_opt);
/* Zero-data-length packets are dropped by the caller and not
* propagated to the MPTCP layer, so the skb extension does not
@@ -975,7 +1041,24 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
}
}
-void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts)
+static void mptcp_set_rwin(const struct tcp_sock *tp)
+{
+ const struct sock *ssk = (const struct sock *)tp;
+ const struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk;
+ u64 ack_seq;
+
+ subflow = mptcp_subflow_ctx(ssk);
+ msk = mptcp_sk(subflow->conn);
+
+ ack_seq = READ_ONCE(msk->ack_seq) + tp->rcv_wnd;
+
+ if (after64(ack_seq, READ_ONCE(msk->rcv_wnd_sent)))
+ WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
+}
+
+void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
+ struct mptcp_out_options *opts)
{
if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK |
OPTION_MPTCP_MPC_ACK) & opts->suboptions) {
@@ -1014,44 +1097,66 @@ void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts)
}
mp_capable_done:
- if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) {
- if (opts->ahmac)
- *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
- TCPOLEN_MPTCP_ADD_ADDR, 0,
- opts->addr_id);
- else
- *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
- TCPOLEN_MPTCP_ADD_ADDR_BASE,
- MPTCP_ADDR_ECHO,
- opts->addr_id);
- memcpy((u8 *)ptr, (u8 *)&opts->addr.s_addr, 4);
- ptr += 1;
+ if ((OPTION_MPTCP_ADD_ADDR
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ | OPTION_MPTCP_ADD_ADDR6
+#endif
+ ) & opts->suboptions) {
+ u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE;
+ u8 echo = MPTCP_ADDR_ECHO;
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ if (OPTION_MPTCP_ADD_ADDR6 & opts->suboptions)
+ len = TCPOLEN_MPTCP_ADD_ADDR6_BASE;
+#endif
+
+ if (opts->port)
+ len += TCPOLEN_MPTCP_PORT_LEN;
+
if (opts->ahmac) {
- put_unaligned_be64(opts->ahmac, ptr);
- ptr += 2;
+ len += sizeof(opts->ahmac);
+ echo = 0;
}
- }
+ *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
+ len, echo, opts->addr_id);
+ if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) {
+ memcpy((u8 *)ptr, (u8 *)&opts->addr.s_addr, 4);
+ ptr += 1;
+ }
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
- if (OPTION_MPTCP_ADD_ADDR6 & opts->suboptions) {
- if (opts->ahmac)
- *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
- TCPOLEN_MPTCP_ADD_ADDR6, 0,
- opts->addr_id);
- else
- *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
- TCPOLEN_MPTCP_ADD_ADDR6_BASE,
- MPTCP_ADDR_ECHO,
- opts->addr_id);
- memcpy((u8 *)ptr, opts->addr6.s6_addr, 16);
- ptr += 4;
- if (opts->ahmac) {
- put_unaligned_be64(opts->ahmac, ptr);
- ptr += 2;
+ else if (OPTION_MPTCP_ADD_ADDR6 & opts->suboptions) {
+ memcpy((u8 *)ptr, opts->addr6.s6_addr, 16);
+ ptr += 4;
}
- }
#endif
+ if (!opts->port) {
+ if (opts->ahmac) {
+ put_unaligned_be64(opts->ahmac, ptr);
+ ptr += 2;
+ }
+ } else {
+ if (opts->ahmac) {
+ u8 *bptr = (u8 *)ptr;
+
+ put_unaligned_be16(opts->port, bptr);
+ bptr += 2;
+ put_unaligned_be64(opts->ahmac, bptr);
+ bptr += 8;
+ put_unaligned_be16(TCPOPT_NOP << 8 |
+ TCPOPT_NOP, bptr);
+
+ ptr += 3;
+ } else {
+ put_unaligned_be32(opts->port << 16 |
+ TCPOPT_NOP << 8 |
+ TCPOPT_NOP, ptr);
+ ptr += 1;
+ }
+ }
+ }
+
if (OPTION_MPTCP_RM_ADDR & opts->suboptions) {
*ptr++ = mptcp_option(MPTCPOPT_RM_ADDR,
TCPOLEN_MPTCP_RM_ADDR_BASE,
@@ -1132,4 +1237,7 @@ mp_capable_done:
TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
}
}
+
+ if (tp)
+ mptcp_set_rwin(tp);
}
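
The rewritten emission path handles both address families plus the optional port and truncated-HMAC suffixes in one place. Reading the code above, the resulting IPv4 ADD_ADDR layouts are (lengths in bytes; "base" is kind, length, subtype/echo, address ID, then the 4-byte address):

    base(8)                           -> option len  8, echo set
    base(8) hmac(8)                   -> option len 16
    base(8) port(2) + 2 NOPs          -> option len 10, echo set
    base(8) port(2) hmac(8) + 2 NOPs  -> option len 18

Because TCP options are written in 32-bit words, the odd-length port variants are followed by two one-byte NOP options as padding; the NOPs are not counted in the ADD_ADDR length field itself.
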
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index e19e1525ecbb..da2ed576f289 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -14,22 +14,43 @@
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
- bool echo)
+ bool echo, bool port)
{
+ u8 add_addr = READ_ONCE(msk->pm.addr_signal);
+
pr_debug("msk=%p, local_id=%d", msk, addr->id);
+ if (add_addr) {
+ pr_warn("addr_signal error, add_addr=%d", add_addr);
+ return -EINVAL;
+ }
+
msk->pm.local = *addr;
- WRITE_ONCE(msk->pm.add_addr_echo, echo);
- WRITE_ONCE(msk->pm.add_addr_signal, true);
+ add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
+ if (echo)
+ add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
+ if (addr->family == AF_INET6)
+ add_addr |= BIT(MPTCP_ADD_ADDR_IPV6);
+ if (port)
+ add_addr |= BIT(MPTCP_ADD_ADDR_PORT);
+ WRITE_ONCE(msk->pm.addr_signal, add_addr);
return 0;
}
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
+ u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
+
pr_debug("msk=%p, local_id=%d", msk, local_id);
+ if (rm_addr) {
+ pr_warn("addr_signal error, rm_addr=%d", rm_addr);
+ return -EINVAL;
+ }
+
msk->pm.rm_id = local_id;
- WRITE_ONCE(msk->pm.rm_addr_signal, true);
+ rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
+ WRITE_ONCE(msk->pm.addr_signal, rm_addr);
return 0;
}
@@ -89,8 +110,7 @@ static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
return false;
msk->pm.status |= BIT(new_status);
- if (schedule_work(&msk->work))
- sock_hold((struct sock *)msk);
+ mptcp_schedule_work((struct sock *)msk);
return true;
}
@@ -106,8 +126,14 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk)
spin_lock_bh(&pm->lock);
- if (READ_ONCE(pm->work_pending))
+ /* mptcp_pm_fully_established() can be invoked by multiple
+ * racing paths - accept() and check_fully_established() -
+ * so be sure to serve this event only once.
+ */
+ if (READ_ONCE(pm->work_pending) &&
+ !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
+ msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
spin_unlock_bh(&pm->lock);
}
@@ -150,14 +176,25 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
spin_lock_bh(&pm->lock);
- if (!READ_ONCE(pm->accept_addr))
- mptcp_pm_announce_addr(msk, addr, true);
- else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
+ if (!READ_ONCE(pm->accept_addr)) {
+ mptcp_pm_announce_addr(msk, addr, true, addr->port);
+ mptcp_pm_add_addr_send_ack(msk);
+ } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
pm->remote = *addr;
+ }
spin_unlock_bh(&pm->lock);
}
+void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
+{
+ if (!mptcp_pm_should_add_signal_ipv6(msk) &&
+ !mptcp_pm_should_add_signal_port(msk))
+ return;
+
+ mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
+}
+
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
{
struct mptcp_pm_data *pm = &msk->pm;
@@ -173,7 +210,7 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
/* path manager helpers */
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
- struct mptcp_addr_info *saddr, bool *echo)
+ struct mptcp_addr_info *saddr, bool *echo, bool *port)
{
int ret = false;
@@ -183,13 +220,14 @@ bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
if (!mptcp_pm_should_add_signal(msk))
goto out_unlock;
- *echo = READ_ONCE(msk->pm.add_addr_echo);
+ *echo = mptcp_pm_should_add_signal_echo(msk);
+ *port = mptcp_pm_should_add_signal_port(msk);
- if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo))
+ if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port))
goto out_unlock;
*saddr = msk->pm.local;
- WRITE_ONCE(msk->pm.add_addr_signal, false);
+ WRITE_ONCE(msk->pm.addr_signal, 0);
ret = true;
out_unlock:
@@ -212,7 +250,7 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
goto out_unlock;
*rm_id = msk->pm.rm_id;
- WRITE_ONCE(msk->pm.rm_addr_signal, false);
+ WRITE_ONCE(msk->pm.addr_signal, 0);
ret = true;
out_unlock:
@@ -233,11 +271,9 @@ void mptcp_pm_data_init(struct mptcp_sock *msk)
msk->pm.subflows = 0;
msk->pm.rm_id = 0;
WRITE_ONCE(msk->pm.work_pending, false);
- WRITE_ONCE(msk->pm.add_addr_signal, false);
- WRITE_ONCE(msk->pm.rm_addr_signal, false);
+ WRITE_ONCE(msk->pm.addr_signal, 0);
WRITE_ONCE(msk->pm.accept_addr, false);
WRITE_ONCE(msk->pm.accept_subflow, false);
- WRITE_ONCE(msk->pm.add_addr_echo, false);
msk->pm.status = 0;
spin_lock_init(&msk->pm.lock);
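
The separate add_addr_signal/rm_addr_signal/add_addr_echo booleans collapse into a single addr_signal bitmask, so the signal kind and its echo/IPv6/port attributes travel together and can be tested and cleared as one READ_ONCE/WRITE_ONCE word. A sketch of the accessor style this enables; pm_state is an illustrative stand-in for the real msk->pm, and the bit ordering here is assumed (the bit names themselves appear in the hunks above):

    #include <linux/bits.h>
    #include <linux/compiler.h>
    #include <linux/types.h>

    struct pm_state { u8 addr_signal; };

    enum {	/* bit positions; exact order as declared in protocol.h */
    	MPTCP_ADD_ADDR_SIGNAL,
    	MPTCP_ADD_ADDR_ECHO,
    	MPTCP_ADD_ADDR_IPV6,
    	MPTCP_ADD_ADDR_PORT,
    	MPTCP_RM_ADDR_SIGNAL,
    };

    static bool should_signal_port(struct pm_state *pm)
    {
    	/* one racy-read of the whole pending-signal state */
    	return READ_ONCE(pm->addr_signal) & BIT(MPTCP_ADD_ADDR_PORT);
    }
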
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 0d6f3d912891..a6d983d80576 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -135,7 +135,7 @@ select_local_address(const struct pm_nl_pernet *pernet,
struct mptcp_pm_addr_entry *entry, *ret = NULL;
rcu_read_lock();
- spin_lock_bh(&msk->join_list_lock);
+ __mptcp_flush_join_list(msk);
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
if (!(entry->addr.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
continue;
@@ -144,13 +144,11 @@ select_local_address(const struct pm_nl_pernet *pernet,
* pending join
*/
if (entry->addr.family == ((struct sock *)msk)->sk_family &&
- !lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
- !lookup_subflow_by_saddr(&msk->join_list, &entry->addr)) {
+ !lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) {
ret = entry;
break;
}
}
- spin_unlock_bh(&msk->join_list_lock);
rcu_read_unlock();
return ret;
}
@@ -227,12 +225,14 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
if (!mptcp_pm_should_add_signal(msk)) {
pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
- mptcp_pm_announce_addr(msk, &entry->addr, false);
+ mptcp_pm_announce_addr(msk, &entry->addr, false, entry->addr.port);
+ mptcp_pm_add_addr_send_ack(msk);
entry->retrans_times++;
}
if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
- sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX);
+ sk_reset_timer(sk, timer,
+ jiffies + mptcp_get_add_addr_timeout(sock_net(sk)));
spin_unlock_bh(&msk->pm.lock);
@@ -264,6 +264,7 @@ static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
{
struct mptcp_pm_add_entry *add_entry = NULL;
struct sock *sk = (struct sock *)msk;
+ struct net *net = sock_net(sk);
if (lookup_anno_list_by_saddr(msk, &entry->addr))
return false;
@@ -279,7 +280,8 @@ static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
add_entry->retrans_times = 0;
timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
- sk_reset_timer(sk, &add_entry->add_timer, jiffies + TCP_RTO_MAX);
+ sk_reset_timer(sk, &add_entry->add_timer,
+ jiffies + mptcp_get_add_addr_timeout(net));
return true;
}
@@ -309,7 +311,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
struct mptcp_pm_addr_entry *local;
struct pm_nl_pernet *pernet;
- pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
+ pernet = net_generic(sock_net(sk), pm_nl_pernet_id);
pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
msk->pm.local_addr_used, msk->pm.local_addr_max,
@@ -324,7 +326,8 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
if (local) {
if (mptcp_pm_alloc_anno_list(msk, local)) {
msk->pm.add_addr_signaled++;
- mptcp_pm_announce_addr(msk, &local->addr, false);
+ mptcp_pm_announce_addr(msk, &local->addr, false, local->addr.port);
+ mptcp_pm_nl_add_addr_send_ack(msk);
}
} else {
/* pick failed, avoid further attempts later */
@@ -371,6 +374,7 @@ void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
struct sock *sk = (struct sock *)msk;
struct mptcp_addr_info remote;
struct mptcp_addr_info local;
+ bool use_port = false;
pr_debug("accepted %d:%d remote family %d",
msk->pm.add_addr_accepted, msk->pm.add_addr_accept_max,
@@ -387,14 +391,51 @@ void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
remote = msk->pm.remote;
if (!remote.port)
remote.port = sk->sk_dport;
+ else
+ use_port = true;
memset(&local, 0, sizeof(local));
local.family = remote.family;
spin_unlock_bh(&msk->pm.lock);
- __mptcp_subflow_connect((struct sock *)msk, &local, &remote);
+ __mptcp_subflow_connect(sk, &local, &remote);
spin_lock_bh(&msk->pm.lock);
- mptcp_pm_announce_addr(msk, &remote, true);
+ mptcp_pm_announce_addr(msk, &remote, true, use_port);
+ mptcp_pm_nl_add_addr_send_ack(msk);
+}
+
+void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+
+ if (!mptcp_pm_should_add_signal_ipv6(msk) &&
+ !mptcp_pm_should_add_signal_port(msk))
+ return;
+
+ __mptcp_flush_join_list(msk);
+ subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
+ if (subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ u8 add_addr;
+
+ spin_unlock_bh(&msk->pm.lock);
+ if (mptcp_pm_should_add_signal_ipv6(msk))
+ pr_debug("send ack for add_addr6");
+ if (mptcp_pm_should_add_signal_port(msk))
+ pr_debug("send ack for add_addr_port");
+
+ lock_sock(ssk);
+ tcp_send_ack(ssk);
+ release_sock(ssk);
+ spin_lock_bh(&msk->pm.lock);
+
+ add_addr = READ_ONCE(msk->pm.addr_signal);
+ if (mptcp_pm_should_add_signal_ipv6(msk))
+ add_addr &= ~BIT(MPTCP_ADD_ADDR_IPV6);
+ if (mptcp_pm_should_add_signal_port(msk))
+ add_addr &= ~BIT(MPTCP_ADD_ADDR_PORT);
+ WRITE_ONCE(msk->pm.addr_signal, add_addr);
+ }
}
void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
@@ -413,14 +454,13 @@ void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
- long timeout = 0;
if (msk->pm.rm_id != subflow->remote_id)
continue;
spin_unlock_bh(&msk->pm.lock);
mptcp_subflow_shutdown(sk, ssk, how);
- __mptcp_close_ssk(sk, ssk, subflow, timeout);
+ __mptcp_close_ssk(sk, ssk, subflow);
spin_lock_bh(&msk->pm.lock);
msk->pm.add_addr_accepted--;
@@ -449,14 +489,13 @@ void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id)
list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
- long timeout = 0;
if (rm_id != subflow->local_id)
continue;
spin_unlock_bh(&msk->pm.lock);
mptcp_subflow_shutdown(sk, ssk, how);
- __mptcp_close_ssk(sk, ssk, subflow, timeout);
+ __mptcp_close_ssk(sk, ssk, subflow);
spin_lock_bh(&msk->pm.lock);
msk->pm.local_addr_used--;
@@ -826,13 +865,14 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
return ret;
}
-static void __flush_addrs(struct pm_nl_pernet *pernet)
+static void __flush_addrs(struct net *net, struct list_head *list)
{
- while (!list_empty(&pernet->local_addr_list)) {
+ while (!list_empty(list)) {
struct mptcp_pm_addr_entry *cur;
- cur = list_entry(pernet->local_addr_list.next,
+ cur = list_entry(list->next,
struct mptcp_pm_addr_entry, list);
+ mptcp_nl_remove_subflow_and_signal_addr(net, &cur->addr);
list_del_rcu(&cur->list);
kfree_rcu(cur, rcu);
}
@@ -849,11 +889,13 @@ static void __reset_counters(struct pm_nl_pernet *pernet)
static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
{
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
+ LIST_HEAD(free_list);
spin_lock_bh(&pernet->lock);
- __flush_addrs(pernet);
+ list_splice_init(&pernet->local_addr_list, &free_list);
__reset_counters(pernet);
spin_unlock_bh(&pernet->lock);
+ __flush_addrs(sock_net(skb->sk), &free_list);
return 0;
}
@@ -1115,10 +1157,12 @@ static void __net_exit pm_nl_exit_net(struct list_head *net_list)
struct net *net;
list_for_each_entry(net, net_list, exit_list) {
+ struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
+
/* net is removed from namespace list, can't race with
* other modifiers
*/
- __flush_addrs(net_generic(net, pm_nl_pernet_id));
+ __flush_addrs(net, &pernet->local_addr_list);
}
}
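
Note the reshaped flush path: entries are now spliced off the pernet list under the spinlock, and the actual teardown (which may sleep inside mptcp_nl_remove_subflow_and_signal_addr()) runs afterwards with the lock dropped. A userspace sketch of the same splice-then-free pattern, with invented types:

#include <pthread.h>
#include <stdlib.h>

struct entry { struct entry *next; };	/* stand-in for mptcp_pm_addr_entry */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *addr_list;

static void flush_addrs(void)
{
	struct entry *free_list, *cur;

	pthread_mutex_lock(&lock);
	free_list = addr_list;		/* splice out under the lock */
	addr_list = NULL;
	pthread_mutex_unlock(&lock);

	while ((cur = free_list)) {	/* teardown may sleep; lock is free */
		free_list = cur->next;
		free(cur);
	}
}

int main(void)
{
	addr_list = calloc(1, sizeof(*addr_list));	/* one dummy entry */
	flush_addrs();
	return 0;
}
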
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 88f2a7a0ccb8..b812aaae8044 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -21,6 +21,7 @@
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
+#include <net/xfrm.h>
#include "protocol.h"
#include "mib.h"
@@ -41,6 +42,9 @@ struct mptcp_skb_cb {
static struct percpu_counter mptcp_sockets_allocated;
+static void __mptcp_destroy_sock(struct sock *sk);
+static void __mptcp_check_send_data_fin(struct sock *sk);
+
/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
* completed yet or has failed, return the subflow socket.
* Otherwise return NULL.
@@ -53,6 +57,12 @@ static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
return msk->subflow;
}
+/* Returns end sequence number of the receiver's advertised window */
+static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->wnd_end);
+}
+
static bool mptcp_is_tcpsk(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
@@ -102,6 +112,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
msk->subflow = ssock;
subflow = mptcp_subflow_ctx(ssock->sk);
list_add(&subflow->node, &msk->conn_list);
+ sock_hold(ssock->sk);
subflow->request_mptcp = 1;
/* accept() will wait on first subflow sk_wq, and we always wake up
@@ -157,18 +168,19 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
struct rb_node **p, *parent;
u64 seq, end_seq, max_seq;
struct sk_buff *skb1;
- int space;
seq = MPTCP_SKB_CB(skb)->map_seq;
end_seq = MPTCP_SKB_CB(skb)->end_seq;
- space = tcp_space(sk);
- max_seq = space > 0 ? space + msk->ack_seq : msk->ack_seq;
+ max_seq = READ_ONCE(msk->rcv_wnd_sent);
pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
RB_EMPTY_ROOT(&msk->out_of_order_queue));
- if (after64(seq, max_seq)) {
+ if (after64(end_seq, max_seq)) {
/* out of window */
mptcp_drop(sk, skb);
+ pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
+ (unsigned long long)end_seq - (unsigned long long)max_seq,
+ (unsigned long long)msk->rcv_wnd_sent);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
return;
}
@@ -323,17 +335,35 @@ static void mptcp_stop_timer(struct sock *sk)
mptcp_sk(sk)->timer_ival = 0;
}
-static void mptcp_check_data_fin_ack(struct sock *sk)
+static void mptcp_close_wake_up(struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_DEAD))
+ return;
+
+ sk->sk_state_change(sk);
+ if (sk->sk_shutdown == SHUTDOWN_MASK ||
+ sk->sk_state == TCP_CLOSE)
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+ else
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+}
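
mptcp_close_wake_up() centralizes the wakeup previously duplicated in the data-fin paths: POLL_HUP once both directions are shut down or the socket reached TCP_CLOSE, POLL_IN otherwise. A tiny model of that choice, assuming only the three inputs shown matter:

#include <stdbool.h>

enum wake_kind { WAKE_IN, WAKE_HUP };

static enum wake_kind wake_kind(bool rcv_shut, bool snd_shut, bool closed)
{
	/* SHUTDOWN_MASK means both halves are shut down */
	return (rcv_shut && snd_shut) || closed ? WAKE_HUP : WAKE_IN;
}

int main(void)
{
	return wake_kind(true, true, false) == WAKE_HUP ? 0 : 1;
}
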
+
+static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- if (__mptcp_check_fallback(msk))
- return;
+ return !__mptcp_check_fallback(msk) &&
+ ((1 << sk->sk_state) &
+ (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
+ msk->write_seq == READ_ONCE(msk->snd_una);
+}
+
+static void mptcp_check_data_fin_ack(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
/* Look for an acknowledged DATA_FIN */
- if (((1 << sk->sk_state) &
- (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
- msk->write_seq == atomic64_read(&msk->snd_una)) {
+ if (mptcp_pending_data_fin_ack(sk)) {
mptcp_stop_timer(sk);
WRITE_ONCE(msk->snd_data_fin_enable, 0);
@@ -341,20 +371,14 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
switch (sk->sk_state) {
case TCP_FIN_WAIT1:
inet_sk_state_store(sk, TCP_FIN_WAIT2);
- sk->sk_state_change(sk);
break;
case TCP_CLOSING:
case TCP_LAST_ACK:
inet_sk_state_store(sk, TCP_CLOSE);
- sk->sk_state_change(sk);
break;
}
- if (sk->sk_shutdown == SHUTDOWN_MASK ||
- sk->sk_state == TCP_CLOSE)
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
- else
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ mptcp_close_wake_up(sk);
}
}
@@ -388,13 +412,79 @@ static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}
-static void mptcp_check_data_fin(struct sock *sk)
+static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
+{
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ /* can't send if the JOIN hasn't completed yet (i.e. the subflow is not yet usable for MPTCP) */
+ if (subflow->request_join && !subflow->fully_established)
+ return false;
+
+ /* only send if our side has not closed yet */
+ return ((1 << ssk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
+}
+
+static bool tcp_can_send_ack(const struct sock *ssk)
+{
+ return !((1 << inet_sk_state_load(ssk)) &
+ (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE));
+}
+
+static void mptcp_send_ack(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ lock_sock(ssk);
+ if (tcp_can_send_ack(ssk))
+ tcp_send_ack(ssk);
+ release_sock(ssk);
+ }
+}
+
+static bool mptcp_subflow_cleanup_rbuf(struct sock *ssk)
+{
+ int ret;
+
+ lock_sock(ssk);
+ ret = tcp_can_send_ack(ssk);
+ if (ret)
+ tcp_cleanup_rbuf(ssk, 1);
+ release_sock(ssk);
+ return ret;
+}
+
+static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
+{
+ struct sock *ack_hint = READ_ONCE(msk->ack_hint);
+ struct mptcp_subflow_context *subflow;
+
+ /* if the hinted ssk is still active, try to use it */
+ if (likely(ack_hint)) {
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ if (ack_hint == ssk && mptcp_subflow_cleanup_rbuf(ssk))
+ return;
+ }
+ }
+
+ /* otherwise pick the first active subflow */
+ mptcp_for_each_subflow(msk, subflow)
+ if (mptcp_subflow_cleanup_rbuf(mptcp_subflow_tcp_sock(subflow)))
+ return;
+}
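
mptcp_cleanup_rbuf() prefers the subflow cached in msk->ack_hint, the last one that delivered data, for sending a window update, and only then scans for the first subflow able to ack (the kernel additionally verifies the hint is still on the subflow list). A small userspace sketch of that hint-then-scan selection; the can_ack field is a stand-in for tcp_can_send_ack():

#include <stdbool.h>
#include <stddef.h>

struct flow { bool can_ack; };

static struct flow *pick_ack_flow(struct flow *flows, size_t n,
				  struct flow *hint)
{
	size_t i;

	/* if the hinted flow can still ack, use it */
	if (hint && hint->can_ack)
		return hint;
	/* otherwise fall back to the first usable flow */
	for (i = 0; i < n; i++)
		if (flows[i].can_ack)
			return &flows[i];
	return NULL;
}

int main(void)
{
	struct flow flows[2] = { { false }, { true } };

	return pick_ack_flow(flows, 2, NULL) == &flows[1] ? 0 : 1;
}
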
+
+static bool mptcp_check_data_fin(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
u64 rcv_data_fin_seq;
+ bool ret = false;
if (__mptcp_check_fallback(msk) || !msk->first)
- return;
+ return ret;
/* Need to ack a DATA_FIN received from a peer while this side
* of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
@@ -410,8 +500,6 @@ static void mptcp_check_data_fin(struct sock *sk)
*/
if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
- struct mptcp_subflow_context *subflow;
-
WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
WRITE_ONCE(msk->rcv_data_fin, 0);
@@ -428,7 +516,6 @@ static void mptcp_check_data_fin(struct sock *sk)
break;
case TCP_FIN_WAIT2:
inet_sk_state_store(sk, TCP_CLOSE);
- // @@ Close subflows now?
break;
default:
/* Other states not expected */
@@ -436,23 +523,12 @@ static void mptcp_check_data_fin(struct sock *sk)
break;
}
+ ret = true;
mptcp_set_timeout(sk, NULL);
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
- lock_sock(ssk);
- tcp_send_ack(ssk);
- release_sock(ssk);
- }
-
- sk->sk_state_change(sk);
-
- if (sk->sk_shutdown == SHUTDOWN_MASK ||
- sk->sk_state == TCP_CLOSE)
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
- else
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ mptcp_send_ack(msk);
+ mptcp_close_wake_up(sk);
}
+ return ret;
}
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
@@ -464,12 +540,22 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
unsigned int moved = 0;
bool more_data_avail;
struct tcp_sock *tp;
- u32 old_copied_seq;
bool done = false;
+ int sk_rbuf;
+
+ sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
+
+ if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+ int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
+
+ if (unlikely(ssk_rbuf > sk_rbuf)) {
+ WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf);
+ sk_rbuf = ssk_rbuf;
+ }
+ }
pr_debug("msk=%p ssk=%p", msk, ssk);
tp = tcp_sk(ssk);
- old_copied_seq = tp->copied_seq;
do {
u32 map_remaining, offset;
u32 seq = tp->copied_seq;
@@ -528,20 +614,18 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
WRITE_ONCE(tp->copied_seq, seq);
more_data_avail = mptcp_subflow_data_available(ssk);
- if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
+ if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) {
done = true;
break;
}
} while (more_data_avail);
+ WRITE_ONCE(msk->ack_hint, ssk);
*bytes += moved;
- if (tp->copied_seq != old_copied_seq)
- tcp_cleanup_rbuf(ssk, 1);
-
return done;
}
-static bool mptcp_ofo_queue(struct mptcp_sock *msk)
+static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
struct sk_buff *skb, *tail;
@@ -587,43 +671,43 @@ static bool mptcp_ofo_queue(struct mptcp_sock *msk)
/* In most cases we will be able to lock the mptcp socket. If it's already
* owned, we need to defer to the work queue to avoid ABBA deadlock.
*/
-static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
struct sock *sk = (struct sock *)msk;
unsigned int moved = 0;
- if (READ_ONCE(sk->sk_lock.owned))
- return false;
-
- if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
- return false;
-
- /* must re-check after taking the lock */
- if (!READ_ONCE(sk->sk_lock.owned)) {
- __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
- mptcp_ofo_queue(msk);
+ if (inet_sk_state_load(sk) == TCP_CLOSE)
+ return;
- /* If the moves have caught up with the DATA_FIN sequence number
- * it's time to ack the DATA_FIN and change socket state, but
- * this is not a good place to change state. Let the workqueue
- * do it.
- */
- if (mptcp_pending_data_fin(sk, NULL) &&
- schedule_work(&msk->work))
- sock_hold(sk);
- }
+ mptcp_data_lock(sk);
- spin_unlock_bh(&sk->sk_lock.slock);
+ __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
+ __mptcp_ofo_queue(msk);
- return moved > 0;
+ /* If the moves have caught up with the DATA_FIN sequence number
+ * it's time to ack the DATA_FIN and change socket state, but
+ * this is not a good place to change state. Let the workqueue
+ * do it.
+ */
+ if (mptcp_pending_data_fin(sk, NULL))
+ mptcp_schedule_work(sk);
+ mptcp_data_unlock(sk);
}
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct mptcp_sock *msk = mptcp_sk(sk);
+ int sk_rbuf, ssk_rbuf;
bool wake;
+ /* The peer can send data while we are shutting down this
+ * subflow at msk destruction time, but we must avoid enqueuing
+ * more data to the msk receive queue
+ */
+ if (unlikely(subflow->disposable))
+ return;
+
/* move_skbs_to_msk below can legitimately clear the data_avail flag,
* but we will later need to properly wake the reader; cache its
* value
@@ -632,30 +716,23 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
if (wake)
set_bit(MPTCP_DATA_READY, &msk->flags);
- if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
- move_skbs_to_msk(msk, ssk))
- goto wake;
+ ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
+ sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
+ if (unlikely(ssk_rbuf > sk_rbuf))
+ sk_rbuf = ssk_rbuf;
- /* don't schedule if mptcp sk is (still) over limit */
- if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
+ /* over limit? can't append more skbs to msk */
+ if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
goto wake;
- /* mptcp socket is owned, release_cb should retry */
- if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
- &sk->sk_tsq_flags)) {
- sock_hold(sk);
+ move_skbs_to_msk(msk, ssk);
- /* need to try again, its possible release_cb() has already
- * been called after the test_and_set_bit() above.
- */
- move_skbs_to_msk(msk, ssk);
- }
wake:
if (wake)
sk->sk_data_ready(sk);
}
-static void __mptcp_flush_join_list(struct mptcp_sock *msk)
+void __mptcp_flush_join_list(struct mptcp_sock *msk)
{
if (likely(list_empty(&msk->join_list)))
return;
@@ -675,6 +752,10 @@ static void mptcp_reset_timer(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
unsigned long tout;
+ /* prevent rescheduling on close */
+ if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
+ return;
+
/* should never be called with mptcp level timer cleared */
tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
if (WARN_ON_ONCE(!tout))
@@ -682,23 +763,23 @@ static void mptcp_reset_timer(struct sock *sk)
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
}
-void mptcp_data_acked(struct sock *sk)
+bool mptcp_schedule_work(struct sock *sk)
{
- mptcp_reset_timer(sk);
-
- if ((!test_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags) ||
- (inet_sk_state_load(sk) != TCP_ESTABLISHED)) &&
- schedule_work(&mptcp_sk(sk)->work))
+ if (inet_sk_state_load(sk) != TCP_CLOSE &&
+ schedule_work(&mptcp_sk(sk)->work)) {
+ /* each subflow already holds a reference to the sk, and the
+ * workqueue is invoked by a subflow, so sk can't go away here.
+ */
sock_hold(sk);
+ return true;
+ }
+ return false;
}
void mptcp_subflow_eof(struct sock *sk)
{
- struct mptcp_sock *msk = mptcp_sk(sk);
-
- if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
- schedule_work(&msk->work))
- sock_hold(sk);
+ if (!test_and_set_bit(MPTCP_WORK_EOF, &mptcp_sk(sk)->flags))
+ mptcp_schedule_work(sk);
}
static void mptcp_check_for_eof(struct mptcp_sock *msk)
@@ -709,8 +790,10 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
mptcp_for_each_subflow(msk, subflow)
receivers += !subflow->rx_eof;
+ if (receivers)
+ return;
- if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
+ if (!(sk->sk_shutdown & RCV_SHUTDOWN)) {
/* hopefully temporary hack: propagate shutdown status
* to msk, when all subflows agree on it
*/
@@ -720,16 +803,21 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
set_bit(MPTCP_DATA_READY, &msk->flags);
sk->sk_data_ready(sk);
}
-}
-
-static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
-{
- const struct sock *sk = (const struct sock *)msk;
- if (!msk->cached_ext)
- msk->cached_ext = __skb_ext_alloc(sk->sk_allocation);
-
- return !!msk->cached_ext;
+ switch (sk->sk_state) {
+ case TCP_ESTABLISHED:
+ inet_sk_state_store(sk, TCP_CLOSE_WAIT);
+ break;
+ case TCP_FIN_WAIT1:
+ inet_sk_state_store(sk, TCP_CLOSING);
+ break;
+ case TCP_FIN_WAIT2:
+ inet_sk_state_store(sk, TCP_CLOSE);
+ break;
+ default:
+ return;
+ }
+ mptcp_close_wake_up(sk);
}
static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
@@ -754,8 +842,11 @@ static bool mptcp_skb_can_collapse_to(u64 write_seq,
if (!tcp_skb_can_collapse_to(skb))
return false;
- /* can collapse only if MPTCP level sequence is in order */
- return mpext && mpext->data_seq + mpext->data_len == write_seq;
+ /* can collapse only if MPTCP level sequence is in order and this
+ * mapping has not been xmitted yet
+ */
+ return mpext && mpext->data_seq + mpext->data_len == write_seq &&
+ !mpext->frozen;
}
static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
@@ -763,9 +854,125 @@ static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
const struct mptcp_data_frag *df)
{
return df && pfrag->page == df->page &&
+ pfrag->size - pfrag->offset > 0 &&
df->data_seq + df->data_len == msk->write_seq;
}
+static int mptcp_wmem_with_overhead(struct sock *sk, int size)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ int ret, skbs;
+
+ ret = size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT);
+ skbs = (msk->tx_pending_data + size) / msk->size_goal_cache;
+ if (skbs < msk->skb_tx_cache.qlen)
+ return ret;
+
+ return ret + (skbs - msk->skb_tx_cache.qlen) * SKB_TRUESIZE(MAX_TCP_HEADER);
+}
+
+static void __mptcp_wmem_reserve(struct sock *sk, int size)
+{
+ int amount = mptcp_wmem_with_overhead(sk, size);
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ WARN_ON_ONCE(msk->wmem_reserved);
+ if (amount <= sk->sk_forward_alloc)
+ goto reserve;
+
+ /* under memory pressure try to reserve at most a single page
+ * otherwise try to reserve the full estimate and fall back
+ * to a single page before entering the error path
+ */
+ if ((tcp_under_memory_pressure(sk) && amount > PAGE_SIZE) ||
+ !sk_wmem_schedule(sk, amount)) {
+ if (amount <= PAGE_SIZE)
+ goto nomem;
+
+ amount = PAGE_SIZE;
+ if (!sk_wmem_schedule(sk, amount))
+ goto nomem;
+ }
+
+reserve:
+ msk->wmem_reserved = amount;
+ sk->sk_forward_alloc -= amount;
+ return;
+
+nomem:
+ /* we will wait for memory on next allocation */
+ msk->wmem_reserved = -1;
+}
+
+static void __mptcp_update_wmem(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ if (!msk->wmem_reserved)
+ return;
+
+ if (msk->wmem_reserved < 0)
+ msk->wmem_reserved = 0;
+ if (msk->wmem_reserved > 0) {
+ sk->sk_forward_alloc += msk->wmem_reserved;
+ msk->wmem_reserved = 0;
+ }
+}
+
+static bool mptcp_wmem_alloc(struct sock *sk, int size)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ /* check for pre-existing error condition */
+ if (msk->wmem_reserved < 0)
+ return false;
+
+ if (msk->wmem_reserved >= size)
+ goto account;
+
+ mptcp_data_lock(sk);
+ if (!sk_wmem_schedule(sk, size)) {
+ mptcp_data_unlock(sk);
+ return false;
+ }
+
+ sk->sk_forward_alloc -= size;
+ msk->wmem_reserved += size;
+ mptcp_data_unlock(sk);
+
+account:
+ msk->wmem_reserved -= size;
+ return true;
+}
+
+static void mptcp_wmem_uncharge(struct sock *sk, int size)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ if (msk->wmem_reserved < 0)
+ msk->wmem_reserved = 0;
+ msk->wmem_reserved += size;
+}
+
+static void mptcp_mem_reclaim_partial(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ /* if we are experiencing a transient allocation error,
+ * the forward allocation memory has been already
+ * released
+ */
+ if (msk->wmem_reserved < 0)
+ return;
+
+ mptcp_data_lock(sk);
+ sk->sk_forward_alloc += msk->wmem_reserved;
+ sk_mem_reclaim_partial(sk);
+ msk->wmem_reserved = sk->sk_forward_alloc;
+ sk->sk_forward_alloc = 0;
+ mptcp_data_unlock(sk);
+}
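
Together these helpers give the msk its own pre-reserved write-memory budget: mptcp_wmem_alloc() consumes from wmem_reserved and only takes the data lock to refill it, mptcp_wmem_uncharge() returns memory to the budget, and a negative value records a transient failure. A compact userspace model of the accounting, with the lock and sk_wmem_schedule() abstracted into a plain forward-allocation counter:

#include <stdbool.h>

struct wmem {
	int reserved;	/* msk-level budget; -1 flags a previous failure */
	int fwd_alloc;	/* models sk_forward_alloc + sk_wmem_schedule() */
};

static bool wmem_alloc(struct wmem *w, int size)
{
	if (w->reserved < 0)		/* pre-existing error condition */
		return false;
	if (w->reserved < size) {	/* refill (kernel: under the data lock) */
		if (w->fwd_alloc < size)
			return false;
		w->fwd_alloc -= size;
		w->reserved += size;
	}
	w->reserved -= size;
	return true;
}

static void wmem_uncharge(struct wmem *w, int size)
{
	if (w->reserved < 0)
		w->reserved = 0;
	w->reserved += size;
}

int main(void)
{
	struct wmem w = { .reserved = 0, .fwd_alloc = 4096 };

	if (!wmem_alloc(&w, 1024))
		return 1;
	wmem_uncharge(&w, 512);
	return w.reserved == 512 ? 0 : 1;
}
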
+
static void dfrag_uncharge(struct sock *sk, int len)
{
sk_mem_uncharge(sk, len);
@@ -781,21 +988,7 @@ static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
put_page(dfrag->page);
}
-static bool mptcp_is_writeable(struct mptcp_sock *msk)
-{
- struct mptcp_subflow_context *subflow;
-
- if (!sk_stream_is_writeable((struct sock *)msk))
- return false;
-
- mptcp_for_each_subflow(msk, subflow) {
- if (sk_stream_is_writeable(subflow->tcp_sock))
- return true;
- }
- return false;
-}
-
-static void mptcp_clean_una(struct sock *sk)
+static void __mptcp_clean_una(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_data_frag *dtmp, *dfrag;
@@ -806,13 +999,15 @@ static void mptcp_clean_una(struct sock *sk)
* plain TCP
*/
if (__mptcp_check_fallback(msk))
- atomic64_set(&msk->snd_una, msk->write_seq);
- snd_una = atomic64_read(&msk->snd_una);
+ msk->snd_una = READ_ONCE(msk->snd_nxt);
+ snd_una = msk->snd_una;
list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
break;
+ if (WARN_ON_ONCE(dfrag == msk->first_pending))
+ break;
dfrag_clear(sk, dfrag);
cleaned = true;
}
@@ -821,12 +1016,13 @@ static void mptcp_clean_una(struct sock *sk)
if (dfrag && after64(snd_una, dfrag->data_seq)) {
u64 delta = snd_una - dfrag->data_seq;
- if (WARN_ON_ONCE(delta > dfrag->data_len))
+ if (WARN_ON_ONCE(delta > dfrag->already_sent))
goto out;
dfrag->data_seq += delta;
dfrag->offset += delta;
dfrag->data_len -= delta;
+ dfrag->already_sent -= delta;
dfrag_uncharge(sk, delta);
cleaned = true;
@@ -834,19 +1030,42 @@ static void mptcp_clean_una(struct sock *sk)
out:
if (cleaned) {
- sk_mem_reclaim_partial(sk);
-
- /* Only wake up writers if a subflow is ready */
- if (mptcp_is_writeable(msk)) {
- set_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags);
- smp_mb__after_atomic();
+ if (tcp_under_memory_pressure(sk)) {
+ __mptcp_update_wmem(sk);
+ sk_mem_reclaim_partial(sk);
+ }
- /* set SEND_SPACE before sk_stream_write_space clears
- * NOSPACE
- */
- sk_stream_write_space(sk);
+ if (sk_stream_is_writeable(sk)) {
+ /* pairs with memory barrier in mptcp_poll */
+ smp_mb();
+ if (test_and_clear_bit(MPTCP_NOSPACE, &msk->flags))
+ sk_stream_write_space(sk);
}
}
+
+ if (snd_una == READ_ONCE(msk->snd_nxt)) {
+ if (msk->timer_ival)
+ mptcp_stop_timer(sk);
+ } else {
+ mptcp_reset_timer(sk);
+ }
+}
+
+static void mptcp_enter_memory_pressure(struct sock *sk)
+{
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ bool first = true;
+
+ sk_stream_moderate_sndbuf(sk);
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ if (first)
+ tcp_enter_memory_pressure(ssk);
+ sk_stream_moderate_sndbuf(ssk);
+ first = false;
+ }
}
/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
@@ -858,8 +1077,7 @@ static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
pfrag, sk->sk_allocation)))
return true;
- sk->sk_prot->enter_memory_pressure(sk);
- sk_stream_moderate_sndbuf(sk);
+ mptcp_enter_memory_pressure(sk);
return false;
}
@@ -875,149 +1093,241 @@ mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
dfrag->data_seq = msk->write_seq;
dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
dfrag->offset = offset + sizeof(struct mptcp_data_frag);
+ dfrag->already_sent = 0;
dfrag->page = pfrag->page;
return dfrag;
}
-static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
- struct msghdr *msg, struct mptcp_data_frag *dfrag,
- long *timeo, int *pmss_now,
- int *ps_goal)
+struct mptcp_sendmsg_info {
+ int mss_now;
+ int size_goal;
+ u16 limit;
+ u16 sent;
+ unsigned int flags;
+};
+
+static int mptcp_check_allowed_size(struct mptcp_sock *msk, u64 data_seq,
+ int avail_size)
+{
+ u64 window_end = mptcp_wnd_end(msk);
+
+ if (__mptcp_check_fallback(msk))
+ return avail_size;
+
+ if (!before64(data_seq + avail_size, window_end)) {
+ u64 allowed_size = window_end - data_seq;
+
+ return min_t(unsigned int, allowed_size, avail_size);
+ }
+
+ return avail_size;
+}
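
mptcp_check_allowed_size() clamps each transmission so MPTCP-level data never crosses the receiver's last advertised window end. A standalone version with a worked example, assuming data_seq does not already exceed window_end and ignoring the fallback shortcut:

#include <stdint.h>
#include <stdio.h>

/* the kernel compares with before64(); this assumes no wraparound */
static int allowed_size(uint64_t data_seq, uint64_t window_end, int avail)
{
	if (data_seq + avail >= window_end) {	/* would cross the window end */
		uint64_t room = window_end - data_seq;

		return room < (uint64_t)avail ? (int)room : avail;
	}
	return avail;
}

int main(void)
{
	/* 990 + 64 crosses the advertised end 1000 -> only 10 bytes allowed */
	printf("%d\n", allowed_size(990, 1000, 64));
	return 0;
}
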
+
+static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
+{
+ struct skb_ext *mpext = __skb_ext_alloc(gfp);
+
+ if (!mpext)
+ return false;
+ __skb_ext_set(skb, SKB_EXT_MPTCP, mpext);
+ return true;
+}
+
+static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
+ if (likely(skb)) {
+ if (likely(__mptcp_add_ext(skb, gfp))) {
+ skb_reserve(skb, MAX_TCP_HEADER);
+ skb->reserved_tailroom = skb->end - skb->tail;
+ return skb;
+ }
+ __kfree_skb(skb);
+ } else {
+ mptcp_enter_memory_pressure(sk);
+ }
+ return NULL;
+}
+
+static bool mptcp_tx_cache_refill(struct sock *sk, int size,
+ struct sk_buff_head *skbs, int *total_ts)
{
- int mss_now, avail_size, size_goal, offset, ret, frag_truesize = 0;
- bool dfrag_collapsed, can_collapse = false;
struct mptcp_sock *msk = mptcp_sk(sk);
- struct mptcp_ext *mpext = NULL;
- bool retransmission = !!dfrag;
- struct sk_buff *skb, *tail;
- struct page_frag *pfrag;
- struct page *page;
- u64 *write_seq;
- size_t psize;
-
- /* use the mptcp page cache so that we can easily move the data
- * from one substream to another, but do per subflow memory accounting
- * Note: pfrag is used only !retransmission, but the compiler if
- * fooled into a warning if we don't init here
- */
- pfrag = sk_page_frag(sk);
- if (!retransmission) {
- write_seq = &msk->write_seq;
- page = pfrag->page;
+ struct sk_buff *skb;
+ int space_needed;
+
+ if (unlikely(tcp_under_memory_pressure(sk))) {
+ mptcp_mem_reclaim_partial(sk);
+
+ /* under pressure pre-allocate at most a single skb */
+ if (msk->skb_tx_cache.qlen)
+ return true;
+ space_needed = msk->size_goal_cache;
} else {
- write_seq = &dfrag->data_seq;
- page = dfrag->page;
+ space_needed = msk->tx_pending_data + size -
+ msk->skb_tx_cache.qlen * msk->size_goal_cache;
}
- /* compute copy limit */
- mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
- *pmss_now = mss_now;
- *ps_goal = size_goal;
- avail_size = size_goal;
- skb = tcp_write_queue_tail(ssk);
+ while (space_needed > 0) {
+ skb = __mptcp_do_alloc_tx_skb(sk, sk->sk_allocation);
+ if (unlikely(!skb)) {
+ /* under memory pressure, try to pass the caller a
+ * single skb to allow forward progress
+ */
+ while (skbs->qlen > 1) {
+ skb = __skb_dequeue_tail(skbs);
+ __kfree_skb(skb);
+ }
+ return skbs->qlen > 0;
+ }
+
+ *total_ts += skb->truesize;
+ __skb_queue_tail(skbs, skb);
+ space_needed -= msk->size_goal_cache;
+ }
+ return true;
+}
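
The refill sizing above pre-allocates enough size_goal-sized skbs to cover the pending data not already backed by cached ones. A worked example of the arithmetic, with illustrative numbers:

#include <stdio.h>

int main(void)
{
	int size_goal = 4 * 1448;	/* illustrative size_goal_cache */
	int pending = 64 * 1024;	/* tx_pending_data */
	int new_data = 16 * 1024;	/* bytes about to be queued */
	int cached = 4;			/* skbs already in skb_tx_cache */
	int space_needed = pending + new_data - cached * size_goal;
	int skbs = (space_needed + size_goal - 1) / size_goal;

	printf("need %d more skb(s)\n", skbs > 0 ? skbs : 0);
	return 0;
}
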
+
+static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct sk_buff *skb;
+
+ if (ssk->sk_tx_skb_cache) {
+ skb = ssk->sk_tx_skb_cache;
+ if (unlikely(!skb_ext_find(skb, SKB_EXT_MPTCP) &&
+ !__mptcp_add_ext(skb, gfp)))
+ return false;
+ return true;
+ }
+
+ skb = skb_peek(&msk->skb_tx_cache);
if (skb) {
- mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
+ if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
+ skb = __skb_dequeue(&msk->skb_tx_cache);
+ if (WARN_ON_ONCE(!skb))
+ return false;
+
+ mptcp_wmem_uncharge(sk, skb->truesize);
+ ssk->sk_tx_skb_cache = skb;
+ return true;
+ }
+
+ /* over the memory limit, no point in trying to allocate a new skb */
+ return false;
+ }
+
+ skb = __mptcp_do_alloc_tx_skb(sk, gfp);
+ if (!skb)
+ return false;
+
+ if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
+ ssk->sk_tx_skb_cache = skb;
+ return true;
+ }
+ kfree_skb(skb);
+ return false;
+}
+
+static bool mptcp_must_reclaim_memory(struct sock *sk, struct sock *ssk)
+{
+ return !ssk->sk_tx_skb_cache &&
+ !skb_peek(&mptcp_sk(sk)->skb_tx_cache) &&
+ tcp_under_memory_pressure(sk);
+}
+
+static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk)
+{
+ if (unlikely(mptcp_must_reclaim_memory(sk, ssk)))
+ mptcp_mem_reclaim_partial(sk);
+ return __mptcp_alloc_tx_skb(sk, ssk, sk->sk_allocation);
+}
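
Allocation of a tx skb is thus layered: the subflow's own sk_tx_skb_cache first, then the msk-level skb_tx_cache (whose truesize is charged to the ssk on transfer), and only then a fresh allocation. A sketch of that fallback chain, with an invented buf type standing in for sk_buff:

#include <stdlib.h>

struct buf { struct buf *next; };
struct caches { struct buf *ssk_cache; struct buf *msk_cache; };

static struct buf *get_tx_buf(struct caches *c)
{
	struct buf *b;

	if (c->ssk_cache) {		/* 1: subflow-private cache */
		b = c->ssk_cache;
		c->ssk_cache = NULL;
		return b;
	}
	if (c->msk_cache) {		/* 2: msk-level pre-allocated cache */
		b = c->msk_cache;
		c->msk_cache = b->next;
		return b;
	}
	return calloc(1, sizeof(*b));	/* 3: fresh allocation */
}

int main(void)
{
	struct caches c = { 0 };
	struct buf *b = get_tx_buf(&c);	/* falls through to a fresh alloc */
	int ok = b != NULL;

	free(b);
	return ok ? 0 : 1;
}
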
+static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ struct mptcp_data_frag *dfrag,
+ struct mptcp_sendmsg_info *info)
+{
+ u64 data_seq = dfrag->data_seq + info->sent;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ bool zero_window_probe = false;
+ struct mptcp_ext *mpext = NULL;
+ struct sk_buff *skb, *tail;
+ bool can_collapse = false;
+ int size_bias = 0;
+ int avail_size;
+ size_t ret = 0;
+
+ pr_debug("msk=%p ssk=%p sending dfrag at seq=%lld len=%d already sent=%d",
+ msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
+
+ /* compute send limit */
+ info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
+ avail_size = info->size_goal;
+ msk->size_goal_cache = info->size_goal;
+ skb = tcp_write_queue_tail(ssk);
+ if (skb) {
/* Limit the write to the size available in the
* current skb, if any, so that we create at most one new skb.
* Explicitly tells TCP internals to avoid collapsing on later
* queue management operation, to avoid breaking the ext <->
* SSN association set here
*/
- can_collapse = (size_goal - skb->len > 0) &&
- mptcp_skb_can_collapse_to(*write_seq, skb, mpext);
- if (!can_collapse)
+ mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
+ can_collapse = (info->size_goal - skb->len > 0) &&
+ mptcp_skb_can_collapse_to(data_seq, skb, mpext);
+ if (!can_collapse) {
TCP_SKB_CB(skb)->eor = 1;
- else
- avail_size = size_goal - skb->len;
- }
-
- if (!retransmission) {
- /* reuse tail pfrag, if possible, or carve a new one from the
- * page allocator
- */
- dfrag = mptcp_rtx_tail(sk);
- offset = pfrag->offset;
- dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
- if (!dfrag_collapsed) {
- dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
- offset = dfrag->offset;
- frag_truesize = dfrag->overhead;
- }
- psize = min_t(size_t, pfrag->size - offset, avail_size);
-
- /* Copy to page */
- pr_debug("left=%zu", msg_data_left(msg));
- psize = copy_page_from_iter(pfrag->page, offset,
- min_t(size_t, msg_data_left(msg),
- psize),
- &msg->msg_iter);
- pr_debug("left=%zu", msg_data_left(msg));
- if (!psize)
- return -EINVAL;
-
- if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) {
- iov_iter_revert(&msg->msg_iter, psize);
- return -ENOMEM;
+ } else {
+ size_bias = skb->len;
+ avail_size = info->size_goal - skb->len;
}
- } else {
- offset = dfrag->offset;
- psize = min_t(size_t, dfrag->data_len, avail_size);
}
- /* tell the TCP stack to delay the push so that we can safely
- * access the skb after the sendpages call
- */
- ret = do_tcp_sendpages(ssk, page, offset, psize,
- msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
- if (ret <= 0) {
- if (!retransmission)
- iov_iter_revert(&msg->msg_iter, psize);
- return ret;
- }
+ /* Zero window and all data acked? Probe. */
+ avail_size = mptcp_check_allowed_size(msk, data_seq, avail_size);
+ if (avail_size == 0) {
+ u64 snd_una = READ_ONCE(msk->snd_una);
- frag_truesize += ret;
- if (!retransmission) {
- if (unlikely(ret < psize))
- iov_iter_revert(&msg->msg_iter, psize - ret);
+ if (skb || snd_una != msk->snd_nxt)
+ return 0;
+ zero_window_probe = true;
+ data_seq = snd_una - 1;
+ avail_size = 1;
+ }
- /* send successful, keep track of sent data for mptcp-level
- * retransmission
- */
- dfrag->data_len += ret;
- if (!dfrag_collapsed) {
- get_page(dfrag->page);
- list_add_tail(&dfrag->list, &msk->rtx_queue);
- sk_wmem_queued_add(sk, frag_truesize);
- } else {
- sk_wmem_queued_add(sk, ret);
- }
+ if (WARN_ON_ONCE(info->sent > info->limit ||
+ info->limit > dfrag->data_len))
+ return 0;
- /* charge data on mptcp rtx queue to the master socket
- * Note: we charge such data both to sk and ssk
- */
- sk->sk_forward_alloc -= frag_truesize;
+ ret = info->limit - info->sent;
+ tail = tcp_build_frag(ssk, avail_size + size_bias, info->flags,
+ dfrag->page, dfrag->offset + info->sent, &ret);
+ if (!tail) {
+ tcp_remove_empty_skb(sk, tcp_write_queue_tail(ssk));
+ return -ENOMEM;
}
- /* if the tail skb extension is still the cached one, collapsing
- * really happened. Note: we can't check for 'same skb' as the sk_buff
- * hdr on tail can be transmitted, freed and re-allocated by the
- * do_tcp_sendpages() call
+ /* if the tail skb is still the cached one, collapsing really happened.
*/
- tail = tcp_write_queue_tail(ssk);
- if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
- WARN_ON_ONCE(!can_collapse);
+ if (skb == tail) {
+ TCP_SKB_CB(tail)->tcp_flags &= ~TCPHDR_PSH;
mpext->data_len += ret;
+ WARN_ON_ONCE(!can_collapse);
+ WARN_ON_ONCE(zero_window_probe);
goto out;
}
- skb = tcp_write_queue_tail(ssk);
- mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
- msk->cached_ext = NULL;
+ mpext = skb_ext_find(tail, SKB_EXT_MPTCP);
+ if (WARN_ON_ONCE(!mpext)) {
+ /* should never reach here, stream corrupted */
+ return -EINVAL;
+ }
memset(mpext, 0, sizeof(*mpext));
- mpext->data_seq = *write_seq;
+ mpext->data_seq = data_seq;
mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
mpext->data_len = ret;
mpext->use_map = 1;
@@ -1027,44 +1337,17 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
mpext->data_seq, mpext->subflow_seq, mpext->data_len,
mpext->dsn64);
+ if (zero_window_probe) {
+ mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
+ mpext->frozen = 1;
+ ret = 0;
+ tcp_push_pending_frames(ssk);
+ }
out:
- if (!retransmission)
- pfrag->offset += frag_truesize;
- WRITE_ONCE(*write_seq, *write_seq + ret);
mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
-
return ret;
}
-static void mptcp_nospace(struct mptcp_sock *msk)
-{
- struct mptcp_subflow_context *subflow;
-
- clear_bit(MPTCP_SEND_SPACE, &msk->flags);
- smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
-
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- struct socket *sock = READ_ONCE(ssk->sk_socket);
-
- /* enables ssk->write_space() callbacks */
- if (sock)
- set_bit(SOCK_NOSPACE, &sock->flags);
- }
-}
-
-static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
-{
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
- /* can't send if JOIN hasn't completed yet (i.e. is usable for mptcp) */
- if (subflow->request_join && !subflow->fully_established)
- return false;
-
- /* only send if our side has not closed yet */
- return ((1 << ssk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
-}
-
#define MPTCP_SEND_BURST_SIZE ((1 << 16) - \
sizeof(struct tcphdr) - \
MAX_TCP_OPTION_SPACE - \
@@ -1089,9 +1372,6 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
sock_owned_by_me((struct sock *)msk);
*sndbuf = 0;
- if (!mptcp_ext_cache_refill(msk))
- return NULL;
-
if (__mptcp_check_fallback(msk)) {
if (!msk->first)
return NULL;
@@ -1154,27 +1434,160 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
return NULL;
}
-static void ssk_check_wmem(struct mptcp_sock *msk)
+static void mptcp_push_release(struct sock *sk, struct sock *ssk,
+ struct mptcp_sendmsg_info *info)
+{
+ mptcp_set_timeout(sk, ssk);
+ tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
+ release_sock(ssk);
+}
+
+static void mptcp_push_pending(struct sock *sk, unsigned int flags)
+{
+ struct sock *prev_ssk = NULL, *ssk = NULL;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct mptcp_sendmsg_info info = {
+ .flags = flags,
+ };
+ struct mptcp_data_frag *dfrag;
+ int len, copied = 0;
+ u32 sndbuf;
+
+ while ((dfrag = mptcp_send_head(sk))) {
+ info.sent = dfrag->already_sent;
+ info.limit = dfrag->data_len;
+ len = dfrag->data_len - dfrag->already_sent;
+ while (len > 0) {
+ int ret = 0;
+
+ prev_ssk = ssk;
+ __mptcp_flush_join_list(msk);
+ ssk = mptcp_subflow_get_send(msk, &sndbuf);
+
+ /* do auto tuning */
+ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
+ sndbuf > READ_ONCE(sk->sk_sndbuf))
+ WRITE_ONCE(sk->sk_sndbuf, sndbuf);
+
+ /* try to keep the subflow socket lock across
+ * consecutive xmit on the same socket
+ */
+ if (ssk != prev_ssk && prev_ssk)
+ mptcp_push_release(sk, prev_ssk, &info);
+ if (!ssk)
+ goto out;
+
+ if (ssk != prev_ssk || !prev_ssk)
+ lock_sock(ssk);
+
+ /* keep it simple and always provide a new skb for the
+ * subflow, even if we will not use it when collapsing
+ * on the pending one
+ */
+ if (!mptcp_alloc_tx_skb(sk, ssk)) {
+ mptcp_push_release(sk, ssk, &info);
+ goto out;
+ }
+
+ ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
+ if (ret <= 0) {
+ mptcp_push_release(sk, ssk, &info);
+ goto out;
+ }
+
+ info.sent += ret;
+ dfrag->already_sent += ret;
+ msk->snd_nxt += ret;
+ msk->snd_burst -= ret;
+ msk->tx_pending_data -= ret;
+ copied += ret;
+ len -= ret;
+ }
+ WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+ }
+
+ /* at this point we still hold the socket lock for the last subflow we used */
+ if (ssk)
+ mptcp_push_release(sk, ssk, &info);
+
+out:
+ if (copied) {
+ /* start the timer, if it's not pending */
+ if (!mptcp_timer_pending(sk))
+ mptcp_reset_timer(sk);
+ __mptcp_check_send_data_fin(sk);
+ }
+}
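
mptcp_push_pending() deliberately keeps the last selected subflow socket locked across consecutive sends and only releases and pushes it when the scheduler picks a different subflow or runs out of data. A runnable userspace simulation of that lock-coalescing loop; the flows and the trivial scheduler are invented:

#include <stdio.h>

struct flow { int id; int budget; };

static struct flow flows[2] = { { 1, 2 }, { 2, 1 } };

static struct flow *pick_flow(void)
{
	int i;

	/* trivial scheduler: first flow with budget left */
	for (i = 0; i < 2; i++)
		if (flows[i].budget > 0)
			return &flows[i];
	return NULL;
}

static void flow_lock(struct flow *f)            { printf("lock %d\n", f->id); }
static void flow_unlock_and_push(struct flow *f) { printf("push+unlock %d\n", f->id); }
static int xmit_some(struct flow *f)             { return f->budget--; }

static void push_pending(void)
{
	struct flow *prev = NULL, *f;

	for (;;) {
		f = pick_flow();
		/* the scheduler moved to another flow: flush the old one */
		if (prev && f != prev)
			flow_unlock_and_push(prev);
		if (!f)
			return;
		if (f != prev)
			flow_lock(f);
		if (xmit_some(f) <= 0) {
			flow_unlock_and_push(f);
			return;
		}
		prev = f;
	}
}

int main(void)
{
	push_pending();
	return 0;
}
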
+
+static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
{
- if (unlikely(!mptcp_is_writeable(msk)))
- mptcp_nospace(msk);
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct mptcp_sendmsg_info info;
+ struct mptcp_data_frag *dfrag;
+ int len, copied = 0;
+
+ info.flags = 0;
+ while ((dfrag = mptcp_send_head(sk))) {
+ info.sent = dfrag->already_sent;
+ info.limit = dfrag->data_len;
+ len = dfrag->data_len - dfrag->already_sent;
+ while (len > 0) {
+ int ret = 0;
+
+ /* do auto tuning */
+ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
+ ssk->sk_sndbuf > READ_ONCE(sk->sk_sndbuf))
+ WRITE_ONCE(sk->sk_sndbuf, ssk->sk_sndbuf);
+
+ if (unlikely(mptcp_must_reclaim_memory(sk, ssk))) {
+ __mptcp_update_wmem(sk);
+ sk_mem_reclaim_partial(sk);
+ }
+ if (!__mptcp_alloc_tx_skb(sk, ssk, GFP_ATOMIC))
+ goto out;
+
+ ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
+ if (ret <= 0)
+ goto out;
+
+ info.sent += ret;
+ dfrag->already_sent += ret;
+ msk->snd_nxt += ret;
+ msk->snd_burst -= ret;
+ msk->tx_pending_data -= ret;
+ copied += ret;
+ len -= ret;
+ }
+ WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+ }
+
+out:
+ /* __mptcp_alloc_tx_skb could have released some wmem and we are
+ * not going to flush it via release_sock()
+ */
+ __mptcp_update_wmem(sk);
+ if (copied) {
+ mptcp_set_timeout(sk, ssk);
+ tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
+ info.size_goal);
+ if (msk->snd_data_fin_enable &&
+ msk->snd_nxt + 1 == msk->write_seq)
+ mptcp_schedule_work(sk);
+ }
}
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
- int mss_now = 0, size_goal = 0, ret = 0;
struct mptcp_sock *msk = mptcp_sk(sk);
struct page_frag *pfrag;
size_t copied = 0;
- struct sock *ssk;
- u32 sndbuf;
- bool tx_ok;
+ int ret = 0;
long timeo;
if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
return -EOPNOTSUPP;
- lock_sock(sk);
+ mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, len));
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
@@ -1185,130 +1598,97 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
pfrag = sk_page_frag(sk);
-restart:
- mptcp_clean_una(sk);
-
- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
- ret = -EPIPE;
- goto out;
- }
- __mptcp_flush_join_list(msk);
- ssk = mptcp_subflow_get_send(msk, &sndbuf);
- while (!sk_stream_memory_free(sk) ||
- !ssk ||
- !mptcp_page_frag_refill(ssk, pfrag)) {
- if (ssk) {
- /* make sure retransmit timer is
- * running before we wait for memory.
- *
- * The retransmit timer might be needed
- * to make the peer send an up-to-date
- * MPTCP Ack.
- */
- mptcp_set_timeout(sk, ssk);
- if (!mptcp_timer_pending(sk))
- mptcp_reset_timer(sk);
- }
+ while (msg_data_left(msg)) {
+ int total_ts, frag_truesize = 0;
+ struct mptcp_data_frag *dfrag;
+ struct sk_buff_head skbs;
+ bool dfrag_collapsed;
+ size_t psize, offset;
- mptcp_nospace(msk);
- ret = sk_stream_wait_memory(sk, &timeo);
- if (ret)
- goto out;
-
- mptcp_clean_una(sk);
-
- ssk = mptcp_subflow_get_send(msk, &sndbuf);
- if (list_empty(&msk->conn_list)) {
- ret = -ENOTCONN;
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
+ ret = -EPIPE;
goto out;
}
- }
- /* do auto tuning */
- if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
- sndbuf > READ_ONCE(sk->sk_sndbuf))
- WRITE_ONCE(sk->sk_sndbuf, sndbuf);
+ /* reuse tail pfrag, if possible, or carve a new one from the
+ * page allocator
+ */
+ dfrag = mptcp_pending_tail(sk);
+ dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
+ if (!dfrag_collapsed) {
+ if (!sk_stream_memory_free(sk))
+ goto wait_for_memory;
- pr_debug("conn_list->subflow=%p", ssk);
+ if (!mptcp_page_frag_refill(sk, pfrag))
+ goto wait_for_memory;
- lock_sock(ssk);
- tx_ok = msg_data_left(msg);
- while (tx_ok) {
- ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
- &size_goal);
- if (ret < 0) {
- if (ret == -EAGAIN && timeo > 0) {
- mptcp_set_timeout(sk, ssk);
- release_sock(ssk);
- goto restart;
- }
- break;
+ dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset);
+ frag_truesize = dfrag->overhead;
}
- /* burst can be negative, we will try move to the next subflow
- * at selection time, if possible.
+ /* we do not bound vs wspace, to allow at least a single packet.
+ * memory accounting will prevent excessive memory usage
+ * anyway
*/
- msk->snd_burst -= ret;
- copied += ret;
-
- tx_ok = msg_data_left(msg);
- if (!tx_ok)
- break;
+ offset = dfrag->offset + dfrag->data_len;
+ psize = pfrag->size - offset;
+ psize = min_t(size_t, psize, msg_data_left(msg));
+ total_ts = psize + frag_truesize;
+ __skb_queue_head_init(&skbs);
+ if (!mptcp_tx_cache_refill(sk, psize, &skbs, &total_ts))
+ goto wait_for_memory;
+
+ if (!mptcp_wmem_alloc(sk, total_ts)) {
+ __skb_queue_purge(&skbs);
+ goto wait_for_memory;
+ }
- if (!sk_stream_memory_free(ssk) ||
- !mptcp_page_frag_refill(ssk, pfrag) ||
- !mptcp_ext_cache_refill(msk)) {
- tcp_push(ssk, msg->msg_flags, mss_now,
- tcp_sk(ssk)->nonagle, size_goal);
- mptcp_set_timeout(sk, ssk);
- release_sock(ssk);
- goto restart;
+ skb_queue_splice_tail(&skbs, &msk->skb_tx_cache);
+ if (copy_page_from_iter(dfrag->page, offset, psize,
+ &msg->msg_iter) != psize) {
+ mptcp_wmem_uncharge(sk, psize + frag_truesize);
+ ret = -EFAULT;
+ goto out;
}
- /* memory is charged to mptcp level socket as well, i.e.
- * if msg is very large, mptcp socket may run out of buffer
- * space. mptcp_clean_una() will release data that has
- * been acked at mptcp level in the mean time, so there is
- * a good chance we can continue sending data right away.
- *
- * Normally, when the tcp subflow can accept more data, then
- * so can the MPTCP socket. However, we need to cope with
- * peers that might lag behind in their MPTCP-level
- * acknowledgements, i.e. data might have been acked at
- * tcp level only. So, we must also check the MPTCP socket
- * limits before we send more data.
+ /* data successfully copied into the write queue */
+ copied += psize;
+ dfrag->data_len += psize;
+ frag_truesize += psize;
+ pfrag->offset += frag_truesize;
+ WRITE_ONCE(msk->write_seq, msk->write_seq + psize);
+
+ /* charge data on mptcp pending queue to the msk socket
+ * Note: we charge such data both to sk and ssk
*/
- if (unlikely(!sk_stream_memory_free(sk))) {
- tcp_push(ssk, msg->msg_flags, mss_now,
- tcp_sk(ssk)->nonagle, size_goal);
- mptcp_clean_una(sk);
- if (!sk_stream_memory_free(sk)) {
- /* can't send more for now, need to wait for
- * MPTCP-level ACKs from peer.
- *
- * Wakeup will happen via mptcp_clean_una().
- */
- mptcp_set_timeout(sk, ssk);
- release_sock(ssk);
- goto restart;
- }
+ sk_wmem_queued_add(sk, frag_truesize);
+ if (!dfrag_collapsed) {
+ get_page(dfrag->page);
+ list_add_tail(&dfrag->list, &msk->rtx_queue);
+ if (!msk->first_pending)
+ WRITE_ONCE(msk->first_pending, dfrag);
}
+ pr_debug("msk=%p dfrag at seq=%lld len=%d sent=%d new=%d", msk,
+ dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
+ !dfrag_collapsed);
+
+ continue;
+
+wait_for_memory:
+ set_bit(MPTCP_NOSPACE, &msk->flags);
+ mptcp_push_pending(sk, msg->msg_flags);
+ ret = sk_stream_wait_memory(sk, &timeo);
+ if (ret)
+ goto out;
}
- mptcp_set_timeout(sk, ssk);
if (copied) {
- tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
- size_goal);
-
- /* start the timer, if it's not pending */
- if (!mptcp_timer_pending(sk))
- mptcp_reset_timer(sk);
+ msk->tx_pending_data += copied;
+ mptcp_push_pending(sk, msg->msg_flags);
}
- release_sock(ssk);
out:
- ssk_check_wmem(msk);
release_sock(sk);
return copied ? : ret;
}
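
The rewritten sendmsg path copies user data into dfrag-backed pages, collapsing onto the tail fragment when its page still has room, and leaves transmission to mptcp_push_pending(). A worked example of the append-point and size computation, with illustrative numbers:

#include <stdio.h>

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t page_size = 4096;
	size_t frag_off = 1024, frag_len = 512;	/* tail dfrag in the page */
	size_t left = 10000;			/* msg_data_left() */
	size_t offset = frag_off + frag_len;	/* append after existing data */
	size_t psize = min_sz(page_size - offset, left);

	printf("copy %zu bytes at page offset %zu\n", psize, offset);
	return 0;
}
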
@@ -1332,11 +1712,10 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
struct msghdr *msg,
size_t len)
{
- struct sock *sk = (struct sock *)msk;
struct sk_buff *skb;
int copied = 0;
- while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
+ while ((skb = skb_peek(&msk->receive_queue)) != NULL) {
u32 offset = MPTCP_SKB_CB(skb)->offset;
u32 data_len = skb->len - offset;
u32 count = min_t(size_t, len - copied, data_len);
@@ -1356,7 +1735,10 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
break;
}
- __skb_unlink(skb, &sk->sk_receive_queue);
+ /* we will bulk release the skb memory later */
+ skb->destructor = NULL;
+ msk->rmem_released += skb->truesize;
+ __skb_unlink(skb, &msk->receive_queue);
__kfree_skb(skb);
if (copied >= len)
@@ -1464,32 +1846,68 @@ new_measure:
msk->rcvq_space.time = mstamp;
}
-static bool __mptcp_move_skbs(struct mptcp_sock *msk)
+static void __mptcp_update_rmem(struct sock *sk)
{
- unsigned int moved = 0;
- bool done;
+ struct mptcp_sock *msk = mptcp_sk(sk);
- /* avoid looping forever below on racing close */
- if (((struct sock *)msk)->sk_state == TCP_CLOSE)
- return false;
+ if (!msk->rmem_released)
+ return;
+
+ atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
+ sk_mem_uncharge(sk, msk->rmem_released);
+ msk->rmem_released = 0;
+}
+
+static void __mptcp_splice_receive_queue(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue);
+}
+
+static bool __mptcp_move_skbs(struct mptcp_sock *msk, unsigned int rcv)
+{
+ struct sock *sk = (struct sock *)msk;
+ unsigned int moved = 0;
+ bool ret, done;
__mptcp_flush_join_list(msk);
do {
struct sock *ssk = mptcp_subflow_recv_lookup(msk);
+ bool slowpath;
- if (!ssk)
+ /* we can have data pending in the subflows only if the msk
+ * receive buffer was full at subflow_data_ready() time,
+ * which is an unlikely slow path.
+ */
+ if (likely(!ssk))
break;
- lock_sock(ssk);
+ slowpath = lock_sock_fast(ssk);
+ mptcp_data_lock(sk);
done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
- release_sock(ssk);
+ mptcp_data_unlock(sk);
+ if (moved && rcv) {
+ WRITE_ONCE(msk->rmem_pending, min(rcv, moved));
+ tcp_cleanup_rbuf(ssk, 1);
+ WRITE_ONCE(msk->rmem_pending, 0);
+ }
+ unlock_sock_fast(ssk, slowpath);
} while (!done);
- if (mptcp_ofo_queue(msk) || moved > 0) {
- mptcp_check_data_fin((struct sock *)msk);
- return true;
+ /* acquire the data lock only if some input data is pending */
+ ret = moved > 0;
+ if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) ||
+ !skb_queue_empty_lockless(&sk->sk_receive_queue)) {
+ mptcp_data_lock(sk);
+ __mptcp_update_rmem(sk);
+ ret |= __mptcp_ofo_queue(msk);
+ __mptcp_splice_receive_queue(sk);
+ mptcp_data_unlock(sk);
}
- return false;
+ if (ret)
+ mptcp_check_data_fin((struct sock *)msk);
+ return !skb_queue_empty(&msk->receive_queue);
}
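
On the receive side, skbs are now spliced under the msk data lock into msk->receive_queue, and their memory is no longer uncharged one by one: __mptcp_recvmsg_mskq() accumulates truesize into rmem_released and __mptcp_update_rmem() settles it in a single subtraction. A userspace model of that deferred accounting:

struct rx_acct {
	long rmem_alloc;	/* models sk_rmem_alloc */
	long rmem_released;	/* truesize waiting to be settled */
};

static void consume_buf(struct rx_acct *a, long truesize)
{
	a->rmem_released += truesize;	/* destructor cleared, settle later */
}

static void update_rmem(struct rx_acct *a)
{
	if (!a->rmem_released)
		return;
	a->rmem_alloc -= a->rmem_released;
	a->rmem_released = 0;
}

int main(void)
{
	struct rx_acct a = { .rmem_alloc = 3000, .rmem_released = 0 };

	consume_buf(&a, 1000);
	consume_buf(&a, 500);
	update_rmem(&a);	/* one settlement instead of two */
	return a.rmem_alloc == 1500 ? 0 : 1;
}
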
static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
@@ -1503,15 +1921,19 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
return -EOPNOTSUPP;
- lock_sock(sk);
+ mptcp_lock_sock(sk, __mptcp_splice_receive_queue(sk));
+ if (unlikely(sk->sk_state == TCP_LISTEN)) {
+ copied = -ENOTCONN;
+ goto out_err;
+ }
+
timeo = sock_rcvtimeo(sk, nonblock);
len = min_t(size_t, len, INT_MAX);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
- __mptcp_flush_join_list(msk);
- while (len > (size_t)copied) {
- int bytes_read;
+ while (copied < len) {
+ int bytes_read, old_space;
bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
if (unlikely(bytes_read < 0)) {
@@ -1522,10 +1944,15 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
copied += bytes_read;
- if (skb_queue_empty(&sk->sk_receive_queue) &&
- __mptcp_move_skbs(msk))
+ if (skb_queue_empty(&msk->receive_queue) &&
+ __mptcp_move_skbs(msk, len - copied))
continue;
+ /* be sure to advertise window change */
+ old_space = READ_ONCE(msk->old_wspace);
+ if ((tcp_space(sk) - old_space) >= old_space)
+ mptcp_cleanup_rbuf(msk);
+
/* only the master socket status is relevant here. The exit
* conditions mirror closely tcp_recvmsg()
*/
@@ -1548,8 +1975,14 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
mptcp_check_for_eof(msk);
- if (sk->sk_shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ /* race breaker: the shutdown could be after the
+ * previous receive queue check
+ */
+ if (__mptcp_move_skbs(msk, len - copied))
+ continue;
break;
+ }
if (sk->sk_state == TCP_CLOSE) {
copied = -ENOTCONN;
@@ -1571,14 +2004,15 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
mptcp_wait_data(sk, &timeo);
}
- if (skb_queue_empty(&sk->sk_receive_queue)) {
+ if (skb_queue_empty_lockless(&sk->sk_receive_queue) &&
+ skb_queue_empty(&msk->receive_queue)) {
/* entire backlog drained, clear DATA_READY. */
clear_bit(MPTCP_DATA_READY, &msk->flags);
/* .. race-breaker: ssk might have gotten new data
* after last __mptcp_move_skbs() returned false.
*/
- if (unlikely(__mptcp_move_skbs(msk)))
+ if (unlikely(__mptcp_move_skbs(msk, 0)))
set_bit(MPTCP_DATA_READY, &msk->flags);
} else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
/* data to read but mptcp_wait_data() cleared DATA_READY */
@@ -1587,7 +2021,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
out_err:
pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
msk, test_bit(MPTCP_DATA_READY, &msk->flags),
- skb_queue_empty(&sk->sk_receive_queue), copied);
+ skb_queue_empty_lockless(&sk->sk_receive_queue), copied);
mptcp_rcv_space_adjust(msk, copied);
release_sock(sk);
@@ -1598,13 +2032,8 @@ static void mptcp_retransmit_handler(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- if (atomic64_read(&msk->snd_una) == READ_ONCE(msk->write_seq)) {
- mptcp_stop_timer(sk);
- } else {
- set_bit(MPTCP_WORK_RTX, &msk->flags);
- if (schedule_work(&msk->work))
- sock_hold(sk);
- }
+ set_bit(MPTCP_WORK_RTX, &msk->flags);
+ mptcp_schedule_work(sk);
}
static void mptcp_retransmit_timer(struct timer_list *t)
@@ -1626,6 +2055,14 @@ static void mptcp_retransmit_timer(struct timer_list *t)
sock_put(sk);
}
+static void mptcp_timeout_timer(struct timer_list *t)
+{
+ struct sock *sk = from_timer(sk, t, sk_timer);
+
+ mptcp_schedule_work(sk);
+ sock_put(sk);
+}
+
/* Find an idle subflow. Return NULL if there is unacked data at tcp
* level.
*
@@ -1639,7 +2076,7 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
sock_owned_by_me((const struct sock *)msk);
if (__mptcp_check_fallback(msk))
- return msk->first;
+ return NULL;
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
@@ -1648,8 +2085,11 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
continue;
/* still data outstanding at TCP level? Don't retransmit. */
- if (!tcp_write_queue_empty(ssk))
+ if (!tcp_write_queue_empty(ssk)) {
+ if (inet_csk(ssk)->icsk_ca_state >= TCP_CA_Loss)
+ continue;
return NULL;
+ }
if (subflow->backup) {
if (!backup)
@@ -1672,20 +2112,44 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
* parent socket.
*/
void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
- struct mptcp_subflow_context *subflow,
- long timeout)
+ struct mptcp_subflow_context *subflow)
{
- struct socket *sock = READ_ONCE(ssk->sk_socket);
+ bool dispose_socket = false;
+ struct socket *sock;
list_del(&subflow->node);
- if (sock && sock != sk->sk_socket) {
- /* outgoing subflow */
- sock_release(sock);
+ lock_sock(ssk);
+
+ /* if we are invoked by the msk cleanup code, the subflow is
+ * already orphaned
+ */
+ sock = ssk->sk_socket;
+ if (sock) {
+ dispose_socket = sock != sk->sk_socket;
+ sock_orphan(ssk);
+ }
+
+ subflow->disposable = 1;
+
+ /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
+ * and the ssk has already been destroyed; we just need to release the
+ * reference owned by the msk.
+ */
+ if (!inet_csk(ssk)->icsk_ulp_ops) {
+ kfree_rcu(subflow, rcu);
} else {
- /* incoming subflow */
- tcp_close(ssk, timeout);
+ /* otherwise tcp will dispose of the ssk and subflow ctx */
+ __tcp_close(ssk, 0);
+
+ /* close acquired an extra ref */
+ __sock_put(ssk);
}
+ release_sock(ssk);
+ if (dispose_socket)
+ iput(SOCK_INODE(sock));
+
+ sock_put(ssk);
}
static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
@@ -1704,6 +2168,10 @@ static void pm_work(struct mptcp_sock *msk)
pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
mptcp_pm_nl_add_addr_received(msk);
}
+ if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) {
+ pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK);
+ mptcp_pm_nl_add_addr_send_ack(msk);
+ }
if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
mptcp_pm_nl_rm_addr_received(msk);
@@ -1730,40 +2198,102 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
if (inet_sk_state_load(ssk) != TCP_CLOSE)
continue;
- __mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0);
+ __mptcp_close_ssk((struct sock *)msk, ssk, subflow);
+ }
+}
+
+static bool mptcp_check_close_timeout(const struct sock *sk)
+{
+ s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
+ struct mptcp_subflow_context *subflow;
+
+ if (delta >= TCP_TIMEWAIT_LEN)
+ return true;
+
+ /* if all subflows are in closed status don't bother with additional
+ * timeout
+ */
+ mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
+ if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) !=
+ TCP_CLOSE)
+ return false;
+ }
+ return true;
+}
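
An orphaned msk is destroyed by the worker once either TCP_TIMEWAIT_LEN has elapsed (icsk_mtup.probe_timestamp is reused as a close timestamp) or every subflow has already reached TCP_CLOSE. A small model of that predicate:

#include <stdbool.h>
#include <stddef.h>

static bool close_timed_out(long elapsed, long tw_len,
			    const bool *subflow_closed, size_t n)
{
	size_t i;

	if (elapsed >= tw_len)		/* TCP_TIMEWAIT_LEN expired */
		return true;
	/* or: every subflow already closed, no reason to wait further */
	for (i = 0; i < n; i++)
		if (!subflow_closed[i])
			return false;
	return true;
}

int main(void)
{
	bool closed[2] = { true, true };

	return close_timed_out(0, 100, closed, 2) ? 0 : 1;
}
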
+
+static void mptcp_check_fastclose(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow, *tmp;
+ struct sock *sk = &msk->sk.icsk_inet.sk;
+
+ if (likely(!READ_ONCE(msk->rcv_fastclose)))
+ return;
+
+ mptcp_token_destroy(msk);
+
+ list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+ struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
+
+ lock_sock(tcp_sk);
+ if (tcp_sk->sk_state != TCP_CLOSE) {
+ tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
+ tcp_set_state(tcp_sk, TCP_CLOSE);
+ }
+ release_sock(tcp_sk);
}
+
+ inet_sk_state_store(sk, TCP_CLOSE);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
+ set_bit(MPTCP_DATA_READY, &msk->flags);
+ set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
+
+ mptcp_close_wake_up(sk);
}
static void mptcp_worker(struct work_struct *work)
{
struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
- int orig_len, orig_offset, mss_now = 0, size_goal = 0;
+ struct mptcp_sendmsg_info info = {};
struct mptcp_data_frag *dfrag;
- u64 orig_write_seq;
size_t copied = 0;
- struct msghdr msg = {
- .msg_flags = MSG_DONTWAIT,
- };
- long timeo = 0;
+ int state, ret;
lock_sock(sk);
- mptcp_clean_una(sk);
+ state = sk->sk_state;
+ if (unlikely(state == TCP_CLOSE))
+ goto unlock;
+
mptcp_check_data_fin_ack(sk);
__mptcp_flush_join_list(msk);
+
+ mptcp_check_fastclose(msk);
+
if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
__mptcp_close_subflow(msk);
- __mptcp_move_skbs(msk);
-
if (msk->pm.status)
pm_work(msk);
if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
mptcp_check_for_eof(msk);
+ __mptcp_check_send_data_fin(sk);
mptcp_check_data_fin(sk);
+ /* if the msk data is completely acked, or the socket timed out,
+ * there is no point in keeping around an orphaned sk
+ */
+ if (sock_flag(sk, SOCK_DEAD) &&
+ (mptcp_check_close_timeout(sk) ||
+ (state != sk->sk_state &&
+ ((1 << inet_sk_state_load(sk)) & (TCPF_CLOSE | TCPF_FIN_WAIT2))))) {
+ inet_sk_state_store(sk, TCP_CLOSE);
+ __mptcp_destroy_sock(sk);
+ goto unlock;
+ }
+
if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
goto unlock;
@@ -1771,39 +2301,30 @@ static void mptcp_worker(struct work_struct *work)
if (!dfrag)
goto unlock;
- if (!mptcp_ext_cache_refill(msk))
- goto reset_unlock;
-
ssk = mptcp_subflow_get_retrans(msk);
if (!ssk)
goto reset_unlock;
lock_sock(ssk);
- orig_len = dfrag->data_len;
- orig_offset = dfrag->offset;
- orig_write_seq = dfrag->data_seq;
- while (dfrag->data_len > 0) {
- int ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo,
- &mss_now, &size_goal);
- if (ret < 0)
+ /* limit retransmission to the bytes already sent on some subflows */
+ info.sent = 0;
+ info.limit = dfrag->already_sent;
+ while (info.sent < dfrag->already_sent) {
+ if (!mptcp_alloc_tx_skb(sk, ssk))
+ break;
+
+ ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
+ if (ret <= 0)
break;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
copied += ret;
- dfrag->data_len -= ret;
- dfrag->offset += ret;
-
- if (!mptcp_ext_cache_refill(msk))
- break;
+ info.sent += ret;
}
if (copied)
- tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
- size_goal);
-
- dfrag->data_seq = orig_write_seq;
- dfrag->offset = orig_offset;
- dfrag->data_len = orig_len;
+ tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
+ info.size_goal);
mptcp_set_timeout(sk, ssk);
release_sock(ssk);
@@ -1826,10 +2347,17 @@ static int __mptcp_init_sock(struct sock *sk)
INIT_LIST_HEAD(&msk->conn_list);
INIT_LIST_HEAD(&msk->join_list);
INIT_LIST_HEAD(&msk->rtx_queue);
- __set_bit(MPTCP_SEND_SPACE, &msk->flags);
INIT_WORK(&msk->work, mptcp_worker);
+ __skb_queue_head_init(&msk->receive_queue);
+ __skb_queue_head_init(&msk->skb_tx_cache);
msk->out_of_order_queue = RB_ROOT;
+ msk->first_pending = NULL;
+ msk->wmem_reserved = 0;
+ msk->rmem_released = 0;
+ msk->tx_pending_data = 0;
+ msk->size_goal_cache = TCP_BASE_MSS;
+ msk->ack_hint = NULL;
msk->first = NULL;
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
@@ -1837,7 +2365,7 @@ static int __mptcp_init_sock(struct sock *sk)
/* re-use the csk retrans timer for MPTCP-level retrans */
timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
-
+ timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
return 0;
}
@@ -1871,11 +2399,15 @@ static void __mptcp_clear_xmit(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_data_frag *dtmp, *dfrag;
+ struct sk_buff *skb;
- sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
-
+ WRITE_ONCE(msk->first_pending, NULL);
list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
dfrag_clear(sk, dfrag);
+ while ((skb = __skb_dequeue(&msk->skb_tx_cache)) != NULL) {
+ sk->sk_forward_alloc += skb->truesize;
+ kfree_skb(skb);
+ }
}
static void mptcp_cancel_work(struct sock *sk)
@@ -1883,7 +2415,7 @@ static void mptcp_cancel_work(struct sock *sk)
struct mptcp_sock *msk = mptcp_sk(sk);
if (cancel_work_sync(&msk->work))
- sock_put(sk);
+ __sock_put(sk);
}
void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
@@ -1941,42 +2473,67 @@ static int mptcp_close_state(struct sock *sk)
return next & TCP_ACTION_FIN;
}
-static void mptcp_close(struct sock *sk, long timeout)
+static void __mptcp_check_send_data_fin(struct sock *sk)
{
- struct mptcp_subflow_context *subflow, *tmp;
+ struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
- LIST_HEAD(conn_list);
- lock_sock(sk);
- sk->sk_shutdown = SHUTDOWN_MASK;
+ pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
+ msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
+ msk->snd_nxt, msk->write_seq);
- if (sk->sk_state == TCP_LISTEN) {
- inet_sk_state_store(sk, TCP_CLOSE);
- goto cleanup;
- } else if (sk->sk_state == TCP_CLOSE) {
- goto cleanup;
- }
+ /* if we still need to enqueue subflows, or we are not really
+ * shutting down, skip this
+ */
+ if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
+ mptcp_send_head(sk))
+ return;
+
+ WRITE_ONCE(msk->snd_nxt, msk->write_seq);
+ /* a fallback socket will not get data_fin/ack, so it can move to the next
+ * state now
+ */
if (__mptcp_check_fallback(msk)) {
- goto update_state;
- } else if (mptcp_close_state(sk)) {
- pr_debug("Sending DATA_FIN sk=%p", sk);
- WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
- WRITE_ONCE(msk->snd_data_fin_enable, 1);
+ if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
+ inet_sk_state_store(sk, TCP_CLOSE);
+ mptcp_close_wake_up(sk);
+ } else if (sk->sk_state == TCP_FIN_WAIT1) {
+ inet_sk_state_store(sk, TCP_FIN_WAIT2);
+ }
+ }
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
+ __mptcp_flush_join_list(msk);
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
- mptcp_subflow_shutdown(sk, tcp_sk, SHUTDOWN_MASK);
- }
+ mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
}
+}
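
The gating condition above relies on __mptcp_wr_shutdown() having bumped write_seq by one for the DATA_FIN itself, so snd_nxt + 1 == write_seq means every byte of application data is out. A tiny stand-alone check of that accounting:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t write_seq = 1000, snd_nxt = 990;

	write_seq += 1;					/* account for the DATA_FIN */
	printf("can send fin: %d\n", snd_nxt + 1 == write_seq);	/* 0: data pending */
	snd_nxt = 1000;					/* all data pushed */
	printf("can send fin: %d\n", snd_nxt + 1 == write_seq);	/* 1 */
	return 0;
}
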
- sk_stream_wait_close(sk, timeout);
+static void __mptcp_wr_shutdown(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
-update_state:
- inet_sk_state_store(sk, TCP_CLOSE);
+ pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
+ msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
+ !!mptcp_send_head(sk));
+
+ /* will be ignored by fallback sockets */
+ WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
+ WRITE_ONCE(msk->snd_data_fin_enable, 1);
+
+ __mptcp_check_send_data_fin(sk);
+}
+
+static void __mptcp_destroy_sock(struct sock *sk)
+{
+ struct mptcp_subflow_context *subflow, *tmp;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ LIST_HEAD(conn_list);
+
+ pr_debug("msk=%p", msk);
-cleanup:
/* be sure to always acquire the join list lock, to sync vs
* mptcp_finish_join().
*/
@@ -1985,20 +2542,77 @@ cleanup:
spin_unlock_bh(&msk->join_list_lock);
list_splice_init(&msk->conn_list, &conn_list);
- __mptcp_clear_xmit(sk);
-
- release_sock(sk);
+ sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
+ sk_stop_timer(sk, &sk->sk_timer);
+ msk->pm.status = 0;
list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- __mptcp_close_ssk(sk, ssk, subflow, timeout);
+ __mptcp_close_ssk(sk, ssk, subflow);
}
- mptcp_cancel_work(sk);
+ sk->sk_prot->destroy(sk);
+
+ WARN_ON_ONCE(msk->wmem_reserved);
+ WARN_ON_ONCE(msk->rmem_released);
+ sk_stream_kill_queues(sk);
+ xfrm_sk_free_policy(sk);
+ sk_refcnt_debug_release(sk);
+ sock_put(sk);
+}
+
+static void mptcp_close(struct sock *sk, long timeout)
+{
+ struct mptcp_subflow_context *subflow;
+ bool do_cancel_work = false;
- __skb_queue_purge(&sk->sk_receive_queue);
+ lock_sock(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
- sk_common_release(sk);
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
+ inet_sk_state_store(sk, TCP_CLOSE);
+ goto cleanup;
+ }
+
+ if (mptcp_close_state(sk))
+ __mptcp_wr_shutdown(sk);
+
+ sk_stream_wait_close(sk, timeout);
+
+cleanup:
+ /* orphan all the subflows */
+ inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
+ list_for_each_entry(subflow, &mptcp_sk(sk)->conn_list, node) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow, dispose_socket;
+ struct socket *sock;
+
+ slow = lock_sock_fast(ssk);
+ sock = ssk->sk_socket;
+ dispose_socket = sock && sock != sk->sk_socket;
+ sock_orphan(ssk);
+ unlock_sock_fast(ssk, slow);
+
+ /* for the outgoing subflows we additionally need to free
+ * the associated socket
+ */
+ if (dispose_socket)
+ iput(SOCK_INODE(sock));
+ }
+ sock_orphan(sk);
+
+ sock_hold(sk);
+ pr_debug("msk=%p state=%d", sk, sk->sk_state);
+ if (sk->sk_state == TCP_CLOSE) {
+ __mptcp_destroy_sock(sk);
+ do_cancel_work = true;
+ } else {
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + TCP_TIMEWAIT_LEN);
+ }
+ release_sock(sk);
+ if (do_cancel_work)
+ mptcp_cancel_work(sk);
+ sock_put(sk);
}
static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
@@ -2069,13 +2683,17 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
WRITE_ONCE(msk->fully_established, false);
msk->write_seq = subflow_req->idsn + 1;
- atomic64_set(&msk->snd_una, msk->write_seq);
+ msk->snd_nxt = msk->write_seq;
+ msk->snd_una = msk->write_seq;
+ msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
+
if (mp_opt->mp_capable) {
msk->can_ack = true;
msk->remote_key = mp_opt->sndr_key;
mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
ack_seq++;
WRITE_ONCE(msk->ack_seq, ack_seq);
+ WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
}
sock_reset_flag(nsk, SOCK_RCU_FREE);
@@ -2102,6 +2720,8 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
TCP_INIT_CWND * tp->advmss);
if (msk->rcvq_space.space == 0)
msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
+
+ WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
}
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
@@ -2126,7 +2746,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
if (sk_is_mptcp(newsk)) {
struct mptcp_subflow_context *subflow;
struct sock *new_mptcp_sock;
- struct sock *ssk = newsk;
subflow = mptcp_subflow_ctx(newsk);
new_mptcp_sock = subflow->conn;
@@ -2141,21 +2760,8 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
/* acquire the 2nd reference for the owning socket */
sock_hold(new_mptcp_sock);
-
- local_bh_disable();
- bh_lock_sock(new_mptcp_sock);
- msk = mptcp_sk(new_mptcp_sock);
- msk->first = newsk;
-
newsk = new_mptcp_sock;
- mptcp_copy_inaddrs(newsk, ssk);
- list_add(&subflow->node, &msk->conn_list);
-
- mptcp_rcv_space_init(msk, ssk);
- bh_unlock_sock(new_mptcp_sock);
-
- __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
- local_bh_enable();
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
} else {
MPTCP_INC_STATS(sock_net(sk),
MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
@@ -2166,6 +2772,13 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
void mptcp_destroy_common(struct mptcp_sock *msk)
{
+ struct sock *sk = (struct sock *)msk;
+
+ __mptcp_clear_xmit(sk);
+
+ /* move to sk_receive_queue, sk_stream_kill_queues will purge it */
+ skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
+
skb_rbtree_purge(&msk->out_of_order_queue);
mptcp_token_destroy(msk);
mptcp_pm_free_anno_list(msk);
@@ -2175,9 +2788,6 @@ static void mptcp_destroy(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- if (msk->cached_ext)
- __skb_ext_put(msk->cached_ext);
-
mptcp_destroy_common(msk);
sk_sockets_allocated_dec(sk);
}
@@ -2292,16 +2902,58 @@ static int mptcp_getsockopt(struct sock *sk, int level, int optname,
return -EOPNOTSUPP;
}
-#define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \
- TCPF_WRITE_TIMER_DEFERRED)
+void __mptcp_data_acked(struct sock *sk)
+{
+ if (!sock_owned_by_user(sk))
+ __mptcp_clean_una(sk);
+ else
+ set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags);
-/* this is very alike tcp_release_cb() but we must handle differently a
- * different set of events
- */
+ if (mptcp_pending_data_fin_ack(sk))
+ mptcp_schedule_work(sk);
+}
+
+void __mptcp_wnd_updated(struct sock *sk, struct sock *ssk)
+{
+ if (!mptcp_send_head(sk))
+ return;
+
+ if (!sock_owned_by_user(sk))
+ __mptcp_subflow_push_pending(sk, ssk);
+ else
+ set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+}
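
Both helpers above follow the same shape: act immediately when the msk socket is not owned by a user context, otherwise record a flag that mptcp_release_cb() replays once ownership is dropped. A minimal userspace model of that defer-to-owner pattern (pthreads and C11 atomics; none of these names are kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CLEAN_UNA 0x1			/* plays MPTCP_CLEAN_UNA */

static pthread_mutex_t owner_lock = PTHREAD_MUTEX_INITIALIZER;
static bool owned_by_user;		/* plays sock_owned_by_user() */
static atomic_uint deferred;

static void clean_una(void) { puts("cleaning acked data"); }

/* may run from "softirq" context: do the work or defer it */
static void data_acked(void)
{
	pthread_mutex_lock(&owner_lock);
	if (!owned_by_user)
		clean_una();
	else
		atomic_fetch_or(&deferred, CLEAN_UNA);
	pthread_mutex_unlock(&owner_lock);
}

/* the owner replays deferred work when releasing the socket */
static void release_cb(void)
{
	if (atomic_exchange(&deferred, 0) & CLEAN_UNA)
		clean_una();
}

int main(void)
{
	owned_by_user = true;
	data_acked();		/* deferred, owner holds the socket */
	release_cb();		/* replayed here */
	return 0;
}
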
+
+#define MPTCP_DEFERRED_ALL (TCPF_WRITE_TIMER_DEFERRED)
+
+/* processes deferred events and flushes wmem */
static void mptcp_release_cb(struct sock *sk)
{
unsigned long flags, nflags;
+ /* push_pending may touch wmem_reserved, do it before the later
+ * cleanup
+ */
+ if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
+ __mptcp_clean_una(sk);
+ if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) {
+ /* mptcp_push_pending() acquires the subflow socket lock
+ *
+ * 1) can't be invoked in atomic scope
+ * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
+ * datapath acquires the msk socket spinlock while holding
+ * the subflow socket lock
+ */
+
+ spin_unlock_bh(&sk->sk_lock.slock);
+ mptcp_push_pending(sk, 0);
+ spin_lock_bh(&sk->sk_lock.slock);
+ }
+
+ /* clear any wmem reservation and errors */
+ __mptcp_update_wmem(sk);
+ __mptcp_update_rmem(sk);
+
do {
flags = sk->sk_tsq_flags;
if (!(flags & MPTCP_DEFERRED_ALL))
@@ -2311,15 +2963,6 @@ static void mptcp_release_cb(struct sock *sk)
sock_release_ownership(sk);
- if (flags & TCPF_DELACK_TIMER_DEFERRED) {
- struct mptcp_sock *msk = mptcp_sk(sk);
- struct sock *ssk;
-
- ssk = mptcp_subflow_recv_lookup(msk);
- if (!ssk || !schedule_work(&msk->work))
- __sock_put(sk);
- }
-
if (flags & TCPF_WRITE_TIMER_DEFERRED) {
mptcp_retransmit_handler(sk);
__sock_put(sk);
@@ -2377,9 +3020,11 @@ void mptcp_finish_connect(struct sock *ssk)
WRITE_ONCE(msk->remote_key, subflow->remote_key);
WRITE_ONCE(msk->local_key, subflow->local_key);
WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
+ WRITE_ONCE(msk->snd_nxt, msk->write_seq);
WRITE_ONCE(msk->ack_seq, ack_seq);
+ WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
WRITE_ONCE(msk->can_ack, 1);
- atomic64_set(&msk->snd_una, msk->write_seq);
+ WRITE_ONCE(msk->snd_una, msk->write_seq);
mptcp_pm_new_connection(msk, 0);
@@ -2395,9 +3040,9 @@ static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
write_unlock_bh(&sk->sk_callback_lock);
}
-bool mptcp_finish_join(struct sock *sk)
+bool mptcp_finish_join(struct sock *ssk)
{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
struct sock *parent = (void *)msk;
struct socket *parent_sock;
@@ -2418,12 +3063,14 @@ bool mptcp_finish_join(struct sock *sk)
/* active connections are already on conn_list, and we can't acquire
* msk lock here.
* use the join list lock as synchronization point and double-check
- * msk status to avoid racing with mptcp_close()
+ * msk status to avoid racing with __mptcp_destroy_sock()
*/
spin_lock_bh(&msk->join_list_lock);
ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
- if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
+ if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) {
list_add_tail(&subflow->node, &msk->join_list);
+ sock_hold(ssk);
+ }
spin_unlock_bh(&msk->join_list_lock);
if (!ret)
return false;
@@ -2432,19 +3079,12 @@ bool mptcp_finish_join(struct sock *sk)
* at close time
*/
parent_sock = READ_ONCE(parent->sk_socket);
- if (parent_sock && !sk->sk_socket)
- mptcp_sock_graft(sk, parent_sock);
+ if (parent_sock && !ssk->sk_socket)
+ mptcp_sock_graft(ssk, parent_sock);
subflow->map_seq = READ_ONCE(msk->ack_seq);
return true;
}
-static bool mptcp_memory_free(const struct sock *sk, int wake)
-{
- struct mptcp_sock *msk = mptcp_sk(sk);
-
- return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
-}
-
static struct proto mptcp_prot = {
.name = "MPTCP",
.owner = THIS_MODULE,
@@ -2465,7 +3105,6 @@ static struct proto mptcp_prot = {
.sockets_allocated = &mptcp_sockets_allocated,
.memory_allocated = &tcp_memory_allocated,
.memory_pressure = &tcp_memory_pressure,
- .stream_memory_free = mptcp_memory_free,
.sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
.sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
.sysctl_mem = sysctl_tcp_mem,
@@ -2610,6 +3249,23 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
struct mptcp_sock *msk = mptcp_sk(newsock->sk);
struct mptcp_subflow_context *subflow;
+ struct sock *newsk = newsock->sk;
+ bool slowpath;
+
+ slowpath = lock_sock_fast(newsk);
+
+ /* PM/worker can now acquire the first subflow socket
+ * lock without racing with the listener queue cleanup,
+ * so we can notify it, if needed.
+ */
+ subflow = mptcp_subflow_ctx(msk->first);
+ list_add(&subflow->node, &msk->conn_list);
+ sock_hold(msk->first);
+ if (mptcp_is_fully_established(newsk))
+ mptcp_pm_fully_established(msk);
+
+ mptcp_copy_inaddrs(newsk, msk->first);
+ mptcp_rcv_space_init(msk, msk->first);
/* set ssk->sk_socket of accept()ed flows to mptcp socket.
* This is needed so NOSPACE flag can be set from tcp stack.
@@ -2621,6 +3277,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
if (!ssk->sk_socket)
mptcp_sock_graft(ssk, newsock);
}
+ unlock_sock_fast(newsk, slowpath);
}
if (inet_csk_listen_poll(ssock->sk))
@@ -2639,6 +3296,24 @@ static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
0;
}
+static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
+{
+ struct sock *sk = (struct sock *)msk;
+
+ if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
+ return 0;
+
+ if (sk_stream_is_writeable(sk))
+ return EPOLLOUT | EPOLLWRNORM;
+
+ set_bit(MPTCP_NOSPACE, &msk->flags);
+ smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
+ if (sk_stream_is_writeable(sk))
+ return EPOLLOUT | EPOLLWRNORM;
+
+ return 0;
+}
+
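mptcp_check_writeable() uses the classic set-flag-then-recheck pattern to avoid a lost wakeup: if the stream became writeable between the first test and setting MPTCP_NOSPACE, the second test catches it. A self-contained userspace model of the pattern (C11 atomics stand in for the kernel bitops and barrier):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint flags;	/* bit 0 plays the role of MPTCP_NOSPACE */
static atomic_int free_space;

static bool stream_writeable(void) { return atomic_load(&free_space) > 0; }

static bool poll_writeable(void)
{
	if (stream_writeable())
		return true;
	atomic_fetch_or(&flags, 1);	/* request a wakeup... */
	/* the seq_cst RMW orders the recheck after setting the flag,
	 * like set_bit() + smp_mb__after_atomic() in the patch */
	return stream_writeable();	/* ...then look again */
}

int main(void)
{
	atomic_store(&free_space, 0);
	printf("writeable: %d\n", poll_writeable());	/* 0 */
	atomic_store(&free_space, 4096);
	printf("writeable: %d\n", poll_writeable());	/* 1 */
	return 0;
}
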
static __poll_t mptcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait)
{
@@ -2657,8 +3332,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
mask |= mptcp_check_readable(msk);
- if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
- mask |= EPOLLOUT | EPOLLWRNORM;
+ mask |= mptcp_check_writeable(msk);
}
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
@@ -2669,12 +3343,12 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
static int mptcp_shutdown(struct socket *sock, int how)
{
struct mptcp_sock *msk = mptcp_sk(sock->sk);
- struct mptcp_subflow_context *subflow;
+ struct sock *sk = sock->sk;
int ret = 0;
pr_debug("sk=%p, how=%d", msk, how);
- lock_sock(sock->sk);
+ lock_sock(sk);
how++;
if ((how & ~SHUTDOWN_MASK) || !how) {
@@ -2683,45 +3357,22 @@ static int mptcp_shutdown(struct socket *sock, int how)
}
if (sock->state == SS_CONNECTING) {
- if ((1 << sock->sk->sk_state) &
+ if ((1 << sk->sk_state) &
(TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
sock->state = SS_DISCONNECTING;
else
sock->state = SS_CONNECTED;
}
- /* If we've already sent a FIN, or it's a closed state, skip this. */
- if (__mptcp_check_fallback(msk)) {
- if (how == SHUT_WR || how == SHUT_RDWR)
- inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);
-
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
-
- mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
- }
- } else if ((how & SEND_SHUTDOWN) &&
- ((1 << sock->sk->sk_state) &
- (TCPF_ESTABLISHED | TCPF_SYN_SENT |
- TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) &&
- mptcp_close_state(sock->sk)) {
- __mptcp_flush_join_list(msk);
-
- WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
- WRITE_ONCE(msk->snd_data_fin_enable, 1);
-
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
-
- mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
- }
- }
+ sk->sk_shutdown |= how;
+ if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
+ __mptcp_wr_shutdown(sk);
/* Wake up anyone sleeping in poll. */
- sock->sk->sk_state_change(sock->sk);
+ sk->sk_state_change(sk);
out_unlock:
- release_sock(sock->sk);
+ release_sock(sk);
return ret;
}
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 13ab89dc1914..7cf9d110b85f 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -23,6 +23,7 @@
#define OPTION_MPTCP_ADD_ADDR BIT(6)
#define OPTION_MPTCP_ADD_ADDR6 BIT(7)
#define OPTION_MPTCP_RM_ADDR BIT(8)
+#define OPTION_MPTCP_FASTCLOSE BIT(9)
/* MPTCP option subtypes */
#define MPTCPOPT_MP_CAPABLE 0
@@ -49,15 +50,16 @@
#define TCPOLEN_MPTCP_DSS_MAP64 14
#define TCPOLEN_MPTCP_DSS_CHECKSUM 2
#define TCPOLEN_MPTCP_ADD_ADDR 16
-#define TCPOLEN_MPTCP_ADD_ADDR_PORT 18
+#define TCPOLEN_MPTCP_ADD_ADDR_PORT 20
#define TCPOLEN_MPTCP_ADD_ADDR_BASE 8
-#define TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT 10
+#define TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT 12
#define TCPOLEN_MPTCP_ADD_ADDR6 28
-#define TCPOLEN_MPTCP_ADD_ADDR6_PORT 30
+#define TCPOLEN_MPTCP_ADD_ADDR6_PORT 32
#define TCPOLEN_MPTCP_ADD_ADDR6_BASE 20
-#define TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT 22
-#define TCPOLEN_MPTCP_PORT_LEN 2
+#define TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT 24
+#define TCPOLEN_MPTCP_PORT_LEN 4
#define TCPOLEN_MPTCP_RM_ADDR_BASE 4
+#define TCPOLEN_MPTCP_FASTCLOSE 12
/* MPTCP MP_JOIN flags */
#define MPTCPOPT_BACKUP BIT(0)
@@ -86,11 +88,20 @@
/* MPTCP socket flags */
#define MPTCP_DATA_READY 0
-#define MPTCP_SEND_SPACE 1
+#define MPTCP_NOSPACE 1
#define MPTCP_WORK_RTX 2
#define MPTCP_WORK_EOF 3
#define MPTCP_FALLBACK_DONE 4
#define MPTCP_WORK_CLOSE_SUBFLOW 5
+#define MPTCP_PUSH_PENDING 6
+#define MPTCP_CLEAN_UNA 7
+
+static inline bool before64(__u64 seq1, __u64 seq2)
+{
+ return (__s64)(seq1 - seq2) < 0;
+}
+
+#define after64(seq2, seq1) before64(seq1, seq2)
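
The before64()/after64() helpers moved into this header compare positions in a 64-bit sequence space; the signed subtraction gives the right answer even across a wraparound. A minimal stand-alone sketch (plain userspace C, not kernel code):

#include <stdio.h>
#include <stdint.h>

static int before64(uint64_t seq1, uint64_t seq2)
{
	return (int64_t)(seq1 - seq2) < 0;
}

#define after64(seq2, seq1) before64(seq1, seq2)

int main(void)
{
	printf("%d\n", before64(1, 2));			/* 1 */
	printf("%d\n", before64(UINT64_MAX, 0));	/* 1: 0 is "after" the wrap */
	printf("%d\n", after64(5, 3));			/* 1 */
	return 0;
}
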
struct mptcp_options_received {
u64 sndr_key;
@@ -101,6 +112,7 @@ struct mptcp_options_received {
u16 data_len;
u16 mp_capable : 1,
mp_join : 1,
+ fastclose : 1,
dss : 1,
add_addr : 1,
rm_addr : 1,
@@ -110,7 +122,7 @@ struct mptcp_options_received {
u32 token;
u32 nonce;
u64 thmac;
- u8 hmac[20];
+ u8 hmac[MPTCPOPT_HMAC_LEN];
u8 join_id;
u8 use_map:1,
dsn64:1,
@@ -153,11 +165,21 @@ struct mptcp_addr_info {
enum mptcp_pm_status {
MPTCP_PM_ADD_ADDR_RECEIVED,
+ MPTCP_PM_ADD_ADDR_SEND_ACK,
MPTCP_PM_RM_ADDR_RECEIVED,
MPTCP_PM_ESTABLISHED,
+ MPTCP_PM_ALREADY_ESTABLISHED, /* persistent status, set after ESTABLISHED event */
MPTCP_PM_SUBFLOW_ESTABLISHED,
};
+enum mptcp_addr_signal_status {
+ MPTCP_ADD_ADDR_SIGNAL,
+ MPTCP_ADD_ADDR_ECHO,
+ MPTCP_ADD_ADDR_IPV6,
+ MPTCP_ADD_ADDR_PORT,
+ MPTCP_RM_ADDR_SIGNAL,
+};
+
struct mptcp_pm_data {
struct mptcp_addr_info local;
struct mptcp_addr_info remote;
@@ -165,13 +187,11 @@ struct mptcp_pm_data {
spinlock_t lock; /* protects the whole PM data */
- bool add_addr_signal;
- bool rm_addr_signal;
+ u8 addr_signal;
bool server_side;
bool work_pending;
bool accept_addr;
bool accept_subflow;
- bool add_addr_echo;
u8 add_addr_signaled;
u8 add_addr_accepted;
u8 local_addr_used;
@@ -187,9 +207,10 @@ struct mptcp_pm_data {
struct mptcp_data_frag {
struct list_head list;
u64 data_seq;
- int data_len;
- int offset;
- int overhead;
+ u16 data_len;
+ u16 offset;
+ u16 overhead;
+ u16 already_sent;
struct page *page;
};
@@ -200,27 +221,40 @@ struct mptcp_sock {
u64 local_key;
u64 remote_key;
u64 write_seq;
+ u64 snd_nxt;
u64 ack_seq;
+ u64 rcv_wnd_sent;
u64 rcv_data_fin_seq;
+ int wmem_reserved;
struct sock *last_snd;
int snd_burst;
- atomic64_t snd_una;
+ int old_wspace;
+ u64 snd_una;
+ u64 wnd_end;
unsigned long timer_ival;
u32 token;
+ int rmem_pending;
+ int rmem_released;
unsigned long flags;
bool can_ack;
bool fully_established;
bool rcv_data_fin;
bool snd_data_fin_enable;
+ bool rcv_fastclose;
bool use_64bit_ack; /* Set when we received a 64-bit DSN */
spinlock_t join_list_lock;
+ struct sock *ack_hint;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
+ struct sk_buff_head receive_queue;
+ struct sk_buff_head skb_tx_cache; /* this is wmem accounted */
+ int tx_pending_data;
+ int size_goal_cache;
struct list_head conn_list;
struct list_head rtx_queue;
+ struct mptcp_data_frag *first_pending;
struct list_head join_list;
- struct skb_ext *cached_ext; /* for the next sendmsg */
struct socket *subflow; /* outgoing connect/listener/!mp_capable */
struct sock *first;
struct mptcp_pm_data pm;
@@ -232,6 +266,22 @@ struct mptcp_sock {
} rcvq_space;
};
+#define mptcp_lock_sock(___sk, cb) do { \
+ struct sock *__sk = (___sk); /* silence macro reuse warning */ \
+ might_sleep(); \
+ spin_lock_bh(&__sk->sk_lock.slock); \
+ if (__sk->sk_lock.owned) \
+ __lock_sock(__sk); \
+ cb; \
+ __sk->sk_lock.owned = 1; \
+ spin_unlock(&__sk->sk_lock.slock); \
+ mutex_acquire(&__sk->sk_lock.dep_map, 0, 0, _RET_IP_); \
+ local_bh_enable(); \
+} while (0)
+
+#define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
+#define mptcp_data_unlock(sk) spin_unlock_bh(&(sk)->sk_lock.slock)
+
#define mptcp_for_each_subflow(__msk, __subflow) \
list_for_each_entry(__subflow, &((__msk)->conn_list), node)
@@ -240,11 +290,46 @@ static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
return (struct mptcp_sock *)sk;
}
+static inline int __mptcp_space(const struct sock *sk)
+{
+ return tcp_space(sk) + READ_ONCE(mptcp_sk(sk)->rmem_pending);
+}
+
+static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
+{
+ const struct mptcp_sock *msk = mptcp_sk(sk);
+
+ return READ_ONCE(msk->first_pending);
+}
+
+static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct mptcp_data_frag *cur;
+
+ cur = msk->first_pending;
+ return list_is_last(&cur->list, &msk->rtx_queue) ? NULL :
+ list_next_entry(cur, list);
+}
+
+static inline struct mptcp_data_frag *mptcp_pending_tail(const struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ if (!msk->first_pending)
+ return NULL;
+
+ if (WARN_ON_ONCE(list_empty(&msk->rtx_queue)))
+ return NULL;
+
+ return list_last_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
+}
+
static inline struct mptcp_data_frag *mptcp_rtx_tail(const struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- if (list_empty(&msk->rtx_queue))
+ if (!before64(msk->snd_nxt, READ_ONCE(msk->snd_una)))
return NULL;
return list_last_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
@@ -312,7 +397,8 @@ struct mptcp_subflow_context {
mpc_map : 1,
backup : 1,
rx_eof : 1,
- can_ack : 1; /* only after processing the remote a key */
+ can_ack : 1, /* only after processing the remote key */
+ disposable : 1; /* ctx can be freed at ulp release time */
enum mptcp_data_avail data_avail;
u32 remote_nonce;
u64 thmac;
@@ -361,15 +447,24 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
}
+static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow)
+{
+ sock_hold(mptcp_subflow_tcp_sock(subflow));
+ spin_lock_bh(&msk->join_list_lock);
+ list_add_tail(&subflow->node, &msk->join_list);
+ spin_unlock_bh(&msk->join_list_lock);
+}
+
int mptcp_is_enabled(struct net *net);
+unsigned int mptcp_get_add_addr_timeout(struct net *net);
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
struct mptcp_options_received *mp_opt);
bool mptcp_subflow_data_available(struct sock *sk);
void __init mptcp_subflow_init(void);
void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
- struct mptcp_subflow_context *subflow,
- long timeout);
+ struct mptcp_subflow_context *subflow);
void mptcp_subflow_reset(struct sock *ssk);
/* called with sk socket lock held */
@@ -407,9 +502,18 @@ static inline bool mptcp_is_fully_established(struct sock *sk)
void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk);
void mptcp_data_ready(struct sock *sk, struct sock *ssk);
bool mptcp_finish_join(struct sock *sk);
-void mptcp_data_acked(struct sock *sk);
+bool mptcp_schedule_work(struct sock *sk);
+void __mptcp_wnd_updated(struct sock *sk, struct sock *ssk);
+void __mptcp_data_acked(struct sock *sk);
void mptcp_subflow_eof(struct sock *sk);
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit);
+void __mptcp_flush_join_list(struct mptcp_sock *msk);
+static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->snd_data_fin_enable) &&
+ READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
+}
+
void mptcp_destroy_common(struct mptcp_sock *msk);
void __init mptcp_token_init(void);
@@ -444,6 +548,7 @@ void mptcp_pm_subflow_established(struct mptcp_sock *msk,
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id);
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr);
+void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id);
void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
struct mptcp_pm_add_entry *
@@ -452,30 +557,51 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
- bool echo);
+ bool echo, bool port);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id);
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id);
static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
{
- return READ_ONCE(msk->pm.add_addr_signal);
+ return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_SIGNAL);
+}
+
+static inline bool mptcp_pm_should_add_signal_echo(struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_ECHO);
+}
+
+static inline bool mptcp_pm_should_add_signal_ipv6(struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_IPV6);
+}
+
+static inline bool mptcp_pm_should_add_signal_port(struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_PORT);
}
static inline bool mptcp_pm_should_rm_signal(struct mptcp_sock *msk)
{
- return READ_ONCE(msk->pm.rm_addr_signal);
+ return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_RM_ADDR_SIGNAL);
}
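
The addr_signal conversion above replaces per-event bools with a single bitmask, so each should_*_signal() helper becomes one bit test. A tiny stand-alone illustration of the same packing (userspace C; the enum names are shortened here):

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

enum { ADD_ADDR_SIGNAL, ADD_ADDR_ECHO, ADD_ADDR_IPV6, ADD_ADDR_PORT,
       RM_ADDR_SIGNAL };

int main(void)
{
	uint8_t addr_signal = 0;	/* plays msk->pm.addr_signal */

	addr_signal |= BIT(ADD_ADDR_SIGNAL) | BIT(ADD_ADDR_PORT);
	printf("signal pending: %d\n", !!(addr_signal & BIT(ADD_ADDR_SIGNAL)));
	printf("echo pending:   %d\n", !!(addr_signal & BIT(ADD_ADDR_ECHO)));
	addr_signal &= ~BIT(ADD_ADDR_SIGNAL);	/* event consumed */
	printf("after clear:    %#x\n", addr_signal);
	return 0;
}
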
-static inline unsigned int mptcp_add_addr_len(int family, bool echo)
+static inline unsigned int mptcp_add_addr_len(int family, bool echo, bool port)
{
- if (family == AF_INET)
- return echo ? TCPOLEN_MPTCP_ADD_ADDR_BASE
- : TCPOLEN_MPTCP_ADD_ADDR;
- return echo ? TCPOLEN_MPTCP_ADD_ADDR6_BASE : TCPOLEN_MPTCP_ADD_ADDR6;
+ u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE;
+
+ if (family == AF_INET6)
+ len = TCPOLEN_MPTCP_ADD_ADDR6_BASE;
+ if (!echo)
+ len += MPTCPOPT_THMAC_LEN;
+ if (port)
+ len += TCPOLEN_MPTCP_PORT_LEN;
+
+ return len;
}
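
The rewritten mptcp_add_addr_len() builds the ADD_ADDR option length from its parts instead of selecting among fixed constants. A stand-alone sketch of the same math; MPTCPOPT_THMAC_LEN is assumed to be 8 here, and the printed totals match the revised TCPOLEN_MPTCP_ADD_ADDR_PORT / TCPOLEN_MPTCP_ADD_ADDR6_PORT values earlier in this file:

#include <stdio.h>

#define TCPOLEN_MPTCP_ADD_ADDR_BASE	8
#define TCPOLEN_MPTCP_ADD_ADDR6_BASE	20
#define TCPOLEN_MPTCP_PORT_LEN		4	/* 2 bytes port + 2 padding */
#define MPTCPOPT_THMAC_LEN		8	/* assumed truncated-HMAC size */

static unsigned int add_addr_len(int v6, int echo, int port)
{
	unsigned int len = v6 ? TCPOLEN_MPTCP_ADD_ADDR6_BASE
			      : TCPOLEN_MPTCP_ADD_ADDR_BASE;

	if (!echo)
		len += MPTCPOPT_THMAC_LEN;	/* signals carry the HMAC */
	if (port)
		len += TCPOLEN_MPTCP_PORT_LEN;
	return len;
}

int main(void)
{
	printf("v4 signal+port: %u\n", add_addr_len(0, 0, 1));	/* 20 */
	printf("v6 signal+port: %u\n", add_addr_len(1, 0, 1));	/* 32 */
	return 0;
}
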
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
- struct mptcp_addr_info *saddr, bool *echo);
+ struct mptcp_addr_info *saddr, bool *echo, bool *port);
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
u8 *rm_id);
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
@@ -485,6 +611,7 @@ void mptcp_pm_nl_data_init(struct mptcp_sock *msk);
void mptcp_pm_nl_fully_established(struct mptcp_sock *msk);
void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk);
void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk);
+void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk);
void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
@@ -494,13 +621,6 @@ static inline struct mptcp_ext *mptcp_get_ext(struct sk_buff *skb)
return (struct mptcp_ext *)skb_ext_find(skb, SKB_EXT_MPTCP);
}
-static inline bool before64(__u64 seq1, __u64 seq2)
-{
- return (__s64)(seq1 - seq2) < 0;
-}
-
-#define after64(seq2, seq1) before64(seq1, seq2)
-
void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops);
static inline bool __mptcp_check_fallback(const struct mptcp_sock *msk)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index c21a852a6ffa..278cbe3e539e 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -112,9 +112,14 @@ static int __subflow_init_req(struct request_sock *req, const struct sock *sk_li
return 0;
}
-static void subflow_init_req(struct request_sock *req,
- const struct sock *sk_listener,
- struct sk_buff *skb)
+/* Initialize the MPTCP request socket.
+ *
+ * Returns an error code if a JOIN has failed and a TCP reset
+ * should be sent.
+ */
+static int subflow_init_req(struct request_sock *req,
+ const struct sock *sk_listener,
+ struct sk_buff *skb)
{
struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
@@ -125,7 +130,7 @@ static void subflow_init_req(struct request_sock *req,
ret = __subflow_init_req(req, sk_listener);
if (ret)
- return;
+ return 0;
mptcp_get_options(skb, &mp_opt);
@@ -133,7 +138,7 @@ static void subflow_init_req(struct request_sock *req,
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
if (mp_opt.mp_join)
- return;
+ return 0;
} else if (mp_opt.mp_join) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
}
@@ -157,7 +162,7 @@ again:
} else {
subflow_req->mp_capable = 1;
}
- return;
+ return 0;
}
err = mptcp_token_new_request(req);
@@ -175,7 +180,11 @@ again:
subflow_req->remote_nonce = mp_opt.nonce;
subflow_req->msk = subflow_token_join_request(req, skb);
- if (unlikely(req->syncookie) && subflow_req->msk) {
+ /* Can't fall back to TCP in this case. */
+ if (!subflow_req->msk)
+ return -EPERM;
+
+ if (unlikely(req->syncookie)) {
if (mptcp_can_accept_new_subflow(subflow_req->msk))
subflow_init_req_cookie_join_save(subflow_req, skb);
}
@@ -183,6 +192,8 @@ again:
pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
subflow_req->remote_nonce, subflow_req->msk);
}
+
+ return 0;
}
int mptcp_subflow_init_cookie_req(struct request_sock *req,
@@ -228,27 +239,53 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
-static void subflow_v4_init_req(struct request_sock *req,
- const struct sock *sk_listener,
- struct sk_buff *skb)
+static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
+ struct sk_buff *skb,
+ struct flowi *fl,
+ struct request_sock *req)
{
+ struct dst_entry *dst;
+ int err;
+
tcp_rsk(req)->is_mptcp = 1;
- tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);
+ dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
+ if (!dst)
+ return NULL;
+
+ err = subflow_init_req(req, sk, skb);
+ if (err == 0)
+ return dst;
- subflow_init_req(req, sk_listener, skb);
+ dst_release(dst);
+ if (!req->syncookie)
+ tcp_request_sock_ops.send_reset(sk, skb);
+ return NULL;
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
-static void subflow_v6_init_req(struct request_sock *req,
- const struct sock *sk_listener,
- struct sk_buff *skb)
+static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
+ struct sk_buff *skb,
+ struct flowi *fl,
+ struct request_sock *req)
{
+ struct dst_entry *dst;
+ int err;
+
tcp_rsk(req)->is_mptcp = 1;
- tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);
+ dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
+ if (!dst)
+ return NULL;
- subflow_init_req(req, sk_listener, skb);
+ err = subflow_init_req(req, sk, skb);
+ if (err == 0)
+ return dst;
+
+ dst_release(dst);
+ if (!req->syncookie)
+ tcp6_request_sock_ops.send_reset(sk, skb);
+ return NULL;
}
#endif
@@ -276,12 +313,17 @@ void mptcp_subflow_reset(struct sock *ssk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = subflow->conn;
+ /* must hold a reference: tcp_done() could drop the last one on the parent */
+ sock_hold(sk);
+
tcp_set_state(ssk, TCP_CLOSE);
tcp_send_active_reset(ssk, GFP_ATOMIC);
tcp_done(ssk);
if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
schedule_work(&mptcp_sk(sk)->work))
- sock_hold(sk);
+ return; /* worker will put sk for us */
+
+ sock_put(sk);
}
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
@@ -577,6 +619,11 @@ create_child:
*/
inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);
+ /* record the newly created socket as the first msk
+ * subflow, but don't link it yet into conn_list
+ */
+ WRITE_ONCE(mptcp_sk(new_msk)->first, child);
+
/* new mpc subflow takes ownership of the newly
* created mptcp socket
*/
@@ -845,8 +892,6 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
sk_eat_skb(ssk, skb);
if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
subflow->map_valid = 0;
- if (incr)
- tcp_cleanup_rbuf(ssk, incr);
}
static bool subflow_check_data_avail(struct sock *ssk)
@@ -968,7 +1013,7 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
const struct sock *sk = subflow->conn;
- *space = tcp_space(sk);
+ *space = __mptcp_space(sk);
*full_space = tcp_full_space(sk);
}
@@ -993,20 +1038,9 @@ static void subflow_data_ready(struct sock *sk)
mptcp_data_ready(parent, sk);
}
-static void subflow_write_space(struct sock *sk)
+static void subflow_write_space(struct sock *ssk)
{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- struct sock *parent = subflow->conn;
-
- if (!sk_stream_is_writeable(sk))
- return;
-
- if (sk_stream_is_writeable(parent)) {
- set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
- smp_mb__after_atomic();
- /* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
- sk_stream_write_space(parent);
- }
+ /* we take action in __mptcp_clean_una() */
}
static struct inet_connection_sock_af_ops *
@@ -1120,21 +1154,48 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
subflow->request_bkup = !!(loc->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
mptcp_info2sockaddr(remote, &addr);
+ mptcp_add_pending_subflow(msk, subflow);
err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
if (err && err != -EINPROGRESS)
- goto failed;
+ goto failed_unlink;
+ return err;
+
+failed_unlink:
spin_lock_bh(&msk->join_list_lock);
- list_add_tail(&subflow->node, &msk->join_list);
+ list_del(&subflow->node);
spin_unlock_bh(&msk->join_list_lock);
- return err;
-
failed:
+ subflow->disposable = 1;
sock_release(sf);
return err;
}
+static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
+{
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
+ *child_skcd = &child->sk_cgrp_data;
+
+ /* only the additional subflows created by kworkers have to be modified */
+ if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
+ cgroup_id(sock_cgroup_ptr(child_skcd))) {
+#ifdef CONFIG_MEMCG
+ struct mem_cgroup *memcg = parent->sk_memcg;
+
+ mem_cgroup_sk_free(child);
+ if (memcg && css_tryget(&memcg->css))
+ child->sk_memcg = memcg;
+#endif /* CONFIG_MEMCG */
+
+ cgroup_sk_free(child_skcd);
+ *child_skcd = *parent_skcd;
+ cgroup_sk_clone(child_skcd);
+ }
+#endif /* CONFIG_SOCK_CGROUP_DATA */
+}
+
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
struct mptcp_subflow_context *subflow;
@@ -1155,6 +1216,9 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
lock_sock(sf->sk);
+ /* the newly created socket has to be in the same cgroup as its parent */
+ mptcp_attach_cgroup(sk, sf->sk);
+
/* kernel sockets do not by default acquire net ref, but TCP timer
* needs it.
*/
@@ -1253,7 +1317,6 @@ static void subflow_state_change(struct sock *sk)
mptcp_data_ready(parent, sk);
if (__mptcp_check_fallback(mptcp_sk(parent)) &&
- !(parent->sk_shutdown & RCV_SHUTDOWN) &&
!subflow->rx_eof && subflow_is_done(sk)) {
subflow->rx_eof = 1;
mptcp_subflow_eof(parent);
@@ -1296,17 +1359,27 @@ out:
return err;
}
-static void subflow_ulp_release(struct sock *sk)
+static void subflow_ulp_release(struct sock *ssk)
{
- struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);
+ struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
+ bool release = true;
+ struct sock *sk;
if (!ctx)
return;
- if (ctx->conn)
- sock_put(ctx->conn);
+ sk = ctx->conn;
+ if (sk) {
+ /* if the msk has been orphaned and the subflow is still
+ * unaccepted, keep the ctx alive; it will be freed by
+ * __mptcp_close_ssk()
+ */
+ release = ctx->disposable || list_empty(&ctx->node);
+ sock_put(sk);
+ }
- kfree_rcu(ctx, rcu);
+ if (release)
+ kfree_rcu(ctx, rcu);
}
static void subflow_ulp_clone(const struct request_sock *req,
@@ -1391,7 +1464,7 @@ void __init mptcp_subflow_init(void)
panic("MPTCP: failed to init subflow request sock ops\n");
subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
- subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;
+ subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
subflow_specific = ipv4_specific;
subflow_specific.conn_request = subflow_v4_conn_request;
@@ -1400,7 +1473,7 @@ void __init mptcp_subflow_init(void)
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
- subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;
+ subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
subflow_v6_specific = ipv6_specific;
subflow_v6_specific.conn_request = subflow_v6_conn_request;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 52370211e46b..49fbef0d99be 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -682,6 +682,16 @@ config NFT_FIB_NETDEV
The lookup will be delegated to the IPv4 or IPv6 FIB depending
on the protocol of the packet.
+config NFT_REJECT_NETDEV
+ depends on NFT_REJECT_IPV4
+ depends on NFT_REJECT_IPV6
+ tristate "Netfilter nf_tables netdev REJECT support"
+ help
+ This option enables the REJECT support from the netdev table.
+ The return packet generation will be delegated to the IPv4
+ or IPv6 ICMP or TCP RST implementation depending on the
+ protocol of the packet.
+
endif # NF_TABLES_NETDEV
endif # NF_TABLES
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 0e0ded87e27b..33da7bf1b68e 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
obj-$(CONFIG_NFT_QUOTA) += nft_quota.o
obj-$(CONFIG_NFT_REJECT) += nft_reject.o
obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
+obj-$(CONFIG_NFT_REJECT_NETDEV) += nft_reject_netdev.o
obj-$(CONFIG_NFT_TUNNEL) += nft_tunnel.o
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
obj-$(CONFIG_NFT_LOG) += nft_log.o
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 2b19189a930f..89009c82a6b2 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1109,6 +1109,8 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
ret = -IPSET_ERR_PROTOCOL;
goto put_out;
}
+ /* Set create flags depending on the type revision */
+ set->flags |= set->type->create_flags[revision];
ret = set->type->create(net, set, tb, flags);
if (ret != 0)
@@ -1239,10 +1241,12 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
/* Modified by ip_set_destroy() only, which is serialized */
inst->is_destroyed = false;
} else {
+ u32 flags = flag_exist(nlh);
s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
&i);
if (!s) {
- ret = -ENOENT;
+ if (!(flags & IPSET_FLAG_EXIST))
+ ret = -ENOENT;
goto out;
} else if (s->ref || s->ref_netlink) {
ret = -IPSET_ERR_BUSY;
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 521e970be402..5f1208ad049e 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -37,18 +37,18 @@
*/
/* Number of elements to store in an initial array block */
-#define AHASH_INIT_SIZE 4
+#define AHASH_INIT_SIZE 2
/* Max number of elements to store in an array block */
-#define AHASH_MAX_SIZE (3 * AHASH_INIT_SIZE)
+#define AHASH_MAX_SIZE (6 * AHASH_INIT_SIZE)
/* Max number of elements in the array block when tuned */
#define AHASH_MAX_TUNED 64
+#define AHASH_MAX(h) ((h)->bucketsize)
+
/* Max number of elements can be tuned */
#ifdef IP_SET_HASH_WITH_MULTI
-#define AHASH_MAX(h) ((h)->ahash_max)
-
static u8
-tune_ahash_max(u8 curr, u32 multi)
+tune_bucketsize(u8 curr, u32 multi)
{
u32 n;
@@ -61,12 +61,10 @@ tune_ahash_max(u8 curr, u32 multi)
*/
return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
}
-
-#define TUNE_AHASH_MAX(h, multi) \
- ((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi))
+#define TUNE_BUCKETSIZE(h, multi) \
+ ((h)->bucketsize = tune_bucketsize((h)->bucketsize, multi))
#else
-#define AHASH_MAX(h) AHASH_MAX_SIZE
-#define TUNE_AHASH_MAX(h, multi)
+#define TUNE_BUCKETSIZE(h, multi)
#endif
/* A hash bucket */
@@ -321,9 +319,7 @@ struct htype {
#ifdef IP_SET_HASH_WITH_MARKMASK
u32 markmask; /* markmask value for mark mask to store */
#endif
-#ifdef IP_SET_HASH_WITH_MULTI
- u8 ahash_max; /* max elements in an array block */
-#endif
+ u8 bucketsize; /* max elements in an array block */
#ifdef IP_SET_HASH_WITH_NETMASK
u8 netmask; /* netmask value for subnets to store */
#endif
@@ -950,7 +946,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
goto set_full;
/* Create a new slot */
if (n->pos >= n->size) {
- TUNE_AHASH_MAX(h, multi);
+ TUNE_BUCKETSIZE(h, multi);
if (n->size >= AHASH_MAX(h)) {
/* Trigger rehashing */
mtype_data_next(&h->next, d);
@@ -1305,6 +1301,11 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
goto nla_put_failure;
#endif
+ if (set->flags & IPSET_CREATE_FLAG_BUCKETSIZE) {
+ if (nla_put_u8(skb, IPSET_ATTR_BUCKETSIZE, h->bucketsize) ||
+ nla_put_net32(skb, IPSET_ATTR_INITVAL, htonl(h->initval)))
+ goto nla_put_failure;
+ }
if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
@@ -1547,8 +1548,20 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
#ifdef IP_SET_HASH_WITH_MARKMASK
h->markmask = markmask;
#endif
- get_random_bytes(&h->initval, sizeof(h->initval));
-
+ if (tb[IPSET_ATTR_INITVAL])
+ h->initval = ntohl(nla_get_be32(tb[IPSET_ATTR_INITVAL]));
+ else
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->bucketsize = AHASH_MAX_SIZE;
+ if (tb[IPSET_ATTR_BUCKETSIZE]) {
+ h->bucketsize = nla_get_u8(tb[IPSET_ATTR_BUCKETSIZE]);
+ if (h->bucketsize < AHASH_INIT_SIZE)
+ h->bucketsize = AHASH_INIT_SIZE;
+ else if (h->bucketsize > AHASH_MAX_SIZE)
+ h->bucketsize = AHASH_MAX_SIZE;
+ else if (h->bucketsize % 2)
+ h->bucketsize += 1;
+ }
t->htable_bits = hbits;
t->maxelem = h->maxelem / ahash_numof_locks(hbits);
RCU_INIT_POINTER(h->table, t);
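
The IPSET_ATTR_BUCKETSIZE handling above clamps a user-supplied value into [AHASH_INIT_SIZE, AHASH_MAX_SIZE] and rounds odd sizes up to even. A stand-alone sketch of just that clamping, using the constants introduced in this patch:

#include <stdio.h>
#include <stdint.h>

#define AHASH_INIT_SIZE	2
#define AHASH_MAX_SIZE	(6 * AHASH_INIT_SIZE)	/* 12 */

static uint8_t clamp_bucketsize(uint8_t want)
{
	if (want < AHASH_INIT_SIZE)
		return AHASH_INIT_SIZE;
	if (want > AHASH_MAX_SIZE)
		return AHASH_MAX_SIZE;
	if (want % 2)
		return want + 1;	/* keep bucket sizes even */
	return want;
}

int main(void)
{
	printf("%u %u %u\n", clamp_bucketsize(0),
	       clamp_bucketsize(7), clamp_bucketsize(64));	/* 2 8 12 */
	return 0;
}
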
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 5d6d68eaf6a9..d1bef23fd4f5 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -23,7 +23,8 @@
/* 1 Counters support */
/* 2 Comments support */
/* 3 Forceadd support */
-#define IPSET_TYPE_REV_MAX 4 /* skbinfo support */
+/* 4 skbinfo support */
+#define IPSET_TYPE_REV_MAX 5 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -277,11 +278,13 @@ static struct ip_set_type hash_ip_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ip_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index eceb7bc4a93a..467c59a83c0a 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -23,7 +23,7 @@
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
-#define IPSET_TYPE_REV_MAX 0
+#define IPSET_TYPE_REV_MAX 1 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomasz Chilinski <tomasz.chilinski@chilan.com>");
@@ -268,11 +268,13 @@ static struct ip_set_type hash_ipmac_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipmac_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index aba1df617d6e..18346d18aa16 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -21,7 +21,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 1 Forceadd support */
-#define IPSET_TYPE_REV_MAX 2 /* skbinfo support */
+/* 2 skbinfo support */
+#define IPSET_TYPE_REV_MAX 3 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vytas Dauksa <vytas.dauksa@smoothwall.net>");
@@ -274,12 +275,14 @@ static struct ip_set_type hash_ipmark_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipmark_create,
.create_policy = {
[IPSET_ATTR_MARKMASK] = { .type = NLA_U32 },
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 1ff228717e29..e1ca11196515 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -25,7 +25,8 @@
/* 2 Counters support added */
/* 3 Comments support added */
/* 4 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */
+/* 5 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 6 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -341,11 +342,13 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index fa88afd812fa..ab179e064597 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -25,7 +25,8 @@
/* 2 Counters support added */
/* 3 Comments support added */
/* 4 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */
+/* 5 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 6 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -356,11 +357,13 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipportip_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index eef6ecfcb409..8f075b44cf64 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -27,7 +27,8 @@
/* 4 Counters support added */
/* 5 Comments support added */
/* 6 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */
+/* 7 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -513,11 +514,13 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
index 0b61593165ef..718814730acf 100644
--- a/net/netfilter/ipset/ip_set_hash_mac.c
+++ b/net/netfilter/ipset/ip_set_hash_mac.c
@@ -16,7 +16,7 @@
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
-#define IPSET_TYPE_REV_MAX 0
+#define IPSET_TYPE_REV_MAX 1 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -125,11 +125,13 @@ static struct ip_set_type hash_mac_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_mac_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 136cf0781d3a..c1a11f041ac6 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -24,7 +24,8 @@
/* 3 Counters support added */
/* 4 Comments support added */
/* 5 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 6 /* skbinfo mapping support added */
+/* 6 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 7 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -354,11 +355,13 @@ static struct ip_set_type hash_net_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_net_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index be5e95a0d876..ddd51c2e1cb3 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -26,7 +26,8 @@
/* 4 Comments support added */
/* 5 Forceadd support added */
/* 6 skbinfo support added */
-#define IPSET_TYPE_REV_MAX 7 /* interface wildcard support added */
+/* 7 interface wildcard support added */
+#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -225,7 +226,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
if (e.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
- nla_strlcpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
+ nla_strscpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
@@ -442,7 +443,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
ip6_netmask(&e.ip, e.cidr);
- nla_strlcpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
+ nla_strscpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
@@ -470,11 +471,13 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netiface_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index da4ef910b12d..6532f0505e66 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -22,7 +22,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 1 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 2 /* skbinfo support added */
+/* 2 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 3 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
@@ -459,11 +460,13 @@ static struct ip_set_type hash_netnet_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 34448df80fb9..ec1564a1cb5a 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -26,7 +26,8 @@
/* 4 Counters support added */
/* 5 Comments support added */
/* 6 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */
+/* 7 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -460,11 +461,13 @@ static struct ip_set_type hash_netport_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index 934c1712cba8..0e91d1e82f1c 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -23,7 +23,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 0 Comments support added */
/* 1 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 2 /* skbinfo support added */
+/* 2 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 3 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
@@ -558,11 +559,13 @@ static struct ip_set_type hash_netportnet_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index c0b8215ab3d4..54e086c65721 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -2137,7 +2137,7 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
pkts = sysctl_sync_threshold(ipvs);
else
- pkts = atomic_add_return(1, &cp->in_pkts);
+ pkts = atomic_inc_return(&cp->in_pkts);
if (ipvs->sync_state & IP_VS_STATE_MASTER)
ip_vs_sync_conn(ipvs, cp, pkts);
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 16b48064f715..9d43277b8b4f 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -615,7 +615,7 @@ static void ip_vs_sync_conn_v0(struct netns_ipvs *ipvs, struct ip_vs_conn *cp,
cp = cp->control;
if (cp) {
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
- pkts = atomic_add_return(1, &cp->in_pkts);
+ pkts = atomic_inc_return(&cp->in_pkts);
else
pkts = sysctl_sync_threshold(ipvs);
ip_vs_sync_conn(ipvs, cp, pkts);
@@ -776,7 +776,7 @@ control:
if (!cp)
return;
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
- pkts = atomic_add_return(1, &cp->in_pkts);
+ pkts = atomic_inc_return(&cp->in_pkts);
else
pkts = sysctl_sync_threshold(ipvs);
goto sloop;
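
The atomic_add_return(1, &cp->in_pkts) -> atomic_inc_return(&cp->in_pkts) conversions above are behavior-preserving: both return the value after the increment with the same ordering guarantees, and the dedicated increment form can map to a cheaper instruction on some architectures. A userspace analogue with C11 atomics, as a sketch only:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int in_pkts = 0;

	/* C11 atomic_fetch_add() returns the old value, so the
	 * post-increment value -- what atomic_inc_return() and
	 * atomic_add_return(1, ...) both yield -- is old + 1. */
	int pkts = atomic_fetch_add(&in_pkts, 1) + 1;

	printf("pkts = %d\n", pkts);	/* prints 1 */
	return 0;
}
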
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 3d0fd33be018..84caf3316946 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -167,10 +167,14 @@ nla_put_failure:
return -1;
}
-static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct,
+ bool skip_zero)
{
long timeout = nf_ct_expires(ct) / HZ;
+ if (skip_zero && timeout == 0)
+ return 0;
+
if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
goto nla_put_failure;
return 0;
@@ -179,7 +183,8 @@ nla_put_failure:
return -1;
}
-static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
+static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct,
+ bool destroy)
{
const struct nf_conntrack_l4proto *l4proto;
struct nlattr *nest_proto;
@@ -193,7 +198,7 @@ static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
if (!nest_proto)
goto nla_put_failure;
- ret = l4proto->to_nlattr(skb, nest_proto, ct);
+ ret = l4proto->to_nlattr(skb, nest_proto, ct, destroy);
nla_nest_end(skb, nest_proto);
@@ -537,8 +542,8 @@ static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
return -1;
if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
- (ctnetlink_dump_timeout(skb, ct) < 0 ||
- ctnetlink_dump_protoinfo(skb, ct) < 0))
+ (ctnetlink_dump_timeout(skb, ct, false) < 0 ||
+ ctnetlink_dump_protoinfo(skb, ct, false) < 0))
return -1;
return 0;
@@ -780,15 +785,19 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
goto nla_put_failure;
if (events & (1 << IPCT_DESTROY)) {
+ if (ctnetlink_dump_timeout(skb, ct, true) < 0)
+ goto nla_put_failure;
+
if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
- ctnetlink_dump_timestamp(skb, ct) < 0)
+ ctnetlink_dump_timestamp(skb, ct) < 0 ||
+ ctnetlink_dump_protoinfo(skb, ct, true) < 0)
goto nla_put_failure;
} else {
- if (ctnetlink_dump_timeout(skb, ct) < 0)
+ if (ctnetlink_dump_timeout(skb, ct, false) < 0)
goto nla_put_failure;
- if (events & (1 << IPCT_PROTOINFO)
- && ctnetlink_dump_protoinfo(skb, ct) < 0)
+ if (events & (1 << IPCT_PROTOINFO) &&
+ ctnetlink_dump_protoinfo(skb, ct, false) < 0)
goto nla_put_failure;
if ((events & (1 << IPCT_HELPER) || nfct_help(ct)) &&
    ctnetlink_dump_helpinfo(skb, ct) < 0)
goto nla_put_failure;
@@ -2720,10 +2729,10 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
if (ctnetlink_dump_status(skb, ct) < 0)
goto nla_put_failure;
- if (ctnetlink_dump_timeout(skb, ct) < 0)
+ if (ctnetlink_dump_timeout(skb, ct, false) < 0)
goto nla_put_failure;
- if (ctnetlink_dump_protoinfo(skb, ct) < 0)
+ if (ctnetlink_dump_protoinfo(skb, ct, false) < 0)
goto nla_put_failure;
if (ctnetlink_dump_helpinfo(skb, ct) < 0)
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index b3f4a334f9d7..db7479db8512 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -589,7 +589,7 @@ static void dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
- struct nf_conn *ct)
+ struct nf_conn *ct, bool destroy)
{
struct nlattr *nest_parms;
@@ -597,15 +597,22 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP);
if (!nest_parms)
goto nla_put_failure;
- if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) ||
- nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
+ if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state))
+ goto nla_put_failure;
+
+ if (destroy)
+ goto skip_state;
+
+ if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
cpu_to_be64(ct->proto.dccp.handshake_seq),
CTA_PROTOINFO_DCCP_PAD))
goto nla_put_failure;
+skip_state:
nla_nest_end(skb, nest_parms);
spin_unlock_bh(&ct->lock);
+
return 0;
nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 810cca24b399..fb8dc02e502f 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -543,7 +543,7 @@ static bool sctp_can_early_drop(const struct nf_conn *ct)
#include <linux/netfilter/nfnetlink_conntrack.h>
static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
- struct nf_conn *ct)
+ struct nf_conn *ct, bool destroy)
{
struct nlattr *nest_parms;
@@ -552,15 +552,20 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
if (!nest_parms)
goto nla_put_failure;
- if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state) ||
- nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
+ if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state))
+ goto nla_put_failure;
+
+ if (destroy)
+ goto skip_state;
+
+ if (nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]) ||
nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY,
ct->proto.sctp.vtag[IP_CT_DIR_REPLY]))
goto nla_put_failure;
+skip_state:
spin_unlock_bh(&ct->lock);
-
nla_nest_end(skb, nest_parms);
return 0;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index c8fb2187ad4b..1d7e1c595546 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -834,12 +834,6 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
return true;
}
-static bool nf_conntrack_tcp_established(const struct nf_conn *ct)
-{
- return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED &&
- test_bit(IPS_ASSURED_BIT, &ct->status);
-}
-
/* Returns verdict for packet, or -1 for invalid. */
int nf_conntrack_tcp_packet(struct nf_conn *ct,
struct sk_buff *skb,
@@ -1192,7 +1186,7 @@ static bool tcp_can_early_drop(const struct nf_conn *ct)
#include <linux/netfilter/nfnetlink_conntrack.h>
static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
- struct nf_conn *ct)
+ struct nf_conn *ct, bool destroy)
{
struct nlattr *nest_parms;
struct nf_ct_tcp_flags tmp = {};
@@ -1202,8 +1196,13 @@ static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
if (!nest_parms)
goto nla_put_failure;
- if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
- nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
+ if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state))
+ goto nla_put_failure;
+
+ if (destroy)
+ goto skip_state;
+
+ if (nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
ct->proto.tcp.seen[0].td_scale) ||
nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
ct->proto.tcp.seen[1].td_scale))
@@ -1218,8 +1217,8 @@ static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
sizeof(struct nf_ct_tcp_flags), &tmp))
goto nla_put_failure;
+skip_state:
spin_unlock_bh(&ct->lock);
-
nla_nest_end(skb, nest_parms);
return 0;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 9a080767667b..8d5aa0ac45f4 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -581,7 +581,8 @@ struct nft_module_request {
};
#ifdef CONFIG_MODULES
-static int nft_request_module(struct net *net, const char *fmt, ...)
+static __printf(2, 3) int nft_request_module(struct net *net, const char *fmt,
+ ...)
{
char module_name[MODULE_NAME_LEN];
struct nft_module_request *req;
@@ -1282,7 +1283,7 @@ static struct nft_chain *nft_chain_lookup(struct net *net,
if (nla == NULL)
return ERR_PTR(-EINVAL);
- nla_strlcpy(search, nla, sizeof(search));
+ nla_strscpy(search, nla, sizeof(search));
WARN_ON(!rcu_read_lock_held() &&
!lockdep_commit_lock_is_held(net));
@@ -1722,7 +1723,7 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
goto err_hook_alloc;
}
- nla_strlcpy(ifname, attr, IFNAMSIZ);
+ nla_strscpy(ifname, attr, IFNAMSIZ);
/* nf_tables_netdev_event() is called under rtnl_mutex, this is
* indirectly serializing all the other holders of the commit_mutex with
* the rtnl_mutex.
@@ -3570,6 +3571,7 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
[NFTA_SET_OBJ_TYPE] = { .type = NLA_U32 },
[NFTA_SET_HANDLE] = { .type = NLA_U64 },
[NFTA_SET_EXPR] = { .type = NLA_NESTED },
+ [NFTA_SET_EXPRESSIONS] = { .type = NLA_NESTED },
};
static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
@@ -3777,6 +3779,7 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
u32 portid = ctx->portid;
struct nlattr *nest;
u32 seq = ctx->seq;
+ int i;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
@@ -3845,11 +3848,22 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
nla_nest_end(skb, nest);
- if (set->expr) {
+ if (set->num_exprs == 1) {
nest = nla_nest_start_noflag(skb, NFTA_SET_EXPR);
- if (nf_tables_fill_expr_info(skb, set->expr) < 0)
+ if (nf_tables_fill_expr_info(skb, set->exprs[0]) < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ } else if (set->num_exprs > 1) {
+ nest = nla_nest_start_noflag(skb, NFTA_SET_EXPRESSIONS);
+ if (nest == NULL)
goto nla_put_failure;
+ for (i = 0; i < set->num_exprs; i++) {
+ if (nft_expr_dump(skb, NFTA_LIST_ELEM,
+ set->exprs[i]) < 0)
+ goto nla_put_failure;
+ }
nla_nest_end(skb, nest);
}
@@ -4219,7 +4233,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
return err;
}
- if (nla[NFTA_SET_EXPR])
+ if (nla[NFTA_SET_EXPR] || nla[NFTA_SET_EXPRESSIONS])
desc.expr = true;
table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, genmask);
@@ -4283,6 +4297,31 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
err = PTR_ERR(expr);
goto err_set_alloc_name;
}
+ set->exprs[0] = expr;
+ set->num_exprs++;
+ } else if (nla[NFTA_SET_EXPRESSIONS]) {
+ struct nft_expr *expr;
+ struct nlattr *tmp;
+ int left;
+
+ i = 0;
+ nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
+ if (i == NFT_SET_EXPR_MAX) {
+ err = -E2BIG;
+ goto err_set_init;
+ }
+ if (nla_type(tmp) != NFTA_LIST_ELEM) {
+ err = -EINVAL;
+ goto err_set_init;
+ }
+ expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
+ if (IS_ERR(expr)) {
+ err = PTR_ERR(expr);
+ goto err_set_init;
+ }
+ set->exprs[i++] = expr;
+ set->num_exprs++;
+ }
}
udata = NULL;
@@ -4300,7 +4339,6 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
set->dtype = dtype;
set->objtype = objtype;
set->dlen = desc.dlen;
- set->expr = expr;
set->flags = flags;
set->size = desc.size;
set->policy = policy;
@@ -4329,8 +4367,8 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
err_set_trans:
ops->destroy(set);
err_set_init:
- if (expr)
- nft_expr_destroy(&ctx, expr);
+ for (i = 0; i < set->num_exprs; i++)
+ nft_expr_destroy(&ctx, set->exprs[i]);
err_set_alloc_name:
kfree(set->name);
err_set_name:
@@ -4340,11 +4378,13 @@ err_set_name:
static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
{
+ int i;
+
if (WARN_ON(set->use > 0))
return;
- if (set->expr)
- nft_expr_destroy(ctx, set->expr);
+ for (i = 0; i < set->num_exprs; i++)
+ nft_expr_destroy(ctx, set->exprs[i]);
set->ops->destroy(set);
kfree(set->name);
@@ -4497,8 +4537,8 @@ const struct nft_set_ext_type nft_set_ext_types[] = {
[NFT_SET_EXT_DATA] = {
.align = __alignof__(u32),
},
- [NFT_SET_EXT_EXPR] = {
- .align = __alignof__(struct nft_expr),
+ [NFT_SET_EXT_EXPRESSIONS] = {
+ .align = __alignof__(struct nft_set_elem_expr),
},
[NFT_SET_EXT_OBJREF] = {
.len = sizeof(struct nft_object *),
@@ -4541,6 +4581,7 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
[NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING,
.len = NFT_OBJ_MAXNAMELEN - 1 },
[NFTA_SET_ELEM_KEY_END] = { .type = NLA_NESTED },
+ [NFTA_SET_ELEM_EXPRESSIONS] = { .type = NLA_NESTED },
};
static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
@@ -4574,6 +4615,43 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net,
return 0;
}
+static int nft_set_elem_expr_dump(struct sk_buff *skb,
+ const struct nft_set *set,
+ const struct nft_set_ext *ext)
+{
+ struct nft_set_elem_expr *elem_expr;
+ u32 size, num_exprs = 0;
+ struct nft_expr *expr;
+ struct nlattr *nest;
+
+ elem_expr = nft_set_ext_expr(ext);
+ nft_setelem_expr_foreach(expr, elem_expr, size)
+ num_exprs++;
+
+ if (num_exprs == 1) {
+ expr = nft_setelem_expr_at(elem_expr, 0);
+ if (nft_expr_dump(skb, NFTA_SET_ELEM_EXPR, expr) < 0)
+ return -1;
+
+ return 0;
+ } else if (num_exprs > 1) {
+ nest = nla_nest_start_noflag(skb, NFTA_SET_ELEM_EXPRESSIONS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ nft_setelem_expr_foreach(expr, elem_expr, size) {
+ if (nft_expr_dump(skb, NFTA_LIST_ELEM, expr) < 0)
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, nest);
+ }
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
static int nf_tables_fill_setelem(struct sk_buff *skb,
const struct nft_set *set,
const struct nft_set_elem *elem)
@@ -4601,8 +4679,8 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
set->dlen) < 0)
goto nla_put_failure;
- if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR) &&
- nft_expr_dump(skb, NFTA_SET_ELEM_EXPR, nft_set_ext_expr(ext)) < 0)
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS) &&
+ nft_set_elem_expr_dump(skb, set, ext))
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) &&
@@ -5097,8 +5175,8 @@ void *nft_set_elem_init(const struct nft_set *set,
return elem;
}
-static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
- struct nft_expr *expr)
+static void __nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
+ struct nft_expr *expr)
{
if (expr->ops->destroy_clone) {
expr->ops->destroy_clone(ctx, expr);
@@ -5108,6 +5186,16 @@ static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
}
}
+static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
+ struct nft_set_elem_expr *elem_expr)
+{
+ struct nft_expr *expr;
+ u32 size;
+
+ nft_setelem_expr_foreach(expr, elem_expr, size)
+ __nft_set_elem_expr_destroy(ctx, expr);
+}
+
void nft_set_elem_destroy(const struct nft_set *set, void *elem,
bool destroy_expr)
{
@@ -5120,7 +5208,7 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_release(nft_set_ext_data(ext), set->dtype);
- if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
+ if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS))
nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext));
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
@@ -5137,15 +5225,57 @@ static void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
{
struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
- if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS))
nft_set_elem_expr_destroy(ctx, nft_set_ext_expr(ext));
kfree(elem);
}
+static int nft_set_elem_expr_clone(const struct nft_ctx *ctx,
+ struct nft_set *set,
+ struct nft_expr *expr_array[])
+{
+ struct nft_expr *expr;
+ int err, i, k;
+
+ for (i = 0; i < set->num_exprs; i++) {
+ expr = kzalloc(set->exprs[i]->ops->size, GFP_KERNEL);
+ if (!expr)
+ goto err_expr;
+
+ err = nft_expr_clone(expr, set->exprs[i]);
+ if (err < 0) {
+ nft_expr_destroy(ctx, expr);
+ goto err_expr;
+ }
+ expr_array[i] = expr;
+ }
+
+ return 0;
+
+err_expr:
+ for (k = i - 1; k >= 0; k--)
+ nft_expr_destroy(ctx, expr_array[k]);
+
+ return -ENOMEM;
+}
+
+static void nft_set_elem_expr_setup(const struct nft_set_ext *ext, int i,
+ struct nft_expr *expr_array[])
+{
+ struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
+ struct nft_expr *expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
+
+ memcpy(expr, expr_array[i], expr_array[i]->ops->size);
+ elem_expr->size += expr_array[i]->ops->size;
+ kfree(expr_array[i]);
+ expr_array[i] = NULL;
+}
+
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr, u32 nlmsg_flags)
{
+ struct nft_expr *expr_array[NFT_SET_EXPR_MAX] = {};
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set_ext_tmpl tmpl;
@@ -5153,16 +5283,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_elem elem;
struct nft_set_binding *binding;
struct nft_object *obj = NULL;
- struct nft_expr *expr = NULL;
struct nft_userdata *udata;
struct nft_data_desc desc;
enum nft_registers dreg;
struct nft_trans *trans;
- u32 flags = 0;
+ u32 flags = 0, size = 0;
u64 timeout;
u64 expiration;
+ int err, i;
u8 ulen;
- int err;
err = nla_parse_nested_deprecated(nla, NFTA_SET_ELEM_MAX, attr,
nft_set_elem_policy, NULL);
@@ -5195,7 +5324,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
nla[NFTA_SET_ELEM_TIMEOUT] ||
nla[NFTA_SET_ELEM_EXPIRATION] ||
nla[NFTA_SET_ELEM_USERDATA] ||
- nla[NFTA_SET_ELEM_EXPR]))
+ nla[NFTA_SET_ELEM_EXPR] ||
+ nla[NFTA_SET_ELEM_EXPRESSIONS]))
return -EINVAL;
timeout = 0;
@@ -5220,23 +5350,62 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return err;
}
- if (nla[NFTA_SET_ELEM_EXPR] != NULL) {
+ if (nla[NFTA_SET_ELEM_EXPR]) {
+ struct nft_expr *expr;
+
+ if (set->num_exprs != 1)
+ return -EOPNOTSUPP;
+
expr = nft_set_elem_expr_alloc(ctx, set,
nla[NFTA_SET_ELEM_EXPR]);
if (IS_ERR(expr))
return PTR_ERR(expr);
- err = -EOPNOTSUPP;
- if (set->expr && set->expr->ops != expr->ops)
+ expr_array[0] = expr;
+
+ if (set->exprs[0] && set->exprs[0]->ops != expr->ops) {
+ err = -EOPNOTSUPP;
goto err_set_elem_expr;
- } else if (set->expr) {
- expr = kzalloc(set->expr->ops->size, GFP_KERNEL);
- if (!expr)
- return -ENOMEM;
+ }
+ } else if (nla[NFTA_SET_ELEM_EXPRESSIONS]) {
+ struct nft_expr *expr;
+ struct nlattr *tmp;
+ int left;
- err = nft_expr_clone(expr, set->expr);
- if (err < 0)
+ if (set->num_exprs == 0)
+ return -EOPNOTSUPP;
+
+ i = 0;
+ nla_for_each_nested(tmp, nla[NFTA_SET_ELEM_EXPRESSIONS], left) {
+ if (i == set->num_exprs) {
+ err = -E2BIG;
+ goto err_set_elem_expr;
+ }
+ if (nla_type(tmp) != NFTA_LIST_ELEM) {
+ err = -EINVAL;
+ goto err_set_elem_expr;
+ }
+ expr = nft_set_elem_expr_alloc(ctx, set, tmp);
+ if (IS_ERR(expr)) {
+ err = PTR_ERR(expr);
+ goto err_set_elem_expr;
+ }
+ expr_array[i] = expr;
+
+ if (expr->ops != set->exprs[i]->ops) {
+ err = -EOPNOTSUPP;
+ goto err_set_elem_expr;
+ }
+ i++;
+ }
+ if (set->num_exprs != i) {
+ err = -EOPNOTSUPP;
goto err_set_elem_expr;
+ }
+ } else if (set->num_exprs > 0) {
+ err = nft_set_elem_expr_clone(ctx, set, expr_array);
+ if (err < 0)
+ goto err_set_elem_expr_clone;
}
err = nft_setelem_parse_key(ctx, set, &elem.key.val,
@@ -5261,9 +5430,14 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
}
- if (expr)
- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPR,
- expr->ops->size);
+ if (set->num_exprs) {
+ for (i = 0; i < set->num_exprs; i++)
+ size += expr_array[i]->ops->size;
+
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPRESSIONS,
+ sizeof(struct nft_set_elem_expr) +
+ size);
+ }
if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
if (!(set->flags & NFT_SET_OBJECT)) {
@@ -5345,11 +5519,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
*nft_set_ext_obj(ext) = obj;
obj->use++;
}
- if (expr) {
- memcpy(nft_set_ext_expr(ext), expr, expr->ops->size);
- kfree(expr);
- expr = NULL;
- }
+ for (i = 0; i < set->num_exprs; i++)
+ nft_set_elem_expr_setup(ext, i, expr_array);
trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
if (trans == NULL)
@@ -5410,9 +5581,9 @@ err_parse_key_end:
err_parse_key:
nft_data_release(&elem.key.val, NFT_DATA_VALUE);
err_set_elem_expr:
- if (expr != NULL)
- nft_expr_destroy(ctx, expr);
-
+ for (i = 0; i < set->num_exprs && expr_array[i]; i++)
+ nft_expr_destroy(ctx, expr_array[i]);
+err_set_elem_expr_clone:
return err;
}
@@ -5739,7 +5910,7 @@ struct nft_object *nft_obj_lookup(const struct net *net,
struct rhlist_head *tmp, *list;
struct nft_object *obj;
- nla_strlcpy(search, nla, sizeof(search));
+ nla_strscpy(search, nla, sizeof(search));
k.name = search;
WARN_ON_ONCE(!rcu_read_lock_held() &&
     !lockdep_commit_lock_is_held(net));
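
nft_set_elem_expr_clone() above follows the usual allocate-then-unwind pattern: if cloning expression i fails, only entries 0..i-1 were populated, so the error path must walk backwards from i - 1 and free exactly those. A compile-ready sketch of the pattern with plain malloc()/free() and hypothetical names:

#include <stdlib.h>
#include <string.h>

struct expr { char buf[16]; };

/* Clone n exprs into dst[]; on failure free only what was already
 * cloned, walking backwards from i - 1. */
static int clone_all(struct expr *dst[], struct expr *const src[], int n)
{
	int i, k;

	for (i = 0; i < n; i++) {
		dst[i] = malloc(sizeof(*dst[i]));
		if (!dst[i])
			goto err_expr;
		memcpy(dst[i], src[i], sizeof(*dst[i]));
	}
	return 0;

err_expr:
	for (k = i - 1; k >= 0; k--)	/* walk back, not forward */
		free(dst[k]);
	return -1;
}
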
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 5bfec829c12f..0fa1653b5f19 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <net/netlink.h>
#include <net/sock.h>
+#include <net/netns/generic.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
@@ -41,6 +42,17 @@ struct nfacct_filter {
u32 mask;
};
+struct nfnl_acct_net {
+ struct list_head nfnl_acct_list;
+};
+
+static unsigned int nfnl_acct_net_id __read_mostly;
+
+static inline struct nfnl_acct_net *nfnl_acct_pernet(struct net *net)
+{
+ return net_generic(net, nfnl_acct_net_id);
+}
+
#define NFACCT_F_QUOTA (NFACCT_F_QUOTA_PKTS | NFACCT_F_QUOTA_BYTES)
#define NFACCT_OVERQUOTA_BIT 2 /* NFACCT_F_OVERQUOTA */
@@ -49,6 +61,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
const struct nlattr * const tb[],
struct netlink_ext_ack *extack)
{
+ struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(net);
struct nf_acct *nfacct, *matching = NULL;
char *acct_name;
unsigned int size = 0;
@@ -61,7 +74,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
if (strlen(acct_name) == 0)
return -EINVAL;
- list_for_each_entry(nfacct, &net->nfnl_acct_list, head) {
+ list_for_each_entry(nfacct, &nfnl_acct_net->nfnl_acct_list, head) {
if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
@@ -112,7 +125,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
nfacct->flags = flags;
}
- nla_strlcpy(nfacct->name, tb[NFACCT_NAME], NFACCT_NAME_MAX);
+ nla_strscpy(nfacct->name, tb[NFACCT_NAME], NFACCT_NAME_MAX);
if (tb[NFACCT_BYTES]) {
atomic64_set(&nfacct->bytes,
@@ -123,7 +136,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
}
refcount_set(&nfacct->refcnt, 1);
- list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list);
+ list_add_tail_rcu(&nfacct->head, &nfnl_acct_net->nfnl_acct_list);
return 0;
}
@@ -188,6 +201,7 @@ static int
nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
+ struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(net);
struct nf_acct *cur, *last;
const struct nfacct_filter *filter = cb->data;
@@ -199,7 +213,7 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[1] = 0;
rcu_read_lock();
- list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
+ list_for_each_entry_rcu(cur, &nfnl_acct_net->nfnl_acct_list, head) {
if (last) {
if (cur != last)
continue;
@@ -269,6 +283,7 @@ static int nfnl_acct_get(struct net *net, struct sock *nfnl,
const struct nlattr * const tb[],
struct netlink_ext_ack *extack)
{
+ struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(net);
int ret = -ENOENT;
struct nf_acct *cur;
char *acct_name;
@@ -288,7 +303,7 @@ static int nfnl_acct_get(struct net *net, struct sock *nfnl,
return -EINVAL;
acct_name = nla_data(tb[NFACCT_NAME]);
- list_for_each_entry(cur, &net->nfnl_acct_list, head) {
+ list_for_each_entry(cur, &nfnl_acct_net->nfnl_acct_list, head) {
struct sk_buff *skb2;
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
@@ -342,19 +357,20 @@ static int nfnl_acct_del(struct net *net, struct sock *nfnl,
const struct nlattr * const tb[],
struct netlink_ext_ack *extack)
{
+ struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(net);
struct nf_acct *cur, *tmp;
int ret = -ENOENT;
char *acct_name;
if (!tb[NFACCT_NAME]) {
- list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head)
+ list_for_each_entry_safe(cur, tmp, &nfnl_acct_net->nfnl_acct_list, head)
nfnl_acct_try_del(cur);
return 0;
}
acct_name = nla_data(tb[NFACCT_NAME]);
- list_for_each_entry(cur, &net->nfnl_acct_list, head) {
+ list_for_each_entry(cur, &nfnl_acct_net->nfnl_acct_list, head) {
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
@@ -402,10 +418,11 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name)
{
+ struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(net);
struct nf_acct *cur, *acct = NULL;
rcu_read_lock();
- list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
+ list_for_each_entry_rcu(cur, &nfnl_acct_net->nfnl_acct_list, head) {
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
@@ -488,16 +505,17 @@ EXPORT_SYMBOL_GPL(nfnl_acct_overquota);
static int __net_init nfnl_acct_net_init(struct net *net)
{
- INIT_LIST_HEAD(&net->nfnl_acct_list);
+ INIT_LIST_HEAD(&nfnl_acct_pernet(net)->nfnl_acct_list);
return 0;
}
static void __net_exit nfnl_acct_net_exit(struct net *net)
{
+ struct nfnl_acct_net *nfnl_acct_net = nfnl_acct_pernet(net);
struct nf_acct *cur, *tmp;
- list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) {
+ list_for_each_entry_safe(cur, tmp, &nfnl_acct_net->nfnl_acct_list, head) {
list_del_rcu(&cur->head);
if (refcount_dec_and_test(&cur->refcnt))
@@ -508,6 +526,8 @@ static void __net_exit nfnl_acct_net_exit(struct net *net)
static struct pernet_operations nfnl_acct_ops = {
.init = nfnl_acct_net_init,
.exit = nfnl_acct_net_exit,
+ .id = &nfnl_acct_net_id,
+ .size = sizeof(struct nfnl_acct_net),
};
static int __init nfnl_acct_init(void)
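
The nfnetlink_acct conversion above is the standard pattern for moving per-namespace state out of struct net: registering pernet_operations with .id and .size makes the networking core allocate one zeroed private struct per network namespace, which net_generic(net, id) then returns, and .init runs per namespace to finish setup. The shape of the pattern reduced to its essentials (kernel-context sketch with hypothetical names, not compilable standalone):

static unsigned int demo_net_id __read_mostly;

struct demo_net {
	struct list_head demo_list;
};

static inline struct demo_net *demo_pernet(struct net *net)
{
	/* core-allocated per-netns storage, sized by .size below */
	return net_generic(net, demo_net_id);
}

static int __net_init demo_net_init(struct net *net)
{
	INIT_LIST_HEAD(&demo_pernet(net)->demo_list);
	return 0;
}

static struct pernet_operations demo_net_ops = {
	.init = demo_net_init,
	.id   = &demo_net_id,
	.size = sizeof(struct demo_net),
};
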
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 5b0d0a77379c..0f94fce1d3ed 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -146,7 +146,7 @@ nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
!tb[NFCTH_POLICY_EXPECT_TIMEOUT])
return -EINVAL;
- nla_strlcpy(expect_policy->name,
+ nla_strscpy(expect_policy->name,
tb[NFCTH_POLICY_NAME], NF_CT_HELPER_NAME_LEN);
expect_policy->max_expected =
ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
@@ -233,7 +233,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
if (ret < 0)
goto err1;
- nla_strlcpy(helper->name,
+ nla_strscpy(helper->name,
tb[NFCTH_NAME], NF_CT_HELPER_NAME_LEN);
size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
if (size > sizeof_field(struct nf_conn_help, data)) {
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index a1b0aac46e9e..8bcd49f14797 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -988,7 +988,7 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
if (!priv->l4proto)
return -ENOENT;
- nla_strlcpy(name, tb[NFTA_CT_HELPER_NAME], sizeof(name));
+ nla_strscpy(name, tb[NFTA_CT_HELPER_NAME], sizeof(name));
if (tb[NFTA_CT_HELPER_L3PROTO])
family = ntohs(nla_get_be16(tb[NFTA_CT_HELPER_L3PROTO]));
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 9af4f93c7f0e..983a1d5ca3ab 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -19,11 +19,30 @@ struct nft_dynset {
enum nft_registers sreg_key:8;
enum nft_registers sreg_data:8;
bool invert;
+ u8 num_exprs;
u64 timeout;
- struct nft_expr *expr;
+ struct nft_expr *expr_array[NFT_SET_EXPR_MAX];
struct nft_set_binding binding;
};
+static int nft_dynset_expr_setup(const struct nft_dynset *priv,
+ const struct nft_set_ext *ext)
+{
+ struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
+ struct nft_expr *expr;
+ int i;
+
+ for (i = 0; i < priv->num_exprs; i++) {
+ expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
+ if (nft_expr_clone(expr, priv->expr_array[i]) < 0)
+ return -1;
+
+ elem_expr->size += priv->expr_array[i]->ops->size;
+ }
+
+ return 0;
+}
+
static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
struct nft_regs *regs)
{
@@ -44,8 +63,7 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
goto err1;
ext = nft_set_elem_ext(set, elem);
- if (priv->expr != NULL &&
- nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
+ if (priv->num_exprs && nft_dynset_expr_setup(priv, ext) < 0)
goto err2;
return elem;
@@ -90,6 +108,41 @@ void nft_dynset_eval(const struct nft_expr *expr,
regs->verdict.code = NFT_BREAK;
}
+static void nft_dynset_ext_add_expr(struct nft_dynset *priv)
+{
+ u8 size = 0;
+ int i;
+
+ for (i = 0; i < priv->num_exprs; i++)
+ size += priv->expr_array[i]->ops->size;
+
+ nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPRESSIONS,
+ sizeof(struct nft_set_elem_expr) + size);
+}
+
+static struct nft_expr *
+nft_dynset_expr_alloc(const struct nft_ctx *ctx, const struct nft_set *set,
+ const struct nlattr *attr, int pos)
+{
+ struct nft_expr *expr;
+ int err;
+
+ expr = nft_set_elem_expr_alloc(ctx, set, attr);
+ if (IS_ERR(expr))
+ return expr;
+
+ if (set->exprs[pos] && set->exprs[pos]->ops != expr->ops) {
+ err = -EOPNOTSUPP;
+ goto err_dynset_expr;
+ }
+
+ return expr;
+
+err_dynset_expr:
+ nft_expr_destroy(ctx, expr);
+ return ERR_PTR(err);
+}
+
static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
[NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING,
.len = NFT_SET_MAXNAMELEN - 1 },
@@ -100,6 +153,7 @@ static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
[NFTA_DYNSET_TIMEOUT] = { .type = NLA_U64 },
[NFTA_DYNSET_EXPR] = { .type = NLA_NESTED },
[NFTA_DYNSET_FLAGS] = { .type = NLA_U32 },
+ [NFTA_DYNSET_EXPRESSIONS] = { .type = NLA_NESTED },
};
static int nft_dynset_init(const struct nft_ctx *ctx,
@@ -110,7 +164,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set *set;
u64 timeout;
- int err;
+ int err, i;
lockdep_assert_held(&ctx->net->nft.commit_mutex);
@@ -181,16 +235,58 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
} else if (set->flags & NFT_SET_MAP)
return -EINVAL;
- if (tb[NFTA_DYNSET_EXPR] != NULL) {
- if (!(set->flags & NFT_SET_EVAL))
- return -EINVAL;
+ if ((tb[NFTA_DYNSET_EXPR] || tb[NFTA_DYNSET_EXPRESSIONS]) &&
+ !(set->flags & NFT_SET_EVAL))
+ return -EINVAL;
+
+ if (tb[NFTA_DYNSET_EXPR]) {
+ struct nft_expr *dynset_expr;
+
+ dynset_expr = nft_dynset_expr_alloc(ctx, set,
+ tb[NFTA_DYNSET_EXPR], 0);
+ if (IS_ERR(dynset_expr))
+ return PTR_ERR(dynset_expr);
- priv->expr = nft_set_elem_expr_alloc(ctx, set,
- tb[NFTA_DYNSET_EXPR]);
- if (IS_ERR(priv->expr))
- return PTR_ERR(priv->expr);
+ priv->num_exprs++;
+ priv->expr_array[0] = dynset_expr;
- if (set->expr && set->expr->ops != priv->expr->ops) {
+ if (set->num_exprs > 1 ||
+ (set->num_exprs == 1 &&
+ dynset_expr->ops != set->exprs[0]->ops)) {
+ err = -EOPNOTSUPP;
+ goto err_expr_free;
+ }
+ } else if (tb[NFTA_DYNSET_EXPRESSIONS]) {
+ struct nft_expr *dynset_expr;
+ struct nlattr *tmp;
+ int left;
+
+ i = 0;
+ nla_for_each_nested(tmp, tb[NFTA_DYNSET_EXPRESSIONS], left) {
+ if (i == NFT_SET_EXPR_MAX) {
+ err = -E2BIG;
+ goto err_expr_free;
+ }
+ if (nla_type(tmp) != NFTA_LIST_ELEM) {
+ err = -EINVAL;
+ goto err_expr_free;
+ }
+ dynset_expr = nft_dynset_expr_alloc(ctx, set, tmp, i);
+ if (IS_ERR(dynset_expr)) {
+ err = PTR_ERR(dynset_expr);
+ goto err_expr_free;
+ }
+ priv->expr_array[i] = dynset_expr;
+ priv->num_exprs++;
+
+ if (set->num_exprs &&
+ dynset_expr->ops != set->exprs[i]->ops) {
+ err = -EOPNOTSUPP;
+ goto err_expr_free;
+ }
+ i++;
+ }
+ if (set->num_exprs && set->num_exprs != i) {
err = -EOPNOTSUPP;
goto err_expr_free;
}
@@ -200,9 +296,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_KEY, set->klen);
if (set->flags & NFT_SET_MAP)
nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_DATA, set->dlen);
- if (priv->expr != NULL)
- nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR,
- priv->expr->ops->size);
+
+ if (priv->num_exprs)
+ nft_dynset_ext_add_expr(priv);
+
if (set->flags & NFT_SET_TIMEOUT) {
if (timeout || set->timeout)
nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
@@ -221,8 +318,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
return 0;
err_expr_free:
- if (priv->expr != NULL)
- nft_expr_destroy(ctx, priv->expr);
+ for (i = 0; i < priv->num_exprs; i++)
+ nft_expr_destroy(ctx, priv->expr_array[i]);
return err;
}
@@ -247,9 +344,10 @@ static void nft_dynset_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_dynset *priv = nft_expr_priv(expr);
+ int i;
- if (priv->expr != NULL)
- nft_expr_destroy(ctx, priv->expr);
+ for (i = 0; i < priv->num_exprs; i++)
+ nft_expr_destroy(ctx, priv->expr_array[i]);
nf_tables_destroy_set(ctx, priv->set);
}
@@ -258,6 +356,7 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_dynset *priv = nft_expr_priv(expr);
u32 flags = priv->invert ? NFT_DYNSET_F_INV : 0;
+ int i;
if (nft_dump_register(skb, NFTA_DYNSET_SREG_KEY, priv->sreg_key))
goto nla_put_failure;
@@ -272,8 +371,23 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
nf_jiffies64_to_msecs(priv->timeout),
NFTA_DYNSET_PAD))
goto nla_put_failure;
- if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
- goto nla_put_failure;
+ if (priv->num_exprs == 1) {
+ if (nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr_array[0]))
+ goto nla_put_failure;
+ } else if (priv->num_exprs > 1) {
+ struct nlattr *nest;
+
+ nest = nla_nest_start_noflag(skb, NFTA_DYNSET_EXPRESSIONS);
+ if (!nest)
+ goto nla_put_failure;
+
+ for (i = 0; i < priv->num_exprs; i++) {
+ if (nft_expr_dump(skb, NFTA_LIST_ELEM,
+ priv->expr_array[i]))
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, nest);
+ }
if (nla_put_be32(skb, NFTA_DYNSET_FLAGS, htonl(flags)))
goto nla_put_failure;
return 0;
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 57899454a530..a06a46b039c5 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -152,7 +152,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
if (priv->prefix == NULL)
return -ENOMEM;
- nla_strlcpy(priv->prefix, nla, nla_len(nla) + 1);
+ nla_strscpy(priv->prefix, nla, nla_len(nla) + 1);
} else {
priv->prefix = (char *)nft_log_null_prefix;
}
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 61fb7e8afbf0..927ff8459bd9 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -40,6 +40,7 @@ int nft_reject_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
+ int icmp_code;
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
@@ -47,9 +48,17 @@ int nft_reject_init(const struct nft_ctx *ctx,
priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
+ case NFT_REJECT_ICMPX_UNREACH:
if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
return -EINVAL;
- priv->icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
+
+ icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
+ if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
+ icmp_code > NFT_REJECT_ICMPX_MAX)
+ return -EINVAL;
+
+ priv->icmp_code = icmp_code;
+ break;
case NFT_REJECT_TCP_RST:
break;
default:
@@ -69,6 +78,7 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
+ case NFT_REJECT_ICMPX_UNREACH:
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index cf8f2646e93c..95090186ee90 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -28,7 +28,8 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
nft_hook(pkt));
break;
case NFT_REJECT_TCP_RST:
- nf_send_reset(nft_net(pkt), pkt->skb, nft_hook(pkt));
+ nf_send_reset(nft_net(pkt), pkt->xt.state->sk,
+ pkt->skb, nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
nf_send_unreach(pkt->skb,
@@ -44,7 +45,8 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
priv->icmp_code, nft_hook(pkt));
break;
case NFT_REJECT_TCP_RST:
- nf_send_reset6(nft_net(pkt), pkt->skb, nft_hook(pkt));
+ nf_send_reset6(nft_net(pkt), pkt->xt.state->sk,
+ pkt->skb, nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
nf_send_unreach6(nft_net(pkt), pkt->skb,
@@ -58,60 +60,16 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
regs->verdict.code = NF_DROP;
}
-static int nft_reject_inet_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
+static int nft_reject_inet_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
{
- struct nft_reject *priv = nft_expr_priv(expr);
- int icmp_code;
-
- if (tb[NFTA_REJECT_TYPE] == NULL)
- return -EINVAL;
-
- priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
- switch (priv->type) {
- case NFT_REJECT_ICMP_UNREACH:
- case NFT_REJECT_ICMPX_UNREACH:
- if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
- return -EINVAL;
-
- icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
- if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
- icmp_code > NFT_REJECT_ICMPX_MAX)
- return -EINVAL;
-
- priv->icmp_code = icmp_code;
- break;
- case NFT_REJECT_TCP_RST:
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int nft_reject_inet_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_reject *priv = nft_expr_priv(expr);
-
- if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
- goto nla_put_failure;
-
- switch (priv->type) {
- case NFT_REJECT_ICMP_UNREACH:
- case NFT_REJECT_ICMPX_UNREACH:
- if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
- goto nla_put_failure;
- break;
- default:
- break;
- }
-
- return 0;
-
-nla_put_failure:
- return -1;
+ return nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_INGRESS));
}
static struct nft_expr_type nft_reject_inet_type;
@@ -119,9 +77,9 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
.type = &nft_reject_inet_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_inet_eval,
- .init = nft_reject_inet_init,
- .dump = nft_reject_inet_dump,
- .validate = nft_reject_validate,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+ .validate = nft_reject_inet_validate,
};
static struct nft_expr_type nft_reject_inet_type __read_mostly = {
diff --git a/net/netfilter/nft_reject_netdev.c b/net/netfilter/nft_reject_netdev.c
new file mode 100644
index 000000000000..d89f68754f42
--- /dev/null
+++ b/net/netfilter/nft_reject_netdev.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 Laura Garcia Liebana <nevola@gmail.com>
+ * Copyright (c) 2020 Jose M. Guisado <guigom@riseup.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_reject.h>
+#include <net/netfilter/ipv4/nf_reject.h>
+#include <net/netfilter/ipv6/nf_reject.h>
+
+static void nft_reject_queue_xmit(struct sk_buff *nskb, struct sk_buff *oldskb)
+{
+ dev_hard_header(nskb, nskb->dev, ntohs(oldskb->protocol),
+ eth_hdr(oldskb)->h_source, eth_hdr(oldskb)->h_dest,
+ nskb->len);
+ dev_queue_xmit(nskb);
+}
+
+static void nft_reject_netdev_send_v4_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook)
+{
+ struct sk_buff *nskb;
+
+ nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, dev, hook);
+ if (!nskb)
+ return;
+
+ nft_reject_queue_xmit(nskb, oldskb);
+}
+
+static void nft_reject_netdev_send_v4_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code)
+{
+ struct sk_buff *nskb;
+
+ nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code);
+ if (!nskb)
+ return;
+
+ nft_reject_queue_xmit(nskb, oldskb);
+}
+
+static void nft_reject_netdev_send_v6_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook)
+{
+ struct sk_buff *nskb;
+
+ nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, dev, hook);
+ if (!nskb)
+ return;
+
+ nft_reject_queue_xmit(nskb, oldskb);
+}
+
+
+static void nft_reject_netdev_send_v6_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code)
+{
+ struct sk_buff *nskb;
+
+ nskb = nf_reject_skb_v6_unreach(net, oldskb, dev, hook, code);
+ if (!nskb)
+ return;
+
+ nft_reject_queue_xmit(nskb, oldskb);
+}
+
+static void nft_reject_netdev_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct ethhdr *eth = eth_hdr(pkt->skb);
+ struct nft_reject *priv = nft_expr_priv(expr);
+ const unsigned char *dest = eth->h_dest;
+
+ if (is_broadcast_ether_addr(dest) ||
+ is_multicast_ether_addr(dest))
+ goto out;
+
+ switch (eth->h_proto) {
+ case htons(ETH_P_IP):
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nft_reject_netdev_send_v4_unreach(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt),
+ priv->icmp_code);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nft_reject_netdev_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt));
+ break;
+ case NFT_REJECT_ICMPX_UNREACH:
+ nft_reject_netdev_send_v4_unreach(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt),
+ nft_reject_icmp_code(priv->icmp_code));
+ break;
+ }
+ break;
+ case htons(ETH_P_IPV6):
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nft_reject_netdev_send_v6_unreach(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt),
+ priv->icmp_code);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nft_reject_netdev_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt));
+ break;
+ case NFT_REJECT_ICMPX_UNREACH:
+ nft_reject_netdev_send_v6_unreach(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt),
+ nft_reject_icmpv6_code(priv->icmp_code));
+ break;
+ }
+ break;
+ default:
+ /* No explicit way to reject this protocol, drop it. */
+ break;
+ }
+out:
+ regs->verdict.code = NF_DROP;
+}
+
+static int nft_reject_netdev_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
+}
+
+static struct nft_expr_type nft_reject_netdev_type;
+static const struct nft_expr_ops nft_reject_netdev_ops = {
+ .type = &nft_reject_netdev_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+ .eval = nft_reject_netdev_eval,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+ .validate = nft_reject_netdev_validate,
+};
+
+static struct nft_expr_type nft_reject_netdev_type __read_mostly = {
+ .family = NFPROTO_NETDEV,
+ .name = "reject",
+ .ops = &nft_reject_netdev_ops,
+ .policy = nft_reject_policy,
+ .maxattr = NFTA_REJECT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_reject_netdev_module_init(void)
+{
+ return nft_register_expr(&nft_reject_netdev_type);
+}
+
+static void __exit nft_reject_netdev_module_exit(void)
+{
+ nft_unregister_expr(&nft_reject_netdev_type);
+}
+
+module_init(nft_reject_netdev_module_init);
+module_exit(nft_reject_netdev_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Laura Garcia Liebana <nevola@gmail.com>");
+MODULE_AUTHOR("Jose M. Guisado <guigom@riseup.net>");
+MODULE_DESCRIPTION("Reject packets from netdev via nftables");
+MODULE_ALIAS_NFT_AF_EXPR(5, "reject");
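
One detail of the new netdev reject expression worth noting: it never synthesizes a reply to a broadcast or multicast frame, which avoids amplifying traffic that many hosts would otherwise answer. The address tests themselves are cheap bit checks; a userspace sketch of what is_multicast_ether_addr()/is_broadcast_ether_addr() test (a multicast MAC has bit 0 of the first octet set, and broadcast is the all-ones address, which is itself multicast):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool mac_is_multicast(const uint8_t dest[6])
{
	/* group bit: least significant bit of the first octet */
	return dest[0] & 0x01;
}

static bool mac_is_broadcast(const uint8_t dest[6])
{
	static const uint8_t bcast[6] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	return memcmp(dest, bcast, 6) == 0;
}
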
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 4d3f147e8d8d..bf618b7ec1ae 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -293,6 +293,22 @@ cont:
rhashtable_walk_exit(&hti);
}
+static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set,
+ struct nft_set_ext *ext)
+{
+ struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
+ struct nft_expr *expr;
+ u32 size;
+
+ nft_setelem_expr_foreach(expr, elem_expr, size) {
+ if (expr->ops->gc &&
+ expr->ops->gc(read_pnet(&set->net), expr))
+ return true;
+ }
+
+ return false;
+}
+
static void nft_rhash_gc(struct work_struct *work)
{
struct nft_set *set;
@@ -314,16 +330,13 @@ static void nft_rhash_gc(struct work_struct *work)
continue;
}
- if (nft_set_ext_exists(&he->ext, NFT_SET_EXT_EXPR)) {
- struct nft_expr *expr = nft_set_ext_expr(&he->ext);
+ if (nft_set_ext_exists(&he->ext, NFT_SET_EXT_EXPRESSIONS) &&
+ nft_rhash_expr_needs_gc_run(set, &he->ext))
+ goto needs_gc_run;
- if (expr->ops->gc &&
- expr->ops->gc(read_pnet(&set->net), expr))
- goto gc;
- }
if (!nft_set_elem_expired(&he->ext))
continue;
-gc:
+needs_gc_run:
if (nft_set_elem_mark_busy(&he->ext))
continue;
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index a97c2259bbc8..7c6bf1c16813 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -27,7 +27,7 @@ static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
overquota = nfnl_acct_overquota(xt_net(par), info->nfacct);
- return overquota == NFACCT_UNDERQUOTA ? false : true;
+ return overquota != NFACCT_UNDERQUOTA;
}
static int
diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c
index 4e62f2ad3575..f28c8947c730 100644
--- a/net/netlabel/netlabel_calipso.c
+++ b/net/netlabel/netlabel_calipso.c
@@ -366,6 +366,7 @@ static const struct netlbl_calipso_ops *calipso_ops;
/**
* netlbl_calipso_ops_register - Register the CALIPSO operations
+ * @ops: ops to register
*
* Description:
* Register the CALIPSO packet engine operations.
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index eb1d66d20afb..df1b41ed73fd 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -95,7 +95,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = -ENOMEM;
goto add_free_entry;
}
- nla_strlcpy(entry->domain,
+ nla_strscpy(entry->domain,
info->attrs[NLBL_MGMT_A_DOMAIN], tmp_size);
}
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 9b27599870e3..96b91674dd37 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# NFC sybsystem configuration
+# NFC subsystem configuration
#
menuconfig NFC
diff --git a/net/nfc/core.c b/net/nfc/core.c
index eb377f87bcae..573c80c6ff7a 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -189,7 +189,8 @@ static const struct rfkill_ops nfc_rfkill_ops = {
* nfc_start_poll - start polling for nfc targets
*
* @dev: The nfc device that must start polling
- * @protocols: bitset of nfc protocols that must be used for polling
+ * @im_protocols: bitset of nfc initiator protocols to be used for polling
+ * @tm_protocols: bitset of nfc transport protocols to be used for polling
*
* The device remains polling for targets until a target is found or
* the nfc_stop_poll function is called.
@@ -436,6 +437,7 @@ error:
*
* @dev: The nfc device that found the target
* @target_idx: index of the target that must be deactivated
+ * @mode: idle or sleep?
*/
int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode)
{
@@ -703,7 +705,11 @@ EXPORT_SYMBOL(nfc_tm_deactivated);
/**
* nfc_alloc_send_skb - allocate a skb for data exchange responses
*
+ * @dev: device sending the response
+ * @sk: socket sending the response
+ * @flags: MSG_DONTWAIT flag
* @size: size to allocate
+ * @err: pointer to memory to store the error code
*/
struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
unsigned int flags, unsigned int size,
@@ -1039,6 +1045,8 @@ struct nfc_dev *nfc_get_device(unsigned int idx)
*
* @ops: device operations
* @supported_protocols: NFC protocols supported by the device
+ * @tx_headroom: reserved space at beginning of skb
+ * @tx_tailroom: reserved space at end of skb
*/
struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
u32 supported_protocols,
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c
index e3599ed4a7a8..da7e2112771f 100644
--- a/net/nfc/digital_core.c
+++ b/net/nfc/digital_core.c
@@ -458,6 +458,9 @@ static void digital_add_poll_tech(struct nfc_digital_dev *ddev, u8 rf_tech,
/**
* start_poll operation
+ * @nfc_dev: device to be polled
+ * @im_protocols: bitset of nfc initiator protocols to be used for polling
+ * @tm_protocols: bitset of nfc transport protocols to be used for polling
*
* For every supported protocol, the corresponding polling function is added
* to the table of polling technologies (ddev->poll_techs[]) using
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 741da8f81c2b..e64727e1a72f 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -165,7 +165,12 @@ static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
- nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
+ u8 plen = 0;
+
+ if (opt)
+ plen = sizeof(struct nci_core_init_v2_cmd);
+
+ nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, plen, (void *)opt);
}
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
@@ -497,7 +502,16 @@ static int nci_open_device(struct nci_dev *ndev)
}
if (!rc) {
- rc = __nci_request(ndev, nci_init_req, 0,
+ struct nci_core_init_v2_cmd nci_init_v2_cmd = {
+ .feature1 = NCI_FEATURE_DISABLE,
+ .feature2 = NCI_FEATURE_DISABLE
+ };
+ unsigned long opt = 0;
+
+ if (!(ndev->nci_ver & NCI_VER_2_MASK))
+ opt = (unsigned long)&nci_init_v2_cmd;
+
+ rc = __nci_request(ndev, nci_init_req, opt,
msecs_to_jiffies(NCI_INIT_TIMEOUT));
}
@@ -1112,6 +1126,8 @@ static struct nfc_ops nci_nfc_ops = {
*
* @ops: device operations
* @supported_protocols: NFC protocols supported by the device
+ * @tx_headroom: Reserved space at beginning of skb
+ * @tx_tailroom: Reserved space at end of skb
*/
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
__u32 supported_protocols,
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index c18e76d6d8ba..6b275a387a92 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -363,16 +363,13 @@ exit:
}
static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe,
- u8 result, struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct nci_conn_info *conn_info;
- u8 status = result;
conn_info = ndev->hci_dev->conn_info;
- if (!conn_info) {
- status = NCI_STATUS_REJECTED;
+ if (!conn_info)
goto exit;
- }
conn_info->rx_skb = skb;
@@ -388,7 +385,7 @@ static void nci_hci_hcp_message_rx(struct nci_dev *ndev, u8 pipe,
{
switch (type) {
case NCI_HCI_HCP_RESPONSE:
- nci_hci_resp_received(ndev, pipe, instruction, skb);
+ nci_hci_resp_received(ndev, pipe, skb);
break;
case NCI_HCI_HCP_COMMAND:
nci_hci_cmd_received(ndev, pipe, instruction, skb);
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 33e1170817f0..98af04c86b2c 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -27,6 +27,23 @@
/* Handle NCI Notification packets */
+static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ /* Handle NCI 2.x core reset notification */
+ struct nci_core_reset_ntf *ntf = (void *)skb->data;
+
+ ndev->nci_ver = ntf->nci_ver;
+ pr_debug("nci_ver 0x%x, config_status 0x%x\n",
+ ntf->nci_ver, ntf->config_status);
+
+ ndev->manufact_id = ntf->manufact_id;
+ ndev->manufact_specific_info =
+ __le32_to_cpu(ntf->manufact_specific_info);
+
+ nci_req_complete(ndev, NCI_STATUS_OK);
+}
+
static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
@@ -756,6 +773,10 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
}
switch (ntf_opcode) {
+ case NCI_OP_CORE_RESET_NTF:
+ nci_core_reset_ntf_packet(ndev, skb);
+ break;
+
case NCI_OP_CORE_CONN_CREDITS_NTF:
nci_core_conn_credits_ntf_packet(ndev, skb);
break;
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index a48297b79f34..e9605922a322 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -31,16 +31,19 @@ static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
pr_debug("status 0x%x\n", rsp->status);
- if (rsp->status == NCI_STATUS_OK) {
- ndev->nci_ver = rsp->nci_ver;
- pr_debug("nci_ver 0x%x, config_status 0x%x\n",
- rsp->nci_ver, rsp->config_status);
- }
+ /* Handle NCI 1.x ver */
+ if (skb->len != 1) {
+ if (rsp->status == NCI_STATUS_OK) {
+ ndev->nci_ver = rsp->nci_ver;
+ pr_debug("nci_ver 0x%x, config_status 0x%x\n",
+ rsp->nci_ver, rsp->config_status);
+ }
- nci_req_complete(ndev, rsp->status);
+ nci_req_complete(ndev, rsp->status);
+ }
}
-static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
+static u8 nci_core_init_rsp_packet_v1(struct nci_dev *ndev, struct sk_buff *skb)
{
struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
struct nci_core_init_rsp_2 *rsp_2;
@@ -48,16 +51,14 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
pr_debug("status 0x%x\n", rsp_1->status);
if (rsp_1->status != NCI_STATUS_OK)
- goto exit;
+ return rsp_1->status;
ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
- if (ndev->num_supported_rf_interfaces >
- NCI_MAX_SUPPORTED_RF_INTERFACES) {
- ndev->num_supported_rf_interfaces =
- NCI_MAX_SUPPORTED_RF_INTERFACES;
- }
+ ndev->num_supported_rf_interfaces =
+ min((int)ndev->num_supported_rf_interfaces,
+ NCI_MAX_SUPPORTED_RF_INTERFACES);
memcpy(ndev->supported_rf_interfaces,
rsp_1->supported_rf_interfaces,
@@ -77,6 +78,58 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
ndev->manufact_specific_info =
__le32_to_cpu(rsp_2->manufact_specific_info);
+ return NCI_STATUS_OK;
+}
+
+static u8 nci_core_init_rsp_packet_v2(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct nci_core_init_rsp_nci_ver2 *rsp = (void *)skb->data;
+ u8 *supported_rf_interface = rsp->supported_rf_interfaces;
+ u8 rf_interface_idx = 0;
+ u8 rf_extension_cnt = 0;
+
+ pr_debug("status %x\n", rsp->status);
+
+ if (rsp->status != NCI_STATUS_OK)
+ return rsp->status;
+
+ ndev->nfcc_features = __le32_to_cpu(rsp->nfcc_features);
+ ndev->num_supported_rf_interfaces = rsp->num_supported_rf_interfaces;
+
+ ndev->num_supported_rf_interfaces =
+ min((int)ndev->num_supported_rf_interfaces,
+ NCI_MAX_SUPPORTED_RF_INTERFACES);
+
+ while (rf_interface_idx < ndev->num_supported_rf_interfaces) {
+ ndev->supported_rf_interfaces[rf_interface_idx++] = *supported_rf_interface++;
+
+ /* skip rf extension parameters */
+ rf_extension_cnt = *supported_rf_interface++;
+ supported_rf_interface += rf_extension_cnt;
+ }
+
+ ndev->max_logical_connections = rsp->max_logical_connections;
+ ndev->max_routing_table_size =
+ __le16_to_cpu(rsp->max_routing_table_size);
+ ndev->max_ctrl_pkt_payload_len =
+ rsp->max_ctrl_pkt_payload_len;
+ ndev->max_size_for_large_params = NCI_MAX_LARGE_PARAMS_NCI_v2;
+
+ return NCI_STATUS_OK;
+}
+
+static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ u8 status = 0;
+
+ if (!(ndev->nci_ver & NCI_VER_2_MASK))
+ status = nci_core_init_rsp_packet_v1(ndev, skb);
+ else
+ status = nci_core_init_rsp_packet_v2(ndev, skb);
+
+ if (status != NCI_STATUS_OK)
+ goto exit;
+
pr_debug("nfcc_features 0x%x\n",
ndev->nfcc_features);
pr_debug("num_supported_rf_interfaces %d\n",
@@ -103,7 +156,7 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
ndev->manufact_specific_info);
exit:
- nci_req_complete(ndev, rsp_1->status);
+ nci_req_complete(ndev, status);
}
static void nci_core_set_config_rsp_packet(struct nci_dev *ndev,
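For illustration: the two init-response parsers above both clamp the controller-reported interface count with min() before copying into the fixed-size supported_rf_interfaces array. A minimal userspace sketch of that defensive pattern; the capacity constant and names are illustrative stand-ins, not the NCI definitions:

#include <stdio.h>
#include <string.h>

#define MAX_SUPPORTED_RF_INTERFACES 4   /* stand-in for NCI_MAX_SUPPORTED_RF_INTERFACES */

static size_t clamped_copy(unsigned char *dst, size_t dst_cap,
                           const unsigned char *src, size_t reported)
{
	/* Never trust a length reported by the peer: clamp it to the
	 * capacity of the destination before copying. */
	size_t n = reported < dst_cap ? reported : dst_cap;

	memcpy(dst, src, n);
	return n;
}

int main(void)
{
	unsigned char rsp[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
	unsigned char ifaces[MAX_SUPPORTED_RF_INTERFACES];
	size_t n = clamped_copy(ifaces, sizeof(ifaces), rsp, sizeof(rsp));

	printf("copied %zu of %zu reported interfaces\n", n, sizeof(rsp));
	return 0;
}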
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 8709f3d4e7c4..573b38ad2f8e 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1226,7 +1226,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
if (!dev)
return -ENODEV;
- nla_strlcpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME],
+ nla_strscpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME],
sizeof(firmware_name));
rc = nfc_fw_download(dev, firmware_name);
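nla_strlcpy() becomes nla_strscpy() here; the strscpy family always NUL-terminates and reports truncation as -E2BIG instead of returning the source length. A rough userspace sketch of those semantics, with scpy() and E2BIG_ERR as illustrative stand-ins:

#include <stdio.h>
#include <string.h>

#define E2BIG_ERR (-7)   /* stand-in for -E2BIG */

/* strscpy-style copy: always NUL-terminates and reports truncation
 * with an error code instead of the source length. */
static long scpy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return E2BIG_ERR;

	len = strnlen(src, size);
	if (len == size) {          /* source does not fit */
		len = size - 1;
		memcpy(dst, src, len);
		dst[len] = '\0';
		return E2BIG_ERR;
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char name[8];

	printf("%ld -> \"%s\"\n", scpy(name, "fw.bin", sizeof(name)), name);
	printf("%ld -> \"%s\"\n", scpy(name, "firmware-image.bin", sizeof(name)), name);
	return 0;
}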
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c3a664871cb5..e8902a7e60f2 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -959,16 +959,13 @@ static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
const struct nlattr *attr, bool last)
{
- /* The first action is always 'OVS_DEC_TTL_ATTR_ARG'. */
- struct nlattr *dec_ttl_arg = nla_data(attr);
+ /* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
+ struct nlattr *actions = nla_data(attr);
- if (nla_len(dec_ttl_arg)) {
- struct nlattr *actions = nla_data(dec_ttl_arg);
+ if (nla_len(actions))
+ return clone_execute(dp, skb, key, 0, nla_data(actions),
+ nla_len(actions), last, false);
- if (actions)
- return clone_execute(dp, skb, key, 0, nla_data(actions),
- nla_len(actions), last, false);
- }
consume_skb(skb);
return 0;
}
@@ -1212,7 +1209,7 @@ static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
return -EHOSTUNREACH;
key->ip.ttl = --nh->hop_limit;
- } else {
+ } else if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *nh;
u8 old_ttl;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 4beb96139d77..5eddfe7bd391 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1037,6 +1037,14 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
return -EINVAL;
}
+
+ if (nf_ct_protonum(ct) == IPPROTO_TCP &&
+ nf_ct_is_confirmed(ct) && nf_conntrack_tcp_established(ct)) {
+ /* Be liberal for TCP packets so that out-of-window
+ * packets are not marked invalid.
+ */
+ nf_ct_set_tcp_be_liberal(ct);
+ }
}
return 0;
@@ -2025,15 +2033,11 @@ static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
struct sk_buff *reply)
{
struct ovs_zone_limit zone_limit;
- int err;
zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
zone_limit.limit = info->default_limit;
- err = nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
- if (err)
- return err;
- return 0;
+ return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}
static int __ovs_ct_limit_get_zone_limit(struct net *net,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index b03d142ec82e..c7f34d6a9934 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -294,6 +294,10 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
/**
* Parse vlan tag from vlan header.
+ * @skb: skb containing frame to parse
+ * @key_vh: pointer to parsed vlan tag
+ * @untag_vlan: should the vlan header be removed from the frame
+ *
* Returns ERROR on memory error.
* Returns 0 if it encounters a non-vlan or incomplete packet.
* Returns 1 after successfully parsing vlan tag.
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 8fbefd52af7f..15424d26e85d 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -423,7 +423,7 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
meter = dp_meter_create(a);
- if (IS_ERR_OR_NULL(meter))
+ if (IS_ERR(meter))
return PTR_ERR(meter);
reply = ovs_meter_cmd_reply_start(info, OVS_METER_CMD_SET,
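dp_meter_create() can no longer return NULL, so the plain IS_ERR() check suffices. A toy userspace encoding of the kernel's error-pointer convention, for illustration only:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)(uintptr_t)err; }
static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int IS_ERR(const void *p)
{
	/* errors live in the top MAX_ERRNO values of the address space */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *meter_create(int fail)
{
	static int meter = 42;   /* stands in for a real meter object */

	return fail ? ERR_PTR(-ENOMEM) : &meter;
}

int main(void)
{
	void *m = meter_create(1);

	if (IS_ERR(m)) {
		printf("create failed: %ld\n", PTR_ERR(m));
		return 1;
	}
	return 0;
}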
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 1e30d8df3ba5..5b2ee9c1c00b 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -35,21 +35,18 @@ internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
int len, err;
+ /* save skb->len now, as the skb may be freed inside ovs_vport_receive() */
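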
len = skb->len;
+
rcu_read_lock();
err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
rcu_read_unlock();
- if (likely(!err)) {
- struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev->tstats);
-
- u64_stats_update_begin(&tstats->syncp);
- tstats->tx_bytes += len;
- tstats->tx_packets++;
- u64_stats_update_end(&tstats->syncp);
- } else {
+ if (likely(!err))
+ dev_sw_netstats_tx_add(netdev, 1, len);
+ else
netdev->stats.tx_errors++;
- }
+
return NETDEV_TX_OK;
}
@@ -83,24 +80,12 @@ static void internal_dev_destructor(struct net_device *dev)
ovs_vport_free(vport);
}
-static void
-internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
-{
- memset(stats, 0, sizeof(*stats));
- stats->rx_errors = dev->stats.rx_errors;
- stats->tx_errors = dev->stats.tx_errors;
- stats->tx_dropped = dev->stats.tx_dropped;
- stats->rx_dropped = dev->stats.rx_dropped;
-
- dev_fetch_sw_netstats(stats, dev->tstats);
-}
-
static const struct net_device_ops internal_dev_netdev_ops = {
.ndo_open = internal_dev_open,
.ndo_stop = internal_dev_stop,
.ndo_start_xmit = internal_dev_xmit,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_get_stats64 = internal_get_stats,
+ .ndo_get_stats64 = dev_get_tstats64,
};
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
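The hand-rolled u64_stats update above is replaced by the shared dev_sw_netstats_tx_add()/dev_get_tstats64() helpers. A loose userspace analogue of the per-CPU idea, using one counter slot per worker and summing at read time; the threading below is only a stand-in for the kernel's per-CPU machinery:

#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 2

/* One counter pair per worker, mimicking per-CPU tx stats; readers
 * sum all slots, so writers never contend on a shared counter. */
struct sw_stats {
	unsigned long long tx_packets;
	unsigned long long tx_bytes;
};

static struct sw_stats stats[NR_WORKERS];

static void stats_tx_add(int worker, unsigned int packets, unsigned int len)
{
	stats[worker].tx_packets += packets;
	stats[worker].tx_bytes += len;
}

static void *worker(void *arg)
{
	int id = (int)(long)arg;

	for (int i = 0; i < 1000; i++)
		stats_tx_add(id, 1, 64);
	return NULL;
}

int main(void)
{
	pthread_t th[NR_WORKERS];
	unsigned long long pkts = 0, bytes = 0;

	for (long i = 0; i < NR_WORKERS; i++)
		pthread_create(&th[i], NULL, worker, (void *)i);
	for (int i = 0; i < NR_WORKERS; i++)
		pthread_join(th[i], NULL);

	/* aggregate at read time, like dev_get_tstats64() */
	for (int i = 0; i < NR_WORKERS; i++) {
		pkts += stats[i].tx_packets;
		bytes += stats[i].tx_bytes;
	}
	printf("tx_packets=%llu tx_bytes=%llu\n", pkts, bytes);
	return 0;
}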
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 82d801f063b7..4ed7e52c7012 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -111,10 +111,12 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name)
*
* @priv_size: Size of private data area to allocate.
* @ops: vport device ops
+ * @parms: information about new vport.
*
* Allocate and initialize a new vport defined by @ops. The vport will contain
* a private data area of size @priv_size that can be accessed using
- * vport_priv(). vports that are no longer needed should be released with
+ * vport_priv(). Some parameters of the vport will be initialized from @parms.
+ * @vports that are no longer needed should be released with
* vport_free().
*/
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 7a18ffff8551..de8e8dbbdeb8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -46,6 +46,7 @@
* Copyright (C) 2011, <lokec@ccs.neu.edu>
*/
+#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
@@ -1636,13 +1637,15 @@ static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
return false;
}
-static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+static int fanout_add(struct sock *sk, struct fanout_args *args)
{
struct packet_rollover *rollover = NULL;
struct packet_sock *po = pkt_sk(sk);
+ u16 type_flags = args->type_flags;
struct packet_fanout *f, *match;
u8 type = type_flags & 0xff;
u8 flags = type_flags >> 8;
+ u16 id = args->id;
int err;
switch (type) {
@@ -1700,11 +1703,21 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
}
}
err = -EINVAL;
- if (match && match->flags != flags)
- goto out;
- if (!match) {
+ if (match) {
+ if (match->flags != flags)
+ goto out;
+ if (args->max_num_members &&
+ args->max_num_members != match->max_num_members)
+ goto out;
+ } else {
+ if (args->max_num_members > PACKET_FANOUT_MAX)
+ goto out;
+ if (!args->max_num_members)
+ /* legacy PACKET_FANOUT_MAX */
+ args->max_num_members = 256;
err = -ENOMEM;
- match = kzalloc(sizeof(*match), GFP_KERNEL);
+ match = kvzalloc(struct_size(match, arr, args->max_num_members),
+ GFP_KERNEL);
if (!match)
goto out;
write_pnet(&match->net, sock_net(sk));
@@ -1720,6 +1733,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
match->prot_hook.func = packet_rcv_fanout;
match->prot_hook.af_packet_priv = match;
match->prot_hook.id_match = match_fanout_group;
+ match->max_num_members = args->max_num_members;
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
@@ -1730,7 +1744,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
- if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
+ if (refcount_read(&match->sk_ref) < match->max_num_members) {
__dev_remove_pack(&po->prot_hook);
po->fanout = match;
po->rollover = rollover;
@@ -1744,7 +1758,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
if (err && !refcount_read(&match->sk_ref)) {
list_del(&match->list);
- kfree(match);
+ kvfree(match);
}
out:
@@ -3075,7 +3089,7 @@ static int packet_release(struct socket *sock)
kfree(po->rollover);
if (f) {
fanout_release_data(f);
- kfree(f);
+ kvfree(f);
}
/*
* Now the socket is dead. No more input will appear.
@@ -3866,14 +3880,14 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
}
case PACKET_FANOUT:
{
- int val;
+ struct fanout_args args = { 0 };
- if (optlen != sizeof(val))
+ if (optlen != sizeof(int) && optlen != sizeof(args))
return -EINVAL;
- if (copy_from_sockptr(&val, optval, sizeof(val)))
+ if (copy_from_sockptr(&args, optval, optlen))
return -EFAULT;
- return fanout_add(sk, val & 0xffff, val >> 16);
+ return fanout_add(sk, &args);
}
case PACKET_FANOUT_DATA:
{
@@ -4615,9 +4629,11 @@ static int __net_init packet_net_init(struct net *net)
mutex_init(&net->packet.sklist_lock);
INIT_HLIST_HEAD(&net->packet.sklist);
+#ifdef CONFIG_PROC_FS
if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
sizeof(struct seq_net_private)))
return -ENOMEM;
+#endif /* CONFIG_PROC_FS */
return 0;
}
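With this change PACKET_FANOUT accepts either the legacy 4-byte value or a struct fanout_args carrying max_num_members. A sketch of joining a group from userspace under that assumption; the group id and member count are arbitrary, the struct layout shown matches <linux/if_packet.h> on little-endian hosts, and kernels before this change reject the long form:

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>

int main(void)
{
	/* id occupies the low 16 bits of the legacy int encoding, so it
	 * comes first on little-endian hosts; prefer the struct from
	 * <linux/if_packet.h> where available. */
	struct {
		__u16 id;
		__u16 type_flags;
		__u32 max_num_members;
	} args = {
		.id = 23,                       /* arbitrary group id */
		.type_flags = PACKET_FANOUT_HASH,
		.max_num_members = 1024,        /* above the legacy 256 limit */
	};
	struct sockaddr_ll ll = {
		.sll_family = AF_PACKET,
		.sll_protocol = htons(ETH_P_ALL),
		.sll_ifindex = if_nametoindex("lo"),
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0) {
		perror("socket/bind (needs CAP_NET_RAW)");
		return 1;
	}
	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &args, sizeof(args)) < 0)
		perror("PACKET_FANOUT");
	else
		printf("joined fanout group %u (up to %u members)\n",
		       args.id, args.max_num_members);
	close(fd);
	return 0;
}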
diff --git a/net/packet/internal.h b/net/packet/internal.h
index fd41ecb7f605..baafc3f3fa25 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -77,11 +77,12 @@ struct packet_ring_buffer {
};
extern struct mutex fanout_mutex;
-#define PACKET_FANOUT_MAX 256
+#define PACKET_FANOUT_MAX (1 << 16)
struct packet_fanout {
possible_net_t net;
unsigned int num_members;
+ u32 max_num_members;
u16 id;
u8 type;
u8 flags;
@@ -90,10 +91,10 @@ struct packet_fanout {
struct bpf_prog __rcu *bpf_prog;
};
struct list_head list;
- struct sock *arr[PACKET_FANOUT_MAX];
spinlock_t lock;
refcount_t sk_ref;
struct packet_type prot_hook ____cacheline_aligned_in_smp;
+ struct sock *arr[];
};
struct packet_rollover {
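Replacing the fixed arr[PACKET_FANOUT_MAX] with a C99 flexible array member lets the allocation match the configured maximum. A standalone sketch of the pattern; calloc() stands in for kvzalloc() and omits struct_size()'s overflow checking:

#include <stdio.h>
#include <stdlib.h>

/* Moving the member array to a flexible array member sizes the
 * allocation to the configured maximum instead of a compile-time
 * worst case, as packet_fanout now does with struct_size(). */
struct fanout {
	unsigned int num_members;
	unsigned int max_num_members;
	void *arr[];                /* flexible array member, sized at alloc */
};

static struct fanout *fanout_alloc(unsigned int max)
{
	struct fanout *f = calloc(1, sizeof(*f) + max * sizeof(f->arr[0]));

	if (f)
		f->max_num_members = max;
	return f;
}

int main(void)
{
	struct fanout *f = fanout_alloc(1024);

	if (!f)
		return 1;
	printf("allocated %zu bytes for %u slots\n",
	       sizeof(*f) + f->max_num_members * sizeof(f->arr[0]),
	       f->max_num_members);
	free(f);
	return 0;
}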
diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
index ff0c41467fc1..2bf2b1943e61 100644
--- a/net/qrtr/mhi.c
+++ b/net/qrtr/mhi.c
@@ -76,6 +76,11 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
struct qrtr_mhi_dev *qdev;
int rc;
+ /* start channels */
+ rc = mhi_prepare_for_transfer(mhi_dev);
+ if (rc)
+ return rc;
+
qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
if (!qdev)
return -ENOMEM;
@@ -99,6 +104,7 @@ static void qcom_mhi_qrtr_remove(struct mhi_device *mhi_dev)
struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
qrtr_endpoint_unregister(&qdev->ep);
+ mhi_unprepare_from_transfer(mhi_dev);
dev_set_drvdata(&mhi_dev->dev, NULL);
}
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index b8559c882431..56aaf8cb6527 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -517,10 +517,6 @@ static int ctrl_cmd_new_server(struct sockaddr_qrtr *from,
port = from->sq_port;
}
- /* Don't accept spoofed messages */
- if (from->sq_node != node_id)
- return -EINVAL;
-
srv = server_add(service, instance, node_id, port);
if (!srv)
return -EINVAL;
@@ -559,10 +555,6 @@ static int ctrl_cmd_del_server(struct sockaddr_qrtr *from,
port = from->sq_port;
}
- /* Don't accept spoofed messages */
- if (from->sq_node != node_id)
- return -EINVAL;
-
/* Local servers may only unregister themselves */
if (from->sq_node == qrtr_ns.local_node && from->sq_port != port)
return -EINVAL;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 957aa9263ba4..f4ab3ca6d73b 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -171,8 +171,13 @@ static void __qrtr_node_release(struct kref *kref)
void __rcu **slot;
spin_lock_irqsave(&qrtr_nodes_lock, flags);
- if (node->nid != QRTR_EP_NID_AUTO)
- radix_tree_delete(&qrtr_nodes, node->nid);
+ /* If the node acts as a bridge for other nodes, there may be
+ * multiple entries pointing to the released node; delete them all.
+ */
+ radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
+ if (*slot == node)
+ radix_tree_iter_delete(&qrtr_nodes, &iter, slot);
+ }
spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
list_del(&node->item);
@@ -347,7 +352,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
hdr->src_port_id = cpu_to_le32(from->sq_port);
if (to->sq_port == QRTR_PORT_CTRL) {
hdr->dst_node_id = cpu_to_le32(node->nid);
- hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
+ hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
} else {
hdr->dst_node_id = cpu_to_le32(to->sq_node);
hdr->dst_port_id = cpu_to_le32(to->sq_port);
@@ -401,12 +406,13 @@ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
unsigned long flags;
- if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
+ if (nid == QRTR_EP_NID_AUTO)
return;
spin_lock_irqsave(&qrtr_nodes_lock, flags);
radix_tree_insert(&qrtr_nodes, nid, node);
- node->nid = nid;
+ if (node->nid == QRTR_EP_NID_AUTO)
+ node->nid = nid;
spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
}
@@ -494,6 +500,13 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
qrtr_node_assign(node, cb->src_node);
+ if (cb->type == QRTR_TYPE_NEW_SERVER) {
+ /* Remote node endpoint can bridge other distant nodes */
+ const struct qrtr_ctrl_pkt *pkt = data + hdrlen;
+
+ qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
+ }
+
if (cb->type == QRTR_TYPE_RESUME_TX) {
qrtr_tx_resume(node, skb);
} else {
@@ -519,18 +532,20 @@ EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
/**
* qrtr_alloc_ctrl_packet() - allocate control packet skb
* @pkt: reference to qrtr_ctrl_pkt pointer
+ * @flags: the type of memory to allocate
*
* Returns newly allocated sk_buff, or NULL on failure
*
* This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
* on success returns a reference to the control packet in @pkt.
*/
-static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt)
+static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt,
+ gfp_t flags)
{
const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
struct sk_buff *skb;
- skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, GFP_KERNEL);
+ skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, flags);
if (!skb)
return NULL;
@@ -592,6 +607,7 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
struct qrtr_ctrl_pkt *pkt;
struct qrtr_tx_flow *flow;
struct sk_buff *skb;
+ unsigned long flags;
void __rcu **slot;
mutex_lock(&node->ep_lock);
@@ -599,11 +615,18 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
mutex_unlock(&node->ep_lock);
/* Notify the local controller about the event */
- skb = qrtr_alloc_ctrl_packet(&pkt);
- if (skb) {
- pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
- qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
+ spin_lock_irqsave(&qrtr_nodes_lock, flags);
+ radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
+ if (*slot != node)
+ continue;
+ src.sq_node = iter.index;
+ skb = qrtr_alloc_ctrl_packet(&pkt, GFP_ATOMIC);
+ if (skb) {
+ pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
+ qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
+ }
}
+ spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
/* Wake up any transmitters waiting for resume-tx from the node */
mutex_lock(&node->qrtr_tx_lock);
@@ -656,7 +679,7 @@ static void qrtr_port_remove(struct qrtr_sock *ipc)
to.sq_node = QRTR_NODE_BCAST;
to.sq_port = QRTR_PORT_CTRL;
- skb = qrtr_alloc_ctrl_packet(&pkt);
+ skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
if (skb) {
pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
pkt->client.node = cpu_to_le32(ipc->us.sq_node);
@@ -982,7 +1005,7 @@ static int qrtr_send_resume_tx(struct qrtr_cb *cb)
if (!node)
return -EINVAL;
- skb = qrtr_alloc_ctrl_packet(&pkt);
+ skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
if (!skb)
return -ENOMEM;
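Since a bridging endpoint can now be registered under several node ids, release must sweep the whole tree for matching slots instead of deleting one known id. A toy array-backed version of that sweep, standing in for the radix tree:

#include <stdio.h>

#define MAX_IDS 8

/* Toy id -> node map: a bridge node may be registered under several
 * ids, so releasing it must check every slot. */
static void *nodes[MAX_IDS];

static void node_assign(unsigned int id, void *node)
{
	if (id < MAX_IDS && !nodes[id])
		nodes[id] = node;
}

static void node_release(void *node)
{
	for (unsigned int id = 0; id < MAX_IDS; id++)
		if (nodes[id] == node)
			nodes[id] = NULL;
}

int main(void)
{
	int bridge;

	node_assign(1, &bridge);   /* the endpoint's own node id */
	node_assign(5, &bridge);   /* a distant node reachable via the bridge */
	node_release(&bridge);

	for (unsigned int id = 0; id < MAX_IDS; id++)
		if (nodes[id])
			printf("id %u still mapped\n", id);
	printf("all bridge entries removed\n");
	return 0;
}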
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 97101c55763d..68d6ef9e59fc 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -40,6 +40,7 @@ struct rfkill {
enum rfkill_type type;
unsigned long state;
+ unsigned long hard_block_reasons;
u32 idx;
@@ -265,6 +266,7 @@ static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
RFKILL_BLOCK_SW_PREV));
+ ev->hard_block_reasons = rfkill->hard_block_reasons;
spin_unlock_irqrestore(&rfkill->lock, flags);
}
@@ -522,19 +524,29 @@ bool rfkill_get_global_sw_state(const enum rfkill_type type)
}
#endif
-bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
+bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
+ bool blocked, unsigned long reason)
{
unsigned long flags;
bool ret, prev;
BUG_ON(!rfkill);
+ if (WARN(reason &
+ ~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER),
+ "hw_state reason not supported: 0x%lx", reason))
+ return blocked;
+
spin_lock_irqsave(&rfkill->lock, flags);
- prev = !!(rfkill->state & RFKILL_BLOCK_HW);
- if (blocked)
+ prev = !!(rfkill->hard_block_reasons & reason);
+ if (blocked) {
rfkill->state |= RFKILL_BLOCK_HW;
- else
- rfkill->state &= ~RFKILL_BLOCK_HW;
+ rfkill->hard_block_reasons |= reason;
+ } else {
+ rfkill->hard_block_reasons &= ~reason;
+ if (!rfkill->hard_block_reasons)
+ rfkill->state &= ~RFKILL_BLOCK_HW;
+ }
ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
spin_unlock_irqrestore(&rfkill->lock, flags);
@@ -546,7 +558,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
return ret;
}
-EXPORT_SYMBOL(rfkill_set_hw_state);
+EXPORT_SYMBOL(rfkill_set_hw_state_reason);
static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
@@ -744,6 +756,16 @@ static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(soft);
+static ssize_t hard_block_reasons_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "0x%lx\n", rfkill->hard_block_reasons);
+}
+static DEVICE_ATTR_RO(hard_block_reasons);
+
static u8 user_state_from_blocked(unsigned long state)
{
if (state & RFKILL_BLOCK_HW)
@@ -796,6 +818,7 @@ static struct attribute *rfkill_dev_attrs[] = {
&dev_attr_state.attr,
&dev_attr_soft.attr,
&dev_attr_hard.attr,
+ &dev_attr_hard_block_reasons.attr,
NULL,
};
ATTRIBUTE_GROUPS(rfkill_dev);
@@ -811,6 +834,7 @@ static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long flags;
+ unsigned long reasons;
u32 state;
int error;
@@ -823,10 +847,13 @@ static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
return error;
spin_lock_irqsave(&rfkill->lock, flags);
state = rfkill->state;
+ reasons = rfkill->hard_block_reasons;
spin_unlock_irqrestore(&rfkill->lock, flags);
error = add_uevent_var(env, "RFKILL_STATE=%d",
user_state_from_blocked(state));
- return error;
+ if (error)
+ return error;
+ return add_uevent_var(env, "RFKILL_HW_BLOCK_REASON=0x%lx", reasons);
}
void rfkill_pause_polling(struct rfkill *rfkill)
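The hardware block is now reference-like: it stays asserted until every recorded reason bit is cleared. A compact sketch of that bookkeeping; the reason values are stand-ins for the RFKILL_HARD_BLOCK_* flags:

#include <stdio.h>

#define HARD_BLOCK_SIGNAL    0x1UL   /* stand-ins for the rfkill reasons */
#define HARD_BLOCK_NOT_OWNER 0x2UL

/* The block stays asserted until *every* recorded reason is cleared,
 * matching the rfkill_set_hw_state_reason() logic above. */
static unsigned long hard_block_reasons;
static int hw_blocked;

static void set_hw_state_reason(int blocked, unsigned long reason)
{
	if (blocked)
		hard_block_reasons |= reason;
	else
		hard_block_reasons &= ~reason;
	hw_blocked = hard_block_reasons != 0;
}

int main(void)
{
	set_hw_state_reason(1, HARD_BLOCK_SIGNAL);
	set_hw_state_reason(1, HARD_BLOCK_NOT_OWNER);
	set_hw_state_reason(0, HARD_BLOCK_SIGNAL);
	printf("blocked=%d reasons=0x%lx\n", hw_blocked, hard_block_reasons);
	set_hw_state_reason(0, HARD_BLOCK_NOT_OWNER);
	printf("blocked=%d reasons=0x%lx\n", hw_blocked, hard_block_reasons);
	return 0;
}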
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index ddd0f95713a9..b11281bed2a4 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -28,6 +28,7 @@ rxrpc-y := \
rtt.o \
security.o \
sendmsg.o \
+ server_key.o \
skbuff.o \
utils.o
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index dce48162f6c2..7bd6f8a66a3e 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -12,6 +12,7 @@
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
+#include <keys/rxrpc-type.h>
#include "protocol.h"
#if 0
@@ -34,6 +35,7 @@ struct rxrpc_crypt {
#define rxrpc_queue_delayed_work(WS,D) \
queue_delayed_work(rxrpc_workqueue, (WS), (D))
+struct key_preparsed_payload;
struct rxrpc_connection;
/*
@@ -216,17 +218,30 @@ struct rxrpc_security {
/* Clean up a security service */
void (*exit)(void);
+ /* Parse the information from a server key */
+ int (*preparse_server_key)(struct key_preparsed_payload *);
+
+ /* Clean up the preparse buffer after parsing a server key */
+ void (*free_preparse_server_key)(struct key_preparsed_payload *);
+
+ /* Destroy the payload of a server key */
+ void (*destroy_server_key)(struct key *);
+
+ /* Describe a server key */
+ void (*describe_server_key)(const struct key *, struct seq_file *);
+
/* initialise a connection's security */
- int (*init_connection_security)(struct rxrpc_connection *);
+ int (*init_connection_security)(struct rxrpc_connection *,
+ struct rxrpc_key_token *);
- /* prime a connection's packet security */
- int (*prime_packet_security)(struct rxrpc_connection *);
+ /* Work out how much data we can store in a packet, given an estimate
+ * of the amount of data remaining.
+ */
+ int (*how_much_data)(struct rxrpc_call *, size_t,
+ size_t *, size_t *, size_t *);
/* impose security on a packet */
- int (*secure_packet)(struct rxrpc_call *,
- struct sk_buff *,
- size_t,
- void *);
+ int (*secure_packet)(struct rxrpc_call *, struct sk_buff *, size_t);
/* verify the security on a received packet */
int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
@@ -438,10 +453,15 @@ struct rxrpc_connection {
struct list_head proc_link; /* link in procfs list */
struct list_head link; /* link in master connection list */
struct sk_buff_head rx_queue; /* received conn-level packets */
+
const struct rxrpc_security *security; /* applied security module */
- struct key *server_key; /* security for this service */
- struct crypto_sync_skcipher *cipher; /* encryption handle */
- struct rxrpc_crypt csum_iv; /* packet checksum base */
+ union {
+ struct {
+ struct crypto_sync_skcipher *cipher; /* encryption handle */
+ struct rxrpc_crypt csum_iv; /* packet checksum base */
+ u32 nonce; /* response re-use preventer */
+ } rxkad;
+ };
unsigned long flags;
unsigned long events;
unsigned long idle_timestamp; /* Time at which last became idle */
@@ -451,10 +471,7 @@ struct rxrpc_connection {
int debug_id; /* debug ID for printks */
atomic_t serial; /* packet serial number counter */
unsigned int hi_serial; /* highest serial number received */
- u32 security_nonce; /* response re-use preventer */
u32 service_id; /* Service ID, possibly upgraded */
- u8 size_align; /* data size alignment (for security) */
- u8 security_size; /* security header size */
u8 security_ix; /* security type */
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
u8 bundle_shift; /* Index into bundle->avail_chans */
@@ -888,8 +905,7 @@ struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *,
- const struct rxrpc_security *, struct key *,
- struct sk_buff *);
+ const struct rxrpc_security *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
/*
@@ -906,10 +922,8 @@ extern const struct rxrpc_security rxrpc_no_security;
* key.c
*/
extern struct key_type key_type_rxrpc;
-extern struct key_type key_type_rxrpc_s;
int rxrpc_request_key(struct rxrpc_sock *, sockptr_t , int);
-int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
u32);
@@ -1052,11 +1066,13 @@ extern const struct rxrpc_security rxkad;
* security.c
*/
int __init rxrpc_init_security(void);
+const struct rxrpc_security *rxrpc_security_lookup(u8);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
-bool rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *,
- const struct rxrpc_security **, struct key **,
- struct sk_buff *);
+const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *,
+ struct sk_buff *);
+struct key *rxrpc_look_up_server_security(struct rxrpc_connection *,
+ struct sk_buff *, u32, u32);
/*
* sendmsg.c
@@ -1064,6 +1080,13 @@ bool rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *,
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
/*
+ * server_key.c
+ */
+extern struct key_type key_type_rxrpc_s;
+
+int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int);
+
+/*
* skbuff.c
*/
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 8df1964db333..382add72c66f 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -261,7 +261,6 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
struct rxrpc_peer *peer,
struct rxrpc_connection *conn,
const struct rxrpc_security *sec,
- struct key *key,
struct sk_buff *skb)
{
struct rxrpc_backlog *b = rx->backlog;
@@ -309,7 +308,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
conn->params.local = rxrpc_get_local(local);
conn->params.peer = peer;
rxrpc_see_connection(conn);
- rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
+ rxrpc_new_incoming_connection(rx, conn, sec, skb);
} else {
rxrpc_get_connection(conn);
}
@@ -353,7 +352,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
struct rxrpc_connection *conn;
struct rxrpc_peer *peer = NULL;
struct rxrpc_call *call = NULL;
- struct key *key = NULL;
_enter("");
@@ -374,11 +372,13 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
*/
conn = rxrpc_find_connection_rcu(local, skb, &peer);
- if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
- goto no_call;
+ if (!conn) {
+ sec = rxrpc_get_incoming_security(rx, skb);
+ if (!sec)
+ goto no_call;
+ }
- call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
- key_put(key);
+ call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, skb);
if (!call) {
skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
goto no_call;
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 7e574c75be8e..dbea0bfee48e 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -180,10 +180,6 @@ rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
if (ret < 0)
goto error_1;
- ret = conn->security->prime_packet_security(conn);
- if (ret < 0)
- goto error_2;
-
atomic_inc(&rxnet->nr_conns);
write_lock(&rxnet->conn_lock);
list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
@@ -203,8 +199,6 @@ rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
_leave(" = %p", conn);
return conn;
-error_2:
- conn->security->clear(conn);
error_1:
rxrpc_put_client_connection_id(conn);
error_0:
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index aff184145ffa..aab069701398 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -333,11 +333,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
if (ret < 0)
return ret;
- ret = conn->security->init_connection_security(conn);
- if (ret < 0)
- return ret;
-
- ret = conn->security->prime_packet_security(conn);
+ ret = conn->security->init_connection_security(
+ conn, conn->params.key->payload.data[0]);
if (ret < 0)
return ret;
@@ -377,7 +374,6 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
_enter("{%d}", conn->debug_id);
ASSERT(conn->security_ix != 0);
- ASSERT(conn->server_key);
if (conn->security->issue_challenge(conn) < 0) {
abort_code = RX_CALL_DEAD;
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 3bcbe0665f91..b2159dbf5412 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -49,7 +49,6 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
conn->security = &rxrpc_no_security;
spin_lock_init(&conn->state_lock);
conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
- conn->size_align = 4;
conn->idle_timestamp = jiffies;
}
@@ -363,7 +362,6 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
conn->security->clear(conn);
key_put(conn->params.key);
- key_put(conn->server_key);
rxrpc_put_bundle(conn->bundle);
rxrpc_put_peer(conn->params.peer);
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index 6c847720494f..e1966dfc9152 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -156,7 +156,6 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
struct rxrpc_connection *conn,
const struct rxrpc_security *sec,
- struct key *key,
struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -170,7 +169,6 @@ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
conn->security_ix = sp->hdr.securityIndex;
conn->out_clientflag = 0;
conn->security = sec;
- conn->server_key = key_get(key);
if (conn->security_ix)
conn->state = RXRPC_CONN_SERVICE_UNSECURED;
else
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
index f6c59f5fae9d..9aae99d67833 100644
--- a/net/rxrpc/insecure.c
+++ b/net/rxrpc/insecure.c
@@ -8,20 +8,25 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-static int none_init_connection_security(struct rxrpc_connection *conn)
+static int none_init_connection_security(struct rxrpc_connection *conn,
+ struct rxrpc_key_token *token)
{
return 0;
}
-static int none_prime_packet_security(struct rxrpc_connection *conn)
+/*
+ * Work out how much data we can put in an unsecured packet.
+ */
+static int none_how_much_data(struct rxrpc_call *call, size_t remain,
+ size_t *_buf_size, size_t *_data_size, size_t *_offset)
{
+ *_buf_size = *_data_size = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
+ *_offset = 0;
return 0;
}
-static int none_secure_packet(struct rxrpc_call *call,
- struct sk_buff *skb,
- size_t data_size,
- void *sechdr)
+static int none_secure_packet(struct rxrpc_call *call, struct sk_buff *skb,
+ size_t data_size)
{
return 0;
}
@@ -86,8 +91,8 @@ const struct rxrpc_security rxrpc_no_security = {
.init = none_init,
.exit = none_exit,
.init_connection_security = none_init_connection_security,
- .prime_packet_security = none_prime_packet_security,
.free_call_crypto = none_free_call_crypto,
+ .how_much_data = none_how_much_data,
.secure_packet = none_secure_packet,
.verify_packet = none_verify_packet,
.locate_data = none_locate_data,
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 2e8bd3b97301..9631aa8543b5 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -5,7 +5,7 @@
* Written by David Howells (dhowells@redhat.com)
*
* RxRPC keys should have a description of describing their purpose:
- * "afs@CAMBRIDGE.REDHAT.COM>
+ * "afs@example.com"
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -23,13 +23,9 @@
#include <keys/user-type.h>
#include "ar-internal.h"
-static int rxrpc_vet_description_s(const char *);
static int rxrpc_preparse(struct key_preparsed_payload *);
-static int rxrpc_preparse_s(struct key_preparsed_payload *);
static void rxrpc_free_preparse(struct key_preparsed_payload *);
-static void rxrpc_free_preparse_s(struct key_preparsed_payload *);
static void rxrpc_destroy(struct key *);
-static void rxrpc_destroy_s(struct key *);
static void rxrpc_describe(const struct key *, struct seq_file *);
static long rxrpc_read(const struct key *, char *, size_t);
@@ -50,38 +46,6 @@ struct key_type key_type_rxrpc = {
EXPORT_SYMBOL(key_type_rxrpc);
/*
- * rxrpc server defined keys take "<serviceId>:<securityIndex>" as the
- * description and an 8-byte decryption key as the payload
- */
-struct key_type key_type_rxrpc_s = {
- .name = "rxrpc_s",
- .flags = KEY_TYPE_NET_DOMAIN,
- .vet_description = rxrpc_vet_description_s,
- .preparse = rxrpc_preparse_s,
- .free_preparse = rxrpc_free_preparse_s,
- .instantiate = generic_key_instantiate,
- .destroy = rxrpc_destroy_s,
- .describe = rxrpc_describe,
-};
-
-/*
- * Vet the description for an RxRPC server key
- */
-static int rxrpc_vet_description_s(const char *desc)
-{
- unsigned long num;
- char *p;
-
- num = simple_strtoul(desc, &p, 10);
- if (*p != ':' || num > 65535)
- return -EINVAL;
- num = simple_strtoul(p + 1, &p, 10);
- if (*p || num < 1 || num > 255)
- return -EINVAL;
- return 0;
-}
-
-/*
* parse an RxKAD type XDR format token
* - the caller guarantees we have at least 4 words
*/
@@ -165,402 +129,17 @@ static int rxrpc_preparse_xdr_rxkad(struct key_preparsed_payload *prep,
return 0;
}
-static void rxrpc_free_krb5_principal(struct krb5_principal *princ)
-{
- int loop;
-
- if (princ->name_parts) {
- for (loop = princ->n_name_parts - 1; loop >= 0; loop--)
- kfree(princ->name_parts[loop]);
- kfree(princ->name_parts);
- }
- kfree(princ->realm);
-}
-
-static void rxrpc_free_krb5_tagged(struct krb5_tagged_data *td)
-{
- kfree(td->data);
-}
-
-/*
- * free up an RxK5 token
- */
-static void rxrpc_rxk5_free(struct rxk5_key *rxk5)
-{
- int loop;
-
- rxrpc_free_krb5_principal(&rxk5->client);
- rxrpc_free_krb5_principal(&rxk5->server);
- rxrpc_free_krb5_tagged(&rxk5->session);
-
- if (rxk5->addresses) {
- for (loop = rxk5->n_addresses - 1; loop >= 0; loop--)
- rxrpc_free_krb5_tagged(&rxk5->addresses[loop]);
- kfree(rxk5->addresses);
- }
- if (rxk5->authdata) {
- for (loop = rxk5->n_authdata - 1; loop >= 0; loop--)
- rxrpc_free_krb5_tagged(&rxk5->authdata[loop]);
- kfree(rxk5->authdata);
- }
-
- kfree(rxk5->ticket);
- kfree(rxk5->ticket2);
- kfree(rxk5);
-}
-
-/*
- * extract a krb5 principal
- */
-static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
- const __be32 **_xdr,
- unsigned int *_toklen)
-{
- const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
-
- /* there must be at least one name, and at least #names+1 length
- * words */
- if (toklen <= 12)
- return -EINVAL;
-
- _enter(",{%x,%x,%x},%u",
- ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), toklen);
-
- n_parts = ntohl(*xdr++);
- toklen -= 4;
- if (n_parts <= 0 || n_parts > AFSTOKEN_K5_COMPONENTS_MAX)
- return -EINVAL;
- princ->n_name_parts = n_parts;
-
- if (toklen <= (n_parts + 1) * 4)
- return -EINVAL;
-
- princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);
- if (!princ->name_parts)
- return -ENOMEM;
-
- for (loop = 0; loop < n_parts; loop++) {
- if (toklen < 4)
- return -EINVAL;
- tmp = ntohl(*xdr++);
- toklen -= 4;
- if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
- return -EINVAL;
- paddedlen = (tmp + 3) & ~3;
- if (paddedlen > toklen)
- return -EINVAL;
- princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
- if (!princ->name_parts[loop])
- return -ENOMEM;
- memcpy(princ->name_parts[loop], xdr, tmp);
- princ->name_parts[loop][tmp] = 0;
- toklen -= paddedlen;
- xdr += paddedlen >> 2;
- }
-
- if (toklen < 4)
- return -EINVAL;
- tmp = ntohl(*xdr++);
- toklen -= 4;
- if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
- return -EINVAL;
- paddedlen = (tmp + 3) & ~3;
- if (paddedlen > toklen)
- return -EINVAL;
- princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
- if (!princ->realm)
- return -ENOMEM;
- memcpy(princ->realm, xdr, tmp);
- princ->realm[tmp] = 0;
- toklen -= paddedlen;
- xdr += paddedlen >> 2;
-
- _debug("%s/...@%s", princ->name_parts[0], princ->realm);
-
- *_xdr = xdr;
- *_toklen = toklen;
- _leave(" = 0 [toklen=%u]", toklen);
- return 0;
-}
-
-/*
- * extract a piece of krb5 tagged data
- */
-static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
- size_t max_data_size,
- const __be32 **_xdr,
- unsigned int *_toklen)
-{
- const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, len, paddedlen;
-
- /* there must be at least one tag and one length word */
- if (toklen <= 8)
- return -EINVAL;
-
- _enter(",%zu,{%x,%x},%u",
- max_data_size, ntohl(xdr[0]), ntohl(xdr[1]), toklen);
-
- td->tag = ntohl(*xdr++);
- len = ntohl(*xdr++);
- toklen -= 8;
- if (len > max_data_size)
- return -EINVAL;
- paddedlen = (len + 3) & ~3;
- if (paddedlen > toklen)
- return -EINVAL;
- td->data_len = len;
-
- if (len > 0) {
- td->data = kmemdup(xdr, len, GFP_KERNEL);
- if (!td->data)
- return -ENOMEM;
- toklen -= paddedlen;
- xdr += paddedlen >> 2;
- }
-
- _debug("tag %x len %x", td->tag, td->data_len);
-
- *_xdr = xdr;
- *_toklen = toklen;
- _leave(" = 0 [toklen=%u]", toklen);
- return 0;
-}
-
-/*
- * extract an array of tagged data
- */
-static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
- u8 *_n_elem,
- u8 max_n_elem,
- size_t max_elem_size,
- const __be32 **_xdr,
- unsigned int *_toklen)
-{
- struct krb5_tagged_data *td;
- const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, n_elem, loop;
- int ret;
-
- /* there must be at least one count */
- if (toklen < 4)
- return -EINVAL;
-
- _enter(",,%u,%zu,{%x},%u",
- max_n_elem, max_elem_size, ntohl(xdr[0]), toklen);
-
- n_elem = ntohl(*xdr++);
- toklen -= 4;
- if (n_elem > max_n_elem)
- return -EINVAL;
- *_n_elem = n_elem;
- if (n_elem > 0) {
- if (toklen <= (n_elem + 1) * 4)
- return -EINVAL;
-
- _debug("n_elem %d", n_elem);
-
- td = kcalloc(n_elem, sizeof(struct krb5_tagged_data),
- GFP_KERNEL);
- if (!td)
- return -ENOMEM;
- *_td = td;
-
- for (loop = 0; loop < n_elem; loop++) {
- ret = rxrpc_krb5_decode_tagged_data(&td[loop],
- max_elem_size,
- &xdr, &toklen);
- if (ret < 0)
- return ret;
- }
- }
-
- *_xdr = xdr;
- *_toklen = toklen;
- _leave(" = 0 [toklen=%u]", toklen);
- return 0;
-}
-
-/*
- * extract a krb5 ticket
- */
-static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
- const __be32 **_xdr, unsigned int *_toklen)
-{
- const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, len, paddedlen;
-
- /* there must be at least one length word */
- if (toklen <= 4)
- return -EINVAL;
-
- _enter(",{%x},%u", ntohl(xdr[0]), toklen);
-
- len = ntohl(*xdr++);
- toklen -= 4;
- if (len > AFSTOKEN_K5_TIX_MAX)
- return -EINVAL;
- paddedlen = (len + 3) & ~3;
- if (paddedlen > toklen)
- return -EINVAL;
- *_tktlen = len;
-
- _debug("ticket len %u", len);
-
- if (len > 0) {
- *_ticket = kmemdup(xdr, len, GFP_KERNEL);
- if (!*_ticket)
- return -ENOMEM;
- toklen -= paddedlen;
- xdr += paddedlen >> 2;
- }
-
- *_xdr = xdr;
- *_toklen = toklen;
- _leave(" = 0 [toklen=%u]", toklen);
- return 0;
-}
-
-/*
- * parse an RxK5 type XDR format token
- * - the caller guarantees we have at least 4 words
- */
-static int rxrpc_preparse_xdr_rxk5(struct key_preparsed_payload *prep,
- size_t datalen,
- const __be32 *xdr, unsigned int toklen)
-{
- struct rxrpc_key_token *token, **pptoken;
- struct rxk5_key *rxk5;
- const __be32 *end_xdr = xdr + (toklen >> 2);
- time64_t expiry;
- int ret;
-
- _enter(",{%x,%x,%x,%x},%u",
- ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), ntohl(xdr[3]),
- toklen);
-
- /* reserve some payload space for this subkey - the length of the token
- * is a reasonable approximation */
- prep->quotalen = datalen + toklen;
-
- token = kzalloc(sizeof(*token), GFP_KERNEL);
- if (!token)
- return -ENOMEM;
-
- rxk5 = kzalloc(sizeof(*rxk5), GFP_KERNEL);
- if (!rxk5) {
- kfree(token);
- return -ENOMEM;
- }
-
- token->security_index = RXRPC_SECURITY_RXK5;
- token->k5 = rxk5;
-
- /* extract the principals */
- ret = rxrpc_krb5_decode_principal(&rxk5->client, &xdr, &toklen);
- if (ret < 0)
- goto error;
- ret = rxrpc_krb5_decode_principal(&rxk5->server, &xdr, &toklen);
- if (ret < 0)
- goto error;
-
- /* extract the session key and the encoding type (the tag field ->
- * ENCTYPE_xxx) */
- ret = rxrpc_krb5_decode_tagged_data(&rxk5->session, AFSTOKEN_DATA_MAX,
- &xdr, &toklen);
- if (ret < 0)
- goto error;
-
- if (toklen < 4 * 8 + 2 * 4)
- goto inval;
- rxk5->authtime = be64_to_cpup((const __be64 *) xdr);
- xdr += 2;
- rxk5->starttime = be64_to_cpup((const __be64 *) xdr);
- xdr += 2;
- rxk5->endtime = be64_to_cpup((const __be64 *) xdr);
- xdr += 2;
- rxk5->renew_till = be64_to_cpup((const __be64 *) xdr);
- xdr += 2;
- rxk5->is_skey = ntohl(*xdr++);
- rxk5->flags = ntohl(*xdr++);
- toklen -= 4 * 8 + 2 * 4;
-
- _debug("times: a=%llx s=%llx e=%llx rt=%llx",
- rxk5->authtime, rxk5->starttime, rxk5->endtime,
- rxk5->renew_till);
- _debug("is_skey=%x flags=%x", rxk5->is_skey, rxk5->flags);
-
- /* extract the permitted client addresses */
- ret = rxrpc_krb5_decode_tagged_array(&rxk5->addresses,
- &rxk5->n_addresses,
- AFSTOKEN_K5_ADDRESSES_MAX,
- AFSTOKEN_DATA_MAX,
- &xdr, &toklen);
- if (ret < 0)
- goto error;
-
- ASSERTCMP((end_xdr - xdr) << 2, ==, toklen);
-
- /* extract the tickets */
- ret = rxrpc_krb5_decode_ticket(&rxk5->ticket, &rxk5->ticket_len,
- &xdr, &toklen);
- if (ret < 0)
- goto error;
- ret = rxrpc_krb5_decode_ticket(&rxk5->ticket2, &rxk5->ticket2_len,
- &xdr, &toklen);
- if (ret < 0)
- goto error;
-
- ASSERTCMP((end_xdr - xdr) << 2, ==, toklen);
-
- /* extract the typed auth data */
- ret = rxrpc_krb5_decode_tagged_array(&rxk5->authdata,
- &rxk5->n_authdata,
- AFSTOKEN_K5_AUTHDATA_MAX,
- AFSTOKEN_BDATALN_MAX,
- &xdr, &toklen);
- if (ret < 0)
- goto error;
-
- ASSERTCMP((end_xdr - xdr) << 2, ==, toklen);
-
- if (toklen != 0)
- goto inval;
-
- /* attach the payload */
- for (pptoken = (struct rxrpc_key_token **)&prep->payload.data[0];
- *pptoken;
- pptoken = &(*pptoken)->next)
- continue;
- *pptoken = token;
- expiry = rxrpc_u32_to_time64(token->k5->endtime);
- if (expiry < prep->expiry)
- prep->expiry = expiry;
-
- _leave(" = 0");
- return 0;
-
-inval:
- ret = -EINVAL;
-error:
- rxrpc_rxk5_free(rxk5);
- kfree(token);
- _leave(" = %d", ret);
- return ret;
-}
-
/*
* attempt to parse the data as the XDR format
* - the caller guarantees we have more than 7 words
*/
static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
{
- const __be32 *xdr = prep->data, *token;
+ const __be32 *xdr = prep->data, *token, *p;
const char *cp;
unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
size_t datalen = prep->datalen;
- int ret;
+ int ret, ret2;
_enter(",{%x,%x,%x,%x},%zu",
ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), ntohl(xdr[3]),
@@ -610,20 +189,20 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
goto not_xdr;
/* check each token wrapper */
- token = xdr;
+ p = xdr;
loop = ntoken;
do {
if (datalen < 8)
goto not_xdr;
- toklen = ntohl(*xdr++);
- sec_ix = ntohl(*xdr);
+ toklen = ntohl(*p++);
+ sec_ix = ntohl(*p);
datalen -= 4;
_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
paddedlen = (toklen + 3) & ~3;
if (toklen < 20 || toklen > datalen || paddedlen > datalen)
goto not_xdr;
datalen -= paddedlen;
- xdr += paddedlen >> 2;
+ p += paddedlen >> 2;
} while (--loop > 0);
@@ -634,44 +213,50 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
/* okay: we're going to assume it's valid XDR format
* - we ignore the cellname, relying on the key to be correctly named
*/
+ ret = -EPROTONOSUPPORT;
do {
- xdr = token;
toklen = ntohl(*xdr++);
- token = xdr + ((toklen + 3) >> 2);
- sec_ix = ntohl(*xdr++);
+ token = xdr;
+ xdr += (toklen + 3) / 4;
+
+ sec_ix = ntohl(*token++);
toklen -= 4;
- _debug("TOKEN type=%u [%p-%p]", sec_ix, xdr, token);
+ _debug("TOKEN type=%x len=%x", sec_ix, toklen);
switch (sec_ix) {
case RXRPC_SECURITY_RXKAD:
- ret = rxrpc_preparse_xdr_rxkad(prep, datalen, xdr, toklen);
- if (ret != 0)
- goto error;
+ ret2 = rxrpc_preparse_xdr_rxkad(prep, datalen, token, toklen);
break;
+ default:
+ ret2 = -EPROTONOSUPPORT;
+ break;
+ }
- case RXRPC_SECURITY_RXK5:
- ret = rxrpc_preparse_xdr_rxk5(prep, datalen, xdr, toklen);
+ switch (ret2) {
+ case 0:
+ ret = 0;
+ break;
+ case -EPROTONOSUPPORT:
+ break;
+ case -ENOPKG:
if (ret != 0)
- goto error;
+ ret = -ENOPKG;
break;
-
default:
- ret = -EPROTONOSUPPORT;
+ ret = ret2;
goto error;
}
} while (--ntoken > 0);
- _leave(" = 0");
- return 0;
+error:
+ _leave(" = %d", ret);
+ return ret;
not_xdr:
_leave(" = -EPROTO");
return -EPROTO;
-error:
- _leave(" = %d", ret);
- return ret;
}
/*
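The rewritten preparser walks the XDR token list with a separate cursor, each token being a 4-byte length word followed by a payload padded to a 4-byte boundary. A small standalone version of that walk, with made-up token contents:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Walk a sequence of XDR-style { length, payload } wrappers where
 * each payload is padded to a 4-byte boundary. */
static int walk_tokens(const uint32_t *xdr, size_t words)
{
	size_t i = 0;

	while (i < words) {
		uint32_t toklen = ntohl(xdr[i++]);
		uint32_t padded = (toklen + 3) & ~3u;   /* round up to 4 bytes */

		if (padded / 4 > words - i)
			return -1;                      /* truncated token */
		printf("token: %u bytes (%u padded)\n", toklen, padded);
		i += padded / 4;
	}
	return 0;
}

int main(void)
{
	/* one 6-byte token: a length word, then 8 bytes of padded payload */
	uint32_t buf[] = { htonl(6), htonl(0x01020304), htonl(0x05060000) };

	return walk_tokens(buf, sizeof(buf) / sizeof(buf[0])) ? 1 : 0;
}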
@@ -805,10 +390,6 @@ static void rxrpc_free_token_list(struct rxrpc_key_token *token)
case RXRPC_SECURITY_RXKAD:
kfree(token->kad);
break;
- case RXRPC_SECURITY_RXK5:
- if (token->k5)
- rxrpc_rxk5_free(token->k5);
- break;
default:
pr_err("Unknown token type %x on rxrpc key\n",
token->security_index);
@@ -828,45 +409,6 @@ static void rxrpc_free_preparse(struct key_preparsed_payload *prep)
}
/*
- * Preparse a server secret key.
- *
- * The data should be the 8-byte secret key.
- */
-static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
-{
- struct crypto_skcipher *ci;
-
- _enter("%zu", prep->datalen);
-
- if (prep->datalen != 8)
- return -EINVAL;
-
- memcpy(&prep->payload.data[2], prep->data, 8);
-
- ci = crypto_alloc_skcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(ci)) {
- _leave(" = %ld", PTR_ERR(ci));
- return PTR_ERR(ci);
- }
-
- if (crypto_skcipher_setkey(ci, prep->data, 8) < 0)
- BUG();
-
- prep->payload.data[0] = ci;
- _leave(" = 0");
- return 0;
-}
-
-/*
- * Clean up preparse data.
- */
-static void rxrpc_free_preparse_s(struct key_preparsed_payload *prep)
-{
- if (prep->payload.data[0])
- crypto_free_skcipher(prep->payload.data[0]);
-}
-
-/*
* dispose of the data dangling from the corpse of a rxrpc key
*/
static void rxrpc_destroy(struct key *key)
@@ -875,22 +417,29 @@ static void rxrpc_destroy(struct key *key)
}
/*
- * dispose of the data dangling from the corpse of a rxrpc key
- */
-static void rxrpc_destroy_s(struct key *key)
-{
- if (key->payload.data[0]) {
- crypto_free_skcipher(key->payload.data[0]);
- key->payload.data[0] = NULL;
- }
-}
-
-/*
* describe the rxrpc key
*/
static void rxrpc_describe(const struct key *key, struct seq_file *m)
{
+ const struct rxrpc_key_token *token;
+ const char *sep = ": ";
+
seq_puts(m, key->description);
+
+ for (token = key->payload.data[0]; token; token = token->next) {
+ seq_puts(m, sep);
+
+ switch (token->security_index) {
+ case RXRPC_SECURITY_RXKAD:
+ seq_puts(m, "ka");
+ break;
+ default: /* we have a ticket we can't encode */
+ seq_printf(m, "%u", token->security_index);
+ break;
+ }
+
+ sep = " ";
+ }
}
/*
@@ -924,36 +473,6 @@ int rxrpc_request_key(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
}
/*
- * grab the security keyring for a server socket
- */
-int rxrpc_server_keyring(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
-{
- struct key *key;
- char *description;
-
- _enter("");
-
- if (optlen <= 0 || optlen > PAGE_SIZE - 1)
- return -EINVAL;
-
- description = memdup_sockptr_nul(optval, optlen);
- if (IS_ERR(description))
- return PTR_ERR(description);
-
- key = request_key(&key_type_keyring, description, NULL);
- if (IS_ERR(key)) {
- kfree(description);
- _leave(" = %ld", PTR_ERR(key));
- return PTR_ERR(key);
- }
-
- rx->securities = key;
- kfree(description);
- _leave(" = 0 [key %x]", key->serial);
- return 0;
-}
-
-/*
* generate a server data key
*/
int rxrpc_get_server_data_key(struct rxrpc_connection *conn,
@@ -1044,12 +563,10 @@ static long rxrpc_read(const struct key *key,
char *buffer, size_t buflen)
{
const struct rxrpc_key_token *token;
- const struct krb5_principal *princ;
size_t size;
__be32 *xdr, *oldxdr;
u32 cnlen, toksize, ntoks, tok, zero;
u16 toksizes[AFSTOKEN_MAX];
- int loop;
_enter("");
@@ -1074,36 +591,8 @@ static long rxrpc_read(const struct key *key,
case RXRPC_SECURITY_RXKAD:
toksize += 8 * 4; /* viceid, kvno, key*2, begin,
* end, primary, tktlen */
- toksize += RND(token->kad->ticket_len);
- break;
-
- case RXRPC_SECURITY_RXK5:
- princ = &token->k5->client;
- toksize += 4 + princ->n_name_parts * 4;
- for (loop = 0; loop < princ->n_name_parts; loop++)
- toksize += RND(strlen(princ->name_parts[loop]));
- toksize += 4 + RND(strlen(princ->realm));
-
- princ = &token->k5->server;
- toksize += 4 + princ->n_name_parts * 4;
- for (loop = 0; loop < princ->n_name_parts; loop++)
- toksize += RND(strlen(princ->name_parts[loop]));
- toksize += 4 + RND(strlen(princ->realm));
-
- toksize += 8 + RND(token->k5->session.data_len);
-
- toksize += 4 * 8 + 2 * 4;
-
- toksize += 4 + token->k5->n_addresses * 8;
- for (loop = 0; loop < token->k5->n_addresses; loop++)
- toksize += RND(token->k5->addresses[loop].data_len);
-
- toksize += 4 + RND(token->k5->ticket_len);
- toksize += 4 + RND(token->k5->ticket2_len);
-
- toksize += 4 + token->k5->n_authdata * 8;
- for (loop = 0; loop < token->k5->n_authdata; loop++)
- toksize += RND(token->k5->authdata[loop].data_len);
+ if (!token->no_leak_key)
+ toksize += RND(token->kad->ticket_len);
break;
default: /* we have a ticket we can't encode */
@@ -1178,49 +667,10 @@ static long rxrpc_read(const struct key *key,
ENCODE(token->kad->start);
ENCODE(token->kad->expiry);
ENCODE(token->kad->primary_flag);
- ENCODE_DATA(token->kad->ticket_len, token->kad->ticket);
- break;
-
- case RXRPC_SECURITY_RXK5:
- princ = &token->k5->client;
- ENCODE(princ->n_name_parts);
- for (loop = 0; loop < princ->n_name_parts; loop++)
- ENCODE_STR(princ->name_parts[loop]);
- ENCODE_STR(princ->realm);
-
- princ = &token->k5->server;
- ENCODE(princ->n_name_parts);
- for (loop = 0; loop < princ->n_name_parts; loop++)
- ENCODE_STR(princ->name_parts[loop]);
- ENCODE_STR(princ->realm);
-
- ENCODE(token->k5->session.tag);
- ENCODE_DATA(token->k5->session.data_len,
- token->k5->session.data);
-
- ENCODE64(token->k5->authtime);
- ENCODE64(token->k5->starttime);
- ENCODE64(token->k5->endtime);
- ENCODE64(token->k5->renew_till);
- ENCODE(token->k5->is_skey);
- ENCODE(token->k5->flags);
-
- ENCODE(token->k5->n_addresses);
- for (loop = 0; loop < token->k5->n_addresses; loop++) {
- ENCODE(token->k5->addresses[loop].tag);
- ENCODE_DATA(token->k5->addresses[loop].data_len,
- token->k5->addresses[loop].data);
- }
-
- ENCODE_DATA(token->k5->ticket_len, token->k5->ticket);
- ENCODE_DATA(token->k5->ticket2_len, token->k5->ticket2);
-
- ENCODE(token->k5->n_authdata);
- for (loop = 0; loop < token->k5->n_authdata; loop++) {
- ENCODE(token->k5->authdata[loop].tag);
- ENCODE_DATA(token->k5->authdata[loop].data_len,
- token->k5->authdata[loop].data);
- }
+ if (token->no_leak_key)
+ ENCODE(0);
+ else
+ ENCODE_DATA(token->kad->ticket_len, token->kad->ticket);
break;
default:
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 2c842851d72e..fef3573fdc8b 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -69,7 +69,7 @@ bool __rxrpc_set_call_completion(struct rxrpc_call *call,
if (call->state < RXRPC_CALL_COMPLETE) {
call->abort_code = abort_code;
call->error = error;
- call->completion = compl,
+ call->completion = compl;
call->state = RXRPC_CALL_COMPLETE;
trace_rxrpc_call_complete(call);
wake_up(&call->waitq);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index f114dc2af5cf..e2e9e9b0a6d7 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -15,6 +15,7 @@
#include <linux/scatterlist.h>
#include <linux/ctype.h>
#include <linux/slab.h>
+#include <linux/key-type.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <keys/rxrpc-type.h>
@@ -27,6 +28,7 @@
#define INST_SZ 40 /* size of principal's instance */
#define REALM_SZ 40 /* size of principal's auth domain */
#define SNAME_SZ 40 /* size of service name */
+#define RXKAD_ALIGN 8
struct rxkad_level1_hdr {
__be32 data_size; /* true data size (excluding padding) */
@@ -37,6 +39,9 @@ struct rxkad_level2_hdr {
__be32 checksum; /* decrypted data checksum */
};
+static int rxkad_prime_packet_security(struct rxrpc_connection *conn,
+ struct crypto_sync_skcipher *ci);
+
/*
* this holds a pinned cipher so that keventd doesn't get called by the cipher
* alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
@@ -47,17 +52,59 @@ static struct skcipher_request *rxkad_ci_req;
static DEFINE_MUTEX(rxkad_ci_mutex);
/*
+ * Parse the information from a server key
+ *
+ * The data should be the 8-byte secret key.
+ */
+static int rxkad_preparse_server_key(struct key_preparsed_payload *prep)
+{
+ struct crypto_skcipher *ci;
+
+ if (prep->datalen != 8)
+ return -EINVAL;
+
+ memcpy(&prep->payload.data[2], prep->data, 8);
+
+ ci = crypto_alloc_skcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ci)) {
+ _leave(" = %ld", PTR_ERR(ci));
+ return PTR_ERR(ci);
+ }
+
+ if (crypto_skcipher_setkey(ci, prep->data, 8) < 0)
+ BUG();
+
+ prep->payload.data[0] = ci;
+ _leave(" = 0");
+ return 0;
+}
+
+static void rxkad_free_preparse_server_key(struct key_preparsed_payload *prep)
+{
+
+ if (prep->payload.data[0])
+ crypto_free_skcipher(prep->payload.data[0]);
+}
+
+static void rxkad_destroy_server_key(struct key *key)
+{
+ if (key->payload.data[0]) {
+ crypto_free_skcipher(key->payload.data[0]);
+ key->payload.data[0] = NULL;
+ }
+}
+
+/*
* initialise connection security
*/
-static int rxkad_init_connection_security(struct rxrpc_connection *conn)
+static int rxkad_init_connection_security(struct rxrpc_connection *conn,
+ struct rxrpc_key_token *token)
{
struct crypto_sync_skcipher *ci;
- struct rxrpc_key_token *token;
int ret;
_enter("{%d},{%x}", conn->debug_id, key_serial(conn->params.key));
- token = conn->params.key->payload.data[0];
conn->security_ix = token->security_index;
ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
@@ -73,32 +120,68 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
switch (conn->params.security_level) {
case RXRPC_SECURITY_PLAIN:
- break;
case RXRPC_SECURITY_AUTH:
- conn->size_align = 8;
- conn->security_size = sizeof(struct rxkad_level1_hdr);
- break;
case RXRPC_SECURITY_ENCRYPT:
- conn->size_align = 8;
- conn->security_size = sizeof(struct rxkad_level2_hdr);
break;
default:
ret = -EKEYREJECTED;
goto error;
}
- conn->cipher = ci;
- ret = 0;
+ ret = rxkad_prime_packet_security(conn, ci);
+ if (ret < 0)
+ goto error_ci;
+
+ conn->rxkad.cipher = ci;
+ return 0;
+
+error_ci:
+ crypto_free_sync_skcipher(ci);
error:
_leave(" = %d", ret);
return ret;
}
/*
+ * Work out how much data we can put in a packet.
+ */
+static int rxkad_how_much_data(struct rxrpc_call *call, size_t remain,
+ size_t *_buf_size, size_t *_data_size, size_t *_offset)
+{
+ size_t shdr, buf_size, chunk;
+
+ switch (call->conn->params.security_level) {
+ default:
+ buf_size = chunk = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
+ shdr = 0;
+ goto out;
+ case RXRPC_SECURITY_AUTH:
+ shdr = sizeof(struct rxkad_level1_hdr);
+ break;
+ case RXRPC_SECURITY_ENCRYPT:
+ shdr = sizeof(struct rxkad_level2_hdr);
+ break;
+ }
+
+ buf_size = round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN);
+
+ chunk = buf_size - shdr;
+ if (remain < chunk)
+ buf_size = round_up(shdr + remain, RXKAD_ALIGN);
+
+out:
+ *_buf_size = buf_size;
+ *_data_size = chunk;
+ *_offset = shdr;
+ return 0;
+}
+
+/*
* prime the encryption state with the invariant parts of a connection's
* description
*/
-static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
+static int rxkad_prime_packet_security(struct rxrpc_connection *conn,
+ struct crypto_sync_skcipher *ci)
{
struct skcipher_request *req;
struct rxrpc_key_token *token;
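rxkad_how_much_data() above replaces the per-connection size_align/security_size fields with round_up()/round_down() arithmetic against RXKAD_ALIGN. A standalone rendering of that calculation; the constants are stand-ins for the kernel's values:

#include <stdio.h>
#include <stddef.h>

#define JUMBO_DATALEN 1412   /* stand-in for RXRPC_JUMBO_DATALEN */
#define KAD_ALIGN     8      /* stand-in for RXKAD_ALIGN */

#define round_down(x, a) ((x) & ~((a) - 1))
#define round_up(x, a)   round_down((x) + (a) - 1, a)

/* Given the data remaining to send and a security header size, work
 * out the buffer size and usable chunk, as the AUTH/ENCRYPT branches
 * of rxkad_how_much_data() do. */
static void how_much_data(size_t remain, size_t shdr,
			  size_t *buf_size, size_t *chunk)
{
	*buf_size = round_down(JUMBO_DATALEN, KAD_ALIGN);
	*chunk = *buf_size - shdr;
	if (remain < *chunk)
		*buf_size = round_up(shdr + remain, KAD_ALIGN);
}

int main(void)
{
	size_t buf, chunk;

	how_much_data(100, 4, &buf, &chunk);   /* 4-byte level-1 header */
	printf("buf=%zu chunk=%zu\n", buf, chunk);
	return 0;
}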
@@ -116,7 +199,7 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
if (!tmpbuf)
return -ENOMEM;
- req = skcipher_request_alloc(&conn->cipher->base, GFP_NOFS);
+ req = skcipher_request_alloc(&ci->base, GFP_NOFS);
if (!req) {
kfree(tmpbuf);
return -ENOMEM;
@@ -131,13 +214,13 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
tmpbuf[3] = htonl(conn->security_ix);
sg_init_one(&sg, tmpbuf, tmpsize);
- skcipher_request_set_sync_tfm(req, conn->cipher);
+ skcipher_request_set_sync_tfm(req, ci);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x);
crypto_skcipher_encrypt(req);
skcipher_request_free(req);
- memcpy(&conn->csum_iv, tmpbuf + 2, sizeof(conn->csum_iv));
+ memcpy(&conn->rxkad.csum_iv, tmpbuf + 2, sizeof(conn->rxkad.csum_iv));
kfree(tmpbuf);
_leave(" = 0");
return 0;
@@ -149,7 +232,7 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
*/
static struct skcipher_request *rxkad_get_call_crypto(struct rxrpc_call *call)
{
- struct crypto_skcipher *tfm = &call->conn->cipher->base;
+ struct crypto_skcipher *tfm = &call->conn->rxkad.cipher->base;
struct skcipher_request *cipher_req = call->cipher_req;
if (!cipher_req) {
@@ -176,15 +259,14 @@ static void rxkad_free_call_crypto(struct rxrpc_call *call)
* partially encrypt a packet (level 1 security)
*/
static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
- struct sk_buff *skb,
- u32 data_size,
- void *sechdr,
+ struct sk_buff *skb, u32 data_size,
struct skcipher_request *req)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxkad_level1_hdr hdr;
struct rxrpc_crypt iv;
struct scatterlist sg;
+ size_t pad;
u16 check;
_enter("");
@@ -193,13 +275,19 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
data_size |= (u32)check << 16;
hdr.data_size = htonl(data_size);
- memcpy(sechdr, &hdr, sizeof(hdr));
+ memcpy(skb->head, &hdr, sizeof(hdr));
+
+ pad = sizeof(struct rxkad_level1_hdr) + data_size;
+ pad = RXKAD_ALIGN - pad;
+ pad &= RXKAD_ALIGN - 1;
+ if (pad)
+ skb_put_zero(skb, pad);
/* start the encryption afresh */
memset(&iv, 0, sizeof(iv));
- sg_init_one(&sg, sechdr, 8);
- skcipher_request_set_sync_tfm(req, call->conn->cipher);
+ sg_init_one(&sg, skb->head, 8);
+ skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
crypto_skcipher_encrypt(req);
@@ -215,7 +303,6 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
struct sk_buff *skb,
u32 data_size,
- void *sechdr,
struct skcipher_request *req)
{
const struct rxrpc_key_token *token;
@@ -224,6 +311,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
struct rxrpc_crypt iv;
struct scatterlist sg[16];
unsigned int len;
+ size_t pad;
u16 check;
int err;
@@ -235,14 +323,20 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
rxkhdr.data_size = htonl(data_size | (u32)check << 16);
rxkhdr.checksum = 0;
- memcpy(sechdr, &rxkhdr, sizeof(rxkhdr));
+ memcpy(skb->head, &rxkhdr, sizeof(rxkhdr));
+
+ pad = sizeof(struct rxkad_level2_hdr) + data_size;
+ pad = RXKAD_ALIGN - pad;
+ pad &= RXKAD_ALIGN - 1;
+ if (pad)
+ skb_put_zero(skb, pad);
/* encrypt from the session key */
token = call->conn->params.key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
- sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
- skcipher_request_set_sync_tfm(req, call->conn->cipher);
+ sg_init_one(&sg[0], skb->head, sizeof(rxkhdr));
+ skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg[0], &sg[0], sizeof(rxkhdr), iv.x);
crypto_skcipher_encrypt(req);
@@ -252,11 +346,10 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
if (skb_shinfo(skb)->nr_frags > 16)
goto out;
- len = data_size + call->conn->size_align - 1;
- len &= ~(call->conn->size_align - 1);
+ len = round_up(data_size, RXKAD_ALIGN);
sg_init_table(sg, ARRAY_SIZE(sg));
- err = skb_to_sgvec(skb, sg, 0, len);
+ err = skb_to_sgvec(skb, sg, 8, len);
if (unlikely(err < 0))
goto out;
skcipher_request_set_crypt(req, sg, sg, len, iv.x);
@@ -275,8 +368,7 @@ out:
*/
static int rxkad_secure_packet(struct rxrpc_call *call,
struct sk_buff *skb,
- size_t data_size,
- void *sechdr)
+ size_t data_size)
{
struct rxrpc_skb_priv *sp;
struct skcipher_request *req;
@@ -291,7 +383,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
call->debug_id, key_serial(call->conn->params.key),
sp->hdr.seq, data_size);
- if (!call->conn->cipher)
+ if (!call->conn->rxkad.cipher)
return 0;
ret = key_validate(call->conn->params.key);
@@ -303,7 +395,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
return -ENOMEM;
/* continue encrypting from where we left off */
- memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
+ memcpy(&iv, call->conn->rxkad.csum_iv.x, sizeof(iv));
/* calculate the security checksum */
x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
@@ -312,7 +404,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
call->crypto_buf[1] = htonl(x);
sg_init_one(&sg, call->crypto_buf, 8);
- skcipher_request_set_sync_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
crypto_skcipher_encrypt(req);
@@ -329,12 +421,10 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
ret = 0;
break;
case RXRPC_SECURITY_AUTH:
- ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr,
- req);
+ ret = rxkad_secure_packet_auth(call, skb, data_size, req);
break;
case RXRPC_SECURITY_ENCRYPT:
- ret = rxkad_secure_packet_encrypt(call, skb, data_size,
- sechdr, req);
+ ret = rxkad_secure_packet_encrypt(call, skb, data_size, req);
break;
default:
ret = -EPERM;
@@ -380,7 +470,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
/* start the decryption afresh */
memset(&iv, 0, sizeof(iv));
- skcipher_request_set_sync_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, 8, iv.x);
crypto_skcipher_decrypt(req);
@@ -472,7 +562,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
token = call->conn->params.key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
- skcipher_request_set_sync_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, len, iv.x);
crypto_skcipher_decrypt(req);
@@ -538,7 +628,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
_enter("{%d{%x}},{#%u}",
call->debug_id, key_serial(call->conn->params.key), seq);
- if (!call->conn->cipher)
+ if (!call->conn->rxkad.cipher)
return 0;
req = rxkad_get_call_crypto(call);
@@ -546,7 +636,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
return -ENOMEM;
/* continue encrypting from where we left off */
- memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
+ memcpy(&iv, call->conn->rxkad.csum_iv.x, sizeof(iv));
/* validate the security checksum */
x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
@@ -555,7 +645,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
call->crypto_buf[1] = htonl(x);
sg_init_one(&sg, call->crypto_buf, 8);
- skcipher_request_set_sync_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
crypto_skcipher_encrypt(req);
@@ -648,16 +738,12 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
u32 serial;
int ret;
- _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
+ _enter("{%d}", conn->debug_id);
- ret = key_validate(conn->server_key);
- if (ret < 0)
- return ret;
-
- get_random_bytes(&conn->security_nonce, sizeof(conn->security_nonce));
+ get_random_bytes(&conn->rxkad.nonce, sizeof(conn->rxkad.nonce));
challenge.version = htonl(2);
- challenge.nonce = htonl(conn->security_nonce);
+ challenge.nonce = htonl(conn->rxkad.nonce);
challenge.min_level = htonl(0);
challenge.__padding = 0;
@@ -785,7 +871,7 @@ static int rxkad_encrypt_response(struct rxrpc_connection *conn,
struct rxrpc_crypt iv;
struct scatterlist sg[1];
- req = skcipher_request_alloc(&conn->cipher->base, GFP_NOFS);
+ req = skcipher_request_alloc(&conn->rxkad.cipher->base, GFP_NOFS);
if (!req)
return -ENOMEM;
@@ -794,7 +880,7 @@ static int rxkad_encrypt_response(struct rxrpc_connection *conn,
sg_init_table(sg, 1);
sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted));
- skcipher_request_set_sync_tfm(req, conn->cipher);
+ skcipher_request_set_sync_tfm(req, conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
crypto_skcipher_encrypt(req);
@@ -892,6 +978,7 @@ other_error:
* decrypt the kerberos IV ticket in the response
*/
static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
+ struct key *server_key,
struct sk_buff *skb,
void *ticket, size_t ticket_len,
struct rxrpc_crypt *_session_key,
@@ -911,30 +998,17 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
u32 abort_code;
u8 *p, *q, *name, *end;
- _enter("{%d},{%x}", conn->debug_id, key_serial(conn->server_key));
+ _enter("{%d},{%x}", conn->debug_id, key_serial(server_key));
*_expiry = 0;
- ret = key_validate(conn->server_key);
- if (ret < 0) {
- switch (ret) {
- case -EKEYEXPIRED:
- abort_code = RXKADEXPIRED;
- goto other_error;
- default:
- abort_code = RXKADNOAUTH;
- goto other_error;
- }
- }
-
- ASSERT(conn->server_key->payload.data[0] != NULL);
+ ASSERT(server_key->payload.data[0] != NULL);
ASSERTCMP((unsigned long) ticket & 7UL, ==, 0);
- memcpy(&iv, &conn->server_key->payload.data[2], sizeof(iv));
+ memcpy(&iv, &server_key->payload.data[2], sizeof(iv));
ret = -ENOMEM;
- req = skcipher_request_alloc(conn->server_key->payload.data[0],
- GFP_NOFS);
+ req = skcipher_request_alloc(server_key->payload.data[0], GFP_NOFS);
if (!req)
goto temporary_error;
@@ -1090,6 +1164,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
struct rxkad_response *response;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_crypt session_key;
+ struct key *server_key;
const char *eproto;
time64_t expiry;
void *ticket;
@@ -1097,7 +1172,27 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
__be32 csum;
int ret, i;
- _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
+ _enter("{%d}", conn->debug_id);
+
+ server_key = rxrpc_look_up_server_security(conn, skb, 0, 0);
+ if (IS_ERR(server_key)) {
+ switch (PTR_ERR(server_key)) {
+ case -ENOKEY:
+ abort_code = RXKADUNKNOWNKEY;
+ break;
+ case -EKEYEXPIRED:
+ abort_code = RXKADEXPIRED;
+ break;
+ default:
+ abort_code = RXKADNOAUTH;
+ break;
+ }
+ trace_rxrpc_abort(0, "SVK",
+ sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ abort_code, PTR_ERR(server_key));
+ *_abort_code = abort_code;
+ return -EPROTO;
+ }
ret = -ENOMEM;
response = kzalloc(sizeof(struct rxkad_response), GFP_NOFS);
@@ -1109,8 +1204,6 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
response, sizeof(*response)) < 0)
goto protocol_error;
- if (!pskb_pull(skb, sizeof(*response)))
- BUG();
version = ntohl(response->version);
ticket_len = ntohl(response->ticket_len);
@@ -1141,12 +1234,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
eproto = tracepoint_string("rxkad_tkt_short");
abort_code = RXKADPACKETSHORT;
- if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response),
ticket, ticket_len) < 0)
goto protocol_error_free;
- ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key,
- &expiry, _abort_code);
+ ret = rxkad_decrypt_ticket(conn, server_key, skb, ticket, ticket_len,
+ &session_key, &expiry, _abort_code);
if (ret < 0)
goto temporary_error_free_ticket;
@@ -1196,7 +1289,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
eproto = tracepoint_string("rxkad_rsp_seq");
abort_code = RXKADOUTOFSEQUENCE;
- if (ntohl(response->encrypted.inc_nonce) != conn->security_nonce + 1)
+ if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1)
goto protocol_error_free;
eproto = tracepoint_string("rxkad_rsp_level");
@@ -1225,6 +1318,7 @@ protocol_error_free:
protocol_error:
kfree(response);
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
+ key_put(server_key);
*_abort_code = abort_code;
return -EPROTO;
@@ -1237,6 +1331,7 @@ temporary_error:
* ENOMEM. We just want to send the challenge again. Note that we
* also come out this way if the ticket decryption fails.
*/
+ key_put(server_key);
return ret;
}
@@ -1247,8 +1342,8 @@ static void rxkad_clear(struct rxrpc_connection *conn)
{
_enter("");
- if (conn->cipher)
- crypto_free_sync_skcipher(conn->cipher);
+ if (conn->rxkad.cipher)
+ crypto_free_sync_skcipher(conn->rxkad.cipher);
}
/*
@@ -1296,8 +1391,11 @@ const struct rxrpc_security rxkad = {
.no_key_abort = RXKADUNKNOWNKEY,
.init = rxkad_init,
.exit = rxkad_exit,
+ .preparse_server_key = rxkad_preparse_server_key,
+ .free_preparse_server_key = rxkad_free_preparse_server_key,
+ .destroy_server_key = rxkad_destroy_server_key,
.init_connection_security = rxkad_init_connection_security,
- .prime_packet_security = rxkad_prime_packet_security,
+ .how_much_data = rxkad_how_much_data,
.secure_packet = rxkad_secure_packet,
.verify_packet = rxkad_verify_packet,
.free_call_crypto = rxkad_free_call_crypto,
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index 9b1fb9ed0717..50cb5f1ee0c0 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -55,7 +55,7 @@ void rxrpc_exit_security(void)
/*
* look up an rxrpc security module
*/
-static const struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
+const struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
{
if (security_index >= ARRAY_SIZE(rxrpc_security_types))
return NULL;
@@ -81,16 +81,17 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
if (ret < 0)
return ret;
- token = key->payload.data[0];
- if (!token)
- return -EKEYREJECTED;
+ for (token = key->payload.data[0]; token; token = token->next) {
+ sec = rxrpc_security_lookup(token->security_index);
+ if (sec)
+ goto found;
+ }
+ return -EKEYREJECTED;
- sec = rxrpc_security_lookup(token->security_index);
- if (!sec)
- return -EKEYREJECTED;
+found:
conn->security = sec;
- ret = conn->security->init_connection_security(conn);
+ ret = conn->security->init_connection_security(conn, token);
if (ret < 0) {
conn->security = &rxrpc_no_security;
return ret;
@@ -101,22 +102,16 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
}
/*
- * Find the security key for a server connection.
+ * Set the ops for a server connection.
*/
-bool rxrpc_look_up_server_security(struct rxrpc_local *local, struct rxrpc_sock *rx,
- const struct rxrpc_security **_sec,
- struct key **_key,
- struct sk_buff *skb)
+const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *rx,
+ struct sk_buff *skb)
{
const struct rxrpc_security *sec;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- key_ref_t kref = NULL;
- char kdesc[5 + 1 + 3 + 1];
_enter("");
- sprintf(kdesc, "%u:%u", sp->hdr.serviceId, sp->hdr.securityIndex);
-
sec = rxrpc_security_lookup(sp->hdr.securityIndex);
if (!sec) {
trace_rxrpc_abort(0, "SVS",
@@ -124,35 +119,72 @@ bool rxrpc_look_up_server_security(struct rxrpc_local *local, struct rxrpc_sock
RX_INVALID_OPERATION, EKEYREJECTED);
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb->priority = RX_INVALID_OPERATION;
- return false;
+ return NULL;
}
- if (sp->hdr.securityIndex == RXRPC_SECURITY_NONE)
- goto out;
-
- if (!rx->securities) {
+ if (sp->hdr.securityIndex != RXRPC_SECURITY_NONE &&
+ !rx->securities) {
trace_rxrpc_abort(0, "SVR",
sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_INVALID_OPERATION, EKEYREJECTED);
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
- skb->priority = RX_INVALID_OPERATION;
- return false;
+ skb->priority = sec->no_key_abort;
+ return NULL;
}
+ return sec;
+}
+
+/*
+ * Find the security key for a server connection.
+ */
+struct key *rxrpc_look_up_server_security(struct rxrpc_connection *conn,
+ struct sk_buff *skb,
+ u32 kvno, u32 enctype)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct rxrpc_sock *rx;
+ struct key *key = ERR_PTR(-EKEYREJECTED);
+ key_ref_t kref = NULL;
+ char kdesc[5 + 1 + 3 + 1 + 12 + 1 + 12 + 1];
+ int ret;
+
+ _enter("");
+
+ if (enctype)
+ sprintf(kdesc, "%u:%u:%u:%u",
+ sp->hdr.serviceId, sp->hdr.securityIndex, kvno, enctype);
+ else if (kvno)
+ sprintf(kdesc, "%u:%u:%u",
+ sp->hdr.serviceId, sp->hdr.securityIndex, kvno);
+ else
+ sprintf(kdesc, "%u:%u",
+ sp->hdr.serviceId, sp->hdr.securityIndex);
+
+ rcu_read_lock();
+
+ rx = rcu_dereference(conn->params.local->service);
+ if (!rx)
+ goto out;
+
/* look through the service's keyring */
kref = keyring_search(make_key_ref(rx->securities, 1UL),
&key_type_rxrpc_s, kdesc, true);
if (IS_ERR(kref)) {
- trace_rxrpc_abort(0, "SVK",
- sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- sec->no_key_abort, EKEYREJECTED);
- skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
- skb->priority = sec->no_key_abort;
- return false;
+ key = ERR_CAST(kref);
+ goto out;
+ }
+
+ key = key_ref_to_ptr(kref);
+
+ ret = key_validate(key);
+ if (ret < 0) {
+ key_put(key);
+ key = ERR_PTR(ret);
+ goto out;
}
out:
- *_sec = sec;
- *_key = key_ref_to_ptr(kref);
- return true;
+ rcu_read_unlock();
+ return key;
}
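For illustration, here are the three kdesc forms built above for a hypothetical incoming packet with serviceId 52 and securityIndex 2 (rxkad); the kvno and enctype values are invented for the example:

	char kdesc[5 + 1 + 3 + 1 + 12 + 1 + 12 + 1];

	sprintf(kdesc, "%u:%u", 52, 2);              /* "52:2"      - no kvno/enctype */
	sprintf(kdesc, "%u:%u:%u", 52, 2, 5);        /* "52:2:5"    - kvno 5 */
	sprintf(kdesc, "%u:%u:%u:%u", 52, 2, 5, 18); /* "52:2:5:18" - kvno 5, enctype 18 */

The keyring search then matches server keys whose descriptions were vetted in the same format by the rxrpc_s key type added below.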
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index d27140c836cc..af8ad6c30b9f 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -327,7 +327,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
rxrpc_send_ack_packet(call, false, NULL);
if (!skb) {
- size_t size, chunk, max, space;
+ size_t remain, bufsize, chunk, offset;
_debug("alloc");
@@ -342,24 +342,21 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
goto maybe_error;
}
- max = RXRPC_JUMBO_DATALEN;
- max -= call->conn->security_size;
- max &= ~(call->conn->size_align - 1UL);
-
- chunk = max;
- if (chunk > msg_data_left(msg) && !more)
- chunk = msg_data_left(msg);
-
- space = chunk + call->conn->size_align;
- space &= ~(call->conn->size_align - 1UL);
-
- size = space + call->conn->security_size;
+ /* Work out the maximum size of a packet. Assume that
+ * the security header is going to be in the padded
+ * region (enc blocksize), but the trailer is not.
+ */
+ remain = more ? INT_MAX : msg_data_left(msg);
+ ret = call->conn->security->how_much_data(call, remain,
+ &bufsize, &chunk, &offset);
+ if (ret < 0)
+ goto maybe_error;
- _debug("SIZE: %zu/%zu/%zu", chunk, space, size);
+ _debug("SIZE: %zu/%zu @%zu", chunk, bufsize, offset);
/* create a buffer that we can retain until it's ACK'd */
skb = sock_alloc_send_skb(
- sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
+ sk, bufsize, msg->msg_flags & MSG_DONTWAIT, &ret);
if (!skb)
goto maybe_error;
@@ -371,9 +368,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
ASSERTCMP(skb->mark, ==, 0);
- _debug("HS: %u", call->conn->security_size);
- skb_reserve(skb, call->conn->security_size);
- skb->len += call->conn->security_size;
+ __skb_put(skb, offset);
sp->remain = chunk;
if (sp->remain > skb_tailroom(skb))
@@ -422,17 +417,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
(msg_data_left(msg) == 0 && !more)) {
struct rxrpc_connection *conn = call->conn;
uint32_t seq;
- size_t pad;
-
- /* pad out if we're using security */
- if (conn->security_ix) {
- pad = conn->security_size + skb->mark;
- pad = conn->size_align - pad;
- pad &= conn->size_align - 1;
- _debug("pad %zu", pad);
- if (pad)
- skb_put_zero(skb, pad);
- }
seq = call->tx_top + 1;
@@ -446,8 +430,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
call->tx_winsize)
sp->hdr.flags |= RXRPC_MORE_PACKETS;
- ret = call->security->secure_packet(
- call, skb, skb->mark, skb->head);
+ ret = call->security->secure_packet(call, skb, skb->mark);
if (ret < 0)
goto out;
diff --git a/net/rxrpc/server_key.c b/net/rxrpc/server_key.c
new file mode 100644
index 000000000000..ead3471307ee
--- /dev/null
+++ b/net/rxrpc/server_key.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* RxRPC key management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * RxRPC keys should have a description describing their purpose:
+ *	"afs@CAMBRIDGE.REDHAT.COM"
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/skcipher.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/key-type.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include <keys/rxrpc-type.h>
+#include <keys/user-type.h>
+#include "ar-internal.h"
+
+static int rxrpc_vet_description_s(const char *);
+static int rxrpc_preparse_s(struct key_preparsed_payload *);
+static void rxrpc_free_preparse_s(struct key_preparsed_payload *);
+static void rxrpc_destroy_s(struct key *);
+static void rxrpc_describe_s(const struct key *, struct seq_file *);
+
+/*
+ * rxrpc server keys take "<serviceId>:<securityIndex>[:<sec-specific>]" as the
+ * description and the key material as the payload.
+ */
+struct key_type key_type_rxrpc_s = {
+ .name = "rxrpc_s",
+ .flags = KEY_TYPE_NET_DOMAIN,
+ .vet_description = rxrpc_vet_description_s,
+ .preparse = rxrpc_preparse_s,
+ .free_preparse = rxrpc_free_preparse_s,
+ .instantiate = generic_key_instantiate,
+ .destroy = rxrpc_destroy_s,
+ .describe = rxrpc_describe_s,
+};
+
+/*
+ * Vet the description for an RxRPC server key.
+ */
+static int rxrpc_vet_description_s(const char *desc)
+{
+ unsigned long service, sec_class;
+ char *p;
+
+ service = simple_strtoul(desc, &p, 10);
+ if (*p != ':' || service > 65535)
+ return -EINVAL;
+ sec_class = simple_strtoul(p + 1, &p, 10);
+ if ((*p && *p != ':') || sec_class < 1 || sec_class > 255)
+ return -EINVAL;
+ return 0;
+}
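A few illustrative inputs and the results the vetting above would give (values invented for the example):

	rxrpc_vet_description_s("1:2");     /* 0       - serviceId 1, security class 2 */
	rxrpc_vet_description_s("52:2:5");  /* 0       - trailing ":<sec-specific>" allowed */
	rxrpc_vet_description_s("70000:2"); /* -EINVAL - service > 65535 */
	rxrpc_vet_description_s("1:0");     /* -EINVAL - class outside 1..255 */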
+
+/*
+ * Preparse a server secret key.
+ */
+static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
+{
+ const struct rxrpc_security *sec;
+ unsigned int service, sec_class;
+ int n;
+
+ _enter("%zu", prep->datalen);
+
+ if (!prep->orig_description)
+ return -EINVAL;
+
+ if (sscanf(prep->orig_description, "%u:%u%n", &service, &sec_class, &n) != 2)
+ return -EINVAL;
+
+ sec = rxrpc_security_lookup(sec_class);
+ if (!sec)
+ return -ENOPKG;
+
+ prep->payload.data[1] = (struct rxrpc_security *)sec;
+
+ return sec->preparse_server_key(prep);
+}
+
+static void rxrpc_free_preparse_s(struct key_preparsed_payload *prep)
+{
+ const struct rxrpc_security *sec = prep->payload.data[1];
+
+ if (sec)
+ sec->free_preparse_server_key(prep);
+}
+
+static void rxrpc_destroy_s(struct key *key)
+{
+ const struct rxrpc_security *sec = key->payload.data[1];
+
+ if (sec)
+ sec->destroy_server_key(key);
+}
+
+static void rxrpc_describe_s(const struct key *key, struct seq_file *m)
+{
+ const struct rxrpc_security *sec = key->payload.data[1];
+
+ seq_puts(m, key->description);
+ if (sec && sec->describe_server_key)
+ sec->describe_server_key(key, m);
+}
+
+/*
+ * grab the security keyring for a server socket
+ */
+int rxrpc_server_keyring(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
+{
+ struct key *key;
+ char *description;
+
+ _enter("");
+
+ if (optlen <= 0 || optlen > PAGE_SIZE - 1)
+ return -EINVAL;
+
+ description = memdup_sockptr_nul(optval, optlen);
+ if (IS_ERR(description))
+ return PTR_ERR(description);
+
+ key = request_key(&key_type_keyring, description, NULL);
+ if (IS_ERR(key)) {
+ kfree(description);
+ _leave(" = %ld", PTR_ERR(key));
+ return PTR_ERR(key);
+ }
+
+ rx->securities = key;
+ kfree(description);
+ _leave(" = 0 [key %x]", key->serial);
+ return 0;
+}
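From userspace this is reached through the RXRPC_SECURITY_KEYRING socket option documented for AF_RXRPC; a minimal sketch, assuming a keyring named "AFSkeys" that has already been populated with rxrpc_s keys:

	const char *keyring = "AFSkeys";	/* hypothetical keyring name */

	if (setsockopt(server_fd, SOL_RXRPC, RXRPC_SECURITY_KEYRING,
		       keyring, strlen(keyring) + 1) < 0)
		perror("setsockopt");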
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a3b37d88800e..1e8ab4749c6c 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -281,7 +281,7 @@ config NET_SCH_CHOKE
help
Say Y here if you want to use the CHOKe packet scheduler (CHOose
and Keep for responsive flows, CHOose and Kill for unresponsive
- flows). This is a variation of RED which trys to penalize flows
+ flows). This is a variation of RED which tries to penalize flows
that monopolize the queue.
To compile this code as a module, choose M here: the
@@ -813,7 +813,7 @@ config NET_ACT_SAMPLE
config NET_ACT_IPT
tristate "IPtables targets"
- depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES
+ depends on NET_CLS_ACT && NETFILTER && NETFILTER_XTABLES
help
Say Y here to be able to invoke iptables targets after successful
classification.
@@ -912,7 +912,7 @@ config NET_ACT_BPF
config NET_ACT_CONNMARK
tristate "Netfilter Connection Mark Retriever"
- depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES
+ depends on NET_CLS_ACT && NETFILTER
depends on NF_CONNTRACK && NF_CONNTRACK_MARK
help
Say Y here to allow retrieving of conn mark
@@ -924,7 +924,7 @@ config NET_ACT_CONNMARK
config NET_ACT_CTINFO
tristate "Netfilter Connection Mark Actions"
- depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES
+ depends on NET_CLS_ACT && NETFILTER
depends on NF_CONNTRACK && NF_CONNTRACK_MARK
help
 Say Y here to allow transfer of connmark-stored information.
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 66bbf9a98f9e..dd14ef413fda 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -5,6 +5,7 @@
obj-y := sch_generic.o sch_mq.o
+obj-$(CONFIG_INET) += sch_frag.o
obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o
obj-$(CONFIG_NET_CLS) += cls_api.o
obj-$(CONFIG_NET_CLS_ACT) += act_api.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f66417d5d2c3..2e85b636b27b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -22,6 +22,22 @@
#include <net/act_api.h>
#include <net/netlink.h>
+#ifdef CONFIG_INET
+DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
+EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
+#endif
+
+int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
+{
+#ifdef CONFIG_INET
+ if (static_branch_unlikely(&tcf_frag_xmit_count))
+ return sch_frag_xmit_hook(skb, xmit);
+#endif
+
+ return xmit(skb);
+}
+EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);
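The static branch keeps this wrapper essentially free until something actually needs re-fragmentation: the act_ct hunk later in this diff increments tcf_frag_xmit_count at module init and decrements it at exit. A caller sketch (the real call site is tcf_mirred_forward(), further down):

	/* Egress path of an action: detours via sch_frag only when armed. */
	err = tcf_dev_queue_xmit(skb, dev_queue_xmit);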
+
static void tcf_action_goto_chain_exec(const struct tc_action *a,
struct tcf_result *res)
{
@@ -215,6 +231,36 @@ static size_t tcf_action_fill_size(const struct tc_action *act)
return sz;
}
+static int
+tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tc_cookie *cookie;
+
+ if (nla_put_string(skb, TCA_KIND, a->ops->kind))
+ goto nla_put_failure;
+ if (tcf_action_copy_stats(skb, a, 0))
+ goto nla_put_failure;
+ if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
+ goto nla_put_failure;
+
+ rcu_read_lock();
+ cookie = rcu_dereference(a->act_cookie);
+ if (cookie) {
+ if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
+ rcu_read_unlock();
+ goto nla_put_failure;
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -248,7 +294,9 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
index--;
goto nla_put_failure;
}
- err = tcf_action_dump_1(skb, p, 0, 0);
+ err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
+ tcf_action_dump_terse(skb, p, true) :
+ tcf_action_dump_1(skb, p, 0, 0);
if (err < 0) {
index--;
nlmsg_trim(skb, nest);
@@ -256,7 +304,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
}
nla_nest_end(skb, nest);
n_i++;
- if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
+ if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
n_i >= TCA_ACT_MAX_PRIO)
goto done;
}
@@ -266,7 +314,7 @@ done:
mutex_unlock(&idrinfo->lock);
if (n_i) {
- if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
+ if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
cb->args[1] = n_i;
}
return n_i;
@@ -651,7 +699,7 @@ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
return res;
}
-/*TCA_ACT_MAX_PRIO is 32, there count upto 32 */
+/* TCA_ACT_MAX_PRIO is 32; we count up to 32 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res)
@@ -752,34 +800,6 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
return a->ops->dump(skb, a, bind, ref);
}
-static int
-tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a)
-{
- unsigned char *b = skb_tail_pointer(skb);
- struct tc_cookie *cookie;
-
- if (nla_put_string(skb, TCA_KIND, a->ops->kind))
- goto nla_put_failure;
- if (tcf_action_copy_stats(skb, a, 0))
- goto nla_put_failure;
-
- rcu_read_lock();
- cookie = rcu_dereference(a->act_cookie);
- if (cookie) {
- if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
- rcu_read_unlock();
- goto nla_put_failure;
- }
- }
- rcu_read_unlock();
-
- return 0;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
@@ -787,7 +807,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
- if (tcf_action_dump_terse(skb, a))
+ if (tcf_action_dump_terse(skb, a, false))
goto nla_put_failure;
if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
@@ -832,7 +852,7 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
nest = nla_nest_start_noflag(skb, i + 1);
if (nest == NULL)
goto nla_put_failure;
- err = terse ? tcf_action_dump_terse(skb, a) :
+ err = terse ? tcf_action_dump_terse(skb, a, false) :
tcf_action_dump_1(skb, a, bind, ref);
if (err < 0)
goto errout;
@@ -935,7 +955,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
NL_SET_ERR_MSG(extack, "TC action kind must be specified");
goto err_out;
}
- if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
+ if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
NL_SET_ERR_MSG(extack, "TC action name too long");
goto err_out;
}
@@ -1469,7 +1489,8 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
}
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
- [TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_FLAG_LARGE_DUMP_ON),
+ [TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
+ TCA_ACT_FLAG_TERSE_DUMP),
[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index a4c7ba35a343..e48e980c3b93 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -65,7 +65,7 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
* In case a different well-known TC_ACT opcode has been
* returned, it will overwrite the default one.
*
- * For everything else that is unkown, TC_ACT_UNSPEC is
+ * For everything else that is unknown, TC_ACT_UNSPEC is
* returned.
*/
switch (filter_res) {
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index aba3cd85f284..83a5c6722a06 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -296,7 +296,8 @@ static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
goto err_insert;
ct_ft->nf_ft.type = &flowtable_ct;
- ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
+ ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
+ NF_FLOWTABLE_COUNTER;
err = nf_flow_table_init(&ct_ft->nf_ft);
if (err)
goto err_init;
@@ -540,7 +541,8 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
flow_offload_refresh(nf_ft, flow);
nf_conntrack_get(&ct->ct_general);
nf_ct_set(skb, ct, ctinfo);
- nf_ct_acct_update(ct, dir, skb->len);
+ if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
+ nf_ct_acct_update(ct, dir, skb->len);
return true;
}
@@ -1541,6 +1543,8 @@ static int __init ct_init_module(void)
if (err)
goto err_register;
+ static_branch_inc(&tcf_frag_xmit_count);
+
return 0;
err_register:
@@ -1552,6 +1556,7 @@ err_tbl_init:
static void __exit ct_cleanup_module(void)
{
+ static_branch_dec(&tcf_frag_xmit_count);
tcf_unregister_action(&act_ct_ops, &ct_net_ops);
tcf_ct_flow_tables_uninit();
destroy_workqueue(act_ct_wq);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8dc3bec0d325..ac7297f42355 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -166,7 +166,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
if (unlikely(!tname))
goto err1;
if (tb[TCA_IPT_TABLE] == NULL ||
- nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
+ nla_strscpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
strcpy(tname, "mangle");
t = kmemdup(td, td->u.target_size, GFP_KERNEL);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index e24b7e2331cd..7153c67f641e 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -205,6 +205,18 @@ release_idr:
return err;
}
+static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
+{
+ int err;
+
+ if (!want_ingress)
+ err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
+ else
+ err = netif_receive_skb(skb);
+
+ return err;
+}
+
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -287,18 +299,15 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
 /* let the caller reinsert the packet, if possible */
if (use_reinsert) {
res->ingress = want_ingress;
- if (skb_tc_reinsert(skb, res))
+ err = tcf_mirred_forward(res->ingress, skb);
+ if (err)
tcf_action_inc_overlimit_qstats(&m->common);
__this_cpu_dec(mirred_rec_level);
return TC_ACT_CONSUMED;
}
}
- if (!want_ingress)
- err = dev_queue_xmit(skb2);
- else
- err = netif_receive_skb(skb2);
-
+ err = tcf_mirred_forward(want_ingress, skb2);
if (err) {
out:
tcf_action_inc_overlimit_qstats(&m->common);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index a4f3d0f0daa9..726cc956d06f 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -52,7 +52,7 @@ static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL);
if (unlikely(!d->tcfd_defdata))
return -ENOMEM;
- nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+ nla_strscpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
return 0;
}
@@ -71,7 +71,7 @@ static int reset_policy(struct tc_action *a, const struct nlattr *defdata,
spin_lock_bh(&d->tcf_lock);
goto_ch = tcf_action_set_ctrlact(a, p->action, goto_ch);
memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
- nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+ nla_strscpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
spin_unlock_bh(&d->tcf_lock);
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 838b3fd94d77..37b77bd30974 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -223,7 +223,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
if (kind)
- return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
+ return nla_strscpy(name, kind, IFNAMSIZ) < 0;
memset(name, 0, IFNAMSIZ);
return false;
}
@@ -991,13 +991,12 @@ __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
*/
struct tcf_proto *
-tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
- bool rtnl_held)
+tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
if (tp)
- tcf_proto_put(tp, rtnl_held, NULL);
+ tcf_proto_put(tp, true, NULL);
return tp_next;
}
@@ -1924,15 +1923,14 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
struct tcf_block *block, struct Qdisc *q,
u32 parent, struct nlmsghdr *n,
- struct tcf_chain *chain, int event,
- bool rtnl_held)
+ struct tcf_chain *chain, int event)
{
struct tcf_proto *tp;
- for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
- tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
+ for (tp = tcf_get_next_proto(chain, NULL);
+ tp; tp = tcf_get_next_proto(chain, tp))
tfilter_notify(net, oskb, n, tp, block,
- q, parent, NULL, event, false, rtnl_held);
+ q, parent, NULL, event, false, true);
}
static void tfilter_put(struct tcf_proto *tp, void *fh)
@@ -2262,7 +2260,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
if (prio == 0) {
tfilter_notify_chain(net, skb, block, q, parent, n,
- chain, RTM_DELTFILTER, rtnl_held);
+ chain, RTM_DELTFILTER);
tcf_chain_flush(chain, rtnl_held);
err = 0;
goto errout;
@@ -2895,7 +2893,7 @@ replay:
break;
case RTM_DELCHAIN:
tfilter_notify_chain(net, skb, block, q, parent, n,
- chain, RTM_DELTFILTER, true);
+ chain, RTM_DELTFILTER);
/* Flush the chain first as the user requested chain removal. */
tcf_chain_flush(chain, true);
/* In case the chain was successfully deleted, put a reference
@@ -2940,7 +2938,6 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
struct tcf_chain *chain;
long index_start;
long index;
- u32 parent;
int err;
if (nlmsg_len(cb->nlh) < sizeof(*tcm))
@@ -2955,13 +2952,6 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
if (!block)
goto out;
- /* If we work with block index, q is NULL and parent value
- * will never be used in the following code. The check
- * in tcf_fill_node prevents it. However, compiler does not
- * see that far, so set parent to zero to silence the warning
- * about parent being uninitialized.
- */
- parent = 0;
} else {
const struct Qdisc_class_ops *cops;
struct net_device *dev;
@@ -2971,13 +2961,11 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
if (!dev)
return skb->len;
- parent = tcm->tcm_parent;
- if (!parent) {
+ if (!tcm->tcm_parent)
q = dev->qdisc;
- parent = q->handle;
- } else {
+ else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
- }
+
if (!q)
goto out;
cops = q->ops->cl_ops;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index d36949d9382c..2e288f88ff02 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -238,7 +238,7 @@ static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
}
}
- /* Something went wrong if we are trying to replace a non-existant
+ /* Something went wrong if we are trying to replace a non-existent
 * node. Might as well halt instead of silently failing.
*/
BUG_ON(1);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 54209a18d7fe..6e1abe805448 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -1171,7 +1171,6 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
- int err;
tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
cls_u32.command = add ?
@@ -1194,13 +1193,9 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
cls_u32.knode.link_handle = ht->handle;
}
- err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
- &cls_u32, cb_priv, &n->flags,
- &n->in_hw_count);
- if (err)
- return err;
-
- return 0;
+ return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
+ &cls_u32, cb_priv, &n->flags,
+ &n->in_hw_count);
}
static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index a4d09b1fb66a..f17b049ea530 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -41,7 +41,7 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
break;
case TCF_EM_ALIGN_U32:
- /* Worth checking boundries? The branching seems
+ /* Worth checking boundaries? The branching seems
* to get worse. Visit again.
*/
val = get_unaligned_be32(ptr);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2a76a2f5ed88..51cb553e4317 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1170,7 +1170,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
#ifdef CONFIG_MODULES
if (ops == NULL && kind != NULL) {
char name[IFNAMSIZ];
- if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
+ if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
/* We dropped the RTNL semaphore in order to
* perform the module load. So, even if we
* succeeded in loading the module we have to
@@ -1943,8 +1943,8 @@ static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
chain = tcf_get_next_chain(block, chain)) {
struct tcf_proto *tp;
- for (tp = tcf_get_next_proto(chain, NULL, true);
- tp; tp = tcf_get_next_proto(chain, tp, true)) {
+ for (tp = tcf_get_next_proto(chain, NULL);
+ tp; tp = tcf_get_next_proto(chain, tp)) {
struct tcf_bind_args arg = {};
arg.w.fn = tcf_node_bind;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 1c281cc81f57..007bd2d9f1ff 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -466,10 +466,10 @@ drop: __maybe_unused
* non-ATM interfaces.
*/
-static void sch_atm_dequeue(unsigned long data)
+static void sch_atm_dequeue(struct tasklet_struct *t)
{
- struct Qdisc *sch = (struct Qdisc *)data;
- struct atm_qdisc_data *p = qdisc_priv(sch);
+ struct atm_qdisc_data *p = from_tasklet(p, t, task);
+ struct Qdisc *sch = qdisc_from_priv(p);
struct atm_flow_data *flow;
struct sk_buff *skb;
@@ -563,7 +563,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
if (err)
return err;
- tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
+ tasklet_setup(&p->task, sch_atm_dequeue);
return 0;
}
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index 2eaac2ff380f..459cc240eda9 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -50,6 +50,7 @@
* locredit = max_frame_size * (sendslope / port_transmit_rate)
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
new file mode 100644
index 000000000000..e1e77d3fb6c0
--- /dev/null
+++ b/net/sched/sch_frag.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+#include <net/netlink.h>
+#include <net/sch_generic.h>
+#include <net/dst.h>
+#include <net/ip.h>
+#include <net/ip6_fib.h>
+
+struct sch_frag_data {
+ unsigned long dst;
+ struct qdisc_skb_cb cb;
+ __be16 inner_protocol;
+ u16 vlan_tci;
+ __be16 vlan_proto;
+ unsigned int l2_len;
+ u8 l2_data[VLAN_ETH_HLEN];
+ int (*xmit)(struct sk_buff *skb);
+};
+
+static DEFINE_PER_CPU(struct sch_frag_data, sch_frag_data_storage);
+
+static int sch_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ struct sch_frag_data *data = this_cpu_ptr(&sch_frag_data_storage);
+
+ if (skb_cow_head(skb, data->l2_len) < 0) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ __skb_dst_copy(skb, data->dst);
+ *qdisc_skb_cb(skb) = data->cb;
+ skb->inner_protocol = data->inner_protocol;
+ if (data->vlan_tci & VLAN_CFI_MASK)
+ __vlan_hwaccel_put_tag(skb, data->vlan_proto,
+ data->vlan_tci & ~VLAN_CFI_MASK);
+ else
+ __vlan_hwaccel_clear_tag(skb);
+
+ /* Reconstruct the MAC header. */
+ skb_push(skb, data->l2_len);
+ memcpy(skb->data, &data->l2_data, data->l2_len);
+ skb_postpush_rcsum(skb, skb->data, data->l2_len);
+ skb_reset_mac_header(skb);
+
+ return data->xmit(skb);
+}
+
+static void sch_frag_prepare_frag(struct sk_buff *skb,
+ int (*xmit)(struct sk_buff *skb))
+{
+ unsigned int hlen = skb_network_offset(skb);
+ struct sch_frag_data *data;
+
+ data = this_cpu_ptr(&sch_frag_data_storage);
+ data->dst = skb->_skb_refdst;
+ data->cb = *qdisc_skb_cb(skb);
+ data->xmit = xmit;
+ data->inner_protocol = skb->inner_protocol;
+ if (skb_vlan_tag_present(skb))
+ data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
+ else
+ data->vlan_tci = 0;
+ data->vlan_proto = skb->vlan_proto;
+ data->l2_len = hlen;
+ memcpy(&data->l2_data, skb->data, hlen);
+
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ skb_pull(skb, hlen);
+}
+
+static unsigned int
+sch_frag_dst_get_mtu(const struct dst_entry *dst)
+{
+ return dst->dev->mtu;
+}
+
+static struct dst_ops sch_frag_dst_ops = {
+ .family = AF_UNSPEC,
+ .mtu = sch_frag_dst_get_mtu,
+};
+
+static int sch_fragment(struct net *net, struct sk_buff *skb,
+ u16 mru, int (*xmit)(struct sk_buff *skb))
+{
+ int ret = -1;
+
+ if (skb_network_offset(skb) > VLAN_ETH_HLEN) {
+ net_warn_ratelimited("L2 header too long to fragment\n");
+ goto err;
+ }
+
+ if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
+ struct dst_entry sch_frag_dst;
+ unsigned long orig_dst;
+
+ sch_frag_prepare_frag(skb, xmit);
+ dst_init(&sch_frag_dst, &sch_frag_dst_ops, NULL, 1,
+ DST_OBSOLETE_NONE, DST_NOCOUNT);
+ sch_frag_dst.dev = skb->dev;
+
+ orig_dst = skb->_skb_refdst;
+ skb_dst_set_noref(skb, &sch_frag_dst);
+ IPCB(skb)->frag_max_size = mru;
+
+ ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
+ refdst_drop(orig_dst);
+ } else if (skb_protocol(skb, true) == htons(ETH_P_IPV6)) {
+ unsigned long orig_dst;
+ struct rt6_info sch_frag_rt;
+
+ sch_frag_prepare_frag(skb, xmit);
+ memset(&sch_frag_rt, 0, sizeof(sch_frag_rt));
+ dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
+ DST_OBSOLETE_NONE, DST_NOCOUNT);
+ sch_frag_rt.dst.dev = skb->dev;
+
+ orig_dst = skb->_skb_refdst;
+ skb_dst_set_noref(skb, &sch_frag_rt.dst);
+ IP6CB(skb)->frag_max_size = mru;
+
+ ret = ipv6_stub->ipv6_fragment(net, skb->sk, skb,
+ sch_frag_xmit);
+ refdst_drop(orig_dst);
+ } else {
+ net_warn_ratelimited("Fail frag %s: eth=%x, MRU=%d, MTU=%d\n",
+ netdev_name(skb->dev),
+ ntohs(skb_protocol(skb, true)), mru,
+ skb->dev->mtu);
+ goto err;
+ }
+
+ return ret;
+err:
+ kfree_skb(skb);
+ return ret;
+}
+
+int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
+{
+ u16 mru = qdisc_skb_cb(skb)->mru;
+ int err;
+
+ if (mru && skb->len > mru + skb->dev->hard_header_len)
+ err = sch_fragment(dev_net(skb->dev), skb, mru, xmit);
+ else
+ err = xmit(skb);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(sch_frag_xmit_hook);
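Putting the pieces together: act_ct is intended to record the conntrack MRU in qdisc_skb_cb(skb)->mru when it reassembles fragments, and the hook above re-fragments on transmit only when the packet outgrew that MRU. A condensed sketch of the decision, assuming an example MRU of 1500 and a 14-byte Ethernet header:

	u16 mru = 1500;		/* example value read from qdisc_skb_cb */

	if (mru && skb->len > mru + skb->dev->hard_header_len)	/* > 1514 */
		err = sch_fragment(dev_net(skb->dev), skb, mru, xmit);
	else
		err = xmit(skb);	/* small enough: send as-is */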
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index c65077f0c0f3..5a457ff61acd 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -405,7 +405,7 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
/* We restart the measurement cycle if the following conditions are met
* 1. If the delay has been low for 2 consecutive Tupdate periods
* 2. Calculated drop probability is zero
- * 3. If average dq_rate_estimator is enabled, we have atleast one
+ * 3. If average dq_rate_estimator is enabled, we have at least one
 * estimate for the avg_dq_rate, i.e., it is a non-zero value
*/
if ((vars->qdelay < params->target / 2) &&
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index b0ad7687ee2c..26fb8a62996b 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -6,6 +6,7 @@
*
*/
+#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 39d7fa9569f8..5da599ff84a9 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -11,6 +11,7 @@ menuconfig IP_SCTP
select CRYPTO_HMAC
select CRYPTO_SHA1
select LIBCRC32C
+ select NET_UDP_TUNNEL
help
Stream Control Transmission Protocol
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index fdb69d46276d..336df4b36655 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -99,6 +99,8 @@ static struct sctp_association *sctp_association_init(
*/
asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
+ asoc->encap_port = sp->encap_port;
+
/* Initialize path max retrans value. */
asoc->pathmaxrxt = sp->pathmaxrxt;
@@ -624,6 +626,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
*/
peer->hbinterval = asoc->hbinterval;
+ peer->encap_port = asoc->encap_port;
+
/* Set the path max_retrans. */
peer->pathmaxrxt = asoc->pathmaxrxt;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 8a58f42d6d19..c3e89c776e66 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -55,6 +55,7 @@
#include <net/inet_common.h>
#include <net/inet_ecn.h>
#include <net/sctp/sctp.h>
+#include <net/udp_tunnel.h>
#include <linux/uaccess.h>
@@ -191,33 +192,53 @@ out:
return ret;
}
-static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
+static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t)
{
+ struct dst_entry *dst = dst_clone(t->dst);
+ struct flowi6 *fl6 = &t->fl.u.ip6;
struct sock *sk = skb->sk;
struct ipv6_pinfo *np = inet6_sk(sk);
- struct flowi6 *fl6 = &transport->fl.u.ip6;
__u8 tclass = np->tclass;
- int res;
+ __be32 label;
pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
skb->len, &fl6->saddr, &fl6->daddr);
- if (transport->dscp & SCTP_DSCP_SET_MASK)
- tclass = transport->dscp & SCTP_DSCP_VAL_MASK;
+ if (t->dscp & SCTP_DSCP_SET_MASK)
+ tclass = t->dscp & SCTP_DSCP_VAL_MASK;
if (INET_ECN_is_capable(tclass))
IP6_ECN_flow_xmit(sk, fl6->flowlabel);
- if (!(transport->param_flags & SPP_PMTUD_ENABLE))
+ if (!(t->param_flags & SPP_PMTUD_ENABLE))
skb->ignore_df = 1;
SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
- rcu_read_lock();
- res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
- tclass, sk->sk_priority);
- rcu_read_unlock();
- return res;
+ if (!t->encap_port || !sctp_sk(sk)->udp_port) {
+ int res;
+
+ skb_dst_set(skb, dst);
+ rcu_read_lock();
+ res = ip6_xmit(sk, skb, fl6, sk->sk_mark,
+ rcu_dereference(np->opt),
+ tclass, sk->sk_priority);
+ rcu_read_unlock();
+ return res;
+ }
+
+ if (skb_is_gso(skb))
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+
+ skb->encapsulation = 1;
+ skb_reset_inner_mac_header(skb);
+ skb_reset_inner_transport_header(skb);
+ skb_set_inner_ipproto(skb, IPPROTO_SCTP);
+ label = ip6_make_flowlabel(sock_net(sk), skb, fl6->flowlabel, true, fl6);
+
+ return udp_tunnel6_xmit_skb(dst, sk, skb, NULL, &fl6->saddr,
+ &fl6->daddr, tclass, ip6_dst_hoplimit(dst),
+ label, sctp_sk(sk)->udp_port, t->encap_port, false);
}
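The gate above is symmetric with the IPv4 path later in this diff: the packet is UDP-encapsulated only when both ends are configured, otherwise it takes the plain ip6_xmit() route unchanged. A condensed sketch of the condition:

	/* Encapsulate only when both the local listening port and the
	 * peer's encap port are non-zero.
	 */
	bool use_udp_encap = sctp_sk(sk)->udp_port && t->encap_port;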
/* Returns the dst cache entry for the given source and destination ip
@@ -1053,6 +1074,7 @@ static struct inet_protosw sctpv6_stream_protosw = {
static int sctp6_rcv(struct sk_buff *skb)
{
+ SCTP_INPUT_CB(skb)->encap_port = 0;
return sctp_rcv(skb) ? -1 : 0;
}
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 74847d613835..ce281a9a2875 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -27,7 +27,11 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
skb->csum_not_inet = 0;
- gso_reset_checksum(skb, ~0);
+ /* csum and csum_start in GSO CB may be needed to do the UDP
+ * checksum when it's a UDP tunneling packet.
+ */
+ SKB_GSO_CB(skb)->csum = (__force __wsum)~0;
+ SKB_GSO_CB(skb)->csum_start = skb_headroom(skb) + skb->len;
return sctp_compute_cksum(skb, skb_transport_offset(skb));
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 1441eaf460bb..6614c9fdc51e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -508,20 +508,14 @@ merge:
sizeof(struct inet6_skb_parm)));
skb_shinfo(head)->gso_segs = pkt_count;
skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
- rcu_read_lock();
- if (skb_dst(head) != tp->dst) {
- dst_hold(tp->dst);
- sk_setup_caps(sk, tp->dst);
- }
- rcu_read_unlock();
goto chksum;
}
if (sctp_checksum_disable)
return 1;
- if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
- dst_xfrm(skb_dst(head)) || packet->ipfragok) {
+ if (!(tp->dst->dev->features & NETIF_F_SCTP_CRC) ||
+ dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port) {
struct sctphdr *sh =
(struct sctphdr *)skb_transport_header(head);
@@ -548,7 +542,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
struct sctp_association *asoc = tp->asoc;
struct sctp_chunk *chunk, *tmp;
int pkt_count, gso = 0;
- struct dst_entry *dst;
struct sk_buff *head;
struct sctphdr *sh;
struct sock *sk;
@@ -585,13 +578,18 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
sh->checksum = 0;
/* drop packet if no dst */
- dst = dst_clone(tp->dst);
- if (!dst) {
+ if (!tp->dst) {
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(head);
goto out;
}
- skb_dst_set(head, dst);
+
+ rcu_read_lock();
+ if (__sk_dst_get(sk) != tp->dst) {
+ dst_hold(tp->dst);
+ sk_setup_caps(sk, tp->dst);
+ }
+ rcu_read_unlock();
/* pack up chunks */
pkt_count = sctp_packet_pack(packet, head, gso, gfp);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 25833238fe93..6f2bbfeec3a4 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -44,6 +44,7 @@
#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_ecn.h>
+#include <net/udp_tunnel.h>
#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
@@ -840,6 +841,92 @@ static int sctp_ctl_sock_init(struct net *net)
return 0;
}
+static int sctp_udp_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ SCTP_INPUT_CB(skb)->encap_port = udp_hdr(skb)->source;
+
+ skb_set_transport_header(skb, sizeof(struct udphdr));
+ sctp_rcv(skb);
+ return 0;
+}
+
+static int sctp_udp_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ int family;
+
+ skb->transport_header += sizeof(struct udphdr);
+ family = (ip_hdr(skb)->version == 4) ? AF_INET : AF_INET6;
+ sk = sctp_err_lookup(dev_net(skb->dev), family, skb, sctp_hdr(skb),
+ &asoc, &t);
+ if (!sk)
+ return -ENOENT;
+
+ sctp_err_finish(sk, t);
+ return 0;
+}
+
+int sctp_udp_sock_start(struct net *net)
+{
+ struct udp_tunnel_sock_cfg tuncfg = {NULL};
+ struct udp_port_cfg udp_conf = {0};
+ struct socket *sock;
+ int err;
+
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ udp_conf.local_udp_port = htons(net->sctp.udp_port);
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err) {
+ pr_err("Failed to create the SCTP UDP tunneling v4 sock\n");
+ return err;
+ }
+
+ tuncfg.encap_type = 1;
+ tuncfg.encap_rcv = sctp_udp_rcv;
+ tuncfg.encap_err_lookup = sctp_udp_err_lookup;
+ setup_udp_tunnel_sock(net, sock, &tuncfg);
+ net->sctp.udp4_sock = sock->sk;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ memset(&udp_conf, 0, sizeof(udp_conf));
+
+ udp_conf.family = AF_INET6;
+ udp_conf.local_ip6 = in6addr_any;
+ udp_conf.local_udp_port = htons(net->sctp.udp_port);
+ udp_conf.use_udp6_rx_checksums = true;
+ udp_conf.ipv6_v6only = true;
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err) {
+ pr_err("Failed to create the SCTP UDP tunneling v6 sock\n");
+ udp_tunnel_sock_release(net->sctp.udp4_sock->sk_socket);
+ net->sctp.udp4_sock = NULL;
+ return err;
+ }
+
+ tuncfg.encap_type = 1;
+ tuncfg.encap_rcv = sctp_udp_rcv;
+ tuncfg.encap_err_lookup = sctp_udp_err_lookup;
+ setup_udp_tunnel_sock(net, sock, &tuncfg);
+ net->sctp.udp6_sock = sock->sk;
+#endif
+
+ return 0;
+}
+
+void sctp_udp_sock_stop(struct net *net)
+{
+ if (net->sctp.udp4_sock) {
+ udp_tunnel_sock_release(net->sctp.udp4_sock->sk_socket);
+ net->sctp.udp4_sock = NULL;
+ }
+ if (net->sctp.udp6_sock) {
+ udp_tunnel_sock_release(net->sctp.udp6_sock->sk_socket);
+ net->sctp.udp6_sock = NULL;
+ }
+}
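These start/stop helpers are intended to be driven by the new net.sctp.udp_port control added elsewhere in this series; a hedged sketch of that control flow (the sysctl handler itself is not in this hunk):

	/* Assumed driver of the helpers above: a non-zero port arms the
	 * listening socks, zero tears them down.
	 */
	if (new_port) {
		err = sctp_udp_sock_start(net);
		if (!err)
			net->sctp.udp_port = new_port;
	} else {
		sctp_udp_sock_stop(net);
		net->sctp.udp_port = 0;
	}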
+
/* Register address family specific functions. */
int sctp_register_af(struct sctp_af *af)
{
@@ -971,25 +1058,44 @@ static int sctp_inet_supported_addrs(const struct sctp_sock *opt,
}
/* Wrapper routine that calls the ip transmit routine. */
-static inline int sctp_v4_xmit(struct sk_buff *skb,
- struct sctp_transport *transport)
+static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t)
{
- struct inet_sock *inet = inet_sk(skb->sk);
+ struct dst_entry *dst = dst_clone(t->dst);
+ struct flowi4 *fl4 = &t->fl.u.ip4;
+ struct sock *sk = skb->sk;
+ struct inet_sock *inet = inet_sk(sk);
__u8 dscp = inet->tos;
+ __be16 df = 0;
pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb,
- skb->len, &transport->fl.u.ip4.saddr,
- &transport->fl.u.ip4.daddr);
+ skb->len, &fl4->saddr, &fl4->daddr);
- if (transport->dscp & SCTP_DSCP_SET_MASK)
- dscp = transport->dscp & SCTP_DSCP_VAL_MASK;
+ if (t->dscp & SCTP_DSCP_SET_MASK)
+ dscp = t->dscp & SCTP_DSCP_VAL_MASK;
+
+ inet->pmtudisc = t->param_flags & SPP_PMTUD_ENABLE ? IP_PMTUDISC_DO
+ : IP_PMTUDISC_DONT;
+ SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
- inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
- IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
+ if (!t->encap_port || !sctp_sk(sk)->udp_port) {
+ skb_dst_set(skb, dst);
+ return __ip_queue_xmit(sk, skb, &t->fl, dscp);
+ }
- SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);
+ if (skb_is_gso(skb))
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
- return __ip_queue_xmit(&inet->sk, skb, &transport->fl, dscp);
+ if (ip_dont_fragment(sk, dst) && !skb->ignore_df)
+ df = htons(IP_DF);
+
+ skb->encapsulation = 1;
+ skb_reset_inner_mac_header(skb);
+ skb_reset_inner_transport_header(skb);
+ skb_set_inner_ipproto(skb, IPPROTO_SCTP);
+ udp_tunnel_xmit_skb((struct rtable *)dst, sk, skb, fl4->saddr,
+ fl4->daddr, dscp, ip4_dst_hoplimit(dst), df,
+ sctp_sk(sk)->udp_port, t->encap_port, false, false);
+ return 0;
}
static struct sctp_af sctp_af_inet;
@@ -1054,9 +1160,15 @@ static struct inet_protosw sctp_stream_protosw = {
.flags = SCTP_PROTOSW_FLAG
};
+static int sctp4_rcv(struct sk_buff *skb)
+{
+ SCTP_INPUT_CB(skb)->encap_port = 0;
+ return sctp_rcv(skb);
+}
+
/* Register with IP layer. */
static const struct net_protocol sctp_protocol = {
- .handler = sctp_rcv,
+ .handler = sctp4_rcv,
.err_handler = sctp_v4_err,
.no_policy = 1,
.netns_ok = 1,
@@ -1271,6 +1383,12 @@ static int __net_init sctp_defaults_init(struct net *net)
/* Enable ECN by default. */
net->sctp.ecn_enable = 1;
+ /* Set UDP tunneling listening port to 0 by default */
+ net->sctp.udp_port = 0;
+
+ /* Set remote encap port to 0 by default */
+ net->sctp.encap_port = 0;
+
/* Set SCOPE policy to enabled */
net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9a56ae2f3651..f77484df097b 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1142,6 +1142,26 @@ nodata:
return retval;
}
+struct sctp_chunk *sctp_make_new_encap_port(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ struct sctp_new_encap_port_hdr nep;
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_abort(asoc, chunk,
+ sizeof(struct sctp_errhdr) + sizeof(nep));
+ if (!retval)
+ goto nodata;
+
+ sctp_init_cause(retval, SCTP_ERROR_NEW_ENCAP_PORT, sizeof(nep));
+ nep.cur_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
+ nep.new_port = chunk->transport->encap_port;
+ sctp_addto_chunk(retval, sizeof(nep), &nep);
+
+nodata:
+ return retval;
+}
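
The cause body assembled above is just the two UDP ports, carried in network byte order. A presumed sketch of the header it fills in — the authoritative definition is this series' uapi header, which is not part of this hunk:

struct sctp_new_encap_port_hdr {
	__be16 cur_port;	/* UDP port the offending chunk arrived on */
	__be16 new_port;	/* UDP port the peer is expected to use */
};
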
+
/* Make a HEARTBEAT chunk. */
struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
const struct sctp_transport *transport)
@@ -2321,6 +2341,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
* added as the primary transport. The source address seems to
* be a better choice than any of the embedded addresses.
*/
+ asoc->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
goto nomem;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c669f8bd1eab..af2b7041fa4e 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -87,6 +87,13 @@ static enum sctp_disposition sctp_sf_tabort_8_4_8(
const union sctp_subtype type,
void *arg,
struct sctp_cmd_seq *commands);
+static enum sctp_disposition sctp_sf_new_encap_port(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
static enum sctp_disposition sctp_stop_t1_and_abort(
@@ -1493,6 +1500,10 @@ static enum sctp_disposition sctp_sf_do_unexpected_init(
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
+
+ if (SCTP_INPUT_CB(chunk->skb)->encap_port != chunk->transport->encap_port)
+ return sctp_sf_new_encap_port(net, ep, asoc, type, arg, commands);
+
/* Grab the INIT header. */
chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
@@ -3392,6 +3403,45 @@ static enum sctp_disposition sctp_sf_tabort_8_4_8(
sctp_packet_append_chunk(packet, abort);
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet));
+
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+
+ sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/* Handling of SCTP Packets Containing an INIT Chunk Matching an
+ * Existing Association when the UDP encap port is incorrect.
+ *
+ * From Section 4 of draft-tuexen-tsvwg-sctp-udp-encaps-cons-03.
+ */
+static enum sctp_disposition sctp_sf_new_encap_port(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_packet *packet = NULL;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *abort;
+
+ packet = sctp_ootb_pkt_new(net, asoc, chunk);
+ if (!packet)
+ return SCTP_DISPOSITION_NOMEM;
+
+ abort = sctp_make_new_encap_port(asoc, chunk);
+ if (!abort) {
+ sctp_ootb_pkt_free(packet);
+ return SCTP_DISPOSITION_NOMEM;
+ }
+
+ abort->skb->sk = ep->base.sk;
+
+ sctp_packet_append_chunk(packet, abort);
+
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
@@ -6268,6 +6318,8 @@ static struct sctp_packet *sctp_ootb_pkt_new(
if (!transport)
goto nomem;
+ transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
+
/* Cache a route for the transport with the chunk's destination as
* the source address.
*/
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 53d0a4161df3..a710917c5ac7 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4417,6 +4417,55 @@ out:
return retval;
}
+static int sctp_setsockopt_encap_port(struct sock *sk,
+ struct sctp_udpencaps *encap,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ __be16 encap_port;
+
+ if (optlen != sizeof(*encap))
+ return -EINVAL;
+
+ /* If an address other than INADDR_ANY is specified, and
+ * no transport is found, then the request is invalid.
+ */
+ encap_port = (__force __be16)encap->sue_port;
+ if (!sctp_is_any(sk, (union sctp_addr *)&encap->sue_address)) {
+ t = sctp_addr_id2transport(sk, &encap->sue_address,
+ encap->sue_assoc_id);
+ if (!t)
+ return -EINVAL;
+
+ t->encap_port = encap_port;
+ return 0;
+ }
+
+ /* Get the association. If assoc_id != SCTP_FUTURE_ASSOC, the
+ * socket is a one-to-many style socket, and no association was
+ * found, then the id is invalid.
+ */
+ asoc = sctp_id2assoc(sk, encap->sue_assoc_id);
+ if (!asoc && encap->sue_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ /* If the change is for an association, also apply encap_port to
+ * each of its transports.
+ */
+ if (asoc) {
+ list_for_each_entry(t, &asoc->peer.transport_addr_list,
+ transports)
+ t->encap_port = encap_port;
+
+ return 0;
+ }
+
+ sctp_sk(sk)->encap_port = encap_port;
+ return 0;
+}
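
For context, a minimal userspace sketch of driving this option. It assumes a libc/lksctp header exporting the new SCTP_REMOTE_UDP_ENCAPS_PORT constant and struct sctp_udpencaps; set_encap_port() is a hypothetical helper. Leaving sue_address zeroed (INADDR_ANY) takes the socket- or association-wide path above; sue_port travels in network byte order, and 0 switches encapsulation off:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* assumed to carry the new uapi names */

static int set_encap_port(int fd, uint16_t port)
{
	struct sctp_udpencaps encap;

	memset(&encap, 0, sizeof(encap));	/* sue_address = INADDR_ANY */
	encap.sue_assoc_id = SCTP_FUTURE_ASSOC;	/* apply to future assocs too */
	encap.sue_port = htons(port);		/* 0 disables encapsulation */

	return setsockopt(fd, IPPROTO_SCTP, SCTP_REMOTE_UDP_ENCAPS_PORT,
			  &encap, sizeof(encap));
}
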
+
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4636,6 +4685,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_EXPOSE_POTENTIALLY_FAILED_STATE:
retval = sctp_setsockopt_pf_expose(sk, kopt, optlen);
break;
+ case SCTP_REMOTE_UDP_ENCAPS_PORT:
+ retval = sctp_setsockopt_encap_port(sk, kopt, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -4876,6 +4928,8 @@ static int sctp_init_sock(struct sock *sk)
* be modified via SCTP_PEER_ADDR_PARAMS
*/
sp->hbinterval = net->sctp.hb_interval;
+ sp->udp_port = htons(net->sctp.udp_port);
+ sp->encap_port = htons(net->sctp.encap_port);
sp->pathmaxrxt = net->sctp.max_retrans_path;
sp->pf_retrans = net->sctp.pf_retrans;
sp->ps_retrans = net->sctp.ps_retrans;
@@ -7790,6 +7844,65 @@ out:
return retval;
}
+static int sctp_getsockopt_encap_port(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_association *asoc;
+ struct sctp_udpencaps encap;
+ struct sctp_transport *t;
+ __be16 encap_port;
+
+ if (len < sizeof(encap))
+ return -EINVAL;
+
+ len = sizeof(encap);
+ if (copy_from_user(&encap, optval, len))
+ return -EFAULT;
+
+ /* If an address other than INADDR_ANY is specified, and
+ * no transport is found, then the request is invalid.
+ */
+ if (!sctp_is_any(sk, (union sctp_addr *)&encap.sue_address)) {
+ t = sctp_addr_id2transport(sk, &encap.sue_address,
+ encap.sue_assoc_id);
+ if (!t) {
+ pr_debug("%s: failed no transport\n", __func__);
+ return -EINVAL;
+ }
+
+ encap_port = t->encap_port;
+ goto out;
+ }
+
+ /* Get the association. If assoc_id != SCTP_FUTURE_ASSOC, the
+ * socket is a one-to-many style socket, and no association was
+ * found, then the id is invalid.
+ */
+ asoc = sctp_id2assoc(sk, encap.sue_assoc_id);
+ if (!asoc && encap.sue_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ pr_debug("%s: failed no association\n", __func__);
+ return -EINVAL;
+ }
+
+ if (asoc) {
+ encap_port = asoc->encap_port;
+ goto out;
+ }
+
+ encap_port = sctp_sk(sk)->encap_port;
+
+out:
+ encap.sue_port = (__force uint16_t)encap_port;
+ if (copy_to_user(optval, &encap, len))
+ return -EFAULT;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ return 0;
+}
+
static int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -8010,6 +8123,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_EXPOSE_POTENTIALLY_FAILED_STATE:
retval = sctp_getsockopt_pf_expose(sk, len, optval, optlen);
break;
+ case SCTP_REMOTE_UDP_ENCAPS_PORT:
+ retval = sctp_getsockopt_encap_port(sk, len, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index c16c80963e55..e92df779af73 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -36,6 +36,7 @@ static int rto_alpha_max = 1000;
static int rto_beta_max = 1000;
static int pf_expose_max = SCTP_PF_EXPOSE_MAX;
static int ps_retrans_max = SCTP_PS_RETRANS_MAX;
+static int udp_port_max = 65535;
static unsigned long max_autoclose_min = 0;
static unsigned long max_autoclose_max =
@@ -48,6 +49,8 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos);
+static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
@@ -291,6 +294,24 @@ static struct ctl_table sctp_net_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "udp_port",
+ .data = &init_net.sctp.udp_port,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_sctp_do_udp_port,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &udp_port_max,
+ },
+ {
+ .procname = "encap_port",
+ .data = &init_net.sctp.encap_port,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &udp_port_max,
+ },
+ {
.procname = "addr_scope_policy",
.data = &init_net.sctp.scope_policy,
.maxlen = sizeof(int),
@@ -477,6 +498,47 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
return ret;
}
+static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ unsigned int min = *(unsigned int *)ctl->extra1;
+ unsigned int max = *(unsigned int *)ctl->extra2;
+ struct ctl_table tbl;
+ int ret, new_value;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+
+ if (write)
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.udp_port;
+
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0) {
+ struct sock *sk = net->sctp.ctl_sock;
+
+ if (new_value > max || new_value < min)
+ return -EINVAL;
+
+ net->sctp.udp_port = new_value;
+ sctp_udp_sock_stop(net);
+ if (new_value) {
+ ret = sctp_udp_sock_start(net);
+ if (ret)
+ net->sctp.udp_port = 0;
+ }
+
+ /* Update the value in the control socket */
+ lock_sock(sk);
+ sctp_sk(sk)->udp_port = htons(net->sctp.udp_port);
+ release_sock(sk);
+ }
+
+ return ret;
+}
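
The handler above is what makes the sysctl transactional: the tunneling sockets are torn down and restarted under the new port, and the control socket's cached copy is refreshed under the socket lock. A minimal sketch of flipping it from C, using the proc path implied by the table entry above; 9899 is the IANA-assigned SCTP-over-UDP port from RFC 6951, and writing 0 closes the listeners again:

#include <stdio.h>

static int sctp_set_udp_tunneling_port(unsigned short port)
{
	FILE *f = fopen("/proc/sys/net/sctp/udp_port", "w");

	if (!f)
		return -1;
	fprintf(f, "%hu\n", port);	/* e.g. 9899, or 0 to disable */
	return fclose(f);
}
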
+
int sctp_sysctl_net_register(struct net *net)
{
struct ctl_table *table;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 60fcf31cdcfb..bf0ac467e757 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -8,7 +8,7 @@
*
* This file is part of the SCTP kernel implementation
*
- * This module provides the abstraction for an SCTP tranport representing
+ * This module provides the abstraction for an SCTP transport representing
* a remote transport address. For local transport addresses, we just use
* union sctp_addr.
*
@@ -123,7 +123,7 @@ void sctp_transport_free(struct sctp_transport *transport)
/* Delete the T3_rtx timer if it's active.
* There is no point in not doing this now and letting
* structure hang around in memory since we know
- * the tranport is going away.
+ * the transport is going away.
*/
if (del_timer(&transport->T3_rtx_timer))
sctp_transport_put(transport);
diff --git a/net/smc/Makefile b/net/smc/Makefile
index cb1254541f37..77e54fe42b1c 100644
--- a/net/smc/Makefile
+++ b/net/smc/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_SMC) += smc.o
obj-$(CONFIG_SMC_DIAG) += smc_diag.o
smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o
-smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o
+smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5dd4faaf7d6e..47340b3b514f 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -45,6 +45,7 @@
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
+#include "smc_netlink.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
@@ -552,8 +553,7 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
return smc_connect_fallback(smc, reason_code);
}
-/* abort connecting */
-static void smc_connect_abort(struct smc_sock *smc, int local_first)
+static void smc_conn_abort(struct smc_sock *smc, int local_first)
{
if (local_first)
smc_lgr_cleanup_early(&smc->conn);
@@ -669,7 +669,7 @@ static int smc_find_proposal_devices(struct smc_sock *smc,
ini->smc_type_v1 = SMC_TYPE_N;
} /* else RDMA is supported for this connection */
}
- if (smc_ism_v2_capable && smc_find_ism_v2_device_clnt(smc, ini))
+ if (smc_ism_is_v2_capable() && smc_find_ism_v2_device_clnt(smc, ini))
ini->smc_type_v2 = SMC_TYPE_N;
/* if neither ISM nor RDMA are supported, fallback */
@@ -814,7 +814,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
return 0;
connect_abort:
- smc_connect_abort(smc, ini->first_contact_local);
+ smc_conn_abort(smc, ini->first_contact_local);
mutex_unlock(&smc_client_lgr_pending);
smc->connect_nonblock = 0;
@@ -893,7 +893,7 @@ static int smc_connect_ism(struct smc_sock *smc,
return 0;
connect_abort:
- smc_connect_abort(smc, ini->first_contact_local);
+ smc_conn_abort(smc, ini->first_contact_local);
mutex_unlock(&smc_server_lgr_pending);
smc->connect_nonblock = 0;
@@ -921,7 +921,7 @@ static int smc_connect_check_aclc(struct smc_init_info *ini,
/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
- u8 version = smc_ism_v2_capable ? SMC_V2 : SMC_V1;
+ u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
struct smc_clc_msg_accept_confirm_v2 *aclc2;
struct smc_clc_msg_accept_confirm *aclc;
struct smc_init_info *ini = NULL;
@@ -946,9 +946,9 @@ static int __smc_connect(struct smc_sock *smc)
version);
ini->smcd_version = SMC_V1;
- ini->smcd_version |= smc_ism_v2_capable ? SMC_V2 : 0;
+ ini->smcd_version |= smc_ism_is_v2_capable() ? SMC_V2 : 0;
ini->smc_type_v1 = SMC_TYPE_B;
- ini->smc_type_v2 = smc_ism_v2_capable ? SMC_TYPE_D : SMC_TYPE_N;
+ ini->smc_type_v2 = smc_ism_is_v2_capable() ? SMC_TYPE_D : SMC_TYPE_N;
/* get vlan id from IP device */
if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
@@ -1321,10 +1321,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
int local_first, u8 version)
{
/* RDMA setup failed, switch back to TCP */
- if (local_first)
- smc_lgr_cleanup_early(&new_smc->conn);
- else
- smc_conn_free(&new_smc->conn);
+ smc_conn_abort(new_smc, local_first);
if (reason_code < 0) { /* error, no fallback possible */
smc_listen_out_err(new_smc);
return;
@@ -1347,6 +1344,7 @@ static int smc_listen_v2_check(struct smc_sock *new_smc,
{
struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
struct smc_clc_v2_extension *pclc_v2_ext;
+ int rc = SMC_CLC_DECL_PEERNOSMC;
ini->smc_type_v1 = pclc->hdr.typev1;
ini->smc_type_v2 = pclc->hdr.typev2;
@@ -1354,29 +1352,30 @@ static int smc_listen_v2_check(struct smc_sock *new_smc,
if (pclc->hdr.version > SMC_V1)
ini->smcd_version |=
ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0;
- if (!smc_ism_v2_capable) {
+ if (!(ini->smcd_version & SMC_V2)) {
+ rc = SMC_CLC_DECL_PEERNOSMC;
+ goto out;
+ }
+ if (!smc_ism_is_v2_capable()) {
ini->smcd_version &= ~SMC_V2;
+ rc = SMC_CLC_DECL_NOISM2SUPP;
goto out;
}
pclc_v2_ext = smc_get_clc_v2_ext(pclc);
if (!pclc_v2_ext) {
ini->smcd_version &= ~SMC_V2;
+ rc = SMC_CLC_DECL_NOV2EXT;
goto out;
}
pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
- if (!pclc_smcd_v2_ext)
+ if (!pclc_smcd_v2_ext) {
ini->smcd_version &= ~SMC_V2;
+ rc = SMC_CLC_DECL_NOV2DEXT;
+ }
out:
- if (!ini->smcd_version) {
- if (pclc->hdr.typev1 == SMC_TYPE_B ||
- pclc->hdr.typev2 == SMC_TYPE_B)
- return SMC_CLC_DECL_NOSMCDEV;
- if (pclc->hdr.typev1 == SMC_TYPE_D ||
- pclc->hdr.typev2 == SMC_TYPE_D)
- return SMC_CLC_DECL_NOSMCDDEV;
- return SMC_CLC_DECL_NOSMCRDEV;
- }
+ if (!ini->smcd_version)
+ return rc;
return 0;
}
@@ -1428,10 +1427,7 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
/* Create send and receive buffers */
rc = smc_buf_create(new_smc, true);
if (rc) {
- if (ini->first_contact_local)
- smc_lgr_cleanup_early(&new_smc->conn);
- else
- smc_conn_free(&new_smc->conn);
+ smc_conn_abort(new_smc, ini->first_contact_local);
return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
SMC_CLC_DECL_MEM;
}
@@ -1474,6 +1470,12 @@ static void smc_check_ism_v2_match(struct smc_init_info *ini,
}
}
+static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini)
+{
+ if (!ini->rc)
+ ini->rc = rc;
+}
+
static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
struct smc_clc_msg_proposal *pclc,
struct smc_init_info *ini)
@@ -1484,7 +1486,7 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
unsigned int matches = 0;
u8 smcd_version;
u8 *eid = NULL;
- int i;
+ int i, rc;
if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
goto not_found;
@@ -1493,8 +1495,10 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
smc_v2_ext = smc_get_clc_v2_ext(pclc);
smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
if (!smcd_v2_ext ||
- !smc_v2_ext->hdr.flag.seid) /* no system EID support for SMCD */
+ !smc_v2_ext->hdr.flag.seid) { /* no system EID support for SMCD */
+ smc_find_ism_store_rc(SMC_CLC_DECL_NOSEID, ini);
goto not_found;
+ }
mutex_lock(&smcd_dev_list.mutex);
if (pclc_smcd->ism.chid)
@@ -1526,9 +1530,12 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
ini->smcd_version = SMC_V2;
ini->is_smcd = true;
ini->ism_selected = i;
- if (smc_listen_ism_init(new_smc, ini))
+ rc = smc_listen_ism_init(new_smc, ini);
+ if (rc) {
+ smc_find_ism_store_rc(rc, ini);
/* try next active ISM device */
continue;
+ }
return; /* matching and usable V2 ISM device found */
}
/* no V2 ISM device could be initialized */
@@ -1545,19 +1552,23 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
struct smc_init_info *ini)
{
struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
+ int rc = 0;
/* check if ISM V1 is available */
if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
goto not_found;
ini->is_smcd = true; /* prepare ISM check */
ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid);
- if (smc_find_ism_device(new_smc, ini))
+ rc = smc_find_ism_device(new_smc, ini);
+ if (rc)
goto not_found;
ini->ism_selected = 0;
- if (!smc_listen_ism_init(new_smc, ini))
+ rc = smc_listen_ism_init(new_smc, ini);
+ if (!rc)
return; /* V1 ISM device found */
not_found:
+ smc_find_ism_store_rc(rc, ini);
ini->ism_dev[0] = NULL;
ini->is_smcd = false;
}
@@ -1614,16 +1625,16 @@ static int smc_listen_find_device(struct smc_sock *new_smc,
return 0;
if (!(ini->smcd_version & SMC_V1))
- return SMC_CLC_DECL_NOSMCDEV;
+ return ini->rc ?: SMC_CLC_DECL_NOSMCD2DEV;
/* check for matching IP prefix and subnet length */
rc = smc_listen_prfx_check(new_smc, pclc);
if (rc)
- return rc;
+ return ini->rc ?: rc;
/* get vlan id from IP device */
if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
- return SMC_CLC_DECL_GETVLANERR;
+ return ini->rc ?: SMC_CLC_DECL_GETVLANERR;
/* check for ISM device matching V1 proposed device */
smc_find_ism_v1_device_serv(new_smc, pclc, ini);
@@ -1631,10 +1642,14 @@ static int smc_listen_find_device(struct smc_sock *new_smc,
return 0;
if (pclc->hdr.typev1 == SMC_TYPE_D)
- return SMC_CLC_DECL_NOSMCDDEV; /* skip RDMA and decline */
+ /* skip RDMA and decline */
+ return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;
/* check if RDMA is available */
- return smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
+ rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
+ smc_find_ism_store_rc(rc, ini);
+
+ return (!rc) ? 0 : ini->rc;
}
/* listen worker: finish RDMA setup */
@@ -1667,7 +1682,7 @@ static void smc_listen_work(struct work_struct *work)
{
struct smc_sock *new_smc = container_of(work, struct smc_sock,
smc_listen_work);
- u8 version = smc_ism_v2_capable ? SMC_V2 : SMC_V1;
+ u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
struct socket *newclcsock = new_smc->clcsock;
struct smc_clc_msg_accept_confirm *cclc;
struct smc_clc_msg_proposal_area *buf;
@@ -2481,10 +2496,14 @@ static int __init smc_init(void)
smc_ism_init();
smc_clc_init();
- rc = smc_pnet_init();
+ rc = smc_nl_init();
if (rc)
goto out_pernet_subsys;
+ rc = smc_pnet_init();
+ if (rc)
+ goto out_nl;
+
rc = -ENOMEM;
smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
if (!smc_hs_wq)
@@ -2555,6 +2574,8 @@ out_alloc_hs_wq:
destroy_workqueue(smc_hs_wq);
out_pnet:
smc_pnet_exit();
+out_nl:
+ smc_nl_exit();
out_pernet_subsys:
unregister_pernet_subsys(&smc_net_ops);
@@ -2572,6 +2593,7 @@ static void __exit smc_exit(void)
proto_unregister(&smc_proto6);
proto_unregister(&smc_proto);
smc_pnet_exit();
+ smc_nl_exit();
unregister_pernet_subsys(&smc_net_ops);
rcu_barrier();
}
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index b1ce6ccbfaec..f23f558054a7 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -389,9 +389,9 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
* Context:
* - tasklet context
*/
-static void smcd_cdc_rx_tsklet(unsigned long data)
+static void smcd_cdc_rx_tsklet(struct tasklet_struct *t)
{
- struct smc_connection *conn = (struct smc_connection *)data;
+ struct smc_connection *conn = from_tasklet(conn, t, rx_tsklet);
struct smcd_cdc_msg *data_cdc;
struct smcd_cdc_msg cdc;
struct smc_sock *smc;
@@ -411,7 +411,7 @@ static void smcd_cdc_rx_tsklet(unsigned long data)
*/
void smcd_cdc_rx_init(struct smc_connection *conn)
{
- tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn);
+ tasklet_setup(&conn->rx_tsklet, smcd_cdc_rx_tsklet);
}
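
This is the standard tasklet_setup()/from_tasklet() conversion: the callback now receives the tasklet itself and recovers its container through a container_of() wrapper instead of an opaque unsigned long cookie. A self-contained sketch of the pattern, with hypothetical demo_* names:

#include <linux/interrupt.h>

struct demo_ctx {			/* hypothetical container */
	struct tasklet_struct tsklet;
	int pending;
};

static void demo_tasklet_fn(struct tasklet_struct *t)
{
	struct demo_ctx *ctx = from_tasklet(ctx, t, tsklet);

	ctx->pending = 0;		/* act on the recovered container */
}

static void demo_init(struct demo_ctx *ctx)
{
	ctx->pending = 1;
	tasklet_setup(&ctx->tsklet, demo_tasklet_fn);
}
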
/***************************** init, exit, misc ******************************/
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 696d89c2dce4..e286dafd6e88 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -772,6 +772,11 @@ int smc_clc_send_accept(struct smc_sock *new_smc, bool srv_first_contact,
return len > 0 ? 0 : len;
}
+void smc_clc_get_hostname(u8 **host)
+{
+ *host = &smc_hostname[0];
+}
+
void __init smc_clc_init(void)
{
struct new_utsname *u;
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index c579d1d5995a..32d37f7b70f2 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -37,6 +37,11 @@
#define SMC_CLC_DECL_NOSMCDEV 0x03030000 /* no SMC device found (R or D) */
#define SMC_CLC_DECL_NOSMCDDEV 0x03030001 /* no SMC-D device found */
#define SMC_CLC_DECL_NOSMCRDEV 0x03030002 /* no SMC-R device found */
+#define SMC_CLC_DECL_NOISM2SUPP 0x03030003 /* hardware has no ISMv2 support */
+#define SMC_CLC_DECL_NOV2EXT 0x03030004 /* peer sent no clc v2 extension */
+#define SMC_CLC_DECL_NOV2DEXT 0x03030005 /* peer sent no clc SMC-Dv2 ext. */
+#define SMC_CLC_DECL_NOSEID 0x03030006 /* peer sent no SEID */
+#define SMC_CLC_DECL_NOSMCD2DEV 0x03030007 /* no SMC-Dv2 device found */
#define SMC_CLC_DECL_MODEUNSUPP 0x03040000 /* smc modes do not match (R or D)*/
#define SMC_CLC_DECL_RMBE_EC 0x03050000 /* peer has eyecatcher in RMBE */
#define SMC_CLC_DECL_OPTUNSUPP 0x03060000 /* fastopen sockopt not supported */
@@ -329,5 +334,6 @@ int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact,
int smc_clc_send_accept(struct smc_sock *smc, bool srv_first_contact,
u8 version);
void smc_clc_init(void) __init;
+void smc_clc_get_hostname(u8 **host);
#endif
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index af96f813c075..59342b519e34 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -16,6 +16,8 @@
#include <linux/wait.h>
#include <linux/reboot.h>
#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/smc.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
@@ -30,12 +32,13 @@
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
+#include "smc_netlink.h"
#define SMC_LGR_NUM_INCR 256
#define SMC_LGR_FREE_DELAY_SERV (600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
-static struct smc_lgr_list smc_lgr_list = { /* established link groups */
+struct smc_lgr_list smc_lgr_list = { /* established link groups */
.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
.list = LIST_HEAD_INIT(smc_lgr_list.list),
.num = 0,
@@ -63,6 +66,16 @@ static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
return &smc_lgr_list.list;
}
+static void smc_ibdev_cnt_inc(struct smc_link *lnk)
+{
+ atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
+}
+
+static void smc_ibdev_cnt_dec(struct smc_link *lnk)
+{
+ atomic_dec(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
+}
+
static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
/* client link group creation always follows the server link group
@@ -139,6 +152,7 @@ static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
}
if (!conn->lnk)
return SMC_CLC_DECL_NOACTLINK;
+ atomic_inc(&conn->lnk->conn_cnt);
return 0;
}
@@ -180,6 +194,8 @@ static void __smc_lgr_unregister_conn(struct smc_connection *conn)
struct smc_link_group *lgr = conn->lgr;
rb_erase(&conn->alert_node, &lgr->conns_all);
+ if (conn->lnk)
+ atomic_dec(&conn->lnk->conn_cnt);
lgr->conns_num--;
conn->alert_token_local = 0;
sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
@@ -201,6 +217,361 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
conn->lgr = NULL;
}
+int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ char hostname[SMC_MAX_HOSTNAME_LEN + 1];
+ char smc_seid[SMC_MAX_EID_LEN + 1];
+ struct smcd_dev *smcd_dev;
+ struct nlattr *attrs;
+ u8 *seid = NULL;
+ u8 *host = NULL;
+ void *nlh;
+
+ nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &smc_gen_nl_family, NLM_F_MULTI,
+ SMC_NETLINK_GET_SYS_INFO);
+ if (!nlh)
+ goto errmsg;
+ if (cb_ctx->pos[0])
+ goto errout;
+ attrs = nla_nest_start(skb, SMC_GEN_SYS_INFO);
+ if (!attrs)
+ goto errout;
+ if (nla_put_u8(skb, SMC_NLA_SYS_VER, SMC_V2))
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_SYS_REL, SMC_RELEASE))
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_SYS_IS_ISM_V2, smc_ism_is_v2_capable()))
+ goto errattr;
+ smc_clc_get_hostname(&host);
+ if (host) {
+ snprintf(hostname, sizeof(hostname), "%s", host);
+ if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname))
+ goto errattr;
+ }
+ mutex_lock(&smcd_dev_list.mutex);
+ smcd_dev = list_first_entry_or_null(&smcd_dev_list.list,
+ struct smcd_dev, list);
+ if (smcd_dev)
+ smc_ism_get_system_eid(smcd_dev, &seid);
+ mutex_unlock(&smcd_dev_list.mutex);
+ if (seid && smc_ism_is_v2_capable()) {
+ snprintf(smc_seid, sizeof(smc_seid), "%s", seid);
+ if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid))
+ goto errattr;
+ }
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, nlh);
+ cb_ctx->pos[0] = 1;
+ return skb->len;
+
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ genlmsg_cancel(skb, nlh);
+errmsg:
+ return skb->len;
+}
+
+static int smc_nl_fill_lgr(struct smc_link_group *lgr,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ char smc_target[SMC_MAX_PNETID_LEN + 1];
+ struct nlattr *attrs;
+
+ attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCR);
+ if (!attrs)
+ goto errout;
+
+ if (nla_put_u32(skb, SMC_NLA_LGR_R_ID, *((u32 *)&lgr->id)))
+ goto errattr;
+ if (nla_put_u32(skb, SMC_NLA_LGR_R_CONNS_NUM, lgr->conns_num))
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_LGR_R_ROLE, lgr->role))
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_LGR_R_TYPE, lgr->type))
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
+ goto errattr;
+ snprintf(smc_target, sizeof(smc_target), "%s", lgr->pnet_id);
+ if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
+ goto errattr;
+
+ nla_nest_end(skb, attrs);
+ return 0;
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ return -EMSGSIZE;
+}
+
+static int smc_nl_fill_lgr_link(struct smc_link_group *lgr,
+ struct smc_link *link,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ char smc_ibname[IB_DEVICE_NAME_MAX + 1];
+ u8 smc_gid_target[41];
+ struct nlattr *attrs;
+ u32 link_uid = 0;
+ void *nlh;
+
+ nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &smc_gen_nl_family, NLM_F_MULTI,
+ SMC_NETLINK_GET_LINK_SMCR);
+ if (!nlh)
+ goto errmsg;
+
+ attrs = nla_nest_start(skb, SMC_GEN_LINK_SMCR);
+ if (!attrs)
+ goto errout;
+
+ if (nla_put_u8(skb, SMC_NLA_LINK_ID, link->link_id))
+ goto errattr;
+ if (nla_put_u32(skb, SMC_NLA_LINK_STATE, link->state))
+ goto errattr;
+ if (nla_put_u32(skb, SMC_NLA_LINK_CONN_CNT,
+ atomic_read(&link->conn_cnt)))
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_LINK_IB_PORT, link->ibport))
+ goto errattr;
+ if (nla_put_u32(skb, SMC_NLA_LINK_NET_DEV, link->ndev_ifidx))
+ goto errattr;
+ snprintf(smc_ibname, sizeof(smc_ibname), "%s", link->ibname);
+ if (nla_put_string(skb, SMC_NLA_LINK_IB_DEV, smc_ibname))
+ goto errattr;
+ memcpy(&link_uid, link->link_uid, sizeof(link_uid));
+ if (nla_put_u32(skb, SMC_NLA_LINK_UID, link_uid))
+ goto errattr;
+ memcpy(&link_uid, link->peer_link_uid, sizeof(link_uid));
+ if (nla_put_u32(skb, SMC_NLA_LINK_PEER_UID, link_uid))
+ goto errattr;
+ memset(smc_gid_target, 0, sizeof(smc_gid_target));
+ smc_gid_be16_convert(smc_gid_target, link->gid);
+ if (nla_put_string(skb, SMC_NLA_LINK_GID, smc_gid_target))
+ goto errattr;
+ memset(smc_gid_target, 0, sizeof(smc_gid_target));
+ smc_gid_be16_convert(smc_gid_target, link->peer_gid);
+ if (nla_put_string(skb, SMC_NLA_LINK_PEER_GID, smc_gid_target))
+ goto errattr;
+
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, nlh);
+ return 0;
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ genlmsg_cancel(skb, nlh);
+errmsg:
+ return -EMSGSIZE;
+}
+
+static int smc_nl_handle_lgr(struct smc_link_group *lgr,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ bool list_links)
+{
+ void *nlh;
+ int i;
+
+ nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &smc_gen_nl_family, NLM_F_MULTI,
+ SMC_NETLINK_GET_LGR_SMCR);
+ if (!nlh)
+ goto errmsg;
+ if (smc_nl_fill_lgr(lgr, skb, cb))
+ goto errout;
+
+ genlmsg_end(skb, nlh);
+ if (!list_links)
+ goto out;
+ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+ if (!smc_link_usable(&lgr->lnk[i]))
+ continue;
+ if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb))
+ goto errout;
+ }
+out:
+ return 0;
+
+errout:
+ genlmsg_cancel(skb, nlh);
+errmsg:
+ return -EMSGSIZE;
+}
+
+static void smc_nl_fill_lgr_list(struct smc_lgr_list *smc_lgr,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ bool list_links)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ struct smc_link_group *lgr;
+ int snum = cb_ctx->pos[0];
+ int num = 0;
+
+ spin_lock_bh(&smc_lgr->lock);
+ list_for_each_entry(lgr, &smc_lgr->list, list) {
+ if (num < snum)
+ goto next;
+ if (smc_nl_handle_lgr(lgr, skb, cb, list_links))
+ goto errout;
+next:
+ num++;
+ }
+errout:
+ spin_unlock_bh(&smc_lgr->lock);
+ cb_ctx->pos[0] = num;
+}
+
+static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ char smc_host[SMC_MAX_HOSTNAME_LEN + 1];
+ char smc_pnet[SMC_MAX_PNETID_LEN + 1];
+ char smc_eid[SMC_MAX_EID_LEN + 1];
+ struct nlattr *v2_attrs;
+ struct nlattr *attrs;
+ void *nlh;
+
+ nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &smc_gen_nl_family, NLM_F_MULTI,
+ SMC_NETLINK_GET_LGR_SMCD);
+ if (!nlh)
+ goto errmsg;
+
+ attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCD);
+ if (!attrs)
+ goto errout;
+
+ if (nla_put_u32(skb, SMC_NLA_LGR_D_ID, *((u32 *)&lgr->id)))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID, lgr->smcd->local_gid,
+ SMC_NLA_LGR_D_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_GID, lgr->peer_gid,
+ SMC_NLA_LGR_D_PAD))
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_LGR_D_VLAN_ID, lgr->vlan_id))
+ goto errattr;
+ if (nla_put_u32(skb, SMC_NLA_LGR_D_CONNS_NUM, lgr->conns_num))
+ goto errattr;
+ if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd)))
+ goto errattr;
+ snprintf(smc_pnet, sizeof(smc_pnet), "%s", lgr->smcd->pnetid);
+ if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
+ goto errattr;
+
+ v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_V2);
+ if (!v2_attrs)
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version))
+ goto errv2attr;
+ if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release))
+ goto errv2attr;
+ if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
+ goto errv2attr;
+ snprintf(smc_host, sizeof(smc_host), "%s", lgr->peer_hostname);
+ if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
+ goto errv2attr;
+ snprintf(smc_eid, sizeof(smc_eid), "%s", lgr->negotiated_eid);
+ if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
+ goto errv2attr;
+
+ nla_nest_end(skb, v2_attrs);
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, nlh);
+ return 0;
+
+errv2attr:
+ nla_nest_cancel(skb, v2_attrs);
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ genlmsg_cancel(skb, nlh);
+errmsg:
+ return -EMSGSIZE;
+}
+
+static int smc_nl_handle_smcd_lgr(struct smcd_dev *dev,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ struct smc_link_group *lgr;
+ int snum = cb_ctx->pos[1];
+ int rc = 0, num = 0;
+
+ spin_lock_bh(&dev->lgr_lock);
+ list_for_each_entry(lgr, &dev->lgr_list, list) {
+ if (!lgr->is_smcd)
+ continue;
+ if (num < snum)
+ goto next;
+ rc = smc_nl_fill_smcd_lgr(lgr, skb, cb);
+ if (rc)
+ goto errout;
+next:
+ num++;
+ }
+errout:
+ spin_unlock_bh(&dev->lgr_lock);
+ cb_ctx->pos[1] = num;
+ return rc;
+}
+
+static int smc_nl_fill_smcd_dev(struct smcd_dev_list *dev_list,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ struct smcd_dev *smcd_dev;
+ int snum = cb_ctx->pos[0];
+ int rc = 0, num = 0;
+
+ mutex_lock(&dev_list->mutex);
+ list_for_each_entry(smcd_dev, &dev_list->list, list) {
+ if (list_empty(&smcd_dev->lgr_list))
+ continue;
+ if (num < snum)
+ goto next;
+ rc = smc_nl_handle_smcd_lgr(smcd_dev, skb, cb);
+ if (rc)
+ goto errout;
+next:
+ num++;
+ }
+errout:
+ mutex_unlock(&dev_list->mutex);
+ cb_ctx->pos[0] = num;
+ return rc;
+}
+
+int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ bool list_links = false;
+
+ smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
+ return skb->len;
+}
+
+int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ bool list_links = true;
+
+ smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
+ return skb->len;
+}
+
+int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ smc_nl_fill_smcd_dev(&smcd_dev_list, skb, cb);
+ return skb->len;
+}
+
void smc_lgr_cleanup_early(struct smc_connection *conn)
{
struct smc_link_group *lgr = conn->lgr;
@@ -300,6 +671,15 @@ static u8 smcr_next_link_id(struct smc_link_group *lgr)
return link_id;
}
+static void smcr_copy_dev_info_to_link(struct smc_link *link)
+{
+ struct smc_ib_device *smcibdev = link->smcibdev;
+
+ snprintf(link->ibname, sizeof(link->ibname), "%s",
+ smcibdev->ibdev->name);
+ link->ndev_ifidx = smcibdev->ndev_ifidx[link->ibport - 1];
+}
+
int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
u8 link_idx, struct smc_init_info *ini)
{
@@ -313,7 +693,10 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
lnk->link_idx = link_idx;
lnk->smcibdev = ini->ib_dev;
lnk->ibport = ini->ib_port;
+ smc_ibdev_cnt_inc(lnk);
+ smcr_copy_dev_info_to_link(lnk);
lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
+ atomic_set(&lnk->conn_cnt, 0);
smc_llc_link_set_uid(lnk);
INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
if (!ini->ib_dev->initialized) {
@@ -355,6 +738,7 @@ free_link_mem:
clear_llc_lnk:
smc_llc_link_clear(lnk, false);
out:
+ smc_ibdev_cnt_dec(lnk);
put_device(&ini->ib_dev->ibdev->dev);
memset(lnk, 0, sizeof(struct smc_link));
lnk->state = SMC_LNK_UNUSED;
@@ -526,6 +910,14 @@ static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
return rc;
}
+static void smc_switch_link_and_count(struct smc_connection *conn,
+ struct smc_link *to_lnk)
+{
+ atomic_dec(&conn->lnk->conn_cnt);
+ conn->lnk = to_lnk;
+ atomic_inc(&conn->lnk->conn_cnt);
+}
+
struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
struct smc_link *from_lnk, bool is_dev_err)
{
@@ -574,7 +966,7 @@ again:
smc->sk.sk_state == SMC_PEERABORTWAIT ||
smc->sk.sk_state == SMC_PROCESSABORT) {
spin_lock_bh(&conn->send_lock);
- conn->lnk = to_lnk;
+ smc_switch_link_and_count(conn, to_lnk);
spin_unlock_bh(&conn->send_lock);
continue;
}
@@ -588,7 +980,7 @@ again:
}
/* avoid race with smcr_tx_sndbuf_nonempty() */
spin_lock_bh(&conn->send_lock);
- conn->lnk = to_lnk;
+ smc_switch_link_and_count(conn, to_lnk);
rc = smc_switch_cursor(smc, pend, wr_buf);
spin_unlock_bh(&conn->send_lock);
sock_put(&smc->sk);
@@ -737,6 +1129,7 @@ void smcr_link_clear(struct smc_link *lnk, bool log)
smc_ib_destroy_queue_pair(lnk);
smc_ib_dealloc_protection_domain(lnk);
smc_wr_free_link_mem(lnk);
+ smc_ibdev_cnt_dec(lnk);
put_device(&lnk->smcibdev->ibdev->dev);
smcibdev = lnk->smcibdev;
memset(lnk, 0, sizeof(struct smc_link));
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index f1e867ce2e63..e8e448771f85 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -13,7 +13,10 @@
#define _SMC_CORE_H
#include <linux/atomic.h>
+#include <linux/smc.h>
+#include <linux/pci.h>
#include <rdma/ib_verbs.h>
+#include <net/genetlink.h>
#include "smc.h"
#include "smc_ib.h"
@@ -124,11 +127,14 @@ struct smc_link {
u8 link_is_asym; /* is link asymmetric? */
struct smc_link_group *lgr; /* parent link group */
struct work_struct link_down_wrk; /* wrk to bring link down */
+ char ibname[IB_DEVICE_NAME_MAX]; /* ib device name */
+ int ndev_ifidx; /* network device ifindex */
enum smc_link_state state; /* state of link */
struct delayed_work llc_testlink_wrk; /* testlink worker */
struct completion llc_testlink_resp; /* wait for rx of testlink */
int llc_testlink_time; /* testlink interval */
+ atomic_t conn_cnt; /* connections on this link */
};
/* For now we just allow one parallel link per link group. The SMC protocol
@@ -301,6 +307,7 @@ struct smc_init_info {
u8 first_contact_peer;
u8 first_contact_local;
unsigned short vlan_id;
+ u32 rc;
/* SMC-R */
struct smc_clc_msg_local *ib_lcl;
struct smc_ib_device *ib_dev;
@@ -362,6 +369,45 @@ static inline bool smc_link_active(struct smc_link *lnk)
return lnk->state == SMC_LNK_ACTIVE;
}
+static inline void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
+{
+ sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
+ be16_to_cpu(((__be16 *)gid_raw)[0]),
+ be16_to_cpu(((__be16 *)gid_raw)[1]),
+ be16_to_cpu(((__be16 *)gid_raw)[2]),
+ be16_to_cpu(((__be16 *)gid_raw)[3]),
+ be16_to_cpu(((__be16 *)gid_raw)[4]),
+ be16_to_cpu(((__be16 *)gid_raw)[5]),
+ be16_to_cpu(((__be16 *)gid_raw)[6]),
+ be16_to_cpu(((__be16 *)gid_raw)[7]));
+}
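
Moving this helper into the header (it previously lived in smc_diag.c, whose copy is removed below) lets the new netlink code share it. A usage sketch — demo_print_gid() is hypothetical; the callers' 41-byte buffers fit the 8 four-digit groups, 7 colons and NUL terminator with a byte to spare:

static void demo_print_gid(u8 *gid_raw)
{
	u8 buf[41];

	smc_gid_be16_convert(buf, gid_raw);
	pr_info("gid: %s\n", buf);
}
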
+
+struct smc_pci_dev {
+ __u32 pci_fid;
+ __u16 pci_pchid;
+ __u16 pci_vendor;
+ __u16 pci_device;
+ __u8 pci_id[SMC_PCI_ID_STR_LEN];
+};
+
+static inline void smc_set_pci_values(struct pci_dev *pci_dev,
+ struct smc_pci_dev *smc_dev)
+{
+ smc_dev->pci_vendor = pci_dev->vendor;
+ smc_dev->pci_device = pci_dev->device;
+ snprintf(smc_dev->pci_id, sizeof(smc_dev->pci_id), "%s",
+ pci_name(pci_dev));
+#if IS_ENABLED(CONFIG_S390)
+ { /* Set s390 specific PCI information */
+ struct zpci_dev *zdev;
+
+ zdev = to_zpci(pci_dev);
+ smc_dev->pci_fid = zdev->fid;
+ smc_dev->pci_pchid = zdev->pchid;
+ }
+#endif
+}
+
struct smc_sock;
struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;
@@ -409,6 +455,10 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
struct smc_link *from_lnk, bool is_dev_err);
void smcr_link_down_cond(struct smc_link *lnk);
void smcr_link_down_cond_sched(struct smc_link *lnk);
+int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb);
+int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);
+int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb);
+int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);
static inline struct smc_link_group *smc_get_lgr(struct smc_link *link)
{
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index f15fca59b4b2..c952986a6aca 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -31,19 +31,6 @@ static struct smc_diag_dump_ctx *smc_dump_context(struct netlink_callback *cb)
return (struct smc_diag_dump_ctx *)cb->ctx;
}
-static void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
-{
- sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
- be16_to_cpu(((__be16 *)gid_raw)[0]),
- be16_to_cpu(((__be16 *)gid_raw)[1]),
- be16_to_cpu(((__be16 *)gid_raw)[2]),
- be16_to_cpu(((__be16 *)gid_raw)[3]),
- be16_to_cpu(((__be16 *)gid_raw)[4]),
- be16_to_cpu(((__be16 *)gid_raw)[5]),
- be16_to_cpu(((__be16 *)gid_raw)[6]),
- be16_to_cpu(((__be16 *)gid_raw)[7]));
-}
-
static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
{
struct smc_sock *smc = smc_sk(sk);
@@ -160,17 +147,17 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
!list_empty(&smc->conn.lgr->list)) {
struct smc_diag_lgrinfo linfo = {
.role = smc->conn.lgr->role,
- .lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
- .lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,
+ .lnk[0].ibport = smc->conn.lnk->ibport,
+ .lnk[0].link_id = smc->conn.lnk->link_id,
};
memcpy(linfo.lnk[0].ibname,
smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
- sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
+ sizeof(smc->conn.lnk->smcibdev->ibdev->name));
smc_gid_be16_convert(linfo.lnk[0].gid,
- smc->conn.lgr->lnk[0].gid);
+ smc->conn.lnk->gid);
smc_gid_be16_convert(linfo.lnk[0].peer_gid,
- smc->conn.lgr->lnk[0].peer_gid);
+ smc->conn.lnk->peer_gid);
if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
goto errout;
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index fc766b537ac7..89ea10675a7d 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -25,6 +25,7 @@
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"
+#include "smc_netlink.h"
#define SMC_MAX_CQE 32766 /* max. # of completion queue elements */
@@ -326,6 +327,161 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
return rc;
}
+static bool smcr_diag_is_dev_critical(struct smc_lgr_list *smc_lgr,
+ struct smc_ib_device *smcibdev)
+{
+ struct smc_link_group *lgr;
+ bool rc = false;
+ int i;
+
+ spin_lock_bh(&smc_lgr->lock);
+ list_for_each_entry(lgr, &smc_lgr->list, list) {
+ if (lgr->is_smcd)
+ continue;
+ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+ if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
+ lgr->lnk[i].smcibdev != smcibdev)
+ continue;
+ if (lgr->type == SMC_LGR_SINGLE ||
+ lgr->type == SMC_LGR_ASYMMETRIC_LOCAL) {
+ rc = true;
+ goto out;
+ }
+ }
+ }
+out:
+ spin_unlock_bh(&smc_lgr->lock);
+ return rc;
+}
+
+static int smc_nl_handle_dev_port(struct sk_buff *skb,
+ struct ib_device *ibdev,
+ struct smc_ib_device *smcibdev,
+ int port)
+{
+ char smc_pnet[SMC_MAX_PNETID_LEN + 1];
+ struct nlattr *port_attrs;
+ unsigned char port_state;
+ int lnk_count = 0;
+
+ port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT + port);
+ if (!port_attrs)
+ goto errout;
+
+ if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR,
+ smcibdev->pnetid_by_user[port]))
+ goto errattr;
+ snprintf(smc_pnet, sizeof(smc_pnet), "%s",
+ (char *)&smcibdev->pnetid[port]);
+ if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
+ goto errattr;
+ if (nla_put_u32(skb, SMC_NLA_DEV_PORT_NETDEV,
+ smcibdev->ndev_ifidx[port]))
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_DEV_PORT_VALID, 1))
+ goto errattr;
+ port_state = smc_ib_port_active(smcibdev, port + 1);
+ if (nla_put_u8(skb, SMC_NLA_DEV_PORT_STATE, port_state))
+ goto errattr;
+ lnk_count = atomic_read(&smcibdev->lnk_cnt_by_port[port]);
+ if (nla_put_u32(skb, SMC_NLA_DEV_PORT_LNK_CNT, lnk_count))
+ goto errattr;
+ nla_nest_end(skb, port_attrs);
+ return 0;
+errattr:
+ nla_nest_cancel(skb, port_attrs);
+errout:
+ return -EMSGSIZE;
+}
+
+static int smc_nl_handle_smcr_dev(struct smc_ib_device *smcibdev,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ char smc_ibname[IB_DEVICE_NAME_MAX + 1];
+ struct smc_pci_dev smc_pci_dev;
+ struct pci_dev *pci_dev;
+ unsigned char is_crit;
+ struct nlattr *attrs;
+ void *nlh;
+ int i;
+
+ nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &smc_gen_nl_family, NLM_F_MULTI,
+ SMC_NETLINK_GET_DEV_SMCR);
+ if (!nlh)
+ goto errmsg;
+ attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCR);
+ if (!attrs)
+ goto errout;
+ is_crit = smcr_diag_is_dev_critical(&smc_lgr_list, smcibdev);
+ if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, is_crit))
+ goto errattr;
+ memset(&smc_pci_dev, 0, sizeof(smc_pci_dev));
+ pci_dev = to_pci_dev(smcibdev->ibdev->dev.parent);
+ smc_set_pci_values(pci_dev, &smc_pci_dev);
+ if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev.pci_fid))
+ goto errattr;
+ if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev.pci_pchid))
+ goto errattr;
+ if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev.pci_vendor))
+ goto errattr;
+ if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev.pci_device))
+ goto errattr;
+ if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev.pci_id))
+ goto errattr;
+ snprintf(smc_ibname, sizeof(smc_ibname), "%s", smcibdev->ibdev->name);
+ if (nla_put_string(skb, SMC_NLA_DEV_IB_NAME, smc_ibname))
+ goto errattr;
+ for (i = 1; i <= SMC_MAX_PORTS; i++) {
+ if (!rdma_is_port_valid(smcibdev->ibdev, i))
+ continue;
+ if (smc_nl_handle_dev_port(skb, smcibdev->ibdev,
+ smcibdev, i - 1))
+ goto errattr;
+ }
+
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, nlh);
+ return 0;
+
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ genlmsg_cancel(skb, nlh);
+errmsg:
+ return -EMSGSIZE;
+}
+
+static void smc_nl_prep_smcr_dev(struct smc_ib_devices *dev_list,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ struct smc_ib_device *smcibdev;
+ int snum = cb_ctx->pos[0];
+ int num = 0;
+
+ mutex_lock(&dev_list->mutex);
+ list_for_each_entry(smcibdev, &dev_list->list, list) {
+ if (num < snum)
+ goto next;
+ if (smc_nl_handle_smcr_dev(smcibdev, skb, cb))
+ goto errout;
+next:
+ num++;
+ }
+errout:
+ mutex_unlock(&dev_list->mutex);
+ cb_ctx->pos[0] = num;
+}
+
+int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ smc_nl_prep_smcr_dev(&smc_ib_devices, skb, cb);
+ return skb->len;
+}
+
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
struct smc_link *lnk = (struct smc_link *)priv;
@@ -557,6 +713,49 @@ out:
static struct ib_client smc_ib_client;
+static void smc_copy_netdev_ifindex(struct smc_ib_device *smcibdev, int port)
+{
+ struct ib_device *ibdev = smcibdev->ibdev;
+ struct net_device *ndev;
+
+ if (!ibdev->ops.get_netdev)
+ return;
+ ndev = ibdev->ops.get_netdev(ibdev, port + 1);
+ if (ndev) {
+ smcibdev->ndev_ifidx[port] = ndev->ifindex;
+ dev_put(ndev);
+ }
+}
+
+void smc_ib_ndev_change(struct net_device *ndev, unsigned long event)
+{
+ struct smc_ib_device *smcibdev;
+ struct ib_device *libdev;
+ struct net_device *lndev;
+ u8 port_cnt;
+ int i;
+
+ mutex_lock(&smc_ib_devices.mutex);
+ list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
+ port_cnt = smcibdev->ibdev->phys_port_cnt;
+ for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) {
+ libdev = smcibdev->ibdev;
+ if (!libdev->ops.get_netdev)
+ continue;
+ lndev = libdev->ops.get_netdev(libdev, i + 1);
+ if (lndev)
+ dev_put(lndev);
+ if (lndev != ndev)
+ continue;
+ if (event == NETDEV_REGISTER)
+ smcibdev->ndev_ifidx[i] = ndev->ifindex;
+ if (event == NETDEV_UNREGISTER)
+ smcibdev->ndev_ifidx[i] = 0;
+ }
+ }
+ mutex_unlock(&smc_ib_devices.mutex);
+}
+
/* callback function for ib_register_client() */
static int smc_ib_add_dev(struct ib_device *ibdev)
{
@@ -596,6 +795,7 @@ static int smc_ib_add_dev(struct ib_device *ibdev)
if (smc_pnetid_by_dev_port(ibdev->dev.parent, i,
smcibdev->pnetid[i]))
smc_pnetid_by_table_ib(smcibdev, i + 1);
+ smc_copy_netdev_ifindex(smcibdev, i);
pr_warn_ratelimited("smc: ib device %s port %d has pnetid "
"%.16s%s\n",
smcibdev->ibdev->name, i + 1,
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 2ce481187dd0..3085f5180da7 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -30,6 +30,7 @@ struct smc_ib_devices { /* list of smc ib devices definition */
};
extern struct smc_ib_devices smc_ib_devices; /* list of smc ib devices */
+extern struct smc_lgr_list smc_lgr_list; /* list of linkgroups */
struct smc_ib_device { /* ib-device infos for smc */
struct list_head list;
@@ -53,11 +54,15 @@ struct smc_ib_device { /* ib-device infos for smc */
atomic_t lnk_cnt; /* number of links on ibdev */
wait_queue_head_t lnks_deleted; /* wait 4 removal of all links*/
struct mutex mutex; /* protect dev setup+cleanup */
+ atomic_t lnk_cnt_by_port[SMC_MAX_PORTS];
+ /* number of links per port */
+ int ndev_ifidx[SMC_MAX_PORTS]; /* ndev if indexes */
};
struct smc_buf_desc;
struct smc_link;
+void smc_ib_ndev_change(struct net_device *ndev, unsigned long event);
int smc_ib_register_client(void) __init;
void smc_ib_unregister_client(void);
bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport);
@@ -87,4 +92,5 @@ void smc_ib_sync_sg_for_device(struct smc_link *lnk,
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
unsigned short vlan_id, u8 gid[], u8 *sgid_index);
bool smc_ib_is_valid_local_systemid(void);
+int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
#endif
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 6abbdd09a580..524ef64a191a 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -15,13 +15,14 @@
#include "smc_core.h"
#include "smc_ism.h"
#include "smc_pnet.h"
+#include "smc_netlink.h"
struct smcd_dev_list smcd_dev_list = {
.list = LIST_HEAD_INIT(smcd_dev_list.list),
.mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
};
-bool smc_ism_v2_capable;
+static bool smc_ism_v2_capable;
/* Test if an ISM communication is possible - same CPC */
int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
@@ -51,6 +52,12 @@ u16 smc_ism_get_chid(struct smcd_dev *smcd)
return smcd->ops->get_chid(smcd);
}
+/* HW supports ISM V2 and thus System EID is defined */
+bool smc_ism_is_v2_capable(void)
+{
+ return smc_ism_v2_capable;
+}
+
/* Set a connection using this DMBE. */
void smc_ism_set_conn(struct smc_connection *conn)
{
@@ -201,6 +208,96 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
return rc;
}
+static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ char smc_pnet[SMC_MAX_PNETID_LEN + 1];
+ struct smc_pci_dev smc_pci_dev;
+ struct nlattr *port_attrs;
+ struct nlattr *attrs;
+ int use_cnt = 0;
+ void *nlh;
+
+ nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &smc_gen_nl_family, NLM_F_MULTI,
+ SMC_NETLINK_GET_DEV_SMCD);
+ if (!nlh)
+ goto errmsg;
+ attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCD);
+ if (!attrs)
+ goto errout;
+ use_cnt = atomic_read(&smcd->lgr_cnt);
+ if (nla_put_u32(skb, SMC_NLA_DEV_USE_CNT, use_cnt))
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, use_cnt > 0))
+ goto errattr;
+ memset(&smc_pci_dev, 0, sizeof(smc_pci_dev));
+ smc_set_pci_values(to_pci_dev(smcd->dev.parent), &smc_pci_dev);
+ if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev.pci_fid))
+ goto errattr;
+ if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev.pci_pchid))
+ goto errattr;
+ if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev.pci_vendor))
+ goto errattr;
+ if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev.pci_device))
+ goto errattr;
+ if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev.pci_id))
+ goto errattr;
+
+ port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT);
+ if (!port_attrs)
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR, smcd->pnetid_by_user))
+ goto errportattr;
+ snprintf(smc_pnet, sizeof(smc_pnet), "%s", smcd->pnetid);
+ if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
+ goto errportattr;
+
+ nla_nest_end(skb, port_attrs);
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, nlh);
+ return 0;
+
+errportattr:
+ nla_nest_cancel(skb, port_attrs);
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ nlmsg_cancel(skb, nlh);
+errmsg:
+ return -EMSGSIZE;
+}
+
+static void smc_nl_prep_smcd_dev(struct smcd_dev_list *dev_list,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ int snum = cb_ctx->pos[0];
+ struct smcd_dev *smcd;
+ int num = 0;
+
+ mutex_lock(&dev_list->mutex);
+ list_for_each_entry(smcd, &dev_list->list, list) {
+ if (num < snum)
+ goto next;
+ if (smc_nl_handle_smcd_dev(smcd, skb, cb))
+ goto errout;
+next:
+ num++;
+ }
+errout:
+ mutex_unlock(&dev_list->mutex);
+ cb_ctx->pos[0] = num;
+}
+
+int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ smc_nl_prep_smcd_dev(&smcd_dev_list, skb, cb);
+ return skb->len;
+}
+
struct smc_ism_event_work {
struct work_struct work;
struct smcd_dev *smcd;
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index 8048e09ddcf8..113efc7352ed 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -10,6 +10,7 @@
#define SMCD_ISM_H
#include <linux/uio.h>
+#include <linux/types.h>
#include <linux/mutex.h>
#include "smc.h"
@@ -20,9 +21,6 @@ struct smcd_dev_list { /* List of SMCD devices */
};
extern struct smcd_dev_list smcd_dev_list; /* list of smcd devices */
-extern bool smc_ism_v2_capable; /* HW supports ISM V2 and thus
- * System EID is defined
- */
struct smc_ism_vlanid { /* VLAN id set on ISM device */
struct list_head list;
@@ -52,5 +50,7 @@ int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos,
int smc_ism_signal_shutdown(struct smc_link_group *lgr);
void smc_ism_get_system_eid(struct smcd_dev *dev, u8 **eid);
u16 smc_ism_get_chid(struct smcd_dev *dev);
+bool smc_ism_is_v2_capable(void);
void smc_ism_init(void);
+int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
#endif
diff --git a/net/smc/smc_netlink.c b/net/smc/smc_netlink.c
new file mode 100644
index 000000000000..140419a19dbf
--- /dev/null
+++ b/net/smc/smc_netlink.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Generic netlink support functions to interact with SMC module
+ *
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s): Guvenc Gulce <guvenc@linux.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/mutex.h>
+#include <linux/if.h>
+#include <linux/smc.h>
+
+#include "smc_core.h"
+#include "smc_ism.h"
+#include "smc_ib.h"
+#include "smc_netlink.h"
+
+#define SMC_CMD_MAX_ATTR 1
+
+/* SMC_GENL generic netlink operation definition */
+static const struct genl_ops smc_gen_nl_ops[] = {
+ {
+ .cmd = SMC_NETLINK_GET_SYS_INFO,
+ /* can be retrieved by unprivileged users */
+ .dumpit = smc_nl_get_sys_info,
+ },
+ {
+ .cmd = SMC_NETLINK_GET_LGR_SMCR,
+ /* can be retrieved by unprivileged users */
+ .dumpit = smcr_nl_get_lgr,
+ },
+ {
+ .cmd = SMC_NETLINK_GET_LINK_SMCR,
+ /* can be retrieved by unprivileged users */
+ .dumpit = smcr_nl_get_link,
+ },
+ {
+ .cmd = SMC_NETLINK_GET_LGR_SMCD,
+ /* can be retrieved by unprivileged users */
+ .dumpit = smcd_nl_get_lgr,
+ },
+ {
+ .cmd = SMC_NETLINK_GET_DEV_SMCD,
+ /* can be retrieved by unprivileged users */
+ .dumpit = smcd_nl_get_device,
+ },
+ {
+ .cmd = SMC_NETLINK_GET_DEV_SMCR,
+ /* can be retrieved by unprivileged users */
+ .dumpit = smcr_nl_get_device,
+ },
+};
+
+static const struct nla_policy smc_gen_nl_policy[2] = {
+ [SMC_CMD_MAX_ATTR] = { .type = NLA_REJECT, },
+};
+
+/* SMC_GENL family definition */
+struct genl_family smc_gen_nl_family __ro_after_init = {
+ .hdrsize = 0,
+ .name = SMC_GENL_FAMILY_NAME,
+ .version = SMC_GENL_FAMILY_VERSION,
+ .maxattr = SMC_CMD_MAX_ATTR,
+ .policy = smc_gen_nl_policy,
+ .netnsok = true,
+ .module = THIS_MODULE,
+ .ops = smc_gen_nl_ops,
+ .n_ops = ARRAY_SIZE(smc_gen_nl_ops)
+};
+
+int __init smc_nl_init(void)
+{
+ return genl_register_family(&smc_gen_nl_family);
+}
+
+void smc_nl_exit(void)
+{
+ genl_unregister_family(&smc_gen_nl_family);
+}
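
With the family registered, every SMC command is exposed as an unprivileged dump (plain .dumpit, no GENL_ADMIN_PERM), and the NLA_REJECT policy refuses any attribute a caller might attach. A user-space query could look roughly like this (libnl-genl-3 sketch; the name, version, and command macros come from the uapi <linux/smc.h> this series extends; error handling omitted):

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/smc.h>

int main(void)
{
    struct nl_sock *sk = nl_socket_alloc();
    struct nl_msg *msg = nlmsg_alloc();
    int fam;

    genl_connect(sk);
    fam = genl_ctrl_resolve(sk, SMC_GENL_FAMILY_NAME);
    /* all SMC ops are dumps, so NLM_F_DUMP is mandatory */
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, NLM_F_DUMP,
                SMC_NETLINK_GET_SYS_INFO, SMC_GENL_FAMILY_VERSION);
    nl_send_auto(sk, msg);
    nl_recvmsgs_default(sk);   /* replies land in the default callback */
    nlmsg_free(msg);
    nl_socket_free(sk);
    return 0;
}
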
diff --git a/net/smc/smc_netlink.h b/net/smc/smc_netlink.h
new file mode 100644
index 000000000000..3477265cba6c
--- /dev/null
+++ b/net/smc/smc_netlink.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * SMC Generic netlink operations
+ *
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s): Guvenc Gulce <guvenc@linux.ibm.com>
+ */
+
+#ifndef _SMC_NETLINK_H
+#define _SMC_NETLINK_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+extern struct genl_family smc_gen_nl_family;
+
+struct smc_nl_dmp_ctx {
+ int pos[2];
+};
+
+static inline struct smc_nl_dmp_ctx *smc_nl_dmp_ctx(struct netlink_callback *c)
+{
+ return (struct smc_nl_dmp_ctx *)c->ctx;
+}
+
+int smc_nl_init(void) __init;
+void smc_nl_exit(void);
+
+#endif
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index f3c18b991d35..6f6d33edb135 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -827,9 +827,11 @@ static int smc_pnet_netdev_event(struct notifier_block *this,
case NETDEV_REBOOT:
case NETDEV_UNREGISTER:
smc_pnet_remove_by_ndev(event_dev);
+ smc_ib_ndev_change(event_dev, event);
return NOTIFY_OK;
case NETDEV_REGISTER:
smc_pnet_add_by_ndev(event_dev);
+ smc_ib_ndev_change(event_dev, event);
return NOTIFY_OK;
case NETDEV_UP:
smc_pnet_add_base_pnetid(net, event_dev, ndev_pnetid);
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 1e23cdd41eb1..cbc73a7e4d59 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -131,9 +131,9 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
wake_up(&link->wr_tx_wait);
}
-static void smc_wr_tx_tasklet_fn(unsigned long data)
+static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
{
- struct smc_ib_device *dev = (struct smc_ib_device *)data;
+ struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
int i = 0, rc;
int polled = 0;
@@ -435,9 +435,9 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
}
}
-static void smc_wr_rx_tasklet_fn(unsigned long data)
+static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
{
- struct smc_ib_device *dev = (struct smc_ib_device *)data;
+ struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
int polled = 0;
int rc;
@@ -698,10 +698,8 @@ void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
void smc_wr_add_dev(struct smc_ib_device *smcibdev)
{
- tasklet_init(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn,
- (unsigned long)smcibdev);
- tasklet_init(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn,
- (unsigned long)smcibdev);
+ tasklet_setup(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn);
+ tasklet_setup(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn);
}
int smc_wr_create_link(struct smc_link *lnk)
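
This is the tree-wide tasklet conversion: tasklet_setup() stores the tasklet pointer itself as the callback argument, and from_tasklet() is a container_of() wrapper that recovers the enclosing structure, removing the old (unsigned long) casts. The pattern in isolation, with a hypothetical device struct:

struct my_dev {
    struct tasklet_struct tasklet;
    /* ... */
};

static void my_tasklet_fn(struct tasklet_struct *t)
{
    /* from_tasklet(var, t, member) == container_of(t, typeof(*var), member) */
    struct my_dev *dev = from_tasklet(dev, t, tasklet);

    /* work on dev ... */
}

static void my_dev_init(struct my_dev *dev)
{
    tasklet_setup(&dev->tasklet, my_tasklet_fn);
}
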
diff --git a/net/socket.c b/net/socket.c
index 6e6cccc2104f..9a240b45bdf3 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -52,6 +52,7 @@
* Based upon Swansea University Computer Society NET3.039
*/
+#include <linux/ethtool.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/file.h>
@@ -64,7 +65,6 @@
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/if_bridge.h>
-#include <linux/if_frad.h>
#include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
#include <linux/init.h>
@@ -445,17 +445,15 @@ static int sock_map_fd(struct socket *sock, int flags)
/**
* sock_from_file - Return the &socket bound to @file.
* @file: file
- * @err: pointer to an error code return
*
- * On failure returns %NULL and assigns -ENOTSOCK to @err.
+ * On failure returns %NULL.
*/
-struct socket *sock_from_file(struct file *file, int *err)
+struct socket *sock_from_file(struct file *file)
{
if (file->f_op == &socket_file_ops)
return file->private_data; /* set in sock_map_fd */
- *err = -ENOTSOCK;
return NULL;
}
EXPORT_SYMBOL(sock_from_file);
@@ -484,9 +482,11 @@ struct socket *sockfd_lookup(int fd, int *err)
return NULL;
}
- sock = sock_from_file(file, err);
- if (!sock)
+ sock = sock_from_file(file);
+ if (!sock) {
+ *err = -ENOTSOCK;
fput(file);
+ }
return sock;
}
EXPORT_SYMBOL(sockfd_lookup);
@@ -498,11 +498,12 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
*err = -EBADF;
if (f.file) {
- sock = sock_from_file(f.file, err);
+ sock = sock_from_file(f.file);
if (likely(sock)) {
*fput_needed = f.flags & FDPUT_FPUT;
return sock;
}
+ *err = -ENOTSOCK;
fdput(f);
}
return NULL;
@@ -1027,17 +1028,6 @@ void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
}
EXPORT_SYMBOL(vlan_ioctl_set);
-static DEFINE_MUTEX(dlci_ioctl_mutex);
-static int (*dlci_ioctl_hook) (unsigned int, void __user *);
-
-void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
-{
- mutex_lock(&dlci_ioctl_mutex);
- dlci_ioctl_hook = hook;
- mutex_unlock(&dlci_ioctl_mutex);
-}
-EXPORT_SYMBOL(dlci_ioctl_set);
-
static long sock_do_ioctl(struct net *net, struct socket *sock,
unsigned int cmd, unsigned long arg)
{
@@ -1156,17 +1146,6 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
err = vlan_ioctl_hook(net, argp);
mutex_unlock(&vlan_ioctl_mutex);
break;
- case SIOCADDDLCI:
- case SIOCDELDLCI:
- err = -ENOPKG;
- if (!dlci_ioctl_hook)
- request_module("dlci");
-
- mutex_lock(&dlci_ioctl_mutex);
- if (dlci_ioctl_hook)
- err = dlci_ioctl_hook(cmd, argp);
- mutex_unlock(&dlci_ioctl_mutex);
- break;
case SIOCGSKNS:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -1715,9 +1694,11 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
- sock = sock_from_file(file, &err);
- if (!sock)
+ sock = sock_from_file(file);
+ if (!sock) {
+ err = -ENOTSOCK;
goto out;
+ }
err = -ENFILE;
newsock = sock_alloc();
@@ -1840,9 +1821,11 @@ int __sys_connect_file(struct file *file, struct sockaddr_storage *address,
struct socket *sock;
int err;
- sock = sock_from_file(file, &err);
- if (!sock)
+ sock = sock_from_file(file);
+ if (!sock) {
+ err = -ENOTSOCK;
goto out;
+ }
err =
security_socket_connect(sock, (struct sockaddr *)address, addrlen);
@@ -3427,8 +3410,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCBRDELBR:
case SIOCGIFVLAN:
case SIOCSIFVLAN:
- case SIOCADDDLCI:
- case SIOCDELDLCI:
case SIOCGSKNS:
case SIOCGSTAMP_NEW:
case SIOCGSTAMPNS_NEW:
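
The net effect of the sock_from_file() change is a simpler contract: the helper only answers whether the file is a socket, and each caller now supplies -ENOTSOCK (or its own error code) itself. The calling convention after this patch, as a sketch around a hypothetical caller:

static int do_something_with(struct file *file)
{
    struct socket *sock = sock_from_file(file);  /* no err pointer */

    if (!sock)
        return -ENOTSOCK;  /* callers now own the errno */

    /* ... operate on sock ... */
    return 0;
}
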
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index eadc0ede928c..8241f5a4a01c 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -781,7 +781,8 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
}
/**
- * rpc_mkpipe - make an rpc_pipefs file for kernel<->userspace communication
+ * rpc_mkpipe_dentry - make an rpc_pipefs file for kernel<->userspace
+ * communication
* @parent: dentry of directory to create new "pipe" in
* @name: name of pipe
* @private: private data to associate with the pipe, for the caller's use
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 0f1eaed1bd1b..abe29d1aa23a 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -55,12 +55,11 @@ bool tipc_in_scope(bool legacy_format, u32 domain, u32 addr)
void tipc_set_node_id(struct net *net, u8 *id)
{
struct tipc_net *tn = tipc_net(net);
- u32 *tmp = (u32 *)id;
memcpy(tn->node_id, id, NODE_ID_LEN);
tipc_nodeid2string(tn->node_id_string, id);
- tn->trial_addr = tmp[0] ^ tmp[1] ^ tmp[2] ^ tmp[3];
- pr_info("Own node identity %s, cluster identity %u\n",
+ tn->trial_addr = hash128to32(id);
+ pr_info("Node identity %s, cluster identity %u\n",
tipc_own_id_string(net), tn->net_id);
}
@@ -76,7 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
}
tn->trial_addr = addr;
tn->addr_trial_end = jiffies;
- pr_info("32-bit node address hash set to %x\n", addr);
+ pr_info("Node number set to %u\n", addr);
}
char *tipc_nodeid2string(char *str, u8 *id)
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 31bee0ea7b3e..1a11831bef62 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -3,6 +3,7 @@
*
* Copyright (c) 2000-2006, 2018, Ericsson AB
* Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2020, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 650414110452..a4389ef08a98 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -72,6 +72,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
/**
* tipc_media_find - locates specified media object by name
+ * @name: name to locate
*/
struct tipc_media *tipc_media_find(const char *name)
{
@@ -86,6 +87,7 @@ struct tipc_media *tipc_media_find(const char *name)
/**
* media_find_id - locates specified media object by type identifier
+ * @type: type identifier to locate
*/
static struct tipc_media *media_find_id(u8 type)
{
@@ -100,6 +102,9 @@ static struct tipc_media *media_find_id(u8 type)
/**
* tipc_media_addr_printf - record media address in print buffer
+ * @buf: output buffer
+ * @len: output buffer size remaining
+ * @a: input media address
*/
int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
{
@@ -127,7 +132,7 @@ int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
* @name: ptr to bearer name string
* @name_parts: ptr to area for bearer name components (or NULL if not needed)
*
- * Returns 1 if bearer name is valid, otherwise 0.
+ * Return: 1 if bearer name is valid, otherwise 0.
*/
static int bearer_name_validate(const char *name,
struct tipc_bearer_names *name_parts)
@@ -139,10 +144,7 @@ static int bearer_name_validate(const char *name,
u32 if_len;
/* copy bearer name & ensure length is OK */
- name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
- /* need above in case non-Posix strncpy() doesn't pad with nulls */
- strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
- if (name_copy[TIPC_MAX_BEARER_NAME - 1] != 0)
+ if (strscpy(name_copy, name, TIPC_MAX_BEARER_NAME) < 0)
return 0;
/* ensure all component parts of bearer name are present */
@@ -169,6 +171,8 @@ static int bearer_name_validate(const char *name,
/**
* tipc_bearer_find - locates bearer object with matching bearer name
+ * @net: the applicable net namespace
+ * @name: bearer name to locate
*/
struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name)
{
@@ -231,6 +235,11 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
/**
* tipc_enable_bearer - enable bearer with the given name
+ * @net: the applicable net namespace
+ * @name: bearer name to enable
+ * @disc_domain: bearer domain
+ * @prio: bearer priority
+ * @attr: nlattr array
*/
static int tipc_enable_bearer(struct net *net, const char *name,
u32 disc_domain, u32 prio,
@@ -345,6 +354,8 @@ rejected:
/**
* tipc_reset_bearer - Reset all links established over this bearer
+ * @net: the applicable net namespace
+ * @b: the target bearer
*/
static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
{
@@ -366,7 +377,9 @@ void tipc_bearer_put(struct tipc_bearer *b)
}
/**
- * bearer_disable
+ * bearer_disable - disable this bearer
+ * @net: the applicable net namespace
+ * @b: the bearer to disable
*
* Note: This routine assumes caller holds RTNL lock.
*/
@@ -437,6 +450,7 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
}
/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
+ * @b: the target bearer
*
* Mark L2 bearer as inactive so that incoming buffers are thrown away
*/
@@ -453,6 +467,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
/**
* tipc_l2_send_msg - send a TIPC packet out over an L2 interface
+ * @net: the associated network namespace
* @skb: the packet to be sent
* @b: the bearer through which the packet is to be sent
* @dest: peer destination address
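
The bearer-name check relies on strscpy() semantics: unlike strncpy(), strscpy() always NUL-terminates the destination and returns -E2BIG when the source string does not fit, so the old poison-the-last-byte-and-recheck sequence collapses into a single test:

char buf[TIPC_MAX_BEARER_NAME];

/* returns the number of bytes copied (excluding the NUL),
 * or -E2BIG if name (plus its NUL) does not fit in buf */
if (strscpy(buf, name, sizeof(buf)) < 0)
    return 0;  /* over-long bearer name: invalid */
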
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index bc0023119da2..6bf4550aa1ac 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -93,7 +93,8 @@ struct tipc_bearer;
* @raw2addr: convert from raw addr format to media addr format
* @priority: default link (and bearer) priority
* @tolerance: default time (in ms) before declaring link failure
- * @window: default window (in packets) before declaring link congestion
+ * @min_win: minimum window (in packets) before declaring link congestion
+ * @max_win: maximum window (in packets) before declaring link congestion
* @mtu: max packet size bearer can support for media type not dependent on
* underlying device MTU
* @type_id: TIPC media identifier
@@ -138,12 +139,15 @@ struct tipc_media {
* @pt: packet type for bearer
* @rcu: rcu struct for tipc_bearer
* @priority: default link priority for bearer
- * @window: default window size for bearer
+ * @min_win: minimum window (in packets) before declaring link congestion
+ * @max_win: maximum window (in packets) before declaring link congestion
* @tolerance: default link tolerance for bearer
* @domain: network domain to which links can be established
* @identity: array index of this bearer within TIPC bearer array
- * @link_req: ptr to (optional) structure making periodic link setup requests
+ * @disc: ptr to link setup request
* @net_plane: network plane ('A' through 'H') currently associated with bearer
+ * @up: bearer up flag (bit 0)
+ * @refcnt: tipc_bearer reference counter
*
* Note: media-specific code is responsible for initialization of the fields
* indicated below when a bearer is enabled; TIPC's generic bearer code takes
diff --git a/net/tipc/core.c b/net/tipc/core.c
index c2ff42900b53..5cc1f0307215 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -81,8 +81,6 @@ static int __net_init tipc_init_net(struct net *net)
if (err)
goto out_nametbl;
- INIT_LIST_HEAD(&tn->dist_queue);
-
err = tipc_bcast_init(net);
if (err)
goto out_bclink;
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 1d57a4d3b05e..03de7b213f55 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -3,6 +3,7 @@
*
* Copyright (c) 2005-2006, 2013-2018 Ericsson AB
* Copyright (c) 2005-2007, 2010-2013, Wind River Systems
+ * Copyright (c) 2020, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -132,9 +133,6 @@ struct tipc_net {
spinlock_t nametbl_lock;
struct name_table *nametbl;
- /* Name dist queue */
- struct list_head dist_queue;
-
/* Topology subscription server */
struct tipc_topsrv *topsrv;
atomic_t subscription_count;
@@ -213,6 +211,17 @@ static inline u32 tipc_net_hash_mixes(struct net *net, int tn_rand)
return net_hash_mix(&init_net) ^ net_hash_mix(net) ^ tn_rand;
}
+static inline u32 hash128to32(char *bytes)
+{
+ __be32 *tmp = (__be32 *)bytes;
+ u32 res;
+
+ res = ntohl(tmp[0] ^ tmp[1] ^ tmp[2] ^ tmp[3]);
+ if (likely(res))
+ return res;
+ return ntohl(tmp[0] | tmp[1] | tmp[2] | tmp[3]);
+}
+
#ifdef CONFIG_SYSCTL
int tipc_register_sysctl(void);
void tipc_unregister_sysctl(void);
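
hash128to32() folds the 128-bit node identity into TIPC's 32-bit address space by XOR-ing the four big-endian words; the OR fallback catches identities whose words cancel out, which would otherwise hash to the reserved value 0. A worked example with a hypothetical identity:

/* identity = sixteen 0xaa bytes: all four 32-bit words are equal,
 * so their XOR is 0; the fallback ORs them and returns 0xaaaaaaaa
 * instead of the invalid address 0. */
u8 id[16] = { [0 ... 15] = 0xaa };
u32 addr = hash128to32((char *)id);  /* -> 0xaaaaaaaa, not 0 */
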
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 740ab9ae41a6..f4fca8f7f63f 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption
*
* Copyright (c) 2019, Ericsson AB
@@ -51,7 +51,7 @@
#define TIPC_REKEYING_INTV_DEF (60 * 24) /* default: 1 day */
-/**
+/*
* TIPC Key ids
*/
enum {
@@ -63,7 +63,7 @@ enum {
KEY_MAX = KEY_3,
};
-/**
+/*
* TIPC Crypto statistics
*/
enum {
@@ -90,7 +90,7 @@ int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
/* Key exchange switch, default: on */
int sysctl_tipc_key_exchange_enabled __read_mostly = 1;
-/**
+/*
* struct tipc_key - TIPC keys' status indicator
*
* 7 6 5 4 3 2 1 0
@@ -123,6 +123,8 @@ struct tipc_key {
/**
* struct tipc_tfm - TIPC TFM structure to form a list of TFMs
+ * @tfm: cipher handle/key
+ * @list: linked list of TFMs
*/
struct tipc_tfm {
struct crypto_aead *tfm;
@@ -138,7 +140,7 @@ struct tipc_tfm {
* @salt: the key's SALT value
* @authsize: authentication tag size (max = 16)
* @mode: crypto mode is applied to the key
- * @hint[]: a hint for user key
+ * @hint: a hint for user key
* @rcu: struct rcu_head
* @key: the aead key
* @gen: the key's generation
@@ -166,6 +168,7 @@ struct tipc_aead {
/**
* struct tipc_crypto_stats - TIPC Crypto statistics
+ * @stat: array of crypto statistics
*/
struct tipc_crypto_stats {
unsigned int stat[MAX_STATS];
@@ -194,6 +197,7 @@ struct tipc_crypto_stats {
* @key_master: flag indicates if master key exists
* @legacy_user: flag indicates if a peer joins w/o master key (for bwd comp.)
* @nokey: no key indication
+ * @flags: combined flags field
* @lock: tipc_key lock
*/
struct tipc_crypto {
@@ -324,6 +328,8 @@ do { \
/**
* tipc_aead_key_validate - Validate a AEAD user key
+ * @ukey: pointer to user key data
+ * @info: netlink info pointer
*/
int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
{
@@ -477,6 +483,7 @@ static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
/**
* tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
+ * @aead: the AEAD key pointer
*/
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
{
@@ -714,9 +721,9 @@ static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
* @__dnode: TIPC dest node if "known"
*
* Return:
- * 0 : if the encryption has completed
- * -EINPROGRESS/-EBUSY : if a callback will be performed
- * < 0 : the encryption has failed
+ * * 0 : if the encryption has completed
+ * * -EINPROGRESS/-EBUSY : if a callback will be performed
+ * * < 0 : the encryption has failed
*/
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
struct tipc_bearer *b,
@@ -870,9 +877,9 @@ static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
* @b: TIPC bearer where the message has been received
*
* Return:
- * 0 : if the decryption has completed
- * -EINPROGRESS/-EBUSY : if a callback will be performed
- * < 0 : the decryption has failed
+ * * 0 : if the decryption has completed
+ * * -EINPROGRESS/-EBUSY : if a callback will be performed
+ * * < 0 : the decryption has failed
*/
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
struct sk_buff *skb, struct tipc_bearer *b)
@@ -1001,7 +1008,7 @@ static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
* tipc_ehdr_validate - Validate an encryption message
* @skb: the message buffer
*
- * Returns "true" if this is a valid encryption message, otherwise "false"
+ * Return: "true" if this is a valid encryption message, otherwise "false"
*/
bool tipc_ehdr_validate(struct sk_buff *skb)
{
@@ -1674,12 +1681,12 @@ static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
* Otherwise, the skb is freed!
*
* Return:
- * 0 : the encryption has succeeded (or no encryption)
- * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
- * -ENOKEK : the encryption has failed due to no key
- * -EKEYREVOKED : the encryption has failed due to key revoked
- * -ENOMEM : the encryption has failed due to no memory
- * < 0 : the encryption has failed due to other reasons
+ * * 0 : the encryption has succeeded (or no encryption)
+ * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
+ * * -ENOKEY : the encryption has failed due to no key
+ * * -EKEYREVOKED : the encryption has failed due to key revoked
+ * * -ENOMEM : the encryption has failed due to no memory
+ * * < 0 : the encryption has failed due to other reasons
*/
int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
struct tipc_bearer *b, struct tipc_media_addr *dst,
@@ -1799,12 +1806,12 @@ exit:
* cluster key(s) can be taken for decryption (- recursive).
*
* Return:
- * 0 : the decryption has successfully completed
- * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
- * -ENOKEY : the decryption has failed due to no key
- * -EBADMSG : the decryption has failed due to bad message
- * -ENOMEM : the decryption has failed due to no memory
- * < 0 : the decryption has failed due to other reasons
+ * * 0 : the decryption has successfully completed
+ * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
+ * * -ENOKEY : the decryption has failed due to no key
+ * * -EBADMSG : the decryption has failed due to bad message
+ * * -ENOMEM : the decryption has failed due to no memory
+ * * < 0 : the decryption has failed due to other reasons
*/
int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
struct sk_buff **skb, struct tipc_bearer *b)
diff --git a/net/tipc/crypto.h b/net/tipc/crypto.h
index e71193bd5e36..ce7d4cc8a9e0 100644
--- a/net/tipc/crypto.h
+++ b/net/tipc/crypto.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* net/tipc/crypto.h: Include file for TIPC crypto
*
* Copyright (c) 2019, Ericsson AB
@@ -53,7 +53,7 @@
#define TIPC_AES_GCM_IV_SIZE 12
#define TIPC_AES_GCM_TAG_SIZE 16
-/**
+/*
* TIPC crypto modes:
* - CLUSTER_KEY:
* One single key is used for both TX & RX in all nodes in the cluster.
@@ -69,7 +69,7 @@ enum {
extern int sysctl_tipc_max_tfms __read_mostly;
extern int sysctl_tipc_key_exchange_enabled __read_mostly;
-/**
+/*
* TIPC encryption message format:
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index d4ecacddb40c..5380f605b851 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -74,6 +74,7 @@ struct tipc_discoverer {
/**
* tipc_disc_init_msg - initialize a link setup message
* @net: the applicable net namespace
+ * @skb: buffer containing message
* @mtyp: message type (request or response)
* @b: ptr to bearer issuing message
*/
@@ -341,7 +342,7 @@ exit:
* @dest: destination address for request messages
* @skb: pointer to created frame
*
- * Returns 0 if successful, otherwise -errno.
+ * Return: 0 if successful, otherwise -errno.
*/
int tipc_disc_create(struct net *net, struct tipc_bearer *b,
struct tipc_media_addr *dest, struct sk_buff **skb)
@@ -380,7 +381,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b,
/**
* tipc_disc_delete - destroy object sending periodic link setup requests
- * @d: ptr to link duest structure
+ * @d: ptr to link dest structure
*/
void tipc_disc_delete(struct tipc_discoverer *d)
{
diff --git a/net/tipc/group.c b/net/tipc/group.c
index b1fcd2ad5ecf..3e137d8c9d2f 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -2,6 +2,7 @@
* net/tipc/group.c: TIPC group messaging code
*
* Copyright (c) 2017, Ericsson AB
+ * Copyright (c) 2020, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -359,7 +360,7 @@ struct tipc_nlist *tipc_group_dests(struct tipc_group *grp)
return &grp->dests;
}
-void tipc_group_self(struct tipc_group *grp, struct tipc_name_seq *seq,
+void tipc_group_self(struct tipc_group *grp, struct tipc_service_range *seq,
int *scope)
{
seq->type = grp->type;
diff --git a/net/tipc/group.h b/net/tipc/group.h
index 76b4e5a7b39d..ea4c3be64c78 100644
--- a/net/tipc/group.h
+++ b/net/tipc/group.h
@@ -2,6 +2,7 @@
* net/tipc/group.h: Include file for TIPC group unicast/multicast functions
*
* Copyright (c) 2017, Ericsson AB
+ * Copyright (c) 2020, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,7 +51,7 @@ void tipc_group_delete(struct net *net, struct tipc_group *grp);
void tipc_group_add_member(struct tipc_group *grp, u32 node,
u32 port, u32 instance);
struct tipc_nlist *tipc_group_dests(struct tipc_group *grp);
-void tipc_group_self(struct tipc_group *grp, struct tipc_name_seq *seq,
+void tipc_group_self(struct tipc_group *grp, struct tipc_service_range *seq,
int *scope);
u32 tipc_group_exclude(struct tipc_group *grp);
void tipc_group_filter_msg(struct tipc_group *grp,
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 06b880da2a8e..6ae2140eb4f7 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -120,6 +120,34 @@ struct tipc_stats {
* @reasm_buf: head of partially reassembled inbound message fragments
* @bc_rcvr: marks that this is a broadcast receiver link
* @stats: collects statistics regarding link activity
+ * @session: session to be used by link
+ * @snd_nxt_state: next send seq number
+ * @rcv_nxt_state: next rcv seq number
+ * @in_session: have received ACTIVATE_MSG from peer
+ * @active: link is active
+ * @if_name: associated interface name
+ * @rst_cnt: link reset counter
+ * @drop_point: seq number for failover handling (FIXME)
+ * @failover_reasm_skb: saved failover msg ptr (FIXME)
+ * @failover_deferdq: deferred message queue for failover processing (FIXME)
+ * @transmq: the link's transmit queue
+ * @backlog: link's backlog by priority (importance)
+ * @snd_nxt: next sequence number to be used
+ * @rcv_unacked: # messages read by user, but not yet acked back to peer
+ * @deferdq: deferred receive queue
+ * @window: sliding window size for congestion handling
+ * @min_win: minimal send window to be used by link
+ * @ssthresh: slow start threshold for congestion handling
+ * @max_win: maximal send window to be used by link
+ * @cong_acks: congestion acks for congestion avoidance (FIXME)
+ * @checkpoint: seq number for congestion window size handling
+ * @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message
+ * @last_gap: last gap ack blocks for bcast (FIXME)
+ * @last_ga: ptr to gap ack blocks
+ * @bc_rcvlink: the peer specific link used for broadcast reception
+ * @bc_sndlink: the namespace global link used for broadcast sending
+ * @nack_state: bcast nack state
+ * @bc_peer_is_up: peer has acked the bcast init msg
*/
struct tipc_link {
u32 addr;
@@ -450,7 +478,6 @@ u32 tipc_link_state(struct tipc_link *l)
* @min_win: minimal send window to be used by link
* @max_win: maximal send window to be used by link
* @session: session to be used by link
- * @ownnode: identity of own node
* @peer: node id of peer node
* @peer_caps: bitmap describing peer node capabilities
* @bc_sndlink: the namespace global link used for broadcast sending
@@ -458,8 +485,10 @@ u32 tipc_link_state(struct tipc_link *l)
* @inputq: queue to put messages ready for delivery
* @namedq: queue to put binding table update messages ready for delivery
* @link: return value, pointer to put the created link
+ * @self: local unicast link id
+ * @peer_id: 128-bit ID of peer
*
- * Returns true if link was created, otherwise false
+ * Return: true if link was created, otherwise false
*/
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
int tolerance, char net_plane, u32 mtu, int priority,
@@ -532,8 +561,13 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
* @inputq: queue to put messages ready for delivery
* @namedq: queue to put binding table update messages ready for delivery
* @link: return value, pointer to put the created link
+ * @ownnode: identity of own node
+ * @peer: node id of peer node
+ * @peer_id: 128-bit ID of peer
+ * @peer_caps: bitmap describing peer node capabilities
+ * @bc_sndlink: the namespace global link used for broadcast sending
*
- * Returns true if link was created, otherwise false
+ * Return: true if link was created, otherwise false
*/
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
int mtu, u32 min_win, u32 max_win, u16 peer_caps,
@@ -788,7 +822,7 @@ static void link_profile_stats(struct tipc_link *l)
* tipc_link_too_silent - check if link is "too silent"
* @l: tipc link to be checked
*
- * Returns true if the link 'silent_intv_cnt' is about to reach the
+ * Return: true if the link 'silent_intv_cnt' is about to reach the
* 'abort_limit' value, otherwise false
*/
bool tipc_link_too_silent(struct tipc_link *l)
@@ -990,8 +1024,8 @@ void tipc_link_reset(struct tipc_link *l)
* @xmitq: returned list of packets to be sent by caller
*
* Consumes the buffer chain.
- * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
* Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
+ * Return: 0 on success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
*/
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff_head *xmitq)
@@ -1260,7 +1294,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
pr_warn("Dropping received illegal msg type\n");
kfree_skb(skb);
return true;
- };
+ }
}
/* tipc_link_input - process packet that has passed link protocol check
@@ -2376,7 +2410,7 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
if (!msg_peer_node_is_up(hdr))
return rc;
- /* Open when peer ackowledges our bcast init msg (pkt #1) */
+ /* Open when peer acknowledges our bcast init msg (pkt #1) */
if (msg_ack(hdr))
l->bc_peer_is_up = true;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 32c79c59052b..2aca86021df5 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -58,11 +58,13 @@ static unsigned int align(unsigned int i)
/**
* tipc_buf_acquire - creates a TIPC message buffer
* @size: message size (including TIPC header)
+ * @gfp: memory allocation flags
*
- * Returns a new buffer with data pointers set to the specified size.
+ * Return: a new buffer with data pointers set to the specified size.
*
- * NOTE: Headroom is reserved to allow prepending of a data link header.
- * There may also be unrequested tailroom present at the buffer's end.
+ * NOTE:
+ * Headroom is reserved to allow prepending of a data link header.
+ * There may also be unrequested tailroom present at the buffer's end.
*/
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
@@ -207,8 +209,9 @@ err:
* @m: the data to be appended
* @mss: max allowable size of buffer
* @dlen: size of data to be appended
- * @txq: queue to appand to
- * Returns the number og 1k blocks appended or errno value
+ * @txq: queue to append to
+ *
+ * Return: the number of 1k blocks appended or errno value
*/
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
int mss, struct sk_buff_head *txq)
@@ -312,7 +315,7 @@ bool tipc_msg_validate(struct sk_buff **_skb)
* @pktmax: max size of a fragment incl. the header
* @frags: returned fragment skb list
*
- * Returns 0 if the fragmentation is successful, otherwise: -EINVAL
+ * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
* or -ENOMEM
*/
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
@@ -367,6 +370,7 @@ error:
* tipc_msg_build - create buffer chain containing specified header and data
* @mhdr: Message header, to be prepended to data
* @m: User message
+ * @offset: buffer offset for fragmented messages (FIXME)
* @dsz: Total length of user data
* @pktmax: Max packet size that can be used
* @list: Buffer or chain of buffers to be returned to caller
@@ -374,7 +378,7 @@ error:
* Note that the recursive call we are making here is safe, since it can
* logically go only one further level down.
*
- * Returns message data size or errno: -ENOMEM, -EFAULT
+ * Return: message data size or errno: -ENOMEM, -EFAULT
*/
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
int dsz, int pktmax, struct sk_buff_head *list)
@@ -485,7 +489,7 @@ error:
* @msg: message to be appended
* @max: max allowable size for the bundle buffer
*
- * Returns "true" if bundling has been performed, otherwise "false"
+ * Return: "true" if bundling has been performed, otherwise "false"
*/
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
u32 max)
@@ -580,9 +584,9 @@ bundle:
* @skb: buffer to be extracted from.
* @iskb: extracted inner buffer, to be returned
* @pos: position in outer message of msg to be extracted.
- * Returns position of next msg
+ * Returns position of next msg.
* Consumes outer buffer when last packet extracted
- * Returns true when there is an extracted buffer, otherwise false
+ * Return: true when there is an extracted buffer, otherwise false
*/
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
@@ -626,7 +630,7 @@ none:
* @skb: buffer containing message to be reversed; will be consumed
* @err: error code to be set in message, if any
* Replaces consumed buffer with new one when successful
- * Returns true if success, otherwise false
+ * Return: true if success, otherwise false
*/
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
@@ -698,10 +702,11 @@ bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
/**
* tipc_msg_lookup_dest(): try to find new destination for named message
+ * @net: pointer to associated network namespace
* @skb: the buffer containing the message.
* @err: error code to be used by caller if lookup fails
* Does not consume buffer
- * Returns true if a destination is found, false otherwise
+ * Return: true if a destination is found, false otherwise
*/
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index fe4edce459ad..6cf57c3bfa27 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -50,6 +50,8 @@ struct distr_queue_item {
/**
* publ_to_item - add publication info to a publication message
+ * @p: publication info
+ * @i: location of item in the message
*/
static void publ_to_item(struct distr_item *i, struct publication *p)
{
@@ -62,6 +64,10 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
/**
* named_prepare_buf - allocate & initialize a publication message
+ * @net: the associated network namespace
+ * @type: message type
+ * @size: payload size
+ * @dest: destination node
*
* The buffer returned is of size INT_H_SIZE + payload size
*/
@@ -83,6 +89,8 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
/**
* tipc_named_publish - tell other nodes about a new publication by this node
+ * @net: the associated network namespace
+ * @publ: the new publication
*/
struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
{
@@ -111,6 +119,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
/**
* tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
+ * @net: the associated network namespace
+ * @publ: the withdrawn publication
*/
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
{
@@ -138,9 +148,11 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
/**
* named_distribute - prepare name info for bulk distribution to another node
+ * @net: the associated network namespace
* @list: list of messages (buffers) to be returned from this function
* @dnode: node to be updated
* @pls: linked list of publication items to be packed into buffer chain
+ * @seqno: sequence number for this message
*/
static void named_distribute(struct net *net, struct sk_buff_head *list,
u32 dnode, struct list_head *pls, u16 seqno)
@@ -194,6 +206,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
/**
* tipc_named_node_up - tell specified node about all publications by this node
+ * @net: the associated network namespace
+ * @dnode: destination node
+ * @capabilities: peer node's capabilities
*/
void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
@@ -217,6 +232,9 @@ void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
/**
* tipc_publ_purge - remove publication associated with a failed node
+ * @net: the associated network namespace
+ * @publ: the publication to remove
+ * @addr: failed node's address
*
* Invoked for each publication issued by a newly failed node.
* Removes publication structure from name table & deletes it.
@@ -244,24 +262,6 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
kfree_rcu(p, rcu);
}
-/**
- * tipc_dist_queue_purge - remove deferred updates from a node that went down
- */
-static void tipc_dist_queue_purge(struct net *net, u32 addr)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct distr_queue_item *e, *tmp;
-
- spin_lock_bh(&tn->nametbl_lock);
- list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
- if (e->node != addr)
- continue;
- list_del(&e->next);
- kfree(e);
- }
- spin_unlock_bh(&tn->nametbl_lock);
-}
-
void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
u32 addr, u16 capabilities)
{
@@ -272,7 +272,6 @@ void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
tipc_publ_purge(net, publ, addr);
- tipc_dist_queue_purge(net, addr);
spin_lock_bh(&tn->nametbl_lock);
if (!(capabilities & TIPC_NAMED_BCAST))
nt->rc_dests--;
@@ -282,9 +281,13 @@ void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
/**
* tipc_update_nametbl - try to process a nametable update and notify
* subscribers
+ * @net: the associated network namespace
+ * @i: location of item in the message
+ * @node: node address
+ * @dtype: name distributor message type
*
* tipc_nametbl_lock must be held.
- * Returns the publication item if successful, otherwise NULL.
+ * Return: true if the update was successfully processed, otherwise false.
*/
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
u32 node, u32 dtype)
@@ -366,6 +369,10 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
/**
* tipc_named_rcv - process name table update messages sent by another node
+ * @net: the associated network namespace
+ * @namedq: queue to receive from
+ * @rcv_nxt: store last received seqno here
+ * @open: last bulk msg was received (FIXME)
*/
void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
u16 *rcv_nxt, bool *open)
@@ -393,6 +400,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
/**
* tipc_named_reinit - re-initialize local publications
+ * @net: the associated network namespace
*
* This routine is called whenever TIPC networking is enabled.
* All name table entries published by this node are updated to reflect
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index 092323158f06..e231e6964d61 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -46,7 +46,7 @@
* @type: name sequence type
* @lower: name sequence lower bound
* @upper: name sequence upper bound
- * @ref: publishing port reference
+ * @port: publishing port reference
* @key: publication key
*
* ===> All fields are stored in network byte order. <===
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 2ac33d32edc2..ee5ac40ea2b6 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -3,6 +3,7 @@
*
* Copyright (c) 2000-2006, 2014-2018, Ericsson AB
* Copyright (c) 2004-2008, 2010-2014, Wind River Systems
+ * Copyright (c) 2020, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -103,7 +104,8 @@ RB_DECLARE_CALLBACKS_MAX(static, sr_callbacks,
* range match
* @sr: the service range pointer as a loop cursor
* @sc: the pointer to tipc service which holds the service range rbtree
- * @start, end: the range (end >= start) for matching
+ * @start: beginning of the search range (end >= start) for matching
+ * @end: end of the search range (end >= start) for matching
*/
#define service_range_foreach_match(sr, sc, start, end) \
for (sr = service_range_match_first((sc)->ranges.rb_node, \
@@ -117,7 +119,8 @@ RB_DECLARE_CALLBACKS_MAX(static, sr_callbacks,
/**
* service_range_match_first - find first service range matching a range
* @n: the root node of service range rbtree for searching
- * @start, end: the range (end >= start) for matching
+ * @start: beginning of the search range (end >= start) for matching
+ * @end: end of the search range (end >= start) for matching
*
* Return: the leftmost service range node in the rbtree that overlaps the
* specific range if any. Otherwise, returns NULL.
@@ -166,7 +169,8 @@ static struct service_range *service_range_match_first(struct rb_node *n,
/**
* service_range_match_next - find next service range matching a range
* @n: a node in service range rbtree from which the searching starts
- * @start, end: the range (end >= start) for matching
+ * @start: beginning of the search range (end >= start) for matching
+ * @end: end of the search range (end >= start) for matching
*
* Return: the next service range node to the given node in the rbtree that
* overlaps the specific range if any. Otherwise, returns NULL.
@@ -218,6 +222,13 @@ static int hash(int x)
/**
* tipc_publ_create - create a publication structure
+ * @type: name sequence type
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @scope: publication scope
+ * @node: network address of publishing socket
+ * @port: publishing port
+ * @key: publication key
*/
static struct publication *tipc_publ_create(u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 port,
@@ -245,6 +256,8 @@ static struct publication *tipc_publ_create(u32 type, u32 lower, u32 upper,
/**
* tipc_service_create - create a service structure for the specified 'type'
+ * @type: service type
+ * @hd: name_table services list
*
* Allocates a single range structure and sets it to all 0's.
*/
@@ -361,6 +374,9 @@ err:
/**
* tipc_service_remove_publ - remove a publication from a service
+ * @sr: service_range to remove publication from
+ * @node: target node
+ * @key: target publication key
*/
static struct publication *tipc_service_remove_publ(struct service_range *sr,
u32 node, u32 key)
@@ -377,7 +393,7 @@ static struct publication *tipc_service_remove_publ(struct service_range *sr,
return NULL;
}
-/**
+/*
* Code reused: time_after32() for the same purpose
*/
#define publication_after(pa, pb) time_after32((pa)->id, (pb)->id)
@@ -395,6 +411,8 @@ static int tipc_publ_sort(void *priv, struct list_head *a,
* tipc_service_subscribe - attach a subscription, and optionally
* issue the prescribed number of events if there is any service
* range overlapping with the requested range
+ * @service: the tipc_service to attach the @sub to
+ * @sub: the subscription to attach
*/
static void tipc_service_subscribe(struct tipc_service *service,
struct tipc_subscription *sub)
@@ -403,12 +421,12 @@ static void tipc_service_subscribe(struct tipc_service *service,
struct publication *p, *first, *tmp;
struct list_head publ_list;
struct service_range *sr;
- struct tipc_name_seq ns;
+ struct tipc_service_range r;
u32 filter;
- ns.type = tipc_sub_read(sb, seq.type);
- ns.lower = tipc_sub_read(sb, seq.lower);
- ns.upper = tipc_sub_read(sb, seq.upper);
+ r.type = tipc_sub_read(sb, seq.type);
+ r.lower = tipc_sub_read(sb, seq.lower);
+ r.upper = tipc_sub_read(sb, seq.upper);
filter = tipc_sub_read(sb, filter);
tipc_sub_get(sub);
@@ -418,7 +436,7 @@ static void tipc_service_subscribe(struct tipc_service *service,
return;
INIT_LIST_HEAD(&publ_list);
- service_range_foreach_match(sr, service, ns.lower, ns.upper) {
+ service_range_foreach_match(sr, service, r.lower, r.upper) {
first = NULL;
list_for_each_entry(p, &sr->all_publ, all_publ) {
if (filter & TIPC_SUB_PORTS)
@@ -528,14 +546,16 @@ exit:
/**
* tipc_nametbl_translate - perform service instance to socket translation
- *
- * On entry, 'dnode' is the search domain used during translation.
+ * @net: network namespace
+ * @type: message type
+ * @instance: message instance
+ * @dnode: the search domain used during translation
*
* On exit:
* - if translation is deferred to another node, leave 'dnode' unchanged and
- * return 0
+ * return 0
* - if translation is attempted and succeeds, set 'dnode' to the publishing
- * node and return the published (non-zero) port number
+ * node and return the published (non-zero) port number
* - if translation is attempted and fails, set 'dnode' to 0 and return 0
*
* Note that for legacy users (node configured with Z.C.N address format) the
@@ -756,6 +776,11 @@ exit:
/**
* tipc_nametbl_withdraw - withdraw a service binding
+ * @net: network namespace
+ * @type: service type
+ * @lower: service range lower bound
+ * @upper: service range upper bound
+ * @key: target publication key
*/
int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower,
u32 upper, u32 key)
@@ -791,6 +816,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower,
/**
* tipc_nametbl_subscribe - add a subscription object to the name table
+ * @sub: subscription to add
*/
bool tipc_nametbl_subscribe(struct tipc_subscription *sub)
{
@@ -821,6 +847,7 @@ bool tipc_nametbl_subscribe(struct tipc_subscription *sub)
/**
* tipc_nametbl_unsubscribe - remove a subscription object from name table
+ * @sub: subscription to remove
*/
void tipc_nametbl_unsubscribe(struct tipc_subscription *sub)
{
@@ -870,7 +897,9 @@ int tipc_nametbl_init(struct net *net)
}
/**
- * tipc_service_delete - purge all publications for a service and delete it
+ * tipc_service_delete - purge all publications for a service and delete it
+ * @net: the associated network namespace
+ * @sc: tipc_service to delete
*/
static void tipc_service_delete(struct net *net, struct tipc_service *sc)
{
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 8064e1986e2c..5a82a01369d6 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -60,8 +60,8 @@ struct tipc_group;
* @key: publication key, unique across the cluster
* @id: publication id
* @binding_node: all publications from the same node which bound this one
- * - Remote publications: in node->publ_list
- * Used by node/name distr to withdraw publications when node is lost
+ * - Remote publications: in node->publ_list;
+ * Used by node/name distr to withdraw publications when node is lost
* - Local/node scope publications: in name_table->node_scope list
* - Local/cluster scope publications: in name_table->cluster_scope list
* @binding_sock: all publications from the same socket which bound this one
@@ -92,13 +92,16 @@ struct publication {
/**
* struct name_table - table containing all existing port name publications
- * @seq_hlist: name sequence hash lists
+ * @services: name sequence hash lists
* @node_scope: all local publications with node scope
* - used by name_distr during re-init of name table
* @cluster_scope: all local publications with cluster scope
* - used by name_distr to send bulk updates to new nodes
* - used by name_distr during re-init of name table
+ * @cluster_scope_lock: lock for accessing @cluster_scope
* @local_publ_count: number of publications issued by this node
+ * @rc_dests: destination node counter
+ * @snd_nxt: next sequence number to be used
*/
struct name_table {
struct hlist_head services[TIPC_NAMETBL_SIZE];
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 0bb2323201da..a129f661bee3 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -132,7 +132,7 @@ static void tipc_net_finalize(struct net *net, u32 addr)
tipc_named_reinit(net);
tipc_sk_reinit(net);
tipc_mon_reinit_self(net);
- tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
+ tipc_nametbl_publish(net, TIPC_NODE_STATE, addr, addr,
TIPC_CLUSTER_SCOPE, 0, addr);
}
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 1c7aa51cc2a3..82f154989418 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -118,7 +118,8 @@ static void tipc_tlv_init(struct sk_buff *skb, u16 type)
skb_put(skb, sizeof(struct tlv_desc));
}
-static int tipc_tlv_sprintf(struct sk_buff *skb, const char *fmt, ...)
+static __printf(2, 3) int tipc_tlv_sprintf(struct sk_buff *skb,
+ const char *fmt, ...)
{
int n;
u16 len;
@@ -588,7 +589,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
return 0;
tipc_tlv_sprintf(msg->rep, "\nLink <%s>\n",
- nla_data(link[TIPC_NLA_LINK_NAME]));
+ (char *)nla_data(link[TIPC_NLA_LINK_NAME]));
if (link[TIPC_NLA_LINK_BROADCAST]) {
__fill_bc_link_stat(msg, prop, stats);
@@ -695,7 +696,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
- nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
+ nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME],
TIPC_MAX_LINK_NAME);
return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
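
Two small hardening changes in netlink_compat.c: the __printf(2, 3) annotation turns on compiler format-string checking for tipc_tlv_sprintf() (which is why the nla_data() result, a void *, now needs an explicit (char *) cast to satisfy %s), and nla_strlcpy() becomes nla_strscpy() as part of the tree-wide rename to strscpy-style semantics. Illustrated on a hypothetical helper:

/* __printf(i, j): argument i is the format string, argument j the first
 * variadic argument; enables -Wformat diagnostics on every call site. */
static __printf(2, 3) int my_tlv_sprintf(struct sk_buff *skb,
                                          const char *fmt, ...);

/* with the annotation, this call now warns (%s given a void *):
 *   my_tlv_sprintf(skb, "Link <%s>\n", nla_data(attr));
 * and the cast added above silences it correctly:
 *   my_tlv_sprintf(skb, "Link <%s>\n", (char *)nla_data(attr));
 */
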
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 83978d5dae59..83d9eb830592 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -82,7 +82,7 @@ struct tipc_bclink_entry {
/**
* struct tipc_node - TIPC node structure
* @addr: network address of node
- * @ref: reference counter to node object
+ * @kref: reference counter to node object
* @lock: rwlock governing access to structure
* @net: the applicable net namespace
* @hash: links to adjacent nodes in unsorted hash chain
@@ -90,9 +90,11 @@ struct tipc_bclink_entry {
* @namedq: pointer to name table input queue with name table messages
* @active_links: bearer ids of active links, used as index into links[] array
* @links: array containing references to all links to node
+ * @bc_entry: broadcast link entry
* @action_flags: bit mask of different types of node actions
* @state: connectivity state vs peer node
* @preliminary: a preliminary node or not
+ * @failover_sent: failover sent or not
* @sync_point: sequence number where synch/failover is finished
* @list: links to adjacent nodes in sorted list of cluster's nodes
* @working_links: number of working links to node (both active and standby)
@@ -100,9 +102,16 @@ struct tipc_bclink_entry {
* @capabilities: bitmap, indicating peer node's functional capabilities
* @signature: node instance identifier
* @link_id: local and remote bearer ids of changing link, if any
+ * @peer_id: 128-bit ID of peer
+ * @peer_id_string: ID string of peer
* @publ_list: list of publications
+ * @conn_sks: list of connections (FIXME)
+ * @timer: node's keepalive timer
+ * @keepalive_intv: keepalive interval in milliseconds
* @rcu: rcu struct for tipc_node
* @delete_at: indicates the time for deleting a down node
+ * @peer_net: peer's net namespace
+ * @peer_hash_mix: hash for this peer (FIXME)
* @crypto_rx: RX crypto handler
*/
struct tipc_node {
@@ -267,6 +276,7 @@ char *tipc_node_get_id_str(struct tipc_node *node)
#ifdef CONFIG_TIPC_CRYPTO
/**
* tipc_node_crypto_rx - Retrieve crypto RX handle from node
+ * @__n: target tipc_node
* Note: node ref counter must be held first!
*/
struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
@@ -814,6 +824,9 @@ static void tipc_node_timeout(struct timer_list *t)
/**
* __tipc_node_link_up - handle addition of link
+ * @n: target tipc_node
+ * @bearer_id: id of the bearer
+ * @xmitq: queue for messages to be xmited on
* Node lock must be held by caller
* Link becomes active (alone or shared) or standby, depending on its priority.
*/
@@ -880,6 +893,9 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
/**
* tipc_node_link_up - handle addition of link
+ * @n: target tipc_node
+ * @bearer_id: id of the bearer
+ * @xmitq: queue for messages to be xmited on
*
* Link becomes active (alone or shared) or standby, depending on its priority.
*/
@@ -900,10 +916,11 @@ static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
*
* This function is only called in a very special situation where link
* failover can be already started on peer node but not on this node.
- * This can happen when e.g.
+ * This can happen when e.g.::
+ *
* 1. Both links <1A-2A>, <1B-2B> down
* 2. Link endpoint 2A up, but 1A still down (e.g. due to network
- * disturbance, wrong session, etc.)
+ * disturbance, wrong session, etc.)
* 3. Link <1B-2B> up
* 4. Link endpoint 2A down (e.g. due to link tolerance timeout)
* 5. Node 2 starts failover onto link <1B-2B>
@@ -940,6 +957,10 @@ static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
/**
* __tipc_node_link_down - handle loss of link
+ * @n: target tipc_node
+ * @bearer_id: id of the bearer
+ * @xmitq: queue for messages to be xmited on
+ * @maddr: output media address of the bearer
*/
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
struct sk_buff_head *xmitq,
@@ -1525,11 +1546,13 @@ static void node_lost_contact(struct tipc_node *n,
/**
* tipc_node_get_linkname - get the name of a link
*
+ * @net: the applicable net namespace
* @bearer_id: id of the bearer
* @addr: peer node address
* @linkname: link name output buffer
+ * @len: size of @linkname output buffer
*
- * Returns 0 on success
+ * Return: 0 on success
*/
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
char *linkname, size_t len)
@@ -1638,7 +1661,7 @@ static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
return;
default:
return;
- };
+ }
}
/**
@@ -1648,7 +1671,7 @@ static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
* @dnode: address of destination node
* @selector: a number used for deterministic link selection
* Consumes the buffer chain.
- * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
+ * Return: 0 on success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE or -ENOBUFS
*/
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
u32 dnode, int selector)
@@ -1881,9 +1904,11 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
/**
* tipc_node_check_state - check and if necessary update node state
+ * @n: target tipc_node
* @skb: TIPC packet
* @bearer_id: identity of bearer delivering the packet
- * Returns true if state and msg are ok, otherwise false
+ * @xmitq: queue for messages to be xmited on
+ * Return: true if state and msg are ok, otherwise false
*/
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
int bearer_id, struct sk_buff_head *xmitq)
@@ -2199,6 +2224,9 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
struct tipc_node *peer, *temp_node;
+ u8 node_id[NODE_ID_LEN];
+ u64 *w0 = (u64 *)&node_id[0];
+ u64 *w1 = (u64 *)&node_id[8];
u32 addr;
int err;
@@ -2212,10 +2240,22 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
- if (!attrs[TIPC_NLA_NET_ADDR])
- return -EINVAL;
+ /* attrs[TIPC_NLA_NET_NODEID] and attrs[TIPC_NLA_NET_ADDR] are
+ * mutually exclusive cases
+ */
+ if (attrs[TIPC_NLA_NET_ADDR]) {
+ addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
+ if (!addr)
+ return -EINVAL;
+ }
- addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
+ if (attrs[TIPC_NLA_NET_NODEID]) {
+ if (!attrs[TIPC_NLA_NET_NODEID_W1])
+ return -EINVAL;
+ *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
+ *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
+ addr = hash128to32(node_id);
+ }
if (in_own_node(net, addr))
return -ENOTSUPP;
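
tipc_nl_peer_rm() now takes either the legacy 32-bit TIPC_NLA_NET_ADDR or a full 128-bit node identity carried as two u64 attributes, the two being mutually exclusive; in the identity case the 32-bit address is derived with hash128to32() from core.h above. The attribute handling, condensed into one sketch:

u8 node_id[NODE_ID_LEN];  /* 16-byte (128-bit) identity */
u64 *w0 = (u64 *)&node_id[0];
u64 *w1 = (u64 *)&node_id[8];
u32 addr = 0;

if (attrs[TIPC_NLA_NET_ADDR])       /* legacy Z.C.N address */
    addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);

if (attrs[TIPC_NLA_NET_NODEID]) {   /* 128-bit identity */
    *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
    *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
    addr = hash128to32(node_id);    /* fold to a 32-bit node number */
}
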
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e795a8a2955b..cebcc104dc70 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,8 +1,9 @@
/*
* net/tipc/socket.c: TIPC socket API
*
- * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
+ * Copyright (c) 2001-2007, 2012-2019, Ericsson AB
* Copyright (c) 2004-2008, 2010-2013, Wind River Systems
+ * Copyright (c) 2020, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -79,19 +80,32 @@ struct sockaddr_pair {
* @maxnagle: maximum size of msg which can be subject to nagle
* @portid: unique port identity in TIPC socket hash table
* @phdr: preformatted message header used when sending messages
- * #cong_links: list of congested links
+ * @cong_links: list of congested links
* @publications: list of publications for port
* @blocking_link: address of the congested link we are currently sleeping on
* @pub_count: total # of publications port has made during its lifetime
* @conn_timeout: the time we can wait for an unresponded setup request
+ * @probe_unacked: probe has not received ack yet
* @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
* @cong_link_cnt: number of congested links
* @snt_unacked: # messages sent by socket, and not yet acked by peer
+ * @snd_win: send window size
+ * @peer_caps: peer capabilities mask
* @rcv_unacked: # messages read by user, but not yet acked back to peer
+ * @rcv_win: receive window size
* @peer: 'connected' peer for dgram/rdm
* @node: hash table node
* @mc_method: cookie for use between socket and broadcast layer
* @rcu: rcu struct for tipc_sock
+ * @group: TIPC communications group
+ * @oneway: message count in one direction (FIXME)
+ * @nagle_start: current nagle value
+ * @snd_backlog: send backlog count
+ * @msg_acc: messages accepted; used in managing backlog and nagle
+ * @pkt_cnt: TIPC socket packet count
+ * @expect_ack: whether this TIPC socket is expecting an ack
+ * @nodelay: setsockopt() TIPC_NODELAY setting
+ * @group_is_open: TIPC socket group is fully open (FIXME)
*/
struct tipc_sock {
struct sock sk;
@@ -138,9 +152,9 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
- struct tipc_name_seq const *seq);
+ struct tipc_service_range const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
- struct tipc_name_seq const *seq);
+ struct tipc_service_range const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
@@ -260,6 +274,7 @@ static void tsk_set_nagle(struct tipc_sock *tsk)
/**
* tsk_advance_rx_queue - discard first buffer in socket receive queue
+ * @sk: network socket
*
* Caller must hold socket lock
*/
@@ -288,6 +303,8 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
/**
* tsk_rej_rx_queue - reject all buffers in socket receive queue
+ * @sk: network socket
+ * @error: response error code
*
* Caller must hold socket lock
*/
@@ -441,7 +458,7 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
* This routine creates additional data structures used by the TIPC socket,
* initializes them, and links them together.
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_sk_create(struct net *net, struct socket *sock,
int protocol, int kern)
@@ -606,7 +623,7 @@ static void __tipc_shutdown(struct socket *sock, int error)
* are returned or discarded according to the "destination droppable" setting
* specified for the message by the sender.
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_release(struct socket *sock)
{
@@ -644,75 +661,77 @@ static int tipc_release(struct socket *sock)
}
/**
- * tipc_bind - associate or disassocate TIPC name(s) with a socket
+ * __tipc_bind - associate or disassociate TIPC name(s) with a socket
* @sock: socket structure
- * @uaddr: socket address describing name(s) and desired operation
- * @uaddr_len: size of socket address data structure
+ * @skaddr: socket address describing name(s) and desired operation
+ * @alen: size of socket address data structure
*
* Name and name sequence binding is indicated using a positive scope value;
* a negative scope value unbinds the specified name. Specifying no name
* (i.e. a socket address length of 0) unbinds all names from the socket.
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*
* NOTE: This routine doesn't need to take the socket lock since it doesn't
* access any non-constant socket information.
*/
-static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
- int uaddr_len)
+static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
- struct sock *sk = sock->sk;
- struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
- struct tipc_sock *tsk = tipc_sk(sk);
- int res = -EINVAL;
+ struct sockaddr_tipc *addr = (struct sockaddr_tipc *)skaddr;
+ struct tipc_sock *tsk = tipc_sk(sock->sk);
- lock_sock(sk);
- if (unlikely(!uaddr_len)) {
- res = tipc_sk_withdraw(tsk, 0, NULL);
- goto exit;
- }
- if (tsk->group) {
- res = -EACCES;
- goto exit;
- }
- if (uaddr_len < sizeof(struct sockaddr_tipc)) {
- res = -EINVAL;
- goto exit;
- }
- if (addr->family != AF_TIPC) {
- res = -EAFNOSUPPORT;
- goto exit;
- }
+ if (unlikely(!alen))
+ return tipc_sk_withdraw(tsk, 0, NULL);
- if (addr->addrtype == TIPC_ADDR_NAME)
+ if (addr->addrtype == TIPC_SERVICE_ADDR)
addr->addr.nameseq.upper = addr->addr.nameseq.lower;
- else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
- res = -EAFNOSUPPORT;
- goto exit;
- }
- if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
- (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
- (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
- res = -EACCES;
- goto exit;
- }
+ if (tsk->group)
+ return -EACCES;
- res = (addr->scope >= 0) ?
- tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
- tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
-exit:
- release_sock(sk);
+ if (addr->scope >= 0)
+ return tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq);
+ else
+ return tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
+}
+
+int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
+{
+ int res;
+
+ lock_sock(sock->sk);
+ res = __tipc_bind(sock, skaddr, alen);
+ release_sock(sock->sk);
return res;
}
+static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
+{
+ struct sockaddr_tipc *addr = (struct sockaddr_tipc *)skaddr;
+
+ if (alen) {
+ if (alen < sizeof(struct sockaddr_tipc))
+ return -EINVAL;
+ if (addr->family != AF_TIPC)
+ return -EAFNOSUPPORT;
+ if (addr->addrtype > TIPC_SERVICE_ADDR)
+ return -EAFNOSUPPORT;
+ if (addr->addr.nameseq.type < TIPC_RESERVED_TYPES) {
+ pr_warn_once("Can't bind to reserved service type %u\n",
+ addr->addr.nameseq.type);
+ return -EACCES;
+ }
+ }
+ return tipc_sk_bind(sock, skaddr, alen);
+}
+
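As a usage sketch of the bind semantics documented above (a hedged example against the standard <linux/tipc.h> UAPI; the service type and instance range are illustrative):

#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

/* Sketch: bind a TIPC socket to service type 18888, instances 0..99,
 * with cluster scope. A negative scope withdraws the same binding,
 * and a zero-length address unbinds all names from the socket.
 */
static int bind_service_range(int sd)
{
	struct sockaddr_tipc addr;

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_TIPC;
	addr.addrtype = TIPC_SERVICE_RANGE;
	addr.scope = TIPC_CLUSTER_SCOPE;
	addr.addr.nameseq.type = 18888;	/* illustrative service type */
	addr.addr.nameseq.lower = 0;
	addr.addr.nameseq.upper = 99;
	return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
}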
/**
* tipc_getname - get port ID of socket or peer socket
* @sock: socket structure
* @uaddr: area for returned socket address
* @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*
* NOTE: This routine doesn't need to take the socket lock since it only
* accesses socket information that is unchanging (or which changes in
@@ -737,7 +756,7 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
addr->addr.id.node = tipc_own_addr(sock_net(sk));
}
- addr->addrtype = TIPC_ADDR_ID;
+ addr->addrtype = TIPC_SOCKET_ADDR;
addr->family = AF_TIPC;
addr->scope = 0;
addr->addr.name.domain = 0;
@@ -751,7 +770,7 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
* @sock: socket for which to calculate the poll bits
* @wait: ???
*
- * Returns pollmask value
+ * Return: pollmask value
*
* COMMENTARY:
* It appears that the usual socket locking mechanisms are not useful here
@@ -813,9 +832,9 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
- * Returns the number of bytes sent on success, or errno
+ * Return: the number of bytes sent on success, or errno
*/
-static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
+static int tipc_sendmcast(struct socket *sock, struct tipc_service_range *seq,
struct msghdr *msg, size_t dlen, long timeout)
{
struct sock *sk = sock->sk;
@@ -873,6 +892,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
/**
* tipc_send_group_msg - send a message to a member in the group
* @net: network namespace
+ * @tsk: tipc socket
* @m: message to send
* @mb: group member
* @dnode: destination node
@@ -928,7 +948,7 @@ static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
- * Returns the number of bytes sent on success, or errno
+ * Return: the number of bytes sent on success, or errno
*/
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
int dlen, long timeout)
@@ -972,7 +992,7 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
- * Returns the number of bytes sent on success, or errno
+ * Return: the number of bytes sent on success, or errno
*/
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
int dlen, long timeout)
@@ -1057,7 +1077,7 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
- * Returns the number of bytes sent on success, or errno
+ * Return: the number of bytes sent on success, or errno
*/
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
int dlen, long timeout)
@@ -1131,7 +1151,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
- * Returns the number of bytes sent on success, or errno
+ * Return: the number of bytes sent on success, or errno
*/
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
int dlen, long timeout)
@@ -1168,6 +1188,7 @@ static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
/**
* tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
+ * @net: the associated network namespace
* @arrvq: queue with arriving messages, to be cloned after destination lookup
* @inputq: queue with cloned messages, delivered to socket after dest lookup
*
@@ -1307,6 +1328,8 @@ static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
* tipc_sk_conn_proto_rcv - receive a connection mng protocol message
* @tsk: receiving socket
* @skb: pointer to message buffer.
+ * @inputq: buffer list containing the buffers
+ * @xmitq: output message area
*/
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
struct sk_buff_head *inputq,
@@ -1374,7 +1397,7 @@ exit:
* and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
* (Note: 'SYN+' is prohibited on SOCK_STREAM.)
*
- * Returns the number of bytes sent on success, or errno otherwise
+ * Return: the number of bytes sent on success, or errno otherwise
*/
static int tipc_sendmsg(struct socket *sock,
struct msghdr *m, size_t dsz)
@@ -1400,7 +1423,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
bool syn = !tipc_sk_type_connectionless(sk);
struct tipc_group *grp = tsk->group;
struct tipc_msg *hdr = &tsk->phdr;
- struct tipc_name_seq *seq;
+ struct tipc_service_range *seq;
struct sk_buff_head pkts;
u32 dport = 0, dnode = 0;
u32 type = 0, inst = 0;
@@ -1419,9 +1442,9 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
if (grp) {
if (!dest)
return tipc_send_group_bcast(sock, m, dlen, timeout);
- if (dest->addrtype == TIPC_ADDR_NAME)
+ if (dest->addrtype == TIPC_SERVICE_ADDR)
return tipc_send_group_anycast(sock, m, dlen, timeout);
- if (dest->addrtype == TIPC_ADDR_ID)
+ if (dest->addrtype == TIPC_SOCKET_ADDR)
return tipc_send_group_unicast(sock, m, dlen, timeout);
if (dest->addrtype == TIPC_ADDR_MCAST)
return tipc_send_group_mcast(sock, m, dlen, timeout);
@@ -1441,7 +1464,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
return -EISCONN;
if (tsk->published)
return -EOPNOTSUPP;
- if (dest->addrtype == TIPC_ADDR_NAME) {
+ if (dest->addrtype == TIPC_SERVICE_ADDR) {
tsk->conn_type = dest->addr.name.name.type;
tsk->conn_instance = dest->addr.name.name.instance;
}
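For reference, a user-space sketch of the TIPC_SERVICE_ADDR destination case dispatched above (a hedged example; the service type and instance are illustrative):

#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

/* Sketch: send a datagram to a TIPC service address; the kernel
 * resolves (type, instance) to a socket port via the name table,
 * as __tipc_sendmsg() does above.
 */
static ssize_t send_to_service(int sd, const void *buf, size_t len)
{
	struct sockaddr_tipc dst;

	memset(&dst, 0, sizeof(dst));
	dst.family = AF_TIPC;
	dst.addrtype = TIPC_SERVICE_ADDR;
	dst.addr.name.name.type = 18888;	/* illustrative */
	dst.addr.name.name.instance = 17;
	return sendto(sd, buf, len, 0,
		      (struct sockaddr *)&dst, sizeof(dst));
}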
@@ -1452,14 +1475,14 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
if (dest->addrtype == TIPC_ADDR_MCAST)
return tipc_sendmcast(sock, seq, m, dlen, timeout);
- if (dest->addrtype == TIPC_ADDR_NAME) {
+ if (dest->addrtype == TIPC_SERVICE_ADDR) {
type = dest->addr.name.name.type;
inst = dest->addr.name.name.instance;
dnode = dest->addr.name.domain;
dport = tipc_nametbl_translate(net, type, inst, &dnode);
if (unlikely(!dport && !dnode))
return -EHOSTUNREACH;
- } else if (dest->addrtype == TIPC_ADDR_ID) {
+ } else if (dest->addrtype == TIPC_SOCKET_ADDR) {
dnode = dest->addr.id.node;
} else {
return -EINVAL;
@@ -1471,7 +1494,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
if (unlikely(rc))
return rc;
- if (dest->addrtype == TIPC_ADDR_NAME) {
+ if (dest->addrtype == TIPC_SERVICE_ADDR) {
msg_set_type(hdr, TIPC_NAMED_MSG);
msg_set_hdr_sz(hdr, NAMED_H_SIZE);
msg_set_nametype(hdr, type);
@@ -1479,7 +1502,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
msg_set_destnode(hdr, dnode);
msg_set_destport(hdr, dport);
- } else { /* TIPC_ADDR_ID */
+ } else { /* TIPC_SOCKET_ADDR */
msg_set_type(hdr, TIPC_DIRECT_MSG);
msg_set_lookup_scope(hdr, 0);
msg_set_destnode(hdr, dnode);
@@ -1519,7 +1542,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
*
* Used for SOCK_STREAM data.
*
- * Returns the number of bytes sent on success (or partial success),
+ * Return: the number of bytes sent on success (or partial success),
* or errno if no data sent
*/
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
@@ -1627,7 +1650,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
*
* Used for SOCK_SEQPACKET messages.
*
- * Returns the number of bytes sent on success, or errno otherwise
+ * Return: the number of bytes sent on success, or errno otherwise
*/
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
@@ -1684,7 +1707,7 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
return;
srcaddr->sock.family = AF_TIPC;
- srcaddr->sock.addrtype = TIPC_ADDR_ID;
+ srcaddr->sock.addrtype = TIPC_SOCKET_ADDR;
srcaddr->sock.scope = 0;
srcaddr->sock.addr.id.ref = msg_origport(hdr);
srcaddr->sock.addr.id.node = msg_orignode(hdr);
@@ -1696,7 +1719,7 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
/* Group message users may also want to know sending member's id */
srcaddr->member.family = AF_TIPC;
- srcaddr->member.addrtype = TIPC_ADDR_NAME;
+ srcaddr->member.addrtype = TIPC_SERVICE_ADDR;
srcaddr->member.scope = 0;
srcaddr->member.addr.name.name.type = msg_nametype(hdr);
srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
@@ -1712,7 +1735,7 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
*
* Note: Ancillary data is not captured if not requested by receiver.
*
- * Returns 0 if successful, otherwise errno
+ * Return: 0 if successful, otherwise errno
*/
static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
struct tipc_sock *tsk)
@@ -1862,6 +1885,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
/**
* tipc_recvmsg - receive packet-oriented message
+ * @sock: network socket
* @m: descriptor for message info
* @buflen: length of user buffer area
* @flags: receive flags
@@ -1869,7 +1893,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
* Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
* If the complete message doesn't fit in user area, truncate it.
*
- * Returns size of returned message data, errno otherwise
+ * Return: size of returned message data, errno otherwise
*/
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
size_t buflen, int flags)
@@ -1970,6 +1994,7 @@ exit:
/**
* tipc_recvstream - receive stream-oriented data
+ * @sock: network socket
* @m: descriptor for message info
* @buflen: total size of user buffer area
* @flags: receive flags
@@ -1977,7 +2002,7 @@ exit:
* Used for SOCK_STREAM messages only. If not enough data is available
* will optionally wait for more; never truncates data.
*
- * Returns size of returned message data, errno otherwise
+ * Return: size of returned message data, errno otherwise
*/
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
size_t buflen, int flags)
@@ -2155,7 +2180,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
* @tsk: TIPC socket
* @skb: pointer to message buffer.
* @xmitq: for Nagle ACK if any
- * Returns true if message should be added to receive queue, false otherwise
+ * Return: true if message should be added to receive queue, false otherwise
*/
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
struct sk_buff_head *xmitq)
@@ -2269,7 +2294,7 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
* TIPC_HIGH_IMPORTANCE (8 MB)
* TIPC_CRITICAL_IMPORTANCE (16 MB)
*
- * Returns overload limit according to corresponding message importance
+ * Return: overload limit according to corresponding message importance
*/
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
@@ -2292,12 +2317,12 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
* tipc_sk_filter_rcv - validate incoming message
* @sk: socket
* @skb: pointer to message.
+ * @xmitq: output message area (FIXME)
*
* Enqueues message on receive queue if acceptable; optionally handles
* disconnect indication for a connected socket.
*
* Called with socket lock already taken
- *
*/
static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
struct sk_buff_head *xmitq)
@@ -2387,6 +2412,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
* @inputq: list of incoming buffers with potentially different destinations
* @sk: socket where the buffers should be enqueued
* @dport: port number for the socket
+ * @xmitq: output queue
*
* Caller must hold socket lock
*/
@@ -2439,6 +2465,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
/**
* tipc_sk_rcv - handle a chain of incoming buffers
+ * @net: the associated network namespace
* @inputq: buffer list containing the buffers
* Consumes all buffers in list until inputq is empty
* Note: may be called in multiple threads referring to the same queue
@@ -2531,7 +2558,7 @@ static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
* @destlen: size of socket address data structure
* @flags: file-related flags associated with socket
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
int destlen, int flags)
@@ -2624,7 +2651,7 @@ exit:
* @sock: socket structure
* @len: (unused)
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_listen(struct socket *sock, int len)
{
@@ -2676,8 +2703,9 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
* @sock: listening socket
* @new_sock: new socket that is to be connected
* @flags: file-related flags associated with socket
+ * @kern: caused by kernel or by userspace?
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
bool kern)
@@ -2756,7 +2784,7 @@ exit:
*
* Terminates connection (if necessary), then purges socket's receive queue.
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_shutdown(struct socket *sock, int how)
{
@@ -2864,7 +2892,7 @@ static void tipc_sk_timeout(struct timer_list *t)
}
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
- struct tipc_name_seq const *seq)
+ struct tipc_service_range const *seq)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
@@ -2892,7 +2920,7 @@ static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
}
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
- struct tipc_name_seq const *seq)
+ struct tipc_service_range const *seq)
{
struct net *net = sock_net(&tsk->sk);
struct publication *publ;
@@ -3039,7 +3067,7 @@ static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
struct net *net = sock_net(&tsk->sk);
struct tipc_group *grp = tsk->group;
struct tipc_msg *hdr = &tsk->phdr;
- struct tipc_name_seq seq;
+ struct tipc_service_range seq;
int rc;
if (mreq->type < TIPC_RESERVED_TYPES)
@@ -3076,7 +3104,7 @@ static int tipc_sk_leave(struct tipc_sock *tsk)
{
struct net *net = sock_net(&tsk->sk);
struct tipc_group *grp = tsk->group;
- struct tipc_name_seq seq;
+ struct tipc_service_range seq;
int scope;
if (!grp)
@@ -3099,7 +3127,7 @@ static int tipc_sk_leave(struct tipc_sock *tsk)
* For stream sockets only, accepts and ignores all IPPROTO_TCP options
* (to ease compatibility).
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
sockptr_t ov, unsigned int ol)
@@ -3193,14 +3221,14 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
* For stream sockets only, returns 0 length result for all IPPROTO_TCP options
* (to ease compatibility).
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
char __user *ov, int __user *ol)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_name_seq seq;
+ struct tipc_service_range seq;
int len, scope;
u32 value;
int res;
@@ -3301,12 +3329,12 @@ static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
u32 onode = tipc_own_addr(sock_net(sock1->sk));
tsk1->peer.family = AF_TIPC;
- tsk1->peer.addrtype = TIPC_ADDR_ID;
+ tsk1->peer.addrtype = TIPC_SOCKET_ADDR;
tsk1->peer.scope = TIPC_NODE_SCOPE;
tsk1->peer.addr.id.ref = tsk2->portid;
tsk1->peer.addr.id.node = onode;
tsk2->peer.family = AF_TIPC;
- tsk2->peer.addrtype = TIPC_ADDR_ID;
+ tsk2->peer.addrtype = TIPC_SOCKET_ADDR;
tsk2->peer.scope = TIPC_NODE_SCOPE;
tsk2->peer.addr.id.ref = tsk1->portid;
tsk2->peer.addr.id.node = onode;
@@ -3397,7 +3425,7 @@ static struct proto tipc_proto = {
/**
* tipc_socket_init - initialize TIPC socket interface
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
int tipc_socket_init(void)
{
@@ -3796,10 +3824,11 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
/**
* tipc_sk_filtering - check if a socket should be traced
* @sk: the socket to be examined
- * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
- * (portid, sock type, name type, name lower, name upper)
*
- * Returns true if the socket meets the socket tuple data
+ * @sysctl_tipc_sk_filter is used as the socket tuple for filtering:
+ * (portid, sock type, name type, name lower, name upper)
+ *
+ * Return: true if the socket meets the socket tuple data
* (value 0 = 'any') or when there is no tuple set (all = 0),
* otherwise false
*/
@@ -3864,7 +3893,7 @@ u32 tipc_sock_get_portid(struct sock *sk)
* @sk: tipc sk to be checked
* @skb: tipc msg to be checked
*
- * Returns true if the socket rx queue allocation is > 90%, otherwise false
+ * Return: true if the socket rx queue allocation is > 90%, otherwise false
*/
bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
@@ -3882,7 +3911,7 @@ bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
* @sk: tipc sk to be checked
* @skb: tipc msg to be checked
*
- * Returns true if the socket rx queue allocation is > 90%, otherwise false
+ * Return: true if the socket rx queue allocation is > 90%, otherwise false
*/
bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index b11575afc66f..02cdf166807d 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -74,7 +74,7 @@ int tipc_dump_done(struct netlink_callback *cb);
u32 tipc_sock_get_portid(struct sock *sk);
bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb);
bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb);
-
+int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen);
int tsk_set_importance(struct sock *sk, int imp);
#endif
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index f340e53da625..f6ad0005218c 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -3,6 +3,7 @@
*
* Copyright (c) 2000-2017, Ericsson AB
* Copyright (c) 2005-2007, 2010-2013, Wind River Systems
+ * Copyright (c) 2020, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,12 +56,14 @@ static void tipc_sub_send_event(struct tipc_subscription *sub,
}
/**
- * tipc_sub_check_overlap - test for subscription overlap with the
- * given values
+ * tipc_sub_check_overlap - test for subscription overlap with the given values
+ * @seq: tipc_name_seq to check
+ * @found_lower: lower value to test
+ * @found_upper: upper value to test
*
- * Returns 1 if there is overlap, otherwise 0.
+ * Return: 1 if there is overlap, otherwise 0.
*/
-int tipc_sub_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
+int tipc_sub_check_overlap(struct tipc_service_range *seq, u32 found_lower,
u32 found_upper)
{
if (found_lower < seq->lower)
@@ -79,7 +82,7 @@ void tipc_sub_report_overlap(struct tipc_subscription *sub,
{
struct tipc_subscr *s = &sub->evt.s;
u32 filter = tipc_sub_read(s, filter);
- struct tipc_name_seq seq;
+ struct tipc_service_range seq;
seq.type = tipc_sub_read(s, seq.type);
seq.lower = tipc_sub_read(s, seq.lower);
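The clamping logic in tipc_sub_check_overlap() above is a standard interval-intersection test; a self-contained sketch of the same idea, with generic names:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: clamp the found range to the subscribed range; the two
 * ranges overlap iff the clamped range is still non-empty.
 */
static bool ranges_overlap(uint32_t sub_lower, uint32_t sub_upper,
			   uint32_t found_lower, uint32_t found_upper)
{
	if (found_lower < sub_lower)
		found_lower = sub_lower;
	if (found_upper > sub_upper)
		found_upper = sub_upper;
	return found_lower <= found_upper;
}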
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 6ebbec1bedd1..3ded27391d54 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -3,6 +3,7 @@
*
* Copyright (c) 2003-2017, Ericsson AB
* Copyright (c) 2005-2007, 2012-2013, Wind River Systems
+ * Copyright (c) 2020, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,12 +48,15 @@ struct tipc_conn;
/**
* struct tipc_subscription - TIPC network topology subscription object
- * @subscriber: pointer to its subscriber
- * @seq: name sequence associated with subscription
+ * @kref: reference count for this subscription
+ * @net: network namespace associated with subscription
* @timer: timer governing subscription duration (optional)
- * @nameseq_list: adjacent subscriptions in name sequence's subscription list
+ * @service_list: adjacent subscriptions in name sequence's subscription list
* @sub_list: adjacent subscriptions in subscriber's subscription list
* @evt: template for events generated by subscription
+ * @conid: connection identifier of topology server
+ * @inactive: true if this subscription is inactive
+ * @lock: serialize up/down and timer events
*/
struct tipc_subscription {
struct kref kref;
@@ -63,7 +67,7 @@ struct tipc_subscription {
struct tipc_event evt;
int conid;
bool inactive;
- spinlock_t lock; /* serialize up/down and timer events */
+ spinlock_t lock;
};
struct tipc_subscription *tipc_sub_subscribe(struct net *net,
@@ -71,8 +75,8 @@ struct tipc_subscription *tipc_sub_subscribe(struct net *net,
int conid);
void tipc_sub_unsubscribe(struct tipc_subscription *sub);
-int tipc_sub_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
- u32 found_upper);
+int tipc_sub_check_overlap(struct tipc_service_range *seq,
+ u32 found_lower, u32 found_upper);
void tipc_sub_report_overlap(struct tipc_subscription *sub,
u32 found_lower, u32 found_upper,
u32 event, u32 port, u32 node,
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 13f3143609f9..5522865deae9 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -519,13 +519,13 @@ static int tipc_topsrv_create_listener(struct tipc_topsrv *srv)
goto err;
saddr.family = AF_TIPC;
- saddr.addrtype = TIPC_ADDR_NAMESEQ;
- saddr.addr.nameseq.type = TIPC_TOP_SRV;
+ saddr.addrtype = TIPC_SERVICE_RANGE;
+ saddr.addr.nameseq.type = TIPC_TOP_SRV;
saddr.addr.nameseq.lower = TIPC_TOP_SRV;
saddr.addr.nameseq.upper = TIPC_TOP_SRV;
saddr.scope = TIPC_NODE_SCOPE;
- rc = kernel_bind(lsock, (struct sockaddr *)&saddr, sizeof(saddr));
+ rc = tipc_sk_bind(lsock, (struct sockaddr *)&saddr, sizeof(saddr));
if (rc < 0)
goto err;
rc = kernel_listen(lsock, 0);
diff --git a/net/tipc/trace.c b/net/tipc/trace.c
index 265f6a26aa3d..7d2931521e0e 100644
--- a/net/tipc/trace.c
+++ b/net/tipc/trace.c
@@ -36,7 +36,7 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-/**
+/*
* socket tuples for filtering in socket traces:
* (portid, sock type, name type, name lower, name upper)
*/
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 1d17f4470ee2..21e75e28e86a 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -64,6 +64,11 @@
*
* This is the bearer level originating address used in neighbor discovery
* messages, and all fields should be in network byte order
+ *
+ * @proto: Ethernet protocol in use
+ * @port: port being used
+ * @ipv4: IPv4 address of neighbor
+ * @ipv6: IPv6 address of neighbor
*/
struct udp_media_addr {
__be16 proto;
@@ -88,6 +93,7 @@ struct udp_replicast {
* @ubsock: bearer associated socket
* @ifindex: local address scope
* @work: used to schedule deferred work on a bearer
+ * @rcast: associated udp_replicast container
*/
struct udp_bearer {
struct tipc_bearer __rcu *bearer;
@@ -772,7 +778,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
if (err)
goto free;
- /**
+ /*
* The bcast media address port is used for all peers and the ip
* is used if it's a multicast address.
*/
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index a3ab2d3d4e4e..f7fb7d2c1de1 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -327,7 +327,7 @@ static int tls_device_record_close(struct sock *sk,
/* fill prepend */
tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
record->len - prot->overhead_size,
- record_type, prot->version);
+ record_type);
return ret;
}
@@ -998,7 +998,7 @@ static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
- u16 nonce_size, tag_size, iv_size, rec_seq_size;
+ u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_record_info *start_marker_record;
@@ -1039,6 +1039,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
+ salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
rec_seq =
((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
break;
@@ -1059,6 +1060,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
prot->tag_size = tag_size;
prot->overhead_size = prot->prepend_size + prot->tag_size;
prot->iv_size = iv_size;
+ prot->salt_size = salt_size;
ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
GFP_KERNEL);
if (!ctx->tx.iv) {
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 28895333701e..d946817ed065 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -49,7 +49,8 @@ static int tls_enc_record(struct aead_request *aead_req,
struct crypto_aead *aead, char *aad,
char *iv, __be64 rcd_sn,
struct scatter_walk *in,
- struct scatter_walk *out, int *in_len)
+ struct scatter_walk *out, int *in_len,
+ struct tls_prot_info *prot)
{
unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
struct scatterlist sg_in[3];
@@ -73,8 +74,7 @@ static int tls_enc_record(struct aead_request *aead_req,
len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;
tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
- (char *)&rcd_sn, sizeof(rcd_sn), buf[0],
- TLS_1_2_VERSION);
+ (char *)&rcd_sn, buf[0], prot);
memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
TLS_CIPHER_AES_GCM_128_IV_SIZE);
@@ -140,7 +140,7 @@ static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
static int tls_enc_records(struct aead_request *aead_req,
struct crypto_aead *aead, struct scatterlist *sg_in,
struct scatterlist *sg_out, char *aad, char *iv,
- u64 rcd_sn, int len)
+ u64 rcd_sn, int len, struct tls_prot_info *prot)
{
struct scatter_walk out, in;
int rc;
@@ -150,7 +150,7 @@ static int tls_enc_records(struct aead_request *aead_req,
do {
rc = tls_enc_record(aead_req, aead, aad, iv,
- cpu_to_be64(rcd_sn), &in, &out, &len);
+ cpu_to_be64(rcd_sn), &in, &out, &len, prot);
rcd_sn++;
} while (rc == 0 && len);
@@ -348,7 +348,8 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
payload_len, sync_size, dummy_buf);
if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
- rcd_sn, sync_size + payload_len) < 0)
+ rcd_sn, sync_size + payload_len,
+ &tls_ctx->prot_info) < 0)
goto free_nskb;
complete_skb(nskb, skb, tcp_payload_offset);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 8d93cea99f2c..47b7c5334c34 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -521,6 +521,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
case TLS_CIPHER_AES_CCM_128:
optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
break;
+ case TLS_CIPHER_CHACHA20_POLY1305:
+ optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
+ break;
default:
rc = -EINVAL;
goto err_crypto_info;
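For context, a minimal user-space sketch of selecting the new cipher through the kTLS setsockopt path extended above (structure and constants from the TLS UAPI; the key material must come from a completed TLS handshake, and the fallback defines are only needed with older headers):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef TCP_ULP
#define TCP_ULP 31	/* fallback for older headers */
#endif
#ifndef SOL_TLS
#define SOL_TLS 282	/* fallback for older headers */
#endif

/* Sketch: enable kernel TLS transmit offload with CHACHA20-POLY1305. */
static int enable_ktls_chacha_tx(int sd, const unsigned char *key,
				 const unsigned char *iv,
				 const unsigned char *salt,
				 const unsigned char *rec_seq)
{
	struct tls12_crypto_info_chacha20_poly1305 ci;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_CHACHA20_POLY1305;
	memcpy(ci.key, key, TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);

	if (setsockopt(sd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(sd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}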
diff --git a/net/tls/tls_proc.c b/net/tls/tls_proc.c
index 3a5dd1e07233..feeceb0e4cb4 100644
--- a/net/tls/tls_proc.c
+++ b/net/tls/tls_proc.c
@@ -37,9 +37,12 @@ static int tls_statistics_seq_show(struct seq_file *seq, void *v)
int __net_init tls_proc_init(struct net *net)
{
+#ifdef CONFIG_PROC_FS
if (!proc_create_net_single("tls_stat", 0444, net->proc_net,
tls_statistics_seq_show, NULL))
return -ENOMEM;
+#endif /* CONFIG_PROC_FS */
+
return 0;
}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 845c628ac1b2..01d933ae5f16 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -505,7 +505,7 @@ static int tls_do_encryption(struct sock *sk,
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
prot->iv_size + prot->salt_size);
- xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);
+ xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);
sge->offset += prot->prepend_size;
sge->length -= prot->prepend_size;
@@ -748,14 +748,13 @@ static int tls_push_record(struct sock *sk, int flags,
sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
- tls_ctx->tx.rec_seq, prot->rec_seq_size,
- record_type, prot->version);
+ tls_ctx->tx.rec_seq, record_type, prot);
tls_fill_prepend(tls_ctx,
page_address(sg_page(&msg_en->sg.data[i])) +
msg_en->sg.data[i].offset,
msg_pl->sg.size + prot->tail_size,
- record_type, prot->version);
+ record_type);
tls_ctx->pending_open_record_frags = false;
@@ -1471,19 +1470,19 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
kfree(mem);
return err;
}
- if (prot->version == TLS_1_3_VERSION)
+ if (prot->version == TLS_1_3_VERSION ||
+ prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
memcpy(iv + iv_offset, tls_ctx->rx.iv,
crypto_aead_ivsize(ctx->aead_recv));
else
memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
- xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
+ xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);
/* Prepare AAD */
tls_make_aad(aad, rxm->full_len - prot->overhead_size +
prot->tail_size,
- tls_ctx->rx.rec_seq, prot->rec_seq_size,
- ctx->control, prot->version);
+ tls_ctx->rx.rec_seq, ctx->control, prot);
/* Prepare sgin */
sg_init_table(sgin, n_sgin);
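The xor_iv_with_seq() call above builds the per-record AEAD nonce used by TLS 1.3 and, after this series, by ChaCha20-Poly1305 in TLS 1.2 as well: the 8-byte record sequence number is XOR-ed into the tail of the static IV (RFC 8446, section 5.3). A self-contained sketch:

#include <stddef.h>
#include <stdint.h>

/* Sketch: derive a per-record nonce by XOR-ing the big-endian record
 * sequence number into the last 8 bytes of the static IV.
 */
static void make_record_nonce(uint8_t *nonce, const uint8_t *static_iv,
			      size_t iv_len, const uint8_t seq[8])
{
	size_t i;

	for (i = 0; i < iv_len; i++)
		nonce[i] = static_iv[i];
	for (i = 0; i < 8; i++)
		nonce[iv_len - 8 + i] ^= seq[i];
}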
@@ -2076,7 +2075,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
data_len = ((header[4] & 0xFF) | (header[3] << 8));
cipher_overhead = prot->tag_size;
- if (prot->version != TLS_1_3_VERSION)
+ if (prot->version != TLS_1_3_VERSION &&
+ prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
cipher_overhead += prot->iv_size;
if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
@@ -2296,6 +2296,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
+ struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
struct tls_sw_context_tx *sw_ctx_tx = NULL;
struct tls_sw_context_rx *sw_ctx_rx = NULL;
struct cipher_context *cctx;
@@ -2408,6 +2409,21 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
cipher_name = "ccm(aes)";
break;
}
+ case TLS_CIPHER_CHACHA20_POLY1305: {
+ chacha20_poly1305_info = (void *)crypto_info;
+ nonce_size = 0;
+ tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
+ iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
+ iv = chacha20_poly1305_info->iv;
+ rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
+ rec_seq = chacha20_poly1305_info->rec_seq;
+ keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
+ key = chacha20_poly1305_info->key;
+ salt = chacha20_poly1305_info->salt;
+ salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
+ cipher_name = "rfc7539(chacha20,poly1305)";
+ break;
+ }
default:
rc = -EINVAL;
goto free_priv;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index d10916ab4526..b12d3a322242 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -421,7 +421,8 @@ static void vsock_deassign_transport(struct vsock_sock *vsk)
* The vsk->remote_addr is used to decide which transport to use:
* - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if
* g2h is not loaded, will use local transport;
- * - remote CID <= VMADDR_CID_HOST will use guest->host transport;
+ * - remote CID <= VMADDR_CID_HOST, or h2g is not loaded, or the remote
+ * flags field includes VMADDR_FLAG_TO_HOST: will use guest->host transport;
* - remote CID > VMADDR_CID_HOST will use host->guest transport;
*/
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
@@ -429,8 +430,23 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
const struct vsock_transport *new_transport;
struct sock *sk = sk_vsock(vsk);
unsigned int remote_cid = vsk->remote_addr.svm_cid;
+ __u8 remote_flags;
int ret;
+ /* If both the source and destination CIDs are higher than
+ * VMADDR_CID_HOST, a vsock channel must be established in which all
+ * packets are forwarded to the host; the host then forwards them to
+ * the destination guest.
+ *
+ * The flag is set on the (listen) receive path (psk is not NULL). On
+ * the connect path the flag can be set by the user space application.
+ */
+ if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST &&
+ vsk->remote_addr.svm_cid > VMADDR_CID_HOST)
+ vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST;
+
+ remote_flags = vsk->remote_addr.svm_flags;
+
switch (sk->sk_type) {
case SOCK_DGRAM:
new_transport = transport_dgram;
@@ -438,7 +454,8 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
case SOCK_STREAM:
if (vsock_use_local_transport(remote_cid))
new_transport = transport_local;
- else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g)
+ else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g ||
+ (remote_flags & VMADDR_FLAG_TO_HOST))
new_transport = transport_g2h;
else
new_transport = transport_h2g;
@@ -2072,8 +2089,7 @@ static long vsock_dev_do_ioctl(struct file *filp,
break;
default:
- pr_err("Unknown ioctl %d\n", cmd);
- retval = -EINVAL;
+ retval = -ENOIOCTLCMD;
}
return retval;
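As a usage sketch of the forwarding flag handled above (a hedged example; the CID and port are illustrative, and the struct sockaddr_vm field names follow the vm_sockets UAPI extended by this series):

#include <string.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

/* Sketch: connect from one guest to another through the host by
 * setting VMADDR_FLAG_TO_HOST, which vsock_assign_transport() maps
 * to the guest->host transport.
 */
static int connect_via_host(int sd, unsigned int peer_cid,
			    unsigned int port)
{
	struct sockaddr_vm addr;

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = peer_cid;	/* > VMADDR_CID_HOST: another guest */
	addr.svm_port = port;
	addr.svm_flags = VMADDR_FLAG_TO_HOST;
	return connect(sd, (struct sockaddr *)&addr, sizeof(addr));
}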
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c
index 909de26cb0e7..223b9660a759 100644
--- a/net/vmw_vsock/vsock_addr.c
+++ b/net/vmw_vsock/vsock_addr.c
@@ -22,13 +22,15 @@ EXPORT_SYMBOL_GPL(vsock_addr_init);
int vsock_addr_validate(const struct sockaddr_vm *addr)
{
+ __u8 svm_valid_flags = VMADDR_FLAG_TO_HOST;
+
if (!addr)
return -EFAULT;
if (addr->svm_family != AF_VSOCK)
return -EAFNOSUPPORT;
- if (addr->svm_zero[0] != 0)
+ if (addr->svm_flags & ~svm_valid_flags)
return -EINVAL;
return 0;
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig
deleted file mode 100644
index d13762bc4abc..000000000000
--- a/net/wimax/Kconfig
+++ /dev/null
@@ -1,40 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# WiMAX LAN device configuration
-#
-
-menuconfig WIMAX
- tristate "WiMAX Wireless Broadband support"
- depends on RFKILL || !RFKILL
- help
-
- Select to configure support for devices that provide
- wireless broadband connectivity using the WiMAX protocol
- (IEEE 802.16).
-
- Please note that most of these devices require signing up
- for a service plan with a provider.
-
- The different WiMAX drivers can be enabled in the menu entry
-
- Device Drivers > Network device support > WiMAX Wireless
- Broadband devices
-
- If unsure, it is safe to select M (module).
-
-config WIMAX_DEBUG_LEVEL
- int "WiMAX debug level"
- depends on WIMAX
- default 8
- help
-
- Select the maximum debug verbosity level to be compiled into
- the WiMAX stack code.
-
- By default, debug messages are disabled at runtime and can
- be selectively enabled for different parts of the code using
- the sysfs debug-levels file.
-
- If set at zero, this will compile out all the debug code.
-
- It is recommended that it is left at 8.
diff --git a/net/wimax/Makefile b/net/wimax/Makefile
deleted file mode 100644
index c2a71ae487ac..000000000000
--- a/net/wimax/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_WIMAX) += wimax.o
-
-wimax-y := \
- id-table.o \
- op-msg.o \
- op-reset.o \
- op-rfkill.o \
- op-state-get.o \
- stack.o
-
-wimax-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/net/wimax/debug-levels.h b/net/wimax/debug-levels.h
deleted file mode 100644
index ebc287cde336..000000000000
--- a/net/wimax/debug-levels.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Linux WiMAX Stack
- * Debug levels control file for the wimax module
- *
- * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- */
-#ifndef __debug_levels__h__
-#define __debug_levels__h__
-
-/* Maximum compile and run time debug level for all submodules */
-#define D_MODULENAME wimax
-#define D_MASTER CONFIG_WIMAX_DEBUG_LEVEL
-
-#include <linux/wimax/debug.h>
-
-/* List of all the enabled modules */
-enum d_module {
- D_SUBMODULE_DECLARE(debugfs),
- D_SUBMODULE_DECLARE(id_table),
- D_SUBMODULE_DECLARE(op_msg),
- D_SUBMODULE_DECLARE(op_reset),
- D_SUBMODULE_DECLARE(op_rfkill),
- D_SUBMODULE_DECLARE(op_state_get),
- D_SUBMODULE_DECLARE(stack),
-};
-
-#endif /* #ifndef __debug_levels__h__ */
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c
deleted file mode 100644
index 3c54bb6b925a..000000000000
--- a/net/wimax/debugfs.c
+++ /dev/null
@@ -1,38 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Linux WiMAX
- * Debugfs support
- *
- * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- */
-#include <linux/debugfs.h>
-#include <linux/wimax.h>
-#include "wimax-internal.h"
-
-#define D_SUBMODULE debugfs
-#include "debug-levels.h"
-
-void wimax_debugfs_add(struct wimax_dev *wimax_dev)
-{
- struct net_device *net_dev = wimax_dev->net_dev;
- struct dentry *dentry;
- char buf[128];
-
- snprintf(buf, sizeof(buf), "wimax:%s", net_dev->name);
- dentry = debugfs_create_dir(buf, NULL);
- wimax_dev->debugfs_dentry = dentry;
-
- d_level_register_debugfs("wimax_dl_", debugfs, dentry);
- d_level_register_debugfs("wimax_dl_", id_table, dentry);
- d_level_register_debugfs("wimax_dl_", op_msg, dentry);
- d_level_register_debugfs("wimax_dl_", op_reset, dentry);
- d_level_register_debugfs("wimax_dl_", op_rfkill, dentry);
- d_level_register_debugfs("wimax_dl_", op_state_get, dentry);
- d_level_register_debugfs("wimax_dl_", stack, dentry);
-}
-
-void wimax_debugfs_rm(struct wimax_dev *wimax_dev)
-{
- debugfs_remove_recursive(wimax_dev->debugfs_dentry);
-}
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
deleted file mode 100644
index 02eee37b7e31..000000000000
--- a/net/wimax/id-table.c
+++ /dev/null
@@ -1,130 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Linux WiMAX
- * Mappping of generic netlink family IDs to net devices
- *
- * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * We assign a single generic netlink family ID to each device (to
- * simplify lookup).
- *
- * We need a way to map family ID to a wimax_dev pointer.
- *
- * The idea is to use a very simple lookup. Using a netlink attribute
- * with (for example) the interface name implies a heavier search over
- * all the network devices; seemed kind of a waste given that we know
- * we are looking for a WiMAX device and that most systems will have
- * just a single WiMAX adapter.
- *
- * We put all the WiMAX devices in the system in a linked list and
- * match the generic link family ID against the list.
- *
- * By using a linked list, the case of a single adapter in the system
- * becomes (almost) no overhead, while still working for many more. If
- * it ever goes beyond two, I'll be surprised.
- */
-#include <linux/device.h>
-#include <net/genetlink.h>
-#include <linux/netdevice.h>
-#include <linux/list.h>
-#include <linux/wimax.h>
-#include "wimax-internal.h"
-
-
-#define D_SUBMODULE id_table
-#include "debug-levels.h"
-
-
-static DEFINE_SPINLOCK(wimax_id_table_lock);
-static struct list_head wimax_id_table = LIST_HEAD_INIT(wimax_id_table);
-
-
-/*
- * wimax_id_table_add - add a gennetlink familiy ID / wimax_dev mapping
- *
- * @wimax_dev: WiMAX device descriptor to associate to the Generic
- * Netlink family ID.
- *
- * Look for an empty spot in the ID table; if none found, double the
- * table's size and get the first spot.
- */
-void wimax_id_table_add(struct wimax_dev *wimax_dev)
-{
- d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
- spin_lock(&wimax_id_table_lock);
- list_add(&wimax_dev->id_table_node, &wimax_id_table);
- spin_unlock(&wimax_id_table_lock);
- d_fnend(3, NULL, "(wimax_dev %p)\n", wimax_dev);
-}
-
-
-/*
- * wimax_get_netdev_by_info - lookup a wimax_dev from the gennetlink info
- *
- * The generic netlink family ID has been filled out in the
- * nlmsghdr->nlmsg_type field, so we pull it from there, look it up in
- * the mapping table and reference the wimax_dev.
- *
- * When done, the reference should be dropped with
- * 'dev_put(wimax_dev->net_dev)'.
- */
-struct wimax_dev *wimax_dev_get_by_genl_info(
- struct genl_info *info, int ifindex)
-{
- struct wimax_dev *wimax_dev = NULL;
-
- d_fnstart(3, NULL, "(info %p ifindex %d)\n", info, ifindex);
- spin_lock(&wimax_id_table_lock);
- list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
- if (wimax_dev->net_dev->ifindex == ifindex) {
- dev_hold(wimax_dev->net_dev);
- goto found;
- }
- }
- wimax_dev = NULL;
- d_printf(1, NULL, "wimax: no devices found with ifindex %d\n",
- ifindex);
-found:
- spin_unlock(&wimax_id_table_lock);
- d_fnend(3, NULL, "(info %p ifindex %d) = %p\n",
- info, ifindex, wimax_dev);
- return wimax_dev;
-}
-
-
-/*
- * wimax_id_table_rm - Remove a gennetlink familiy ID / wimax_dev mapping
- *
- * @id: family ID to remove from the table
- */
-void wimax_id_table_rm(struct wimax_dev *wimax_dev)
-{
- spin_lock(&wimax_id_table_lock);
- list_del_init(&wimax_dev->id_table_node);
- spin_unlock(&wimax_id_table_lock);
-}
-
-
-/*
- * Release the gennetlink family id / mapping table
- *
- * On debug, verify that the table is empty upon removal. We want the
- * code always compiled, to ensure it doesn't bit rot. It will be
- * compiled out if CONFIG_BUG is disabled.
- */
-void wimax_id_table_release(void)
-{
- struct wimax_dev *wimax_dev;
-
-#ifndef CONFIG_BUG
- return;
-#endif
- spin_lock(&wimax_id_table_lock);
- list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
- pr_err("BUG: %s wimax_dev %p ifindex %d not cleared\n",
- __func__, wimax_dev, wimax_dev->net_dev->ifindex);
- WARN_ON(1);
- }
- spin_unlock(&wimax_id_table_lock);
-}
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
deleted file mode 100644
index 6460b5785758..000000000000
--- a/net/wimax/op-msg.c
+++ /dev/null
@@ -1,391 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Linux WiMAX
- * Generic messaging interface between userspace and driver/device
- *
- * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This implements a direct communication channel between user space and
- * the driver/device, by which free form messages can be sent back and
- * forth.
- *
- * This is intended for device-specific features, vendor quirks, etc.
- *
- * See include/net/wimax.h
- *
- * GENERIC NETLINK ENCODING AND CAPACITY
- *
- * A destination "pipe name" is added to each message; it is up to the
- * drivers to assign or use those names (if using them at all).
- *
- * Messages are encoded as a binary netlink attribute using nla_put()
- * using type NLA_UNSPEC (as some versions of libnl still in
- * deployment don't yet understand NLA_BINARY).
- *
- * The maximum capacity of this transport is PAGESIZE per message (so
- * the actual payload will be bit smaller depending on the
- * netlink/generic netlink attributes and headers).
- *
- * RECEPTION OF MESSAGES
- *
- * When a message is received from user space, it is passed verbatim
- * to the driver calling wimax_dev->op_msg_from_user(). The return
- * value from this function is passed back to user space as an ack
- * over the generic netlink protocol.
- *
- * The stack doesn't do any processing or interpretation of these
- * messages.
- *
- * SENDING MESSAGES
- *
- * Messages can be sent with wimax_msg().
- *
- * If the message delivery needs to happen on a different context to
- * that of its creation, wimax_msg_alloc() can be used to get a
- * pointer to the message that can be delivered later on with
- * wimax_msg_send().
- *
- * ROADMAP
- *
- * wimax_gnl_doit_msg_from_user() Process a message from user space
- * wimax_dev_get_by_genl_info()
- * wimax_dev->op_msg_from_user() Delivery of message to the driver
- *
- * wimax_msg() Send a message to user space
- * wimax_msg_alloc()
- * wimax_msg_send()
- */
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <net/genetlink.h>
-#include <linux/netdevice.h>
-#include <linux/wimax.h>
-#include <linux/security.h>
-#include <linux/export.h>
-#include "wimax-internal.h"
-
-
-#define D_SUBMODULE op_msg
-#include "debug-levels.h"
-
-
-/**
- * wimax_msg_alloc - Create a new skb for sending a message to userspace
- *
- * @wimax_dev: WiMAX device descriptor
- * @pipe_name: "named pipe" the message will be sent to
- * @msg: pointer to the message data to send
- * @size: size of the message to send (in bytes), including the header.
- * @gfp_flags: flags for memory allocation.
- *
- * Returns: %0 if ok, negative errno code on error
- *
- * Description:
- *
- * Allocates an skb that will contain the message to send to user
- * space over the messaging pipe and initializes it, copying the
- * payload.
- *
- * Once this call is done, you can deliver it with
- * wimax_msg_send().
- *
- * IMPORTANT:
- *
- * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
- * wimax_msg_send() depends on skb->data being placed at the
- * beginning of the user message.
- *
- * Unlike other WiMAX stack calls, this call can be used way early,
- * even before wimax_dev_add() is called, as long as the
- * wimax_dev->net_dev pointer is set to point to a proper
- * net_dev. This is so that drivers can use it early in case they need
- * to send stuff around or communicate with user space.
- */
-struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
- const char *pipe_name,
- const void *msg, size_t size,
- gfp_t gfp_flags)
-{
- int result;
- struct device *dev = wimax_dev_to_dev(wimax_dev);
- size_t msg_size;
- void *genl_msg;
- struct sk_buff *skb;
-
- msg_size = nla_total_size(size)
- + nla_total_size(sizeof(u32))
- + (pipe_name ? nla_total_size(strlen(pipe_name)) : 0);
- result = -ENOMEM;
- skb = genlmsg_new(msg_size, gfp_flags);
- if (skb == NULL)
- goto error_new;
- genl_msg = genlmsg_put(skb, 0, 0, &wimax_gnl_family,
- 0, WIMAX_GNL_OP_MSG_TO_USER);
- if (genl_msg == NULL) {
- dev_err(dev, "no memory to create generic netlink message\n");
- goto error_genlmsg_put;
- }
- result = nla_put_u32(skb, WIMAX_GNL_MSG_IFIDX,
- wimax_dev->net_dev->ifindex);
- if (result < 0) {
- dev_err(dev, "no memory to add ifindex attribute\n");
- goto error_nla_put;
- }
- if (pipe_name) {
- result = nla_put_string(skb, WIMAX_GNL_MSG_PIPE_NAME,
- pipe_name);
- if (result < 0) {
- dev_err(dev, "no memory to add pipe_name attribute\n");
- goto error_nla_put;
- }
- }
- result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
- if (result < 0) {
- dev_err(dev, "no memory to add payload (msg %p size %zu) in "
- "attribute: %d\n", msg, size, result);
- goto error_nla_put;
- }
- genlmsg_end(skb, genl_msg);
- return skb;
-
-error_nla_put:
-error_genlmsg_put:
-error_new:
- nlmsg_free(skb);
- return ERR_PTR(result);
-}
-EXPORT_SYMBOL_GPL(wimax_msg_alloc);
-
-
-/**
- * wimax_msg_data_len - Return a pointer and size of a message's payload
- *
- * @msg: Pointer to a message created with wimax_msg_alloc()
- * @size: Pointer to where to store the message's size
- *
- * Returns the pointer to the message data.
- */
-const void *wimax_msg_data_len(struct sk_buff *msg, size_t *size)
-{
- struct nlmsghdr *nlh = (void *) msg->head;
- struct nlattr *nla;
-
- nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
- WIMAX_GNL_MSG_DATA);
- if (nla == NULL) {
- pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
- return NULL;
- }
- *size = nla_len(nla);
- return nla_data(nla);
-}
-EXPORT_SYMBOL_GPL(wimax_msg_data_len);
-
-
-/**
- * wimax_msg_data - Return a pointer to a message's payload
- *
- * @msg: Pointer to a message created with wimax_msg_alloc()
- */
-const void *wimax_msg_data(struct sk_buff *msg)
-{
- struct nlmsghdr *nlh = (void *) msg->head;
- struct nlattr *nla;
-
- nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
- WIMAX_GNL_MSG_DATA);
- if (nla == NULL) {
- pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
- return NULL;
- }
- return nla_data(nla);
-}
-EXPORT_SYMBOL_GPL(wimax_msg_data);
-
-
-/**
- * wimax_msg_len - Return a message's payload length
- *
- * @msg: Pointer to a message created with wimax_msg_alloc()
- */
-ssize_t wimax_msg_len(struct sk_buff *msg)
-{
- struct nlmsghdr *nlh = (void *) msg->head;
- struct nlattr *nla;
-
- nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
- WIMAX_GNL_MSG_DATA);
- if (nla == NULL) {
- pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
- return -EINVAL;
- }
- return nla_len(nla);
-}
-EXPORT_SYMBOL_GPL(wimax_msg_len);
-
-
-/**
- * wimax_msg_send - Send a pre-allocated message to user space
- *
- * @wimax_dev: WiMAX device descriptor
- *
- * @skb: &struct sk_buff returned by wimax_msg_alloc(). Note the
- * ownership of @skb is transferred to this function.
- *
- * Returns: 0 if ok, < 0 errno code on error
- *
- * Description:
- *
- * Sends a free-form message that was preallocated with
- * wimax_msg_alloc() and filled up.
- *
- * Assumes that once you pass an skb to this function for sending, it
- * owns it and will release it when done (on success).
- *
- * IMPORTANT:
- *
- * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
- * wimax_msg_send() depends on skb->data being placed at the
- * beginning of the user message.
- *
- * Unlike other WiMAX stack calls, this call can be used way early,
- * even before wimax_dev_add() is called, as long as the
- * wimax_dev->net_dev pointer is set to point to a proper
- * net_dev. This is so that drivers can use it early in case they need
- * to send stuff around or communicate with user space.
- */
-int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
-{
- struct device *dev = wimax_dev_to_dev(wimax_dev);
- void *msg = skb->data;
- size_t size = skb->len;
- might_sleep();
-
- d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size);
- d_dump(2, dev, msg, size);
- genlmsg_multicast(&wimax_gnl_family, skb, 0, 0, GFP_KERNEL);
- d_printf(1, dev, "CTX: genl multicast done\n");
- return 0;
-}
-EXPORT_SYMBOL_GPL(wimax_msg_send);
-
-
-/**
- * wimax_msg - Send a message to user space
- *
- * @wimax_dev: WiMAX device descriptor (properly referenced)
- * @pipe_name: "named pipe" the message will be sent to
- * @buf: pointer to the message to send.
- * @size: size of the buffer pointed to by @buf (in bytes).
- * @gfp_flags: flags for memory allocation.
- *
- * Returns: %0 if ok, negative errno code on error.
- *
- * Description:
- *
- * Sends a free-form message to user space on the device @wimax_dev.
- *
- * NOTES:
- *
- * Once the @skb is given to this function, who will own it and will
- * release it when done (unless it returns error).
- */
-int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
- const void *buf, size_t size, gfp_t gfp_flags)
-{
- int result = -ENOMEM;
- struct sk_buff *skb;
-
- skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
- if (IS_ERR(skb))
- result = PTR_ERR(skb);
- else
- result = wimax_msg_send(wimax_dev, skb);
- return result;
-}
-EXPORT_SYMBOL_GPL(wimax_msg);
-
-/*
- * Relays a message from user space to the driver
- *
- * The skb is passed to the driver-specific function with the netlink
- * and generic netlink headers already stripped.
- *
- * This call will block while handling/relaying the message.
- */
-int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
-{
- int result, ifindex;
- struct wimax_dev *wimax_dev;
- struct device *dev;
- struct nlmsghdr *nlh = info->nlhdr;
- char *pipe_name;
- void *msg_buf;
- size_t msg_len;
-
- might_sleep();
- d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
- result = -ENODEV;
- if (info->attrs[WIMAX_GNL_MSG_IFIDX] == NULL) {
- pr_err("WIMAX_GNL_MSG_FROM_USER: can't find IFIDX attribute\n");
- goto error_no_wimax_dev;
- }
- ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]);
- wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
- if (wimax_dev == NULL)
- goto error_no_wimax_dev;
- dev = wimax_dev_to_dev(wimax_dev);
-
- /* Unpack arguments */
- result = -EINVAL;
- if (info->attrs[WIMAX_GNL_MSG_DATA] == NULL) {
- dev_err(dev, "WIMAX_GNL_MSG_FROM_USER: can't find MSG_DATA "
- "attribute\n");
- goto error_no_data;
- }
- msg_buf = nla_data(info->attrs[WIMAX_GNL_MSG_DATA]);
- msg_len = nla_len(info->attrs[WIMAX_GNL_MSG_DATA]);
-
- if (info->attrs[WIMAX_GNL_MSG_PIPE_NAME] == NULL)
- pipe_name = NULL;
- else {
- struct nlattr *attr = info->attrs[WIMAX_GNL_MSG_PIPE_NAME];
- size_t attr_len = nla_len(attr);
- /* libnl-1.1 does not yet support NLA_NUL_STRING */
- result = -ENOMEM;
- pipe_name = kstrndup(nla_data(attr), attr_len + 1, GFP_KERNEL);
- if (pipe_name == NULL)
- goto error_alloc;
- pipe_name[attr_len] = 0;
- }
- mutex_lock(&wimax_dev->mutex);
- result = wimax_dev_is_ready(wimax_dev);
- if (result == -ENOMEDIUM)
- result = 0;
- if (result < 0)
- goto error_not_ready;
- result = -ENOSYS;
- if (wimax_dev->op_msg_from_user == NULL)
- goto error_noop;
-
- d_printf(1, dev,
- "CRX: nlmsghdr len %u type %u flags 0x%04x seq 0x%x pid %u\n",
- nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_flags,
- nlh->nlmsg_seq, nlh->nlmsg_pid);
- d_printf(1, dev, "CRX: wimax message %zu bytes\n", msg_len);
- d_dump(2, dev, msg_buf, msg_len);
-
- result = wimax_dev->op_msg_from_user(wimax_dev, pipe_name,
- msg_buf, msg_len, info);
-error_noop:
-error_not_ready:
- mutex_unlock(&wimax_dev->mutex);
-error_alloc:
- kfree(pipe_name);
-error_no_data:
- dev_put(wimax_dev->net_dev);
-error_no_wimax_dev:
- d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
- return result;
-}
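
On the receiving side, a hypothetical driver callback could look as
follows; the signature mirrors the op_msg_from_user invocation above
(called with wimax_dev->mutex held, headers already stripped, may sleep):

	static int my_op_msg_from_user(struct wimax_dev *wimax_dev,
				       const char *pipe_name,
				       const void *buf, size_t size,
				       const struct genl_info *info)
	{
		if (size < 4)	/* illustrative sanity check */
			return -EINVAL;
		/* ... decode and act on the 'size' bytes at 'buf' ... */
		return 0;
	}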
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
deleted file mode 100644
index 9899b2e56721..000000000000
--- a/net/wimax/op-reset.c
+++ /dev/null
@@ -1,108 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Linux WiMAX
- * Implement and export a method for resetting a WiMAX device
- *
- * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This implements a simple synchronous call to reset a WiMAX device.
- *
- * Resets aim to be warm, keeping the device handles active; when
- * that fails, the code falls back to a cold reset (which will
- * disconnect and reconnect the device).
- */
-
-#include <net/wimax.h>
-#include <net/genetlink.h>
-#include <linux/wimax.h>
-#include <linux/security.h>
-#include <linux/export.h>
-#include "wimax-internal.h"
-
-#define D_SUBMODULE op_reset
-#include "debug-levels.h"
-
-
-/**
- * wimax_reset - Reset a WiMAX device
- *
- * @wimax_dev: WiMAX device descriptor
- *
- * Returns:
- *
- * %0 if ok and a warm reset was done (the device still exists in
- * the system).
- *
- * -%ENODEV if a cold/bus reset had to be done (device has
- * disconnected and reconnected, so current handle is not valid
- * any more).
- *
- * -%EINVAL if the device is not even registered.
- *
- * Any other negative error code shall be considered as
- * non-recoverable.
- *
- * Description:
- *
- * Called when wanting to reset the device for any reason. Device is
- * taken back to power on status.
- *
- * This call blocks; on successful return, the device has completed the
- * reset process and is ready to operate.
- */
-int wimax_reset(struct wimax_dev *wimax_dev)
-{
- int result = -EINVAL;
- struct device *dev = wimax_dev_to_dev(wimax_dev);
- enum wimax_st state;
-
- might_sleep();
- d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
- mutex_lock(&wimax_dev->mutex);
- dev_hold(wimax_dev->net_dev);
- state = wimax_dev->state;
- mutex_unlock(&wimax_dev->mutex);
-
- if (state >= WIMAX_ST_DOWN) {
- mutex_lock(&wimax_dev->mutex_reset);
- result = wimax_dev->op_reset(wimax_dev);
- mutex_unlock(&wimax_dev->mutex_reset);
- }
- dev_put(wimax_dev->net_dev);
-
- d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
- return result;
-}
-EXPORT_SYMBOL(wimax_reset);
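
Callers are expected to tell the warm and cold outcomes apart; a minimal
sketch:

	result = wimax_reset(wimax_dev);
	if (result == -ENODEV) {
		/* cold/bus reset: the device dropped off the bus and the
		 * current handles are stale; wait for re-enumeration */
	} else if (result < 0) {
		/* any other negative code: non-recoverable */
	}
	/* result == 0: warm reset, handles remain valid */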
-
-
-/*
- * Exporting to user space over generic netlink
- *
- * Parse the reset command from user space, return error code.
- *
- * No attributes.
- */
-int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
-{
- int result, ifindex;
- struct wimax_dev *wimax_dev;
-
- d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
- result = -ENODEV;
- if (info->attrs[WIMAX_GNL_RESET_IFIDX] == NULL) {
- pr_err("WIMAX_GNL_OP_RFKILL: can't find IFIDX attribute\n");
- goto error_no_wimax_dev;
- }
- ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RESET_IFIDX]);
- wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
- if (wimax_dev == NULL)
- goto error_no_wimax_dev;
- /* Execute the operation and send the result back to user space */
- result = wimax_reset(wimax_dev);
- dev_put(wimax_dev->net_dev);
-error_no_wimax_dev:
- d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
- return result;
-}
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
deleted file mode 100644
index 248d10b60b05..000000000000
--- a/net/wimax/op-rfkill.c
+++ /dev/null
@@ -1,431 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Linux WiMAX
- * RF-kill framework integration
- *
- * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This integrates into the Linux kernel rfkill subsystem so that the
- * drivers just have to do the bare minimum of work: providing a
- * method to set the software RF-Kill switch and to report changes in
- * the software and hardware switch status.
- *
- * A non-polled generic rfkill device is embedded into the WiMAX
- * subsystem's representation of a device.
- *
- * FIXME: Need polled support? Let drivers provide a poll routine
- * and hand it to rfkill ops then?
- *
- * All device drivers have to do is after wimax_dev_init(), call
- * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update
- * initial state and then every time it changes. See wimax.h:struct
- * wimax_dev for more information.
- *
- * ROADMAP
- *
- * wimax_gnl_doit_rfkill() User space calling wimax_rfkill()
- * wimax_rfkill() Kernel calling wimax_rfkill()
- * __wimax_rf_toggle_radio()
- *
- * wimax_rfkill_set_radio_block() RF-Kill subsystem calling
- * __wimax_rf_toggle_radio()
- *
- * __wimax_rf_toggle_radio()
- * wimax_dev->op_rfkill_sw_toggle() Driver backend
- * __wimax_state_change()
- *
- * wimax_report_rfkill_sw() Driver reports state change
- * __wimax_state_change()
- *
- * wimax_report_rfkill_hw() Driver reports state change
- * __wimax_state_change()
- *
- * wimax_rfkill_add() Initialize/shutdown rfkill support
- * wimax_rfkill_rm() [called by wimax_dev_add/rm()]
- */
-
-#include <net/wimax.h>
-#include <net/genetlink.h>
-#include <linux/wimax.h>
-#include <linux/security.h>
-#include <linux/rfkill.h>
-#include <linux/export.h>
-#include "wimax-internal.h"
-
-#define D_SUBMODULE op_rfkill
-#include "debug-levels.h"
-
-/**
- * wimax_report_rfkill_hw - Reports changes in the hardware RF switch
- *
- * @wimax_dev: WiMAX device descriptor
- *
- * @state: New state of the RF Kill switch. %WIMAX_RF_ON radio on,
- * %WIMAX_RF_OFF radio off.
- *
- * When the device detects a change in the state of the hardware RF
- * switch, it must call this function to let the WiMAX kernel stack
- * know that the state has changed so it can be properly propagated.
- *
- * The WiMAX stack caches the state (so the driver doesn't need to).
- * Also, as the change is propagated, it will come back as a request
- * to change the software state so it mirrors the hardware state.
- *
- * If the device doesn't have a hardware kill switch, just report
- * it on initialization as always on (%WIMAX_RF_ON, radio on).
- */
-void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
- enum wimax_rf_state state)
-{
- int result;
- struct device *dev = wimax_dev_to_dev(wimax_dev);
- enum wimax_st wimax_state;
-
- d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
- BUG_ON(state == WIMAX_RF_QUERY);
- BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
-
- mutex_lock(&wimax_dev->mutex);
- result = wimax_dev_is_ready(wimax_dev);
- if (result < 0)
- goto error_not_ready;
-
- if (state != wimax_dev->rf_hw) {
- wimax_dev->rf_hw = state;
- if (wimax_dev->rf_hw == WIMAX_RF_ON &&
- wimax_dev->rf_sw == WIMAX_RF_ON)
- wimax_state = WIMAX_ST_READY;
- else
- wimax_state = WIMAX_ST_RADIO_OFF;
-
- result = rfkill_set_hw_state(wimax_dev->rfkill,
- state == WIMAX_RF_OFF);
-
- __wimax_state_change(wimax_dev, wimax_state);
- }
-error_not_ready:
- mutex_unlock(&wimax_dev->mutex);
- d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
- wimax_dev, state, result);
-}
-EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
-
-
-/**
- * wimax_report_rfkill_sw - Reports changes in the software RF switch
- *
- * @wimax_dev: WiMAX device descriptor
- *
- * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on,
- * %WIMAX_RF_OFF radio off.
- *
- * Reports changes in the software RF switch state to the WiMAX stack.
- *
- * The main use is during initialization, so the driver can query the
- * device for its current software radio kill switch state and feed it
- * to the system.
- *
- * As a side note, the device is not expected to change the software
- * state by itself; in practice it can happen, though, as the device
- * might decide to switch (in software) the radio off for different
- * reasons.
- */
-void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
- enum wimax_rf_state state)
-{
- int result;
- struct device *dev = wimax_dev_to_dev(wimax_dev);
- enum wimax_st wimax_state;
-
- d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
- BUG_ON(state == WIMAX_RF_QUERY);
- BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
-
- mutex_lock(&wimax_dev->mutex);
- result = wimax_dev_is_ready(wimax_dev);
- if (result < 0)
- goto error_not_ready;
-
- if (state != wimax_dev->rf_sw) {
- wimax_dev->rf_sw = state;
- if (wimax_dev->rf_hw == WIMAX_RF_ON &&
- wimax_dev->rf_sw == WIMAX_RF_ON)
- wimax_state = WIMAX_ST_READY;
- else
- wimax_state = WIMAX_ST_RADIO_OFF;
- __wimax_state_change(wimax_dev, wimax_state);
- rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF);
- }
-error_not_ready:
- mutex_unlock(&wimax_dev->mutex);
- d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
- wimax_dev, state, result);
-}
-EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw);
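
Per the ROADMAP above, a driver seeds both switch states once it can query
the hardware; a hypothetical init fragment ('hw_blocked' and 'sw_blocked'
are illustrative driver-side booleans):

	wimax_report_rfkill_hw(wimax_dev,
			       hw_blocked ? WIMAX_RF_OFF : WIMAX_RF_ON);
	wimax_report_rfkill_sw(wimax_dev,
			       sw_blocked ? WIMAX_RF_OFF : WIMAX_RF_ON);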
-
-
-/*
- * Callback for the RF Kill toggle operation
- *
- * This function is called by:
- *
- * - The rfkill subsystem when the RF-Kill key is pressed in the
- * hardware and the driver notifies through
- * wimax_report_rfkill_hw(). The rfkill subsystem ends up calling back
- * here so the software RF Kill switch state is changed to reflect
- * the hardware switch state.
- *
- * - When the user sets the state through sysfs' rfkill/state file
- *
- * - When the user calls wimax_rfkill().
- *
- * This call blocks!
- *
- * WARNING! When we call rfkill_unregister(), this will be called with
- * state 0!
- *
- * WARNING: wimax_dev must be locked
- */
-static
-int __wimax_rf_toggle_radio(struct wimax_dev *wimax_dev,
- enum wimax_rf_state state)
-{
- int result = 0;
- struct device *dev = wimax_dev_to_dev(wimax_dev);
- enum wimax_st wimax_state;
-
- might_sleep();
- d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
- if (wimax_dev->rf_sw == state)
- goto out_no_change;
- if (wimax_dev->op_rfkill_sw_toggle != NULL)
- result = wimax_dev->op_rfkill_sw_toggle(wimax_dev, state);
- else if (state == WIMAX_RF_OFF) /* No op? can't turn off */
- result = -ENXIO;
- else /* No op? can turn on */
- result = 0; /* should never happen tho */
- if (result >= 0) {
- result = 0;
- wimax_dev->rf_sw = state;
- wimax_state = state == WIMAX_RF_ON ?
- WIMAX_ST_READY : WIMAX_ST_RADIO_OFF;
- __wimax_state_change(wimax_dev, wimax_state);
- }
-out_no_change:
- d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
- wimax_dev, state, result);
- return result;
-}
-
-
-/*
- * Translate from rfkill state to wimax state
- *
- * NOTE: Special state handling rules here
- *
- * Just pretend the call didn't happen if we are in a state where
- * we know for sure it cannot be handled (WIMAX_ST_DOWN or
- * __WIMAX_ST_QUIESCING). The rfkill subsystem needs this in order to
- * register and unregister, as it runs this path then.
- *
- * NOTE: This call will block until the operation is completed.
- */
-static int wimax_rfkill_set_radio_block(void *data, bool blocked)
-{
- int result;
- struct wimax_dev *wimax_dev = data;
- struct device *dev = wimax_dev_to_dev(wimax_dev);
- enum wimax_rf_state rf_state;
-
- d_fnstart(3, dev, "(wimax_dev %p blocked %u)\n", wimax_dev, blocked);
- rf_state = WIMAX_RF_ON;
- if (blocked)
- rf_state = WIMAX_RF_OFF;
- mutex_lock(&wimax_dev->mutex);
- if (wimax_dev->state <= __WIMAX_ST_QUIESCING)
- result = 0;
- else
- result = __wimax_rf_toggle_radio(wimax_dev, rf_state);
- mutex_unlock(&wimax_dev->mutex);
- d_fnend(3, dev, "(wimax_dev %p blocked %u) = %d\n",
- wimax_dev, blocked, result);
- return result;
-}
-
-static const struct rfkill_ops wimax_rfkill_ops = {
- .set_block = wimax_rfkill_set_radio_block,
-};
-
-/**
- * wimax_rfkill - Set the software RF switch state for a WiMAX device
- *
- * @wimax_dev: WiMAX device descriptor
- *
- * @state: New RF state.
- *
- * Returns:
- *
- * >= 0 toggle state if ok, < 0 errno code on error. The toggle state
- * is returned as a bitmap, bit 0 being the hardware RF state, bit 1
- * the software RF state.
- *
- * Each bit carries the corresponding &enum wimax_rf_state value:
- * %0 is %WIMAX_RF_OFF (radio off, rfkill enabled), %1 is %WIMAX_RF_ON
- * (radio on, rfkill disabled).
- *
- * Description:
- *
- * Called when the caller wants to request the WiMAX radio to be
- * switched on (%WIMAX_RF_ON) or off (%WIMAX_RF_OFF). With
- * %WIMAX_RF_QUERY, just the current state is returned.
- *
- * NOTE:
- *
- * This call will block until the operation is complete.
- */
-int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state)
-{
- int result;
- struct device *dev = wimax_dev_to_dev(wimax_dev);
-
- d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
- mutex_lock(&wimax_dev->mutex);
- result = wimax_dev_is_ready(wimax_dev);
- if (result < 0) {
- /* While initializing, < 1.4.3 wimax-tools versions use
- * this call to check if the device is a valid WiMAX
- * device; so we allow it to proceed always,
- * considering the radios are all off. */
- if (result == -ENOMEDIUM && state == WIMAX_RF_QUERY)
- result = WIMAX_RF_OFF << 1 | WIMAX_RF_OFF;
- goto error_not_ready;
- }
- switch (state) {
- case WIMAX_RF_ON:
- case WIMAX_RF_OFF:
- result = __wimax_rf_toggle_radio(wimax_dev, state);
- if (result < 0)
- goto error;
- rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF);
- break;
- case WIMAX_RF_QUERY:
- break;
- default:
- result = -EINVAL;
- goto error;
- }
- result = wimax_dev->rf_sw << 1 | wimax_dev->rf_hw;
-error:
-error_not_ready:
- mutex_unlock(&wimax_dev->mutex);
- d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
- wimax_dev, state, result);
- return result;
-}
-EXPORT_SYMBOL(wimax_rfkill);
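
Decoding the returned bitmap per the layout documented above, each bit
carrying an enum wimax_rf_state value:

	bool hw_on, sw_on;
	int result;

	result = wimax_rfkill(wimax_dev, WIMAX_RF_QUERY);
	if (result < 0)
		return result;
	hw_on = (result & 0x1) == WIMAX_RF_ON;		/* bit 0: hardware */
	sw_on = ((result >> 1) & 0x1) == WIMAX_RF_ON;	/* bit 1: software */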
-
-
-/*
- * Register a new WiMAX device's RF Kill support
- *
- * WARNING: wimax_dev->mutex must be unlocked
- */
-int wimax_rfkill_add(struct wimax_dev *wimax_dev)
-{
- int result;
- struct rfkill *rfkill;
- struct device *dev = wimax_dev_to_dev(wimax_dev);
-
- d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
- /* Initialize RF Kill */
- result = -ENOMEM;
- rfkill = rfkill_alloc(wimax_dev->name, dev, RFKILL_TYPE_WIMAX,
- &wimax_rfkill_ops, wimax_dev);
- if (rfkill == NULL)
- goto error_rfkill_allocate;
-
- d_printf(1, dev, "rfkill %p\n", rfkill);
-
- wimax_dev->rfkill = rfkill;
-
- rfkill_init_sw_state(rfkill, 1);
- result = rfkill_register(wimax_dev->rfkill);
- if (result < 0)
- goto error_rfkill_register;
-
- /* If there is no SW toggle op, SW RFKill is always on */
- if (wimax_dev->op_rfkill_sw_toggle == NULL)
- wimax_dev->rf_sw = WIMAX_RF_ON;
-
- d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev);
- return 0;
-
-error_rfkill_register:
- rfkill_destroy(wimax_dev->rfkill);
-error_rfkill_allocate:
- d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
- return result;
-}
-
-
-/*
- * Deregister a WiMAX device's RF Kill support
- *
- * Ick, we can't call rfkill_free() after rfkill_unregister()...oh
- * well.
- *
- * WARNING: wimax_dev->mutex must be unlocked
- */
-void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
-{
- struct device *dev = wimax_dev_to_dev(wimax_dev);
- d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
- rfkill_unregister(wimax_dev->rfkill);
- rfkill_destroy(wimax_dev->rfkill);
- d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev);
-}
-
-
-/*
- * Exporting to user space over generic netlink
- *
- * Parse the rfkill command from user space, return a combination
- * value that describe the states of the different toggles.
- *
- * Only one attribute: the new state requested (on, off or no change,
- * just query).
- */
-
-int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info)
-{
- int result, ifindex;
- struct wimax_dev *wimax_dev;
- struct device *dev;
- enum wimax_rf_state new_state;
-
- d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
- result = -ENODEV;
- if (info->attrs[WIMAX_GNL_RFKILL_IFIDX] == NULL) {
- pr_err("WIMAX_GNL_OP_RFKILL: can't find IFIDX attribute\n");
- goto error_no_wimax_dev;
- }
- ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_IFIDX]);
- wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
- if (wimax_dev == NULL)
- goto error_no_wimax_dev;
- dev = wimax_dev_to_dev(wimax_dev);
- result = -EINVAL;
- if (info->attrs[WIMAX_GNL_RFKILL_STATE] == NULL) {
- dev_err(dev, "WIMAX_GNL_RFKILL: can't find RFKILL_STATE "
- "attribute\n");
- goto error_no_pid;
- }
- new_state = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_STATE]);
-
- /* Execute the operation and send the result back to user space */
- result = wimax_rfkill(wimax_dev, new_state);
-error_no_pid:
- dev_put(wimax_dev->net_dev);
-error_no_wimax_dev:
- d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
- return result;
-}
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
deleted file mode 100644
index 5bc712de1563..000000000000
--- a/net/wimax/op-state-get.c
+++ /dev/null
@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Linux WiMAX
- * Implement and export a method for getting a WiMAX device current state
- *
- * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * Based on previous WiMAX core work by:
- * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- */
-
-#include <net/wimax.h>
-#include <net/genetlink.h>
-#include <linux/wimax.h>
-#include <linux/security.h>
-#include "wimax-internal.h"
-
-#define D_SUBMODULE op_state_get
-#include "debug-levels.h"
-
-
-/*
- * Exporting to user space over generic netlink
- *
- * Parse the state get command from user space, return a combination
- * value that describe the current state.
- *
- * No attributes.
- */
-int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
-{
- int result, ifindex;
- struct wimax_dev *wimax_dev;
-
- d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
- result = -ENODEV;
- if (info->attrs[WIMAX_GNL_STGET_IFIDX] == NULL) {
- pr_err("WIMAX_GNL_OP_STATE_GET: can't find IFIDX attribute\n");
- goto error_no_wimax_dev;
- }
- ifindex = nla_get_u32(info->attrs[WIMAX_GNL_STGET_IFIDX]);
- wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
- if (wimax_dev == NULL)
- goto error_no_wimax_dev;
- /* Execute the operation and send the result back to user space */
- result = wimax_state_get(wimax_dev);
- dev_put(wimax_dev->net_dev);
-error_no_wimax_dev:
- d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
- return result;
-}
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
deleted file mode 100644
index b6dd9d956ed8..000000000000
--- a/net/wimax/stack.c
+++ /dev/null
@@ -1,609 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Linux WiMAX
- * Initialization, addition and removal of wimax devices
- *
- * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This implements:
- *
- * - basic life cycle of 'struct wimax_dev' [wimax_dev_*()]; on
- * addition/registration initialize all subfields and allocate
- * generic netlink resources for user space communication. On
- * removal/unregistration, undo all that.
- *
- * - device state machine [wimax_state_change()] and support to send
- * reports to user space when the state changes
- * [wimax_gnl_re_state_change*()].
- *
- * See include/net/wimax.h for rationales and design.
- *
- * ROADMAP
- *
- * [__]wimax_state_change() Called by drivers to update device's state
- * wimax_gnl_re_state_change_alloc()
- * wimax_gnl_re_state_change_send()
- *
- * wimax_dev_init() Init a device
- * wimax_dev_add() Register
- * wimax_rfkill_add()
- * wimax_gnl_add() Register all the generic netlink resources.
- * wimax_id_table_add()
- * wimax_dev_rm() Unregister
- * wimax_id_table_rm()
- * wimax_gnl_rm()
- * wimax_rfkill_rm()
- */
-#include <linux/device.h>
-#include <linux/gfp.h>
-#include <net/genetlink.h>
-#include <linux/netdevice.h>
-#include <linux/wimax.h>
-#include <linux/module.h>
-#include "wimax-internal.h"
-
-
-#define D_SUBMODULE stack
-#include "debug-levels.h"
-
-static char wimax_debug_params[128];
-module_param_string(debug, wimax_debug_params, sizeof(wimax_debug_params),
- 0644);
-MODULE_PARM_DESC(debug,
- "String of space-separated NAME:VALUE pairs, where NAMEs "
- "are the different debug submodules and VALUE are the "
- "initial debug value to set.");
-
-/*
- * Authoritative source for the RE_STATE_CHANGE attribute policy
- *
- * We don't really use it here, but /me likes to keep the definition
- * close to where the data is generated.
- */
-/*
-static const struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
- [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
- [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
-};
-*/
-
-
-/*
- * Allocate a Report State Change message
- *
- * @header: save it, you need it for _send()
- *
- * Creates and fills a basic state change message; different code
- * paths can then add more attributes to the message as needed.
- *
- * Use wimax_gnl_re_state_change_send() to send the returned skb.
- *
- * Returns: skb with the genl message if ok, IS_ERR() ptr on error
- * with an errno code.
- */
-static
-struct sk_buff *wimax_gnl_re_state_change_alloc(
- struct wimax_dev *wimax_dev,
- enum wimax_st new_state, enum wimax_st old_state,
- void **header)
-{
- int result;
- struct device *dev = wimax_dev_to_dev(wimax_dev);
- void *data;
- struct sk_buff *report_skb;
-
- d_fnstart(3, dev, "(wimax_dev %p new_state %u old_state %u)\n",
- wimax_dev, new_state, old_state);
- result = -ENOMEM;
- report_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (report_skb == NULL) {
- dev_err(dev, "RE_STCH: can't create message\n");
- goto error_new;
- }
- /* FIXME: sending a group ID as the seq is wrong */
- data = genlmsg_put(report_skb, 0, wimax_gnl_family.mcgrp_offset,
- &wimax_gnl_family, 0, WIMAX_GNL_RE_STATE_CHANGE);
- if (data == NULL) {
- dev_err(dev, "RE_STCH: can't put data into message\n");
- goto error_put;
- }
- *header = data;
-
- result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_OLD, old_state);
- if (result < 0) {
- dev_err(dev, "RE_STCH: Error adding OLD attr: %d\n", result);
- goto error_put;
- }
- result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_NEW, new_state);
- if (result < 0) {
- dev_err(dev, "RE_STCH: Error adding NEW attr: %d\n", result);
- goto error_put;
- }
- result = nla_put_u32(report_skb, WIMAX_GNL_STCH_IFIDX,
- wimax_dev->net_dev->ifindex);
- if (result < 0) {
- dev_err(dev, "RE_STCH: Error adding IFINDEX attribute\n");
- goto error_put;
- }
- d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %p\n",
- wimax_dev, new_state, old_state, report_skb);
- return report_skb;
-
-error_put:
- nlmsg_free(report_skb);
-error_new:
- d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %d\n",
- wimax_dev, new_state, old_state, result);
- return ERR_PTR(result);
-}
-
-
-/*
- * Send a Report State Change message (as created with _alloc).
- *
- * @report_skb: as returned by wimax_gnl_re_state_change_alloc()
- * @header: as returned by wimax_gnl_re_state_change_alloc()
- *
- * Returns: 0 if ok, < 0 errno code on error.
- *
- * If the message is NULL, pretend it didn't happen.
- */
-static
-int wimax_gnl_re_state_change_send(
- struct wimax_dev *wimax_dev, struct sk_buff *report_skb,
- void *header)
-{
- int result = 0;
- struct device *dev = wimax_dev_to_dev(wimax_dev);
- d_fnstart(3, dev, "(wimax_dev %p report_skb %p)\n",
- wimax_dev, report_skb);
- if (report_skb == NULL) {
- result = -ENOMEM;
- goto out;
- }
- genlmsg_end(report_skb, header);
- genlmsg_multicast(&wimax_gnl_family, report_skb, 0, 0, GFP_KERNEL);
-out:
- d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n",
- wimax_dev, report_skb, result);
- return result;
-}
-
-
-static
-void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
- unsigned int allowed_states_bm)
-{
- if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
- pr_err("SW BUG! Forbidden state change %u -> %u\n",
- old_state, new_state);
- }
-}
-
-
-/*
- * Set the current state of a WiMAX device [unlocked version of
- * wimax_state_change()].
- */
-void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
-{
- struct device *dev = wimax_dev_to_dev(wimax_dev);
- enum wimax_st old_state = wimax_dev->state;
- struct sk_buff *stch_skb;
- void *header;
-
- d_fnstart(3, dev, "(wimax_dev %p new_state %u [old %u])\n",
- wimax_dev, new_state, old_state);
-
- if (WARN_ON(new_state >= __WIMAX_ST_INVALID)) {
- dev_err(dev, "SW BUG: requesting invalid state %u\n",
- new_state);
- goto out;
- }
- if (old_state == new_state)
- goto out;
- header = NULL; /* gcc complains? can't grok why */
- stch_skb = wimax_gnl_re_state_change_alloc(
- wimax_dev, new_state, old_state, &header);
-
- /* Verify the state transition and do exit-from-state actions */
- switch (old_state) {
- case __WIMAX_ST_NULL:
- __check_new_state(old_state, new_state,
- 1 << WIMAX_ST_DOWN);
- break;
- case WIMAX_ST_DOWN:
- __check_new_state(old_state, new_state,
- 1 << __WIMAX_ST_QUIESCING
- | 1 << WIMAX_ST_UNINITIALIZED
- | 1 << WIMAX_ST_RADIO_OFF);
- break;
- case __WIMAX_ST_QUIESCING:
- __check_new_state(old_state, new_state, 1 << WIMAX_ST_DOWN);
- break;
- case WIMAX_ST_UNINITIALIZED:
- __check_new_state(old_state, new_state,
- 1 << __WIMAX_ST_QUIESCING
- | 1 << WIMAX_ST_RADIO_OFF);
- break;
- case WIMAX_ST_RADIO_OFF:
- __check_new_state(old_state, new_state,
- 1 << __WIMAX_ST_QUIESCING
- | 1 << WIMAX_ST_READY);
- break;
- case WIMAX_ST_READY:
- __check_new_state(old_state, new_state,
- 1 << __WIMAX_ST_QUIESCING
- | 1 << WIMAX_ST_RADIO_OFF
- | 1 << WIMAX_ST_SCANNING
- | 1 << WIMAX_ST_CONNECTING
- | 1 << WIMAX_ST_CONNECTED);
- break;
- case WIMAX_ST_SCANNING:
- __check_new_state(old_state, new_state,
- 1 << __WIMAX_ST_QUIESCING
- | 1 << WIMAX_ST_RADIO_OFF
- | 1 << WIMAX_ST_READY
- | 1 << WIMAX_ST_CONNECTING
- | 1 << WIMAX_ST_CONNECTED);
- break;
- case WIMAX_ST_CONNECTING:
- __check_new_state(old_state, new_state,
- 1 << __WIMAX_ST_QUIESCING
- | 1 << WIMAX_ST_RADIO_OFF
- | 1 << WIMAX_ST_READY
- | 1 << WIMAX_ST_SCANNING
- | 1 << WIMAX_ST_CONNECTED);
- break;
- case WIMAX_ST_CONNECTED:
- __check_new_state(old_state, new_state,
- 1 << __WIMAX_ST_QUIESCING
- | 1 << WIMAX_ST_RADIO_OFF
- | 1 << WIMAX_ST_READY);
- netif_tx_disable(wimax_dev->net_dev);
- netif_carrier_off(wimax_dev->net_dev);
- break;
- case __WIMAX_ST_INVALID:
- default:
- dev_err(dev, "SW BUG: wimax_dev %p is in unknown state %u\n",
- wimax_dev, wimax_dev->state);
- WARN_ON(1);
- goto out;
- }
-
- /* Execute the actions of entry to the new state */
- switch (new_state) {
- case __WIMAX_ST_NULL:
- dev_err(dev, "SW BUG: wimax_dev %p entering NULL state "
- "from %u\n", wimax_dev, wimax_dev->state);
- WARN_ON(1); /* Nobody can enter this state */
- break;
- case WIMAX_ST_DOWN:
- break;
- case __WIMAX_ST_QUIESCING:
- break;
- case WIMAX_ST_UNINITIALIZED:
- break;
- case WIMAX_ST_RADIO_OFF:
- break;
- case WIMAX_ST_READY:
- break;
- case WIMAX_ST_SCANNING:
- break;
- case WIMAX_ST_CONNECTING:
- break;
- case WIMAX_ST_CONNECTED:
- netif_carrier_on(wimax_dev->net_dev);
- netif_wake_queue(wimax_dev->net_dev);
- break;
- case __WIMAX_ST_INVALID:
- default:
- BUG();
- }
- __wimax_state_set(wimax_dev, new_state);
- if (!IS_ERR(stch_skb))
- wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header);
-out:
- d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n",
- wimax_dev, new_state, old_state);
-}
-
-
-/**
- * wimax_state_change - Set the current state of a WiMAX device
- *
- * @wimax_dev: WiMAX device descriptor (properly referenced)
- * @new_state: New state to switch to
- *
- * This implements the state changes for the wimax devices. It will
- *
- * - verify that the state transition is legal (for now it'll just
- * print a warning if not) according to the table in
- * linux/wimax.h's documentation for 'enum wimax_st'.
- *
- * - perform the actions needed for leaving the current state and
- * whichever are needed for entering the new state.
- *
- * - issue a report to user space indicating the new state (and an
- * optional payload with information about the new state).
- *
- * NOTE: this function takes @wimax_dev->mutex itself, so call it
- * unlocked; use __wimax_state_change() when the mutex is already
- * held.
- */
-void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
-{
- /*
- * A driver cannot take the wimax_dev out of the
- * __WIMAX_ST_NULL state unless by calling wimax_dev_add(). If
- * the wimax_dev's state is still NULL, we ignore any request
- * to change its state because it means it hasn't been yet
- * registered.
- *
- * There is no need to complain about it, as routines that
- * call this might be shared from different code paths that
- * are called before or after wimax_dev_add() has done its
- * job.
- */
- mutex_lock(&wimax_dev->mutex);
- if (wimax_dev->state > __WIMAX_ST_NULL)
- __wimax_state_change(wimax_dev, new_state);
- mutex_unlock(&wimax_dev->mutex);
-}
-EXPORT_SYMBOL_GPL(wimax_state_change);
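
Typical driver usage, e.g. from a connect-complete handler (a sketch; as
noted above, the function takes the mutex itself):

	wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);
	/* ... and later, on link loss ... */
	wimax_state_change(wimax_dev, WIMAX_ST_READY);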
-
-
-/**
- * wimax_state_get() - Return the current state of a WiMAX device
- *
- * @wimax_dev: WiMAX device descriptor
- *
- * Returns: Current state of the device according to its driver.
- */
-enum wimax_st wimax_state_get(struct wimax_dev *wimax_dev)
-{
- enum wimax_st state;
- mutex_lock(&wimax_dev->mutex);
- state = wimax_dev->state;
- mutex_unlock(&wimax_dev->mutex);
- return state;
-}
-EXPORT_SYMBOL_GPL(wimax_state_get);
-
-
-/**
- * wimax_dev_init - initialize a newly allocated instance
- *
- * @wimax_dev: WiMAX device descriptor to initialize.
- *
- * Initializes fields of a freshly allocated @wimax_dev instance. This
- * function assumes that after allocation, the memory occupied by
- * @wimax_dev was zeroed.
- */
-void wimax_dev_init(struct wimax_dev *wimax_dev)
-{
- INIT_LIST_HEAD(&wimax_dev->id_table_node);
- __wimax_state_set(wimax_dev, __WIMAX_ST_NULL);
- mutex_init(&wimax_dev->mutex);
- mutex_init(&wimax_dev->mutex_reset);
-}
-EXPORT_SYMBOL_GPL(wimax_dev_init);
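
A hypothetical driver embeds the descriptor in its (zeroed) netdev private
data and wires up its operations right after initializing it:

	struct my_priv {			/* illustrative */
		struct wimax_dev wimax_dev;
		/* ... driver state ... */
	};

	struct my_priv *priv = netdev_priv(net_dev);	/* zeroed by alloc_netdev() */

	wimax_dev_init(&priv->wimax_dev);
	priv->wimax_dev.op_msg_from_user = my_op_msg_from_user;
	priv->wimax_dev.op_reset = my_op_reset;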
-
-static const struct nla_policy wimax_gnl_policy[WIMAX_GNL_ATTR_MAX + 1] = {
- [WIMAX_GNL_RESET_IFIDX] = { .type = NLA_U32, },
- [WIMAX_GNL_RFKILL_IFIDX] = { .type = NLA_U32, },
- [WIMAX_GNL_RFKILL_STATE] = {
- .type = NLA_U32 /* enum wimax_rf_state */
- },
- [WIMAX_GNL_STGET_IFIDX] = { .type = NLA_U32, },
- [WIMAX_GNL_MSG_IFIDX] = { .type = NLA_U32, },
- [WIMAX_GNL_MSG_DATA] = {
- .type = NLA_UNSPEC, /* libnl doesn't grok BINARY yet */
- },
-};
-
-static const struct genl_small_ops wimax_gnl_ops[] = {
- {
- .cmd = WIMAX_GNL_OP_MSG_FROM_USER,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
- .doit = wimax_gnl_doit_msg_from_user,
- },
- {
- .cmd = WIMAX_GNL_OP_RESET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
- .doit = wimax_gnl_doit_reset,
- },
- {
- .cmd = WIMAX_GNL_OP_RFKILL,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
- .doit = wimax_gnl_doit_rfkill,
- },
- {
- .cmd = WIMAX_GNL_OP_STATE_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
- .doit = wimax_gnl_doit_state_get,
- },
-};
-
-
-static
-size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
- unsigned char *addr, size_t addr_len)
-{
- unsigned int cnt, total;
-
- for (total = cnt = 0; cnt < addr_len; cnt++)
- total += scnprintf(addr_str + total, addr_str_size - total,
- "%02x%c", addr[cnt],
- cnt == addr_len - 1 ? '\0' : ':');
- return total;
-}
-
-
-/**
- * wimax_dev_add - Register a new WiMAX device
- *
- * @wimax_dev: WiMAX device descriptor (as embedded in your @net_dev's
- * priv data). You must have called wimax_dev_init() on it before.
- *
- * @net_dev: net device the @wimax_dev is associated with. The
- * function expects SET_NETDEV_DEV() and register_netdev() were
- * already called on it.
- *
- * Registers the new WiMAX device, sets up the user-kernel control
- * interface (generic netlink) and common WiMAX infrastructure.
- *
- * Note that the parts that allow interaction with user space are
- * set up at the very end, when the rest is in place, as once that
- * happens, the driver might get user space control requests via
- * netlink or from debugfs that might translate into calls into
- * wimax_dev->op_*().
- */
-int wimax_dev_add(struct wimax_dev *wimax_dev, struct net_device *net_dev)
-{
- int result;
- struct device *dev = net_dev->dev.parent;
- char addr_str[32];
-
- d_fnstart(3, dev, "(wimax_dev %p net_dev %p)\n", wimax_dev, net_dev);
-
- /* Do the RFKILL setup before locking, as RFKILL will call
- * into our functions.
- */
- wimax_dev->net_dev = net_dev;
- result = wimax_rfkill_add(wimax_dev);
- if (result < 0)
- goto error_rfkill_add;
-
- /* Set up user-space interaction */
- mutex_lock(&wimax_dev->mutex);
- wimax_id_table_add(wimax_dev);
- wimax_debugfs_add(wimax_dev);
-
- __wimax_state_set(wimax_dev, WIMAX_ST_DOWN);
- mutex_unlock(&wimax_dev->mutex);
-
- wimax_addr_scnprint(addr_str, sizeof(addr_str),
- net_dev->dev_addr, net_dev->addr_len);
- dev_err(dev, "WiMAX interface %s (%s) ready\n",
- net_dev->name, addr_str);
- d_fnend(3, dev, "(wimax_dev %p net_dev %p) = 0\n", wimax_dev, net_dev);
- return 0;
-
-error_rfkill_add:
- d_fnend(3, dev, "(wimax_dev %p net_dev %p) = %d\n",
- wimax_dev, net_dev, result);
- return result;
-}
-EXPORT_SYMBOL_GPL(wimax_dev_add);
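
Continuing the hypothetical probe path, with the ordering the documentation
above requires ('parent' being the bus device):

	SET_NETDEV_DEV(net_dev, parent);
	result = register_netdev(net_dev);
	if (result < 0)
		goto error_register_netdev;
	result = wimax_dev_add(&priv->wimax_dev, net_dev);
	if (result < 0)
		goto error_wimax_dev_add;	/* unwind calls unregister_netdev() */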
-
-
-/**
- * wimax_dev_rm - Unregister an existing WiMAX device
- *
- * @wimax_dev: WiMAX device descriptor
- *
- * Unregisters a WiMAX device previously registered with
- * wimax_dev_add().
- *
- * IMPORTANT! Must call before calling unregister_netdev().
- *
- * After this function returns, you will not get any more user space
- * control requests (via netlink or debugfs) and thus no more calls
- * into wimax_dev->op_*().
- *
- * Reentrancy control is ensured by setting the state to
- * %__WIMAX_ST_QUIESCING. rfkill operations coming through
- * wimax_*rfkill*() will be stopped by the quiescing state; ops coming
- * from the rfkill subsystem will be stopped by the support being
- * removed by wimax_rfkill_rm().
- */
-void wimax_dev_rm(struct wimax_dev *wimax_dev)
-{
- d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
-
- mutex_lock(&wimax_dev->mutex);
- __wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
- wimax_debugfs_rm(wimax_dev);
- wimax_id_table_rm(wimax_dev);
- __wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
- mutex_unlock(&wimax_dev->mutex);
- wimax_rfkill_rm(wimax_dev);
- d_fnend(3, NULL, "(wimax_dev %p) = void\n", wimax_dev);
-}
-EXPORT_SYMBOL_GPL(wimax_dev_rm);
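
And the matching teardown, honoring the "before unregister_netdev()" rule:

	wimax_dev_rm(&priv->wimax_dev);	/* must come first */
	unregister_netdev(net_dev);
	free_netdev(net_dev);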
-
-
-/* Debug framework control of debug levels */
-struct d_level D_LEVEL[] = {
- D_SUBMODULE_DEFINE(debugfs),
- D_SUBMODULE_DEFINE(id_table),
- D_SUBMODULE_DEFINE(op_msg),
- D_SUBMODULE_DEFINE(op_reset),
- D_SUBMODULE_DEFINE(op_rfkill),
- D_SUBMODULE_DEFINE(op_state_get),
- D_SUBMODULE_DEFINE(stack),
-};
-size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
-
-
-static const struct genl_multicast_group wimax_gnl_mcgrps[] = {
- { .name = "msg", },
-};
-
-struct genl_family wimax_gnl_family __ro_after_init = {
- .name = "WiMAX",
- .version = WIMAX_GNL_VERSION,
- .hdrsize = 0,
- .maxattr = WIMAX_GNL_ATTR_MAX,
- .policy = wimax_gnl_policy,
- .module = THIS_MODULE,
- .small_ops = wimax_gnl_ops,
- .n_small_ops = ARRAY_SIZE(wimax_gnl_ops),
- .mcgrps = wimax_gnl_mcgrps,
- .n_mcgrps = ARRAY_SIZE(wimax_gnl_mcgrps),
-};
-
-
-
-/* Initialize the wimax stack */
-static
-int __init wimax_subsys_init(void)
-{
- int result;
-
- d_fnstart(4, NULL, "()\n");
- d_parse_params(D_LEVEL, D_LEVEL_SIZE, wimax_debug_params,
- "wimax.debug");
-
- result = genl_register_family(&wimax_gnl_family);
- if (unlikely(result < 0)) {
- pr_err("cannot register generic netlink family: %d\n", result);
- goto error_register_family;
- }
-
- d_fnend(4, NULL, "() = 0\n");
- return 0;
-
-error_register_family:
- d_fnend(4, NULL, "() = %d\n", result);
- return result;
-
-}
-module_init(wimax_subsys_init);
-
-
-/* Shutdown the wimax stack */
-static
-void __exit wimax_subsys_exit(void)
-{
- wimax_id_table_release();
- genl_unregister_family(&wimax_gnl_family);
-}
-module_exit(wimax_subsys_exit);
-
-MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
-MODULE_DESCRIPTION("Linux WiMAX stack");
-MODULE_LICENSE("GPL");
diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h
deleted file mode 100644
index 40751207296c..000000000000
--- a/net/wimax/wimax-internal.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Linux WiMAX
- * Internal API for kernel space WiMAX stack
- *
- * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This header file is for declarations and definitions internal to
- * the WiMAX stack. For public APIs and documentation, see
- * include/net/wimax.h and include/linux/wimax.h.
- */
-
-#ifndef __WIMAX_INTERNAL_H__
-#define __WIMAX_INTERNAL_H__
-#ifdef __KERNEL__
-
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/device.h>
-#include <net/wimax.h>
-
-
-/*
- * Decide if a (locked) device is ready for use
- *
- * Before using the device structure, it must be locked
- * (wimax_dev->mutex). As well, most operations need to call this
- * function to check if the state is the right one.
- *
- * An error value will be returned if the state is not the right
- * one. In that case, the caller should not attempt to use the device
- * and just unlock it.
- */
-static inline __must_check
-int wimax_dev_is_ready(struct wimax_dev *wimax_dev)
-{
- if (wimax_dev->state == __WIMAX_ST_NULL)
- return -EINVAL; /* Device is not even registered! */
- if (wimax_dev->state == WIMAX_ST_DOWN)
- return -ENOMEDIUM;
- if (wimax_dev->state == __WIMAX_ST_QUIESCING)
- return -ESHUTDOWN;
- return 0;
-}
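
The canonical calling pattern, as used throughout the stack:

	mutex_lock(&wimax_dev->mutex);
	result = wimax_dev_is_ready(wimax_dev);
	if (result < 0)
		goto error_not_ready;
	/* ... device is usable; operate on it ... */
error_not_ready:
	mutex_unlock(&wimax_dev->mutex);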
-
-
-static inline
-void __wimax_state_set(struct wimax_dev *wimax_dev, enum wimax_st state)
-{
- wimax_dev->state = state;
-}
-void __wimax_state_change(struct wimax_dev *, enum wimax_st);
-
-#ifdef CONFIG_DEBUG_FS
-void wimax_debugfs_add(struct wimax_dev *);
-void wimax_debugfs_rm(struct wimax_dev *);
-#else
-static inline void wimax_debugfs_add(struct wimax_dev *wimax_dev) {}
-static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {}
-#endif
-
-void wimax_id_table_add(struct wimax_dev *);
-struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
-void wimax_id_table_rm(struct wimax_dev *);
-void wimax_id_table_release(void);
-
-int wimax_rfkill_add(struct wimax_dev *);
-void wimax_rfkill_rm(struct wimax_dev *);
-
-/* generic netlink */
-extern struct genl_family wimax_gnl_family;
-
-/* ops */
-int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info);
-int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info);
-int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info);
-int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info);
-
-#endif /* #ifdef __KERNEL__ */
-#endif /* #ifndef __WIMAX_INTERNAL_H__ */
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 22d1779ab2b1..e4030f1fbc60 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -530,10 +530,10 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_NAN:
break;
+ case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_UNSPECIFIED:
case NUM_NL80211_IFTYPES:
WARN_ON(1);
@@ -677,12 +677,12 @@ bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev)
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_P2P_DEVICE:
/* Can NAN type be considered as beaconing interface? */
case NL80211_IFTYPE_NAN:
break;
case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_WDS:
case NUM_NL80211_IFTYPES:
WARN_ON(1);
}
@@ -1324,12 +1324,12 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
break;
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_NAN:
/* these interface types don't really have a channel */
return;
case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_WDS:
case NUM_NL80211_IFTYPES:
WARN_ON(1);
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 240282c083aa..4b1f35e976e7 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -631,10 +631,8 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
return -EINVAL;
}
-#ifndef CONFIG_WIRELESS_WDS
if (WARN_ON(all_iftypes & BIT(NL80211_IFTYPE_WDS)))
return -EINVAL;
-#endif
/* You can't even choose that many! */
if (WARN_ON(cnt < c->max_interfaces))
@@ -675,10 +673,8 @@ int wiphy_register(struct wiphy *wiphy)
!(wiphy->nan_supported_bands & BIT(NL80211_BAND_2GHZ)))))
return -EINVAL;
-#ifndef CONFIG_WIRELESS_WDS
if (WARN_ON(wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS)))
return -EINVAL;
-#endif
if (WARN_ON(wiphy->pmsr_capa && !wiphy->pmsr_capa->ftm.supported))
return -EINVAL;
@@ -1202,9 +1198,6 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_OCB:
__cfg80211_leave_ocb(rdev, dev);
break;
- case NL80211_IFTYPE_WDS:
- /* must be handled by mac80211/driver, has no APIs */
- break;
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_NAN:
/* cannot happen, has no netdev */
@@ -1214,6 +1207,7 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
/* nothing to do */
break;
case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_WDS:
case NUM_NL80211_IFTYPES:
/* invalid */
break;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index e3e9686859d4..7df91f940212 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -433,6 +433,8 @@ void cfg80211_sme_abandon_assoc(struct wireless_dev *wdev);
/* internal helpers */
bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher);
+bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev,
+ int key_idx, bool pairwise);
int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
struct key_params *params, int key_idx,
bool pairwise, const u8 *mac_addr);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 0ac820780437..e1e90761dc00 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2009, Jouni Malinen <j@w1.fi>
* Copyright (c) 2015 Intel Deutschland GmbH
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019-2020 Intel Corporation
*/
#include <linux/kernel.h>
@@ -81,7 +81,8 @@ static void cfg80211_process_auth(struct wireless_dev *wdev,
}
static void cfg80211_process_deauth(struct wireless_dev *wdev,
- const u8 *buf, size_t len)
+ const u8 *buf, size_t len,
+ bool reconnect)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
@@ -89,7 +90,7 @@ static void cfg80211_process_deauth(struct wireless_dev *wdev,
u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr);
- nl80211_send_deauth(rdev, wdev->netdev, buf, len, GFP_KERNEL);
+ nl80211_send_deauth(rdev, wdev->netdev, buf, len, reconnect, GFP_KERNEL);
if (!wdev->current_bss ||
!ether_addr_equal(wdev->current_bss->pub.bssid, bssid))
@@ -100,7 +101,8 @@ static void cfg80211_process_deauth(struct wireless_dev *wdev,
}
static void cfg80211_process_disassoc(struct wireless_dev *wdev,
- const u8 *buf, size_t len)
+ const u8 *buf, size_t len,
+ bool reconnect)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
@@ -108,7 +110,8 @@ static void cfg80211_process_disassoc(struct wireless_dev *wdev,
u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr);
- nl80211_send_disassoc(rdev, wdev->netdev, buf, len, GFP_KERNEL);
+ nl80211_send_disassoc(rdev, wdev->netdev, buf, len, reconnect,
+ GFP_KERNEL);
if (WARN_ON(!wdev->current_bss ||
!ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
@@ -133,9 +136,9 @@ void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len)
if (ieee80211_is_auth(mgmt->frame_control))
cfg80211_process_auth(wdev, buf, len);
else if (ieee80211_is_deauth(mgmt->frame_control))
- cfg80211_process_deauth(wdev, buf, len);
+ cfg80211_process_deauth(wdev, buf, len, false);
else if (ieee80211_is_disassoc(mgmt->frame_control))
- cfg80211_process_disassoc(wdev, buf, len);
+ cfg80211_process_disassoc(wdev, buf, len, false);
}
EXPORT_SYMBOL(cfg80211_rx_mlme_mgmt);
@@ -180,22 +183,23 @@ void cfg80211_abandon_assoc(struct net_device *dev, struct cfg80211_bss *bss)
}
EXPORT_SYMBOL(cfg80211_abandon_assoc);
-void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len)
+void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len,
+ bool reconnect)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct ieee80211_mgmt *mgmt = (void *)buf;
ASSERT_WDEV_LOCK(wdev);
- trace_cfg80211_tx_mlme_mgmt(dev, buf, len);
+ trace_cfg80211_tx_mlme_mgmt(dev, buf, len, reconnect);
if (WARN_ON(len < 2))
return;
if (ieee80211_is_deauth(mgmt->frame_control))
- cfg80211_process_deauth(wdev, buf, len);
+ cfg80211_process_deauth(wdev, buf, len, reconnect);
else
- cfg80211_process_disassoc(wdev, buf, len);
+ cfg80211_process_disassoc(wdev, buf, len, reconnect);
}
EXPORT_SYMBOL(cfg80211_tx_mlme_mgmt);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f67ddf2cebcb..775d0c4d86c3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -399,6 +399,18 @@ nl80211_unsol_bcast_probe_resp_policy[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX +
.len = IEEE80211_MAX_DATA_LEN }
};
+static const struct nla_policy
+sar_specs_policy[NL80211_SAR_ATTR_SPECS_MAX + 1] = {
+ [NL80211_SAR_ATTR_SPECS_POWER] = { .type = NLA_S32 },
+ [NL80211_SAR_ATTR_SPECS_RANGE_INDEX] = {.type = NLA_U32 },
+};
+
+static const struct nla_policy
+sar_policy[NL80211_SAR_ATTR_MAX + 1] = {
+ [NL80211_SAR_ATTR_TYPE] = NLA_POLICY_MAX(NLA_U32, NUM_NL80211_SAR_TYPE),
+ [NL80211_SAR_ATTR_SPECS] = NLA_POLICY_NESTED_ARRAY(sar_specs_policy),
+};
+
static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -715,6 +727,11 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
NLA_POLICY_EXACT_LEN(IEEE80211_S1G_CAPABILITY_LEN),
[NL80211_ATTR_S1G_CAPABILITY_MASK] =
NLA_POLICY_EXACT_LEN(IEEE80211_S1G_CAPABILITY_LEN),
+ [NL80211_ATTR_SAE_PWE] =
+ NLA_POLICY_RANGE(NLA_U8, NL80211_SAE_PWE_HUNT_AND_PECK,
+ NL80211_SAE_PWE_BOTH),
+ [NL80211_ATTR_RECONNECT_REQUESTED] = { .type = NLA_REJECT },
+ [NL80211_ATTR_SAR_SPEC] = NLA_POLICY_NESTED(sar_policy),
};
/* policy for the key attributes */
@@ -1882,7 +1899,6 @@ static int nl80211_add_commands_unsplit(struct cfg80211_registered_device *rdev,
if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
goto nla_put_failure;
}
- CMD(set_wds_peer, SET_WDS_PEER);
if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
CMD(tdls_mgmt, TDLS_MGMT);
CMD(tdls_oper, TDLS_OPER);
@@ -2092,6 +2108,56 @@ fail:
return -ENOBUFS;
}
+static int
+nl80211_put_sar_specs(struct cfg80211_registered_device *rdev,
+ struct sk_buff *msg)
+{
+ struct nlattr *sar_capa, *specs, *sub_freq_range;
+ u8 num_freq_ranges;
+ int i;
+
+ if (!rdev->wiphy.sar_capa)
+ return 0;
+
+ num_freq_ranges = rdev->wiphy.sar_capa->num_freq_ranges;
+
+ sar_capa = nla_nest_start(msg, NL80211_ATTR_SAR_SPEC);
+ if (!sar_capa)
+ return -ENOSPC;
+
+ if (nla_put_u32(msg, NL80211_SAR_ATTR_TYPE, rdev->wiphy.sar_capa->type))
+ goto fail;
+
+ specs = nla_nest_start(msg, NL80211_SAR_ATTR_SPECS);
+ if (!specs)
+ goto fail;
+
+ /* report supported freq_ranges */
+ for (i = 0; i < num_freq_ranges; i++) {
+ sub_freq_range = nla_nest_start(msg, i + 1);
+ if (!sub_freq_range)
+ goto fail;
+
+ if (nla_put_u32(msg, NL80211_SAR_ATTR_SPECS_START_FREQ,
+ rdev->wiphy.sar_capa->freq_ranges[i].start_freq))
+ goto fail;
+
+ if (nla_put_u32(msg, NL80211_SAR_ATTR_SPECS_END_FREQ,
+ rdev->wiphy.sar_capa->freq_ranges[i].end_freq))
+ goto fail;
+
+ nla_nest_end(msg, sub_freq_range);
+ }
+
+ nla_nest_end(msg, specs);
+ nla_nest_end(msg, sar_capa);
+
+ return 0;
+fail:
+ nla_nest_cancel(msg, sar_capa);
+ return -ENOBUFS;
+}
+
struct nl80211_dump_wiphy_state {
s64 filter_wiphy;
long start;
@@ -2341,6 +2407,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
CMD(set_multicast_to_unicast, SET_MULTICAST_TO_UNICAST);
CMD(update_connect_params, UPDATE_CONNECT_PARAMS);
CMD(update_ft_ies, UPDATE_FT_IES);
+ if (rdev->wiphy.sar_capa)
+ CMD(set_sar_specs, SET_SAR_SPECS);
}
#undef CMD
@@ -2666,6 +2734,11 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
if (nl80211_put_tid_config_support(rdev, msg))
goto nla_put_failure;
+ state->split_start++;
+ break;
+ case 16:
+ if (nl80211_put_sar_specs(rdev, msg))
+ goto nla_put_failure;
/* done */
state->split_start = 0;
@@ -2860,8 +2933,8 @@ static int parse_txq_params(struct nlattr *tb[],
static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
{
/*
- * You can only set the channel explicitly for WDS interfaces,
- * all others have their channel managed via their respective
+ * You can only set the channel explicitly for some interfaces,
+ * most have their channel managed via their respective
* "establish a connection" command (connect, join, ...)
*
* For AP/GO and mesh mode, the channel can be set with the
@@ -3066,29 +3139,6 @@ static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
return __nl80211_set_channel(rdev, netdev, info);
}
-static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info)
-{
- struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- const u8 *bssid;
-
- if (!info->attrs[NL80211_ATTR_MAC])
- return -EINVAL;
-
- if (netif_running(dev))
- return -EBUSY;
-
- if (!rdev->ops->set_wds_peer)
- return -EOPNOTSUPP;
-
- if (wdev->iftype != NL80211_IFTYPE_WDS)
- return -EOPNOTSUPP;
-
- bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
- return rdev_set_wds_peer(rdev, dev, bssid);
-}
-
static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev;
@@ -4260,9 +4310,6 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
- if (key.idx < 0)
- return -EINVAL;
-
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
@@ -4278,6 +4325,10 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
key.type != NL80211_KEYTYPE_GROUP)
return -EINVAL;
+ if (!cfg80211_valid_key_idx(rdev, key.idx,
+ key.type == NL80211_KEYTYPE_PAIRWISE))
+ return -EINVAL;
+
if (!rdev->ops->del_key)
return -EOPNOTSUPP;
@@ -4595,7 +4646,8 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
struct nlattr *attrs[],
enum nl80211_attrs attr,
struct cfg80211_bitrate_mask *mask,
- struct net_device *dev)
+ struct net_device *dev,
+ bool default_all_enabled)
{
struct nlattr *tb[NL80211_TXRATE_MAX + 1];
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -4610,6 +4662,9 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
for (i = 0; i < NUM_NL80211_BANDS; i++) {
const struct ieee80211_sta_he_cap *he_cap;
+ if (!default_all_enabled)
+ break;
+
sband = rdev->wiphy.bands[i];
if (!sband)
@@ -4677,6 +4732,7 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
mask->control[band].ht_mcs))
return -EINVAL;
}
+
if (tb[NL80211_TXRATE_VHT]) {
if (!vht_set_mcs_mask(
sband,
@@ -4684,6 +4740,7 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
mask->control[band].vht_mcs))
return -EINVAL;
}
+
if (tb[NL80211_TXRATE_GI]) {
mask->control[band].gi =
nla_get_u8(tb[NL80211_TXRATE_GI]);
@@ -4695,6 +4752,7 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
nla_data(tb[NL80211_TXRATE_HE]),
mask->control[band].he_mcs))
return -EINVAL;
+
if (tb[NL80211_TXRATE_HE_GI])
mask->control[band].he_gi =
nla_get_u8(tb[NL80211_TXRATE_HE_GI]);
@@ -4736,7 +4794,7 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
enum nl80211_band band,
struct cfg80211_bitrate_mask *beacon_rate)
{
- u32 count_ht, count_vht, i;
+ u32 count_ht, count_vht, count_he, i;
u32 rate = beacon_rate->control[band].legacy;
/* Allow only one rate */
@@ -4769,7 +4827,21 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
- if ((count_ht && count_vht) || (!rate && !count_ht && !count_vht))
+ count_he = 0;
+ for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
+ if (hweight16(beacon_rate->control[band].he_mcs[i]) > 1) {
+ return -EINVAL;
+ } else if (beacon_rate->control[band].he_mcs[i]) {
+ count_he++;
+ if (count_he > 1)
+ return -EINVAL;
+ }
+ if (count_he && rate)
+ return -EINVAL;
+ }
+
+ if ((count_ht && count_vht && count_he) ||
+ (!rate && !count_ht && !count_vht && !count_he))
return -EINVAL;
if (rate &&
@@ -4784,6 +4856,10 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
!wiphy_ext_feature_isset(&rdev->wiphy,
NL80211_EXT_FEATURE_BEACON_RATE_VHT))
return -EINVAL;
+ if (count_he &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_HE))
+ return -EINVAL;
return 0;
}
@@ -5013,6 +5089,8 @@ static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params,
params->vht_required = true;
if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_HE_PHY)
params->he_required = true;
+ if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_SAE_H2E)
+ params->sae_h2e_required = true;
}
}
@@ -5244,7 +5322,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
NL80211_ATTR_TX_RATES,
&params.beacon_rate,
- dev);
+ dev, false);
if (err)
return err;
@@ -8237,12 +8315,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]) {
- if (!wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_SET_SCAN_DWELL)) {
- err = -EOPNOTSUPP;
- goto out_free;
- }
-
request->duration =
nla_get_u16(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]);
request->duration_mandatory =
@@ -9732,6 +9804,12 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
nla_len(info->attrs[NL80211_ATTR_SAE_PASSWORD]);
}
+ if (info->attrs[NL80211_ATTR_SAE_PWE])
+ settings->sae_pwe =
+ nla_get_u8(info->attrs[NL80211_ATTR_SAE_PWE]);
+ else
+ settings->sae_pwe = NL80211_SAE_PWE_UNSPECIFIED;
+
return 0;
}
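
With this attribute, user space chooses how the SAE password element is derived (hunt-and-peck, hash-to-element, or driver default). A libnl-style sketch of attaching it to a connect/associate request (message construction assumed; the enum value names come from the same uapi enum as NL80211_SAE_PWE_UNSPECIFIED above):

    #include <netlink/genl/genl.h>
    #include <linux/nl80211.h>

    /* Sketch: request hash-to-element PWE derivation for SAE. */
    static int put_sae_pwe(struct nl_msg *msg)
    {
    	return nla_put_u8(msg, NL80211_ATTR_SAE_PWE,
    			  NL80211_SAE_PWE_HASH_TO_ELEMENT);
    }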
@@ -11088,7 +11166,7 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
NL80211_ATTR_TX_RATES, &mask,
- dev);
+ dev, true);
if (err)
return err;
@@ -11165,6 +11243,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_P2P_DEVICE:
if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
return -EINVAL;
+ break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_P2P_CLIENT:
@@ -11697,7 +11776,7 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
NL80211_ATTR_TX_RATES,
&setup.beacon_rate,
- dev);
+ dev, false);
if (err)
return err;
@@ -14477,7 +14556,8 @@ static int parse_tid_conf(struct cfg80211_registered_device *rdev,
if (tid_conf->txrate_type != NL80211_TX_RATE_AUTOMATIC) {
attr = NL80211_TID_CONFIG_ATTR_TX_RATE;
err = nl80211_parse_tx_bitrate_mask(info, attrs, attr,
- &tid_conf->txrate_mask, dev);
+ &tid_conf->txrate_mask, dev,
+ true);
if (err)
return err;
@@ -14658,6 +14738,111 @@ static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
}
}
+static int nl80211_set_sar_sub_specs(struct cfg80211_registered_device *rdev,
+ struct cfg80211_sar_specs *sar_specs,
+ struct nlattr *spec[], int index)
+{
+ u32 range_index, i;
+
+ if (!sar_specs || !spec)
+ return -EINVAL;
+
+ if (!spec[NL80211_SAR_ATTR_SPECS_POWER] ||
+ !spec[NL80211_SAR_ATTR_SPECS_RANGE_INDEX])
+ return -EINVAL;
+
+ range_index = nla_get_u32(spec[NL80211_SAR_ATTR_SPECS_RANGE_INDEX]);
+
+ /* check if range_index exceeds num_freq_ranges */
+ if (range_index >= rdev->wiphy.sar_capa->num_freq_ranges)
+ return -EINVAL;
+
+ /* check that range_index is not duplicated among earlier sub-specs */
+ for (i = 0; i < index; i++) {
+ if (sar_specs->sub_specs[i].freq_range_index == range_index)
+ return -EINVAL;
+ }
+
+ sar_specs->sub_specs[index].power =
+ nla_get_s32(spec[NL80211_SAR_ATTR_SPECS_POWER]);
+
+ sar_specs->sub_specs[index].freq_range_index = range_index;
+
+ return 0;
+}
+
+static int nl80211_set_sar_specs(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct nlattr *spec[NL80211_SAR_ATTR_SPECS_MAX + 1];
+ struct nlattr *tb[NL80211_SAR_ATTR_MAX + 1];
+ struct cfg80211_sar_specs *sar_spec;
+ enum nl80211_sar_type type;
+ struct nlattr *spec_list;
+ u32 specs;
+ int rem, err;
+
+ if (!rdev->wiphy.sar_capa || !rdev->ops->set_sar_specs)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_SAR_SPEC])
+ return -EINVAL;
+
+ nla_parse_nested(tb, NL80211_SAR_ATTR_MAX,
+ info->attrs[NL80211_ATTR_SAR_SPEC],
+ NULL, NULL);
+
+ if (!tb[NL80211_SAR_ATTR_TYPE] || !tb[NL80211_SAR_ATTR_SPECS])
+ return -EINVAL;
+
+ type = nla_get_u32(tb[NL80211_SAR_ATTR_TYPE]);
+ if (type != rdev->wiphy.sar_capa->type)
+ return -EINVAL;
+
+ specs = 0;
+ nla_for_each_nested(spec_list, tb[NL80211_SAR_ATTR_SPECS], rem)
+ specs++;
+
+ if (specs > rdev->wiphy.sar_capa->num_freq_ranges)
+ return -EINVAL;
+
+ sar_spec = kzalloc(sizeof(*sar_spec) +
+ specs * sizeof(struct cfg80211_sar_sub_specs),
+ GFP_KERNEL);
+ if (!sar_spec)
+ return -ENOMEM;
+
+ sar_spec->type = type;
+ specs = 0;
+ nla_for_each_nested(spec_list, tb[NL80211_SAR_ATTR_SPECS], rem) {
+ nla_parse_nested(spec, NL80211_SAR_ATTR_SPECS_MAX,
+ spec_list, NULL, NULL);
+
+ switch (type) {
+ case NL80211_SAR_TYPE_POWER:
+ if (nl80211_set_sar_sub_specs(rdev, sar_spec,
+ spec, specs)) {
+ err = -EINVAL;
+ goto error;
+ }
+ break;
+ default:
+ err = -EINVAL;
+ goto error;
+ }
+ specs++;
+ }
+
+ sar_spec->num_sub_specs = specs;
+
+ rdev->cur_cmd_info = info;
+ err = rdev_set_sar_specs(rdev, sar_spec);
+ rdev->cur_cmd_info = NULL;
+error:
+ kfree(sar_spec);
+ return err;
+}
+
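nl80211_set_sar_specs() validates the request against what the driver advertised at registration time: the SAR type must match, each sub-spec must name a valid, non-duplicated frequency-range index, and the number of sub-specs may not exceed the advertised ranges. A sketch of the driver side of that contract (structure field names as in the cfg80211 header this API pairs with; the frequency values are hypothetical):

    static const struct cfg80211_sar_freq_ranges my_sar_ranges[] = {
    	{ .start_freq = 2412, .end_freq = 2484 },	/* 2.4 GHz */
    	{ .start_freq = 5150, .end_freq = 5895 },	/* 5 GHz */
    };

    static const struct cfg80211_sar_capa my_sar_capa = {
    	.type = NL80211_SAR_TYPE_POWER,
    	.num_freq_ranges = ARRAY_SIZE(my_sar_ranges),
    	.freq_ranges = my_sar_ranges,
    };

    /* At wiphy setup time, before wiphy_register():
     *	wiphy->sar_capa = &my_sar_capa;
     * The driver's .set_sar_specs cfg80211 op then receives the
     * validated cfg80211_sar_specs built in the function above.
     */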
static const struct genl_ops nl80211_ops[] = {
{
.cmd = NL80211_CMD_GET_WIPHY,
@@ -15140,14 +15325,6 @@ static const struct genl_small_ops nl80211_small_ops[] = {
NL80211_FLAG_NEED_RTNL,
},
{
- .cmd = NL80211_CMD_SET_WDS_PEER,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = nl80211_set_wds_peer,
- .flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
- NL80211_FLAG_NEED_RTNL,
- },
- {
.cmd = NL80211_CMD_JOIN_MESH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_join_mesh,
@@ -15519,6 +15696,14 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_SET_SAR_SPECS,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = nl80211_set_sar_specs,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_family nl80211_fam __ro_after_init = {
@@ -15854,7 +16039,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
const u8 *buf, size_t len,
enum nl80211_commands cmd, gfp_t gfp,
int uapsd_queues, const u8 *req_ies,
- size_t req_ies_len)
+ size_t req_ies_len, bool reconnect)
{
struct sk_buff *msg;
void *hdr;
@@ -15876,6 +16061,9 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
nla_put(msg, NL80211_ATTR_REQ_IE, req_ies_len, req_ies)))
goto nla_put_failure;
+ if (reconnect && nla_put_flag(msg, NL80211_ATTR_RECONNECT_REQUESTED))
+ goto nla_put_failure;
+
if (uapsd_queues >= 0) {
struct nlattr *nla_wmm =
nla_nest_start_noflag(msg, NL80211_ATTR_STA_WME);
@@ -15904,7 +16092,8 @@ void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
size_t len, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_AUTHENTICATE, gfp, -1, NULL, 0);
+ NL80211_CMD_AUTHENTICATE, gfp, -1, NULL, 0,
+ false);
}
void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
@@ -15914,23 +16103,25 @@ void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
NL80211_CMD_ASSOCIATE, gfp, uapsd_queues,
- req_ies, req_ies_len);
+ req_ies, req_ies_len, false);
}
void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *buf,
- size_t len, gfp_t gfp)
+ size_t len, bool reconnect, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_DEAUTHENTICATE, gfp, -1, NULL, 0);
+ NL80211_CMD_DEAUTHENTICATE, gfp, -1, NULL, 0,
+ reconnect);
}
void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *buf,
- size_t len, gfp_t gfp)
+ size_t len, bool reconnect, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_DISASSOCIATE, gfp, -1, NULL, 0);
+ NL80211_CMD_DISASSOCIATE, gfp, -1, NULL, 0,
+ reconnect);
}
void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
@@ -15961,7 +16152,7 @@ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len);
nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC, -1,
- NULL, 0);
+ NULL, 0, false);
}
EXPORT_SYMBOL(cfg80211_rx_unprot_mlme_mgmt);
@@ -17062,7 +17253,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef,
gfp_t gfp,
enum nl80211_commands notif,
- u8 count)
+ u8 count, bool quiet)
{
struct sk_buff *msg;
void *hdr;
@@ -17083,9 +17274,13 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
if (nl80211_send_chandef(msg, chandef))
goto nla_put_failure;
- if ((notif == NL80211_CMD_CH_SWITCH_STARTED_NOTIFY) &&
- (nla_put_u32(msg, NL80211_ATTR_CH_SWITCH_COUNT, count)))
+ if (notif == NL80211_CMD_CH_SWITCH_STARTED_NOTIFY) {
+ if (nla_put_u32(msg, NL80211_ATTR_CH_SWITCH_COUNT, count))
goto nla_put_failure;
+ if (quiet &&
+ nla_put_flag(msg, NL80211_ATTR_CH_SWITCH_BLOCK_TX))
+ goto nla_put_failure;
+ }
genlmsg_end(msg, hdr);
@@ -17118,13 +17313,13 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
cfg80211_sched_dfs_chan_update(rdev);
nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL,
- NL80211_CMD_CH_SWITCH_NOTIFY, 0);
+ NL80211_CMD_CH_SWITCH_NOTIFY, 0, false);
}
EXPORT_SYMBOL(cfg80211_ch_switch_notify);
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u8 count)
+ u8 count, bool quiet)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -17133,7 +17328,8 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
trace_cfg80211_ch_switch_started_notify(dev, chandef);
nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL,
- NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, count);
+ NL80211_CMD_CH_SWITCH_STARTED_NOTIFY,
+ count, quiet);
}
EXPORT_SYMBOL(cfg80211_ch_switch_started_notify);
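
The started-notify path now carries the CSA quieting state to user space as the NL80211_ATTR_CH_SWITCH_BLOCK_TX flag. A driver announcing a channel switch would call it along these lines (chandef setup and the count/quiet values are illustrative):

    /* Sketch: announce a CSA that quiets TX until the switch completes. */
    cfg80211_ch_switch_started_notify(dev, &chandef,
    				      5,	/* beacons until the switch */
    				      true);	/* CSA requests TX to stop */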
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index d3e8e426c486..a3f387770f1b 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Portions of this file
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018, 2020 Intel Corporation
*/
#ifndef __NET_WIRELESS_NL80211_H
#define __NET_WIRELESS_NL80211_H
@@ -69,10 +69,12 @@ void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
const u8 *req_ies, size_t req_ies_len);
void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
- const u8 *buf, size_t len, gfp_t gfp);
+ const u8 *buf, size_t len,
+ bool reconnect, gfp_t gfp);
void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
- const u8 *buf, size_t len, gfp_t gfp);
+ const u8 *buf, size_t len,
+ bool reconnect, gfp_t gfp);
void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
const u8 *addr, gfp_t gfp);
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 950d57494168..8b1358d04ca2 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -582,16 +582,6 @@ static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev,
return ret;
}
-static inline int rdev_set_wds_peer(struct cfg80211_registered_device *rdev,
- struct net_device *dev, const u8 *addr)
-{
- int ret;
- trace_rdev_set_wds_peer(&rdev->wiphy, dev, addr);
- ret = rdev->ops->set_wds_peer(&rdev->wiphy, dev, addr);
- trace_rdev_return_int(&rdev->wiphy, ret);
- return ret;
-}
-
static inline int
rdev_set_multicast_to_unicast(struct cfg80211_registered_device *rdev,
struct net_device *dev,
@@ -1356,4 +1346,16 @@ static inline int rdev_reset_tid_config(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int rdev_set_sar_specs(struct cfg80211_registered_device *rdev,
+ struct cfg80211_sar_specs *sar)
+{
+ int ret;
+
+ trace_rdev_set_sar_specs(&rdev->wiphy, sar);
+ ret = rdev->ops->set_sar_specs(&rdev->wiphy, sar);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+
+ return ret;
+}
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index a04fdfb35f07..bb72447ad960 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1616,7 +1616,7 @@ static const struct ieee80211_reg_rule *
__freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw)
{
const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
- const u32 bws[] = {0, 1, 2, 4, 5, 8, 10, 16, 20};
+ static const u32 bws[] = {0, 1, 2, 4, 5, 8, 10, 16, 20};
const struct ieee80211_reg_rule *reg_rule;
int i = ARRAY_SIZE(bws) - 1;
u32 bw;
@@ -2547,6 +2547,7 @@ static void handle_band_custom(struct wiphy *wiphy,
void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
const struct ieee80211_regdomain *regd)
{
+ const struct ieee80211_regdomain *new_regd, *tmp;
enum nl80211_band band;
unsigned int bands_set = 0;
@@ -2566,6 +2567,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
* on your device's supported bands.
*/
WARN_ON(!bands_set);
+ new_regd = reg_copy_regd(regd);
+ if (IS_ERR(new_regd))
+ return;
+
+ tmp = get_wiphy_regdom(wiphy);
+ rcu_assign_pointer(wiphy->regd, new_regd);
+ rcu_free_regdom(tmp);
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
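
wiphy_apply_custom_regulatory() now also publishes a private copy of the regdomain through wiphy->regd, using the standard RCU copy/publish/reclaim sequence so concurrent readers always see either the old or the new table, never a mix:

    /* The three steps of the pattern used above: */
    new_regd = reg_copy_regd(regd);		/* 1: build a private copy */
    if (IS_ERR(new_regd))
    	return;
    tmp = get_wiphy_regdom(wiphy);		/* current published pointer */
    rcu_assign_pointer(wiphy->regd, new_regd);	/* 2: publish the new one */
    rcu_free_regdom(tmp);			/* 3: reclaim old copy after
    					 *    the RCU grace period */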
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 8d0e49c46db3..1b7fec3b53cd 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -694,7 +694,7 @@ static void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request,
static bool cfg80211_find_ssid_match(struct cfg80211_colocated_ap *ap,
struct cfg80211_scan_request *request)
{
- u8 i;
+ int i;
u32 s_ssid;
for (i = 0; i < request->n_ssids; i++) {
@@ -726,7 +726,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
int n_channels, count = 0, err;
struct cfg80211_scan_request *request, *rdev_req = rdev->scan_req;
LIST_HEAD(coloc_ap_list);
- bool need_scan_psc;
+ bool need_scan_psc = true;
const struct ieee80211_sband_iftype_data *iftd;
rdev_req->scan_6ghz = true;
@@ -770,20 +770,18 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
(void *)&request->channels[n_channels];
/*
- * PSC channels should not be scanned if all the reported co-located APs
- * are indicating that all APs in the same ESS are co-located
+ * PSC channels should not be scanned in the case of a direct scan with a
+ * single SSID, when at least one reported co-located AP with a matching
+ * SSID indicates that all APs in the same ESS are co-located
*/
- if (count) {
- need_scan_psc = false;
-
+ if (count && request->n_ssids == 1 && request->ssids[0].ssid_len) {
list_for_each_entry(ap, &coloc_ap_list, list) {
- if (!ap->colocated_ess) {
- need_scan_psc = true;
+ if (ap->colocated_ess &&
+ cfg80211_find_ssid_match(ap, request)) {
+ need_scan_psc = false;
break;
}
}
- } else {
- need_scan_psc = true;
}
/*
@@ -1901,6 +1899,9 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
tmp.pub.beacon_interval = beacon_interval;
tmp.pub.capability = capability;
tmp.ts_boottime = data->boottime_ns;
+ tmp.parent_tsf = data->parent_tsf;
+ ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
+
if (non_tx_data) {
tmp.pub.transmitted_bss = non_tx_data->tx_bss;
ts = bss_from_pub(non_tx_data->tx_bss)->ts;
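
The PSC (preferred scanning channel) change above inverts the old default: 6 GHz PSC channels are now scanned unless the scan is a directed single-SSID scan and a matching co-located AP vouches for its whole ESS. Distilled to a decision table (sketch):

    /* wildcard or multi-SSID scan          -> scan PSC channels
     * no co-located APs reported           -> scan PSC channels
     * directed 1-SSID scan, and a matching
     * AP sets colocated_ess                -> skip PSC channels, since the
     *                                         6 GHz APs of that ESS are
     *                                         already known from the RNR
     *                                         co-location reports
     */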
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 6e218a0acd4e..76b777d5903f 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -838,11 +838,6 @@ DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_del_mpath,
TP_ARGS(wiphy, netdev, mac)
);
-DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_set_wds_peer,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
- TP_ARGS(wiphy, netdev, mac)
-);
-
TRACE_EVENT(rdev_dump_station,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx,
u8 *mac),
@@ -2684,19 +2679,23 @@ DEFINE_EVENT(netdev_frame_event, cfg80211_rx_mlme_mgmt,
);
TRACE_EVENT(cfg80211_tx_mlme_mgmt,
- TP_PROTO(struct net_device *netdev, const u8 *buf, int len),
- TP_ARGS(netdev, buf, len),
+ TP_PROTO(struct net_device *netdev, const u8 *buf, int len,
+ bool reconnect),
+ TP_ARGS(netdev, buf, len, reconnect),
TP_STRUCT__entry(
NETDEV_ENTRY
__dynamic_array(u8, frame, len)
+ __field(int, reconnect)
),
TP_fast_assign(
NETDEV_ASSIGN;
memcpy(__get_dynamic_array(frame), buf, len);
+ __entry->reconnect = reconnect;
),
- TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x",
+ TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x reconnect:%d",
NETDEV_PR_ARG,
- le16_to_cpup((__le16 *)__get_dynamic_array(frame)))
+ le16_to_cpup((__le16 *)__get_dynamic_array(frame)),
+ __entry->reconnect)
);
DECLARE_EVENT_CLASS(netdev_mac_evt,
@@ -3547,6 +3546,25 @@ TRACE_EVENT(rdev_reset_tid_config,
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", tids: 0x%x",
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tids)
);
+
+TRACE_EVENT(rdev_set_sar_specs,
+ TP_PROTO(struct wiphy *wiphy, struct cfg80211_sar_specs *sar),
+ TP_ARGS(wiphy, sar),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(u16, type)
+ __field(u16, num)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ __entry->type = sar->type;
+ __entry->num = sar->num_sub_specs;
+ ),
+ TP_printk(WIPHY_PR_FMT ", Set type:%d, num_specs:%d",
+ WIPHY_PR_ARG, __entry->type, __entry->num)
+);
+
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/util.c b/net/wireless/util.c
index f01746894a4e..b4acc805114b 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -272,18 +272,53 @@ bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher)
return false;
}
-int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
- struct key_params *params, int key_idx,
- bool pairwise, const u8 *mac_addr)
+static bool
+cfg80211_igtk_cipher_supported(struct cfg80211_registered_device *rdev)
{
- int max_key_idx = 5;
+ struct wiphy *wiphy = &rdev->wiphy;
+ int i;
+
+ for (i = 0; i < wiphy->n_cipher_suites; i++) {
+ switch (wiphy->cipher_suites[i]) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ return true;
+ }
+ }
+
+ return false;
+}
- if (wiphy_ext_feature_isset(&rdev->wiphy,
- NL80211_EXT_FEATURE_BEACON_PROTECTION) ||
- wiphy_ext_feature_isset(&rdev->wiphy,
- NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT))
+bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev,
+ int key_idx, bool pairwise)
+{
+ int max_key_idx;
+
+ if (pairwise)
+ max_key_idx = 3;
+ else if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION) ||
+ wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT))
max_key_idx = 7;
+ else if (cfg80211_igtk_cipher_supported(rdev))
+ max_key_idx = 5;
+ else
+ max_key_idx = 3;
+
if (key_idx < 0 || key_idx > max_key_idx)
+ return false;
+
+ return true;
+}
+
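The split-out cfg80211_valid_key_idx() makes the upper bound on key indices depend on what the device can actually do, instead of the previous blanket 0..5. The resulting ranges (IGTK and BIGTK index assignments per IEEE 802.11):

    /* pairwise keys (PTK)                        -> idx 0..3
     * group, beacon protection supported (BIGTK) -> idx 0..7 (6..7 = BIGTK)
     * group, an IGTK cipher supported            -> idx 0..5 (4..5 = IGTK)
     * group, neither supported                   -> idx 0..3 (GTK only)
     */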
+int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
+ struct key_params *params, int key_idx,
+ bool pairwise, const u8 *mac_addr)
+{
+ if (!cfg80211_valid_key_idx(rdev, key_idx, pairwise))
return -EINVAL;
if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
@@ -335,6 +370,7 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
case WLAN_CIPHER_SUITE_WEP104:
if (key_idx > 3)
return -EINVAL;
+ break;
default:
break;
}
@@ -550,8 +586,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
return -1;
break;
case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
- if (unlikely(iftype != NL80211_IFTYPE_WDS &&
- iftype != NL80211_IFTYPE_MESH_POINT &&
+ if (unlikely(iftype != NL80211_IFTYPE_MESH_POINT &&
iftype != NL80211_IFTYPE_AP_VLAN &&
iftype != NL80211_IFTYPE_STATION))
return -1;
@@ -1051,7 +1086,6 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
/* bridging OK */
break;
@@ -1063,6 +1097,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
/* not happening */
break;
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_NAN:
WARN_ON(1);
break;
@@ -1276,20 +1311,22 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
{
-#define SCALE 2048
- u16 mcs_divisors[12] = {
- 34133, /* 16.666666... */
- 17067, /* 8.333333... */
- 11378, /* 5.555555... */
- 8533, /* 4.166666... */
- 5689, /* 2.777777... */
- 4267, /* 2.083333... */
- 3923, /* 1.851851... */
- 3413, /* 1.666666... */
- 2844, /* 1.388888... */
- 2560, /* 1.250000... */
- 2276, /* 1.111111... */
- 2048, /* 1.000000... */
+#define SCALE 6144
+ u32 mcs_divisors[14] = {
+ 102399, /* 16.666666... */
+ 51201, /* 8.333333... */
+ 34134, /* 5.555555... */
+ 25599, /* 4.166666... */
+ 17067, /* 2.777777... */
+ 12801, /* 2.083333... */
+ 11769, /* 1.851851... */
+ 10239, /* 1.666666... */
+ 8532, /* 1.388888... */
+ 7680, /* 1.250000... */
+ 6828, /* 1.111111... */
+ 6144, /* 1.000000... */
+ 5690, /* 0.926106... */
+ 5120, /* 0.833333... */
};
u32 rates_160M[3] = { 960777777, 907400000, 816666666 };
u32 rates_969[3] = { 480388888, 453700000, 408333333 };
@@ -1301,7 +1338,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
u64 tmp;
u32 result;
- if (WARN_ON_ONCE(rate->mcs > 11))
+ if (WARN_ON_ONCE(rate->mcs > 13))
return 0;
if (WARN_ON_ONCE(rate->he_gi > NL80211_RATE_INFO_HE_GI_3_2))
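
The widened divisor table extends HE bitrate reporting to the vendor 4096-QAM rates, MCS 12 and 13. The computation is rate = base_rate(bw, gi) * SCALE / mcs_divisors[mcs], further scaled by NSS, so with the values above:

    /* MCS 11: SCALE/6144 = 1.000   (1024-QAM 5/6, previous maximum)
     * MCS 12: SCALE/5690 ~ 1.080   (4096-QAM 3/4)
     * MCS 13: SCALE/5120 = 1.200   (4096-QAM 5/6)
     *
     * i.e. MCS 13 reports a bitrate 20% above MCS 11 at the same
     * bandwidth/GI/NSS, and the WARN_ON bound moves from 11 to 13.
     */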
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 78f2927ead7f..fd9ad74972fb 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -49,9 +49,6 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
case IW_MODE_ADHOC:
type = NL80211_IFTYPE_ADHOC;
break;
- case IW_MODE_REPEAT:
- type = NL80211_IFTYPE_WDS;
- break;
case IW_MODE_MONITOR:
type = NL80211_IFTYPE_MONITOR;
break;
@@ -1150,50 +1147,6 @@ static int cfg80211_wext_giwpower(struct net_device *dev,
return 0;
}
-static int cfg80211_wds_wext_siwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *addr, char *extra)
-{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
- int err;
-
- if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS))
- return -EINVAL;
-
- if (addr->sa_family != ARPHRD_ETHER)
- return -EINVAL;
-
- if (netif_running(dev))
- return -EBUSY;
-
- if (!rdev->ops->set_wds_peer)
- return -EOPNOTSUPP;
-
- err = rdev_set_wds_peer(rdev, dev, (u8 *)&addr->sa_data);
- if (err)
- return err;
-
- memcpy(&wdev->wext.bssid, (u8 *) &addr->sa_data, ETH_ALEN);
-
- return 0;
-}
-
-static int cfg80211_wds_wext_giwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *addr, char *extra)
-{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
-
- if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS))
- return -EINVAL;
-
- addr->sa_family = ARPHRD_ETHER;
- memcpy(&addr->sa_data, wdev->wext.bssid, ETH_ALEN);
-
- return 0;
-}
-
static int cfg80211_wext_siwrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rate, char *extra)
@@ -1371,8 +1324,6 @@ static int cfg80211_wext_siwap(struct net_device *dev,
return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra);
case NL80211_IFTYPE_STATION:
return cfg80211_mgd_wext_siwap(dev, info, ap_addr, extra);
- case NL80211_IFTYPE_WDS:
- return cfg80211_wds_wext_siwap(dev, info, ap_addr, extra);
default:
return -EOPNOTSUPP;
}
@@ -1389,8 +1340,6 @@ static int cfg80211_wext_giwap(struct net_device *dev,
return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra);
case NL80211_IFTYPE_STATION:
return cfg80211_mgd_wext_giwap(dev, info, ap_addr, extra);
- case NL80211_IFTYPE_WDS:
- return cfg80211_wds_wext_giwap(dev, info, ap_addr, extra);
default:
return -EOPNOTSUPP;
}
@@ -1472,39 +1421,78 @@ static int cfg80211_wext_siwpmksa(struct net_device *dev,
}
}
+#define DEFINE_WEXT_COMPAT_STUB(func, type) \
+ static int __ ## func(struct net_device *dev, \
+ struct iw_request_info *info, \
+ union iwreq_data *wrqu, \
+ char *extra) \
+ { \
+ return func(dev, info, (type *)wrqu, extra); \
+ }
+
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwname, char)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwfreq, struct iw_freq)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwfreq, struct iw_freq)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwmode, u32)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwmode, u32)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwrange, struct iw_point)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwap, struct sockaddr)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwap, struct sockaddr)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwmlme, struct iw_point)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwscan, struct iw_point)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwessid, struct iw_point)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwessid, struct iw_point)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwrate, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwrate, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwrts, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwrts, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwfrag, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwfrag, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwretry, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwretry, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwencode, struct iw_point)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwencode, struct iw_point)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwpower, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwpower, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwgenie, struct iw_point)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwauth, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwauth, struct iw_param)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwencodeext, struct iw_point)
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwpmksa, struct iw_point)
+
static const iw_handler cfg80211_handlers[] = {
- [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname,
- [IW_IOCTL_IDX(SIOCSIWFREQ)] = (iw_handler) cfg80211_wext_siwfreq,
- [IW_IOCTL_IDX(SIOCGIWFREQ)] = (iw_handler) cfg80211_wext_giwfreq,
- [IW_IOCTL_IDX(SIOCSIWMODE)] = (iw_handler) cfg80211_wext_siwmode,
- [IW_IOCTL_IDX(SIOCGIWMODE)] = (iw_handler) cfg80211_wext_giwmode,
- [IW_IOCTL_IDX(SIOCGIWRANGE)] = (iw_handler) cfg80211_wext_giwrange,
- [IW_IOCTL_IDX(SIOCSIWAP)] = (iw_handler) cfg80211_wext_siwap,
- [IW_IOCTL_IDX(SIOCGIWAP)] = (iw_handler) cfg80211_wext_giwap,
- [IW_IOCTL_IDX(SIOCSIWMLME)] = (iw_handler) cfg80211_wext_siwmlme,
- [IW_IOCTL_IDX(SIOCSIWSCAN)] = (iw_handler) cfg80211_wext_siwscan,
- [IW_IOCTL_IDX(SIOCGIWSCAN)] = (iw_handler) cfg80211_wext_giwscan,
- [IW_IOCTL_IDX(SIOCSIWESSID)] = (iw_handler) cfg80211_wext_siwessid,
- [IW_IOCTL_IDX(SIOCGIWESSID)] = (iw_handler) cfg80211_wext_giwessid,
- [IW_IOCTL_IDX(SIOCSIWRATE)] = (iw_handler) cfg80211_wext_siwrate,
- [IW_IOCTL_IDX(SIOCGIWRATE)] = (iw_handler) cfg80211_wext_giwrate,
- [IW_IOCTL_IDX(SIOCSIWRTS)] = (iw_handler) cfg80211_wext_siwrts,
- [IW_IOCTL_IDX(SIOCGIWRTS)] = (iw_handler) cfg80211_wext_giwrts,
- [IW_IOCTL_IDX(SIOCSIWFRAG)] = (iw_handler) cfg80211_wext_siwfrag,
- [IW_IOCTL_IDX(SIOCGIWFRAG)] = (iw_handler) cfg80211_wext_giwfrag,
- [IW_IOCTL_IDX(SIOCSIWTXPOW)] = (iw_handler) cfg80211_wext_siwtxpower,
- [IW_IOCTL_IDX(SIOCGIWTXPOW)] = (iw_handler) cfg80211_wext_giwtxpower,
- [IW_IOCTL_IDX(SIOCSIWRETRY)] = (iw_handler) cfg80211_wext_siwretry,
- [IW_IOCTL_IDX(SIOCGIWRETRY)] = (iw_handler) cfg80211_wext_giwretry,
- [IW_IOCTL_IDX(SIOCSIWENCODE)] = (iw_handler) cfg80211_wext_siwencode,
- [IW_IOCTL_IDX(SIOCGIWENCODE)] = (iw_handler) cfg80211_wext_giwencode,
- [IW_IOCTL_IDX(SIOCSIWPOWER)] = (iw_handler) cfg80211_wext_siwpower,
- [IW_IOCTL_IDX(SIOCGIWPOWER)] = (iw_handler) cfg80211_wext_giwpower,
- [IW_IOCTL_IDX(SIOCSIWGENIE)] = (iw_handler) cfg80211_wext_siwgenie,
- [IW_IOCTL_IDX(SIOCSIWAUTH)] = (iw_handler) cfg80211_wext_siwauth,
- [IW_IOCTL_IDX(SIOCGIWAUTH)] = (iw_handler) cfg80211_wext_giwauth,
- [IW_IOCTL_IDX(SIOCSIWENCODEEXT)]= (iw_handler) cfg80211_wext_siwencodeext,
- [IW_IOCTL_IDX(SIOCSIWPMKSA)] = (iw_handler) cfg80211_wext_siwpmksa,
+ [IW_IOCTL_IDX(SIOCGIWNAME)] = __cfg80211_wext_giwname,
+ [IW_IOCTL_IDX(SIOCSIWFREQ)] = __cfg80211_wext_siwfreq,
+ [IW_IOCTL_IDX(SIOCGIWFREQ)] = __cfg80211_wext_giwfreq,
+ [IW_IOCTL_IDX(SIOCSIWMODE)] = __cfg80211_wext_siwmode,
+ [IW_IOCTL_IDX(SIOCGIWMODE)] = __cfg80211_wext_giwmode,
+ [IW_IOCTL_IDX(SIOCGIWRANGE)] = __cfg80211_wext_giwrange,
+ [IW_IOCTL_IDX(SIOCSIWAP)] = __cfg80211_wext_siwap,
+ [IW_IOCTL_IDX(SIOCGIWAP)] = __cfg80211_wext_giwap,
+ [IW_IOCTL_IDX(SIOCSIWMLME)] = __cfg80211_wext_siwmlme,
+ [IW_IOCTL_IDX(SIOCSIWSCAN)] = cfg80211_wext_siwscan,
+ [IW_IOCTL_IDX(SIOCGIWSCAN)] = __cfg80211_wext_giwscan,
+ [IW_IOCTL_IDX(SIOCSIWESSID)] = __cfg80211_wext_siwessid,
+ [IW_IOCTL_IDX(SIOCGIWESSID)] = __cfg80211_wext_giwessid,
+ [IW_IOCTL_IDX(SIOCSIWRATE)] = __cfg80211_wext_siwrate,
+ [IW_IOCTL_IDX(SIOCGIWRATE)] = __cfg80211_wext_giwrate,
+ [IW_IOCTL_IDX(SIOCSIWRTS)] = __cfg80211_wext_siwrts,
+ [IW_IOCTL_IDX(SIOCGIWRTS)] = __cfg80211_wext_giwrts,
+ [IW_IOCTL_IDX(SIOCSIWFRAG)] = __cfg80211_wext_siwfrag,
+ [IW_IOCTL_IDX(SIOCGIWFRAG)] = __cfg80211_wext_giwfrag,
+ [IW_IOCTL_IDX(SIOCSIWTXPOW)] = cfg80211_wext_siwtxpower,
+ [IW_IOCTL_IDX(SIOCGIWTXPOW)] = cfg80211_wext_giwtxpower,
+ [IW_IOCTL_IDX(SIOCSIWRETRY)] = __cfg80211_wext_siwretry,
+ [IW_IOCTL_IDX(SIOCGIWRETRY)] = __cfg80211_wext_giwretry,
+ [IW_IOCTL_IDX(SIOCSIWENCODE)] = __cfg80211_wext_siwencode,
+ [IW_IOCTL_IDX(SIOCGIWENCODE)] = __cfg80211_wext_giwencode,
+ [IW_IOCTL_IDX(SIOCSIWPOWER)] = __cfg80211_wext_siwpower,
+ [IW_IOCTL_IDX(SIOCGIWPOWER)] = __cfg80211_wext_giwpower,
+ [IW_IOCTL_IDX(SIOCSIWGENIE)] = __cfg80211_wext_siwgenie,
+ [IW_IOCTL_IDX(SIOCSIWAUTH)] = __cfg80211_wext_siwauth,
+ [IW_IOCTL_IDX(SIOCGIWAUTH)] = __cfg80211_wext_giwauth,
+ [IW_IOCTL_IDX(SIOCSIWENCODEEXT)]= __cfg80211_wext_siwencodeext,
+ [IW_IOCTL_IDX(SIOCSIWPMKSA)] = __cfg80211_wext_siwpmksa,
};
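
The handler table previously cast functions taking iw_freq, iw_point, etc. to iw_handler, which takes union iwreq_data * — an indirect call through a mismatched prototype that control-flow-integrity checking rejects. The stub macro generates a correctly typed trampoline per handler; expanded for one entry it reads:

    /* DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwname, char) expands to: */
    static int __cfg80211_wext_giwname(struct net_device *dev,
    				       struct iw_request_info *info,
    				       union iwreq_data *wrqu,
    				       char *extra)
    {
    	return cfg80211_wext_giwname(dev, info, (char *)wrqu, extra);
    }

The siwscan and txpower handlers already use the union iwreq_data prototype, which is why they appear in the table directly, without a stub.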
const struct iw_handler_def cfg80211_wext_handler = {
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e65a50192432..ff687b97b2d9 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -200,22 +200,6 @@ static void x25_remove_socket(struct sock *sk)
}
/*
- * Kill all bound sockets on a dropped device.
- */
-static void x25_kill_by_device(struct net_device *dev)
-{
- struct sock *s;
-
- write_lock_bh(&x25_list_lock);
-
- sk_for_each(s, &x25_list)
- if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
- x25_disconnect(s, ENETUNREACH, 0, 0);
-
- write_unlock_bh(&x25_list_lock);
-}
-
-/*
* Handle device status changes.
*/
static int x25_device_event(struct notifier_block *this, unsigned long event,
@@ -227,27 +211,33 @@ static int x25_device_event(struct notifier_block *this, unsigned long event,
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
- if (dev->type == ARPHRD_X25
-#if IS_ENABLED(CONFIG_LLC)
- || dev->type == ARPHRD_ETHER
-#endif
- ) {
+ if (dev->type == ARPHRD_X25) {
switch (event) {
- case NETDEV_UP:
+ case NETDEV_REGISTER:
+ case NETDEV_POST_TYPE_CHANGE:
x25_link_device_up(dev);
break;
- case NETDEV_GOING_DOWN:
+ case NETDEV_DOWN:
nb = x25_get_neigh(dev);
if (nb) {
- x25_terminate_link(nb);
+ x25_link_terminated(nb);
x25_neigh_put(nb);
}
- break;
- case NETDEV_DOWN:
- x25_kill_by_device(dev);
x25_route_device_down(dev);
+ break;
+ case NETDEV_PRE_TYPE_CHANGE:
+ case NETDEV_UNREGISTER:
x25_link_device_down(dev);
break;
+ case NETDEV_CHANGE:
+ if (!netif_carrier_ok(dev)) {
+ nb = x25_get_neigh(dev);
+ if (nb) {
+ x25_link_terminated(nb);
+ x25_neigh_put(nb);
+ }
+ }
+ break;
}
}
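
With Ethernet/LLC transport gone, the notifier only handles true X.25 devices, and the events are remapped so the link layer tracks the device lifecycle rather than IFF_UP alone. The resulting mapping (sketch):

    /* NETDEV_REGISTER, NETDEV_POST_TYPE_CHANGE  -> x25_link_device_up()
     * NETDEV_DOWN                               -> terminate the neighbour,
     *                                              then drop its routes
     * NETDEV_PRE_TYPE_CHANGE, NETDEV_UNREGISTER -> x25_link_device_down()
     * NETDEV_CHANGE with carrier lost           -> terminate the neighbour
     */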
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 25bf72ee6cad..5259ef8f5242 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -160,10 +160,6 @@ void x25_establish_link(struct x25_neigh *nb)
*ptr = X25_IFACE_CONNECT;
break;
-#if IS_ENABLED(CONFIG_LLC)
- case ARPHRD_ETHER:
- return;
-#endif
default:
return;
}
@@ -179,10 +175,6 @@ void x25_terminate_link(struct x25_neigh *nb)
struct sk_buff *skb;
unsigned char *ptr;
-#if IS_ENABLED(CONFIG_LLC)
- if (nb->dev->type == ARPHRD_ETHER)
- return;
-#endif
if (nb->dev->type != ARPHRD_X25)
return;
@@ -212,11 +204,6 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
*dptr = X25_IFACE_DATA;
break;
-#if IS_ENABLED(CONFIG_LLC)
- case ARPHRD_ETHER:
- kfree_skb(skb);
- return;
-#endif
default:
kfree_skb(skb);
return;
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index fdae054b7dc1..57a81100c5da 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -58,11 +58,6 @@ static inline void x25_stop_t20timer(struct x25_neigh *nb)
del_timer(&nb->t20timer);
}
-static inline int x25_t20timer_pending(struct x25_neigh *nb)
-{
- return timer_pending(&nb->t20timer);
-}
-
/*
* This handles all restart and diagnostic frames.
*/
@@ -70,20 +65,45 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
unsigned short frametype)
{
struct sk_buff *skbn;
- int confirm;
switch (frametype) {
case X25_RESTART_REQUEST:
- confirm = !x25_t20timer_pending(nb);
- x25_stop_t20timer(nb);
- nb->state = X25_LINK_STATE_3;
- if (confirm)
+ switch (nb->state) {
+ case X25_LINK_STATE_0:
+ /* This can happen when the x25 module just gets loaded
+ * and doesn't know layer 2 has already connected
+ */
+ nb->state = X25_LINK_STATE_3;
x25_transmit_restart_confirmation(nb);
+ break;
+ case X25_LINK_STATE_2:
+ x25_stop_t20timer(nb);
+ nb->state = X25_LINK_STATE_3;
+ break;
+ case X25_LINK_STATE_3:
+ /* clear existing virtual calls */
+ x25_kill_by_neigh(nb);
+
+ x25_transmit_restart_confirmation(nb);
+ break;
+ }
break;
case X25_RESTART_CONFIRMATION:
- x25_stop_t20timer(nb);
- nb->state = X25_LINK_STATE_3;
+ switch (nb->state) {
+ case X25_LINK_STATE_2:
+ x25_stop_t20timer(nb);
+ nb->state = X25_LINK_STATE_3;
+ break;
+ case X25_LINK_STATE_3:
+ /* clear existing virtual calls */
+ x25_kill_by_neigh(nb);
+
+ x25_transmit_restart_request(nb);
+ nb->state = X25_LINK_STATE_2;
+ x25_start_t20timer(nb);
+ break;
+ }
break;
case X25_DIAGNOSTIC:
@@ -214,8 +234,6 @@ void x25_link_established(struct x25_neigh *nb)
{
switch (nb->state) {
case X25_LINK_STATE_0:
- nb->state = X25_LINK_STATE_2;
- break;
case X25_LINK_STATE_1:
x25_transmit_restart_request(nb);
nb->state = X25_LINK_STATE_2;
@@ -232,6 +250,9 @@ void x25_link_established(struct x25_neigh *nb)
void x25_link_terminated(struct x25_neigh *nb)
{
nb->state = X25_LINK_STATE_0;
+ skb_queue_purge(&nb->queue);
+ x25_stop_t20timer(nb);
+
/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
x25_kill_by_neigh(nb);
}
@@ -277,9 +298,6 @@ void x25_link_device_up(struct net_device *dev)
*/
static void __x25_remove_neigh(struct x25_neigh *nb)
{
- skb_queue_purge(&nb->queue);
- x25_stop_t20timer(nb);
-
if (nb->node.next) {
list_del(&nb->node);
x25_neigh_put(nb);
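
The restart handling is now an explicit per-state switch, and queue purging plus T20 teardown move from neighbour removal into x25_link_terminated(). A sketch of the machine the handlers above implement (state meanings inferred from the code):

    /* X25_LINK_STATE_0: layer 2 up, no restart exchange seen yet
     * X25_LINK_STATE_2: we sent RESTART_REQUEST, T20 timer running
     * X25_LINK_STATE_3: link operational
     *
     * REQUEST in state 0      -> confirm, go to state 3
     * REQUEST in state 2      -> stop T20, go to state 3
     * REQUEST in state 3      -> peer restarted: kill virtual calls,
     *                            then confirm
     * CONFIRMATION in state 2 -> stop T20, go to state 3
     * CONFIRMATION in state 3 -> unexpected: kill calls, re-send
     *                            REQUEST, back to state 2 with T20
     */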
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 00e46c9a5280..9fbe4bb38d94 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -115,9 +115,6 @@ void x25_route_device_down(struct net_device *dev)
__x25_remove_route(rt);
}
write_unlock_bh(&x25_route_list_lock);
-
- /* Remove any related forwarding */
- x25_clear_forward_by_dev(dev);
}
/*
@@ -127,12 +124,7 @@ struct net_device *x25_dev_get(char *devname)
{
struct net_device *dev = dev_get_by_name(&init_net, devname);
- if (dev &&
- (!(dev->flags & IFF_UP) || (dev->type != ARPHRD_X25
-#if IS_ENABLED(CONFIG_LLC)
- && dev->type != ARPHRD_ETHER
-#endif
- ))){
+ if (dev && (!(dev->flags & IFF_UP) || dev->type != ARPHRD_X25)) {
dev_put(dev);
dev = NULL;
}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 62504471fd20..ac4a317038f1 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -23,6 +23,7 @@
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
+#include <net/busy_poll.h>
#include <net/xdp.h>
#include "xsk_queue.h"
@@ -240,6 +241,7 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
return -EINVAL;
+ sk_mark_napi_id_once_xdp(&xs->sk, xdp);
len = xdp->data_end - xdp->data;
return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
@@ -341,6 +343,63 @@ out:
}
EXPORT_SYMBOL(xsk_tx_peek_desc);
+static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, struct xdp_desc *descs,
+ u32 max_entries)
+{
+ u32 nb_pkts = 0;
+
+ while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
+ nb_pkts++;
+
+ xsk_tx_release(pool);
+ return nb_pkts;
+}
+
+u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
+ u32 max_entries)
+{
+ struct xdp_sock *xs;
+ u32 nb_pkts;
+
+ rcu_read_lock();
+ if (!list_is_singular(&pool->xsk_tx_list)) {
+ /* Fallback to the non-batched version */
+ rcu_read_unlock();
+ return xsk_tx_peek_release_fallback(pool, descs, max_entries);
+ }
+
+ xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
+ if (!xs) {
+ nb_pkts = 0;
+ goto out;
+ }
+
+ nb_pkts = xskq_cons_peek_desc_batch(xs->tx, descs, pool, max_entries);
+ if (!nb_pkts) {
+ xs->tx->queue_empty_descs++;
+ goto out;
+ }
+
+ /* This is the backpressure mechanism for the Tx path. Try to
+ * reserve space in the completion queue for all packets, but
+ * if there are fewer slots available, just process that many
+ * packets. This avoids having to implement any buffering in
+ * the Tx path.
+ */
+ nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, descs, nb_pkts);
+ if (!nb_pkts)
+ goto out;
+
+ xskq_cons_release_n(xs->tx, nb_pkts);
+ __xskq_cons_release(xs->tx);
+ xs->sk.sk_write_space(&xs->sk);
+
+out:
+ rcu_read_unlock();
+ return nb_pkts;
+}
+EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
+
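The batched peek/release API lets a zero-copy driver pull up to a budget of TX descriptors in one call, with completion-queue space already reserved as backpressure. A sketch of the driver side (driver structure and helper names hypothetical):

    /* Sketch: zero-copy TX path consuming the batched API. */
    static void my_xmit_zc(struct my_ring *ring, struct xsk_buff_pool *pool,
    		       struct xdp_desc *descs, u32 budget)
    {
    	u32 i, nb_pkts;

    	nb_pkts = xsk_tx_peek_release_desc_batch(pool, descs, budget);
    	for (i = 0; i < nb_pkts; i++) {
    		/* post descs[i].addr / descs[i].len to the HW ring */
    		my_post_tx(ring, descs[i].addr, descs[i].len);
    	}
    	if (nb_pkts)
    		my_kick_hw(ring);
    }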
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
struct net_device *dev = xs->dev;
@@ -464,18 +523,65 @@ static int __xsk_sendmsg(struct sock *sk)
return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}
+static bool xsk_no_wakeup(struct sock *sk)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ /* Prefer busy-polling, skip the wakeup. */
+ return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
+ READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
+#else
+ return false;
+#endif
+}
+
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
+ struct xsk_buff_pool *pool;
if (unlikely(!xsk_is_bound(xs)))
return -ENXIO;
if (unlikely(need_wait))
return -EOPNOTSUPP;
- return __xsk_sendmsg(sk);
+ if (sk_can_busy_loop(sk))
+ sk_busy_loop(sk, 1); /* only support non-blocking sockets */
+
+ if (xsk_no_wakeup(sk))
+ return 0;
+
+ pool = xs->pool;
+ if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
+ return __xsk_sendmsg(sk);
+ return 0;
+}
+
+static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
+{
+ bool need_wait = !(flags & MSG_DONTWAIT);
+ struct sock *sk = sock->sk;
+ struct xdp_sock *xs = xdp_sk(sk);
+
+ if (unlikely(!xsk_is_bound(xs)))
+ return -ENXIO;
+ if (unlikely(!(xs->dev->flags & IFF_UP)))
+ return -ENETDOWN;
+ if (unlikely(!xs->rx))
+ return -ENOBUFS;
+ if (unlikely(need_wait))
+ return -EOPNOTSUPP;
+
+ if (sk_can_busy_loop(sk))
+ sk_busy_loop(sk, 1); /* only support non-blocking sockets */
+
+ if (xsk_no_wakeup(sk))
+ return 0;
+
+ if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
+ return xsk_wakeup(xs, XDP_WAKEUP_RX);
+ return 0;
}
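
Both xsk_sendmsg() and the new xsk_recvmsg() now busy-poll the driver when the socket asks for it, and skip the wakeup kick entirely while busy polling is preferred. From user space the opt-in looks like this (socket option names from the preferred-busy-poll series this sits on top of):

    int one = 1, usecs = 20;

    setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &one, sizeof(one));
    setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
    /* Once the socket has a NAPI id (>= MIN_NAPI_ID), sendto()/recvmsg()
     * drive the NAPI context directly instead of requesting a wakeup.
     */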
static __poll_t xsk_poll(struct file *file, struct socket *sock,
@@ -554,7 +660,7 @@ static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
node);
if (node) {
- WARN_ON(xsk_map_inc(node->map));
+ bpf_map_inc(&node->map->map);
map = node->map;
*map_entry = node->map_entry;
}
@@ -584,7 +690,7 @@ static void xsk_delete_from_maps(struct xdp_sock *xs)
while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
xsk_map_try_sock_delete(map, xs, map_entry);
- xsk_map_put(map);
+ bpf_map_put(&map->map);
}
}
@@ -1140,7 +1246,7 @@ static const struct proto_ops xsk_proto_ops = {
.setsockopt = xsk_setsockopt,
.getsockopt = xsk_getsockopt,
.sendmsg = xsk_sendmsg,
- .recvmsg = sock_no_recvmsg,
+ .recvmsg = xsk_recvmsg,
.mmap = xsk_mmap,
.sendpage = sock_no_sendpage,
};
diff --git a/net/xdp/xsk.h b/net/xdp/xsk.h
index b9e896cee5bb..edcf249ad1f1 100644
--- a/net/xdp/xsk.h
+++ b/net/xdp/xsk.h
@@ -41,8 +41,6 @@ static inline struct xdp_sock *xdp_sk(struct sock *sk)
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
struct xdp_sock **map_entry);
-int xsk_map_inc(struct xsk_map *map);
-void xsk_map_put(struct xsk_map *map);
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id);
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
u16 queue_id);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index d5adeee9d5d9..67a4494d63b6 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -144,14 +144,13 @@ static int __xp_assign_dev(struct xsk_buff_pool *pool,
if (err)
return err;
- if (flags & XDP_USE_NEED_WAKEUP) {
+ if (flags & XDP_USE_NEED_WAKEUP)
pool->uses_need_wakeup = true;
- /* Tx needs to be explicitly woken up the first time.
- * Also for supporting drivers that do not implement this
- * feature. They will always have to call sendto().
- */
- pool->cached_need_wakeup = XDP_WAKEUP_TX;
- }
+ /* Tx needs to be explicitly woken up the first time. This also
+ * covers drivers that do not implement the need_wakeup feature;
+ * they will always have to call sendto() or poll().
+ */
+ pool->cached_need_wakeup = XDP_WAKEUP_TX;
dev_hold(netdev);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 9e71b9f27679..4a9663aa7afe 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -18,9 +18,11 @@ struct xdp_ring {
/* Hinder the adjacent cache prefetcher to prefetch the consumer
* pointer if the producer pointer is touched and vice versa.
*/
- u32 pad ____cacheline_aligned_in_smp;
+ u32 pad1 ____cacheline_aligned_in_smp;
u32 consumer ____cacheline_aligned_in_smp;
+ u32 pad2 ____cacheline_aligned_in_smp;
u32 flags;
+ u32 pad3 ____cacheline_aligned_in_smp;
};
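
The extra pads space the hot fields out so that producer and consumer never share an aligned cacheline pair. Assuming 64-byte cachelines, the layout becomes roughly (sketch):

    /* line 0: producer        line 1: pad1
     * line 2: consumer        line 3: pad2 + flags
     * line 4: pad3            line 5+: descriptor ring
     *
     * The adjacent-line prefetcher pulls cachelines in aligned pairs,
     * so each padding slot keeps one side's hot line from being
     * dragged in by accesses to the other side's.
     */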
/* Used for the RX and TX queues for packets */
@@ -197,6 +199,30 @@ static inline bool xskq_cons_read_desc(struct xsk_queue *q,
return false;
}
+static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q,
+ struct xdp_desc *descs,
+ struct xsk_buff_pool *pool, u32 max)
+{
+ u32 cached_cons = q->cached_cons, nb_entries = 0;
+
+ while (cached_cons != q->cached_prod && nb_entries < max) {
+ struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
+ u32 idx = cached_cons & q->ring_mask;
+
+ descs[nb_entries] = ring->desc[idx];
+ if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
+ /* Skip the entry */
+ cached_cons++;
+ continue;
+ }
+
+ nb_entries++;
+ cached_cons++;
+ }
+
+ return nb_entries;
+}
+
/* Functions for consumers */
static inline void __xskq_cons_release(struct xsk_queue *q)
@@ -218,17 +244,22 @@ static inline void xskq_cons_get_entries(struct xsk_queue *q)
__xskq_cons_peek(q);
}
-static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
+static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
u32 entries = q->cached_prod - q->cached_cons;
- if (entries >= cnt)
- return true;
+ if (entries >= max)
+ return max;
__xskq_cons_peek(q);
entries = q->cached_prod - q->cached_cons;
- return entries >= cnt;
+ return entries >= max ? max : entries;
+}
+
+static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
+{
+ return xskq_cons_nb_entries(q, cnt) >= cnt ? true : false;
}
static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
@@ -247,16 +278,28 @@ static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
return xskq_cons_read_desc(q, desc, pool);
}
+static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xdp_desc *descs,
+ struct xsk_buff_pool *pool, u32 max)
+{
+ u32 entries = xskq_cons_nb_entries(q, max);
+
+ return xskq_cons_read_desc_batch(q, descs, pool, entries);
+}
+
+/* To improve performance in the xskq_cons_release functions, only update local state here.
+ * Reflect this to global state when we get new entries from the ring in
+ * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
+ */
static inline void xskq_cons_release(struct xsk_queue *q)
{
- /* To improve performance, only update local state here.
- * Reflect this to global state when we get new entries
- * from the ring in xskq_cons_get_entries() and whenever
- * Rx or Tx processing are completed in the NAPI loop.
- */
q->cached_cons++;
}
+static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
+{
+ q->cached_cons += cnt;
+}
+
static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
/* No barriers needed since data is not accessed */
@@ -272,18 +315,23 @@ static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
/* Functions for producers */
-static inline bool xskq_prod_is_full(struct xsk_queue *q)
+static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);
- if (free_entries)
- return false;
+ if (free_entries >= max)
+ return max;
/* Refresh the local tail pointer */
q->cached_cons = READ_ONCE(q->ring->consumer);
free_entries = q->nentries - (q->cached_prod - q->cached_cons);
- return !free_entries;
+ return free_entries >= max ? max : free_entries;
+}
+
+static inline bool xskq_prod_is_full(struct xsk_queue *q)
+{
+ return xskq_prod_nb_free(q, 1) ? false : true;
}
static inline int xskq_prod_reserve(struct xsk_queue *q)
@@ -308,6 +356,23 @@ static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
return 0;
}
+static inline u32 xskq_prod_reserve_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
+ u32 max)
+{
+ struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+ u32 nb_entries, i, cached_prod;
+
+ nb_entries = xskq_prod_nb_free(q, max);
+
+ /* A, matches D */
+ cached_prod = q->cached_prod;
+ for (i = 0; i < nb_entries; i++)
+ ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
+ q->cached_prod = cached_prod;
+
+ return nb_entries;
+}
+
static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
u64 addr, u32 len)
{
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 49da2b8ace8b..113fd9017203 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -11,32 +11,17 @@
#include "xsk.h"
-int xsk_map_inc(struct xsk_map *map)
-{
- bpf_map_inc(&map->map);
- return 0;
-}
-
-void xsk_map_put(struct xsk_map *map)
-{
- bpf_map_put(&map->map);
-}
-
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
struct xdp_sock **map_entry)
{
struct xsk_map_node *node;
- int err;
- node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);
+ node = bpf_map_kzalloc(&map->map, sizeof(*node),
+ GFP_ATOMIC | __GFP_NOWARN);
if (!node)
return ERR_PTR(-ENOMEM);
- err = xsk_map_inc(map);
- if (err) {
- kfree(node);
- return ERR_PTR(err);
- }
+ bpf_map_inc(&map->map);
node->map = map;
node->map_entry = map_entry;
@@ -45,7 +30,7 @@ static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
static void xsk_map_node_free(struct xsk_map_node *node)
{
- xsk_map_put(node->map);
+ bpf_map_put(&node->map->map);
kfree(node);
}
@@ -73,9 +58,8 @@ static void xsk_map_sock_delete(struct xdp_sock *xs,
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
- struct bpf_map_memory mem;
- int err, numa_node;
struct xsk_map *m;
+ int numa_node;
u64 size;
if (!capable(CAP_NET_ADMIN))
@@ -89,18 +73,11 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
numa_node = bpf_map_attr_numa_node(attr);
size = struct_size(m, xsk_map, attr->max_entries);
- err = bpf_map_charge_init(&mem, size);
- if (err < 0)
- return ERR_PTR(err);
-
m = bpf_map_area_alloc(size, numa_node);
- if (!m) {
- bpf_map_charge_finish(&mem);
+ if (!m)
return ERR_PTR(-ENOMEM);
- }
bpf_map_init_from_attr(&m->map, attr);
- bpf_map_charge_move(&m->map.memory, &mem);
spin_lock_init(&m->lock);
return &m->map;
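
This is part of the rlimit-to-memcg accounting switch: the explicit charge/move/uncharge choreography disappears because the allocators now account against the map's memory cgroup themselves. Side by side (sketch):

    /* old: bpf_map_charge_init(&mem, size);    explicit RLIMIT_MEMLOCK charge
     *      m = bpf_map_area_alloc(size, node);
     *      bpf_map_charge_move(&m->map.memory, &mem);
     *
     * new: m = bpf_map_area_alloc(size, node); memcg-accounted directly
     *      bpf_map_kzalloc(&map->map, ...);    per-entry allocations are
     *                                          charged to the map's memcg
     */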
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 37456d022cfa..be6351e3f3cd 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -760,9 +760,9 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
}
EXPORT_SYMBOL(xfrm_input_resume);
-static void xfrm_trans_reinject(unsigned long data)
+static void xfrm_trans_reinject(struct tasklet_struct *t)
{
- struct xfrm_trans_tasklet *trans = (void *)data;
+ struct xfrm_trans_tasklet *trans = from_tasklet(trans, t, tasklet);
struct sk_buff_head queue;
struct sk_buff *skb;
@@ -818,7 +818,6 @@ void __init xfrm_input_init(void)
trans = &per_cpu(xfrm_trans_tasklet, i);
__skb_queue_head_init(&trans->queue);
- tasklet_init(&trans->tasklet, xfrm_trans_reinject,
- (unsigned long)trans);
+ tasklet_setup(&trans->tasklet, xfrm_trans_reinject);
}
}
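
This follows the tree-wide tasklet API conversion, which replaces the untyped unsigned-long callback argument with a typed back-pointer. The general pattern (sketch):

    /* old:  tasklet_init(&obj->tasklet, func, (unsigned long)obj);
     *       static void func(unsigned long data)
     *       { struct obj *o = (void *)data; ... }
     *
     * new:  tasklet_setup(&obj->tasklet, func);
     *       static void func(struct tasklet_struct *t)
     *       { struct obj *o = from_tasklet(o, t, tasklet); ... }
     *
     * from_tasklet() is container_of() under the hood, so the tasklet
     * must be embedded in the per-object structure.
     */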
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 9b8e292a7c6a..697cdcfbb5e1 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -319,12 +319,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
err = dst_output(xi->net, skb->sk, skb);
if (net_xmit_eval(err) == 0) {
- struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
-
- u64_stats_update_begin(&tstats->syncp);
- tstats->tx_bytes += length;
- tstats->tx_packets++;
- u64_stats_update_end(&tstats->syncp);
+ dev_sw_netstats_tx_add(dev, 1, length);
} else {
stats->tx_errors++;
stats->tx_aborted_errors++;
@@ -538,15 +533,6 @@ static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
return err;
}
-static void xfrmi_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *s)
-{
- dev_fetch_sw_netstats(s, dev->tstats);
-
- s->rx_dropped = dev->stats.rx_dropped;
- s->tx_dropped = dev->stats.tx_dropped;
-}
-
static int xfrmi_get_iflink(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
@@ -554,12 +540,11 @@ static int xfrmi_get_iflink(const struct net_device *dev)
return xi->p.link;
}
-
static const struct net_device_ops xfrmi_netdev_ops = {
.ndo_init = xfrmi_dev_init,
.ndo_uninit = xfrmi_dev_uninit,
.ndo_start_xmit = xfrmi_xmit,
- .ndo_get_stats64 = xfrmi_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = xfrmi_get_iflink,
};
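
dev_sw_netstats_tx_add() and dev_get_tstats64() are the common helpers for drivers keeping pcpu_sw_netstats; the removed code is exactly what they open-coded. For the TX-side bump:

    /* what dev_sw_netstats_tx_add(dev, packets, len) replaces: */
    struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

    u64_stats_update_begin(&tstats->syncp);
    tstats->tx_bytes += len;
    tstats->tx_packets += packets;
    u64_stats_update_end(&tstats->syncp);

dev_get_tstats64() likewise wraps dev_fetch_sw_netstats() and folds in dev->stats.rx_dropped / tx_dropped, matching the removed xfrmi_get_stats64().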
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d0c32a8fcc4a..0727ac853b55 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -848,21 +848,84 @@ static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb
return 0;
}
+static bool xfrm_redact(void)
+{
+ return IS_ENABLED(CONFIG_SECURITY) &&
+ security_locked_down(LOCKDOWN_XFRM_SECRET);
+}
+
static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
struct xfrm_algo *algo;
+ struct xfrm_algo_auth *ap;
struct nlattr *nla;
+ bool redact_secret = xfrm_redact();
nla = nla_reserve(skb, XFRMA_ALG_AUTH,
sizeof(*algo) + (auth->alg_key_len + 7) / 8);
if (!nla)
return -EMSGSIZE;
-
algo = nla_data(nla);
strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
- memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
+
+ if (redact_secret && auth->alg_key_len)
+ memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8);
+ else
+ memcpy(algo->alg_key, auth->alg_key,
+ (auth->alg_key_len + 7) / 8);
algo->alg_key_len = auth->alg_key_len;
+ nla = nla_reserve(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(auth));
+ if (!nla)
+ return -EMSGSIZE;
+ ap = nla_data(nla);
+ memcpy(ap, auth, sizeof(struct xfrm_algo_auth));
+ if (redact_secret && auth->alg_key_len)
+ memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8);
+ else
+ memcpy(ap->alg_key, auth->alg_key,
+ (auth->alg_key_len + 7) / 8);
+ return 0;
+}
+
+static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb)
+{
+ struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_AEAD, aead_len(aead));
+ struct xfrm_algo_aead *ap;
+ bool redact_secret = xfrm_redact();
+
+ if (!nla)
+ return -EMSGSIZE;
+
+ ap = nla_data(nla);
+ memcpy(ap, aead, sizeof(*aead));
+
+ if (redact_secret && aead->alg_key_len)
+ memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
+ else
+ memcpy(ap->alg_key, aead->alg_key,
+ (aead->alg_key_len + 7) / 8);
+ return 0;
+}
+
+static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
+{
+ struct xfrm_algo *ap;
+ bool redact_secret = xfrm_redact();
+ struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_CRYPT,
+ xfrm_alg_len(ealg));
+ if (!nla)
+ return -EMSGSIZE;
+
+ ap = nla_data(nla);
+ memcpy(ap, ealg, sizeof(*ealg));
+
+ if (redact_secret && ealg->alg_key_len)
+ memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
+ else
+ memcpy(ap->alg_key, ealg->alg_key,
+ (ealg->alg_key_len + 7) / 8);
+
return 0;
}
@@ -906,20 +969,17 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
goto out;
}
if (x->aead) {
- ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
+ ret = copy_to_user_aead(x->aead, skb);
if (ret)
goto out;
}
if (x->aalg) {
ret = copy_to_user_auth(x->aalg, skb);
- if (!ret)
- ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
- xfrm_alg_auth_len(x->aalg), x->aalg);
if (ret)
goto out;
}
if (x->ealg) {
- ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
+ ret = copy_to_user_ealg(x->ealg, skb);
if (ret)
goto out;
}
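
One detail worth keeping in mind when reading the three copy_to_user_* helpers: alg_key_len is carried in bits, so the recurring (alg_key_len + 7) / 8 rounds up to whole bytes. Worked examples (algorithm key sizes illustrative):

    /* hmac(sha1):         alg_key_len = 160 -> (160 + 7) / 8 = 20 bytes
     * rfc4106(gcm(aes)),
     * 256-bit key + salt: alg_key_len = 288 -> (288 + 7) / 8 = 36 bytes
     *
     * Under LOCKDOWN_XFRM_SECRET that many bytes are zeroed in the dump
     * rather than copied, so key lengths stay visible to user space but
     * the key material does not.
     */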