path: root/include
author     Linus Torvalds <torvalds@linux-foundation.org>  2016-10-05 20:11:24 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-10-05 20:11:24 +0300
commit     687ee0ad4e897e29f4b41f7a20c866d74c5e0660 (patch)
tree       b31a2af35c24a54823674cdd126993b80daeac67 /include
parent     3ddf40e8c31964b744ff10abb48c8e36a83ec6e7 (diff)
parent     03a1eabc3f54469abd4f1784182851b2e29630cc (diff)
download   linux-687ee0ad4e897e29f4b41f7a20c866d74c5e0660.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) BBR TCP congestion control, from Neal Cardwell, Yuchung Cheng and co. at Google. https://lwn.net/Articles/701165/
 2) Do TCP Small Queues for retransmits, from Eric Dumazet.
 3) Support collect_md mode for all IPV4 and IPV6 tunnels, from Alexei Starovoitov.
 4) Allow cls_flower to classify packets in ip tunnels, from Amir Vadai.
 5) Support DSA tagging in older mv88e6xxx switches, from Andrew Lunn.
 6) Support GMAC protocol in iwlwifi mvm, from Ayala Beker.
 7) Support ndo_poll_controller in mlx5, from Calvin Owens.
 8) Move VRF processing to an output hook and allow l3mdev to be loopback, from David Ahern.
 9) Support SOCK_DESTROY for UDP sockets. Also from David Ahern.
10) Congestion control in RXRPC, from David Howells.
11) Support geneve RX offload in ixgbe, from Emil Tantilov.
12) When hitting pressure for new incoming TCP data SKBs, perform a partial rather than a full purge of the OFO queue (which could be huge). From Eric Dumazet.
13) Convert XFRM state and policy lookups to RCU, from Florian Westphal.
14) Support RX network flow classification to igb, from Gangfeng Huang.
15) Hardware offloading of eBPF in nfp driver, from Jakub Kicinski.
16) New skbmod packet action, from Jamal Hadi Salim.
17) Remove some inefficiencies in snmp proc output, from Jia He.
18) Add FIB notifications to properly propagate route changes to hardware which is doing forwarding offloading. From Jiri Pirko.
19) New dsa driver for qca8xxx chips, from John Crispin.
20) Implement RFC7559 ipv6 router solicitation backoff, from Maciej Żenczykowski.
21) Add L3 mode to ipvlan, from Mahesh Bandewar.
22) Support 802.1ad in mlx4, from Moshe Shemesh.
23) Support hardware LRO in mediatek driver, from Nelson Chang.
24) Add TC offloading to mlx5, from Or Gerlitz.
25) Convert various drivers to ethtool ksettings interfaces, from Philippe Reynes.
26) TX max rate limiting for cxgb4, from Rahul Lakkireddy.
27) NAPI support for ath10k, from Rajkumar Manoharan.
28) Support XDP in mlx5, from Rana Shahout and Saeed Mahameed.
29) UDP replicast support in TIPC, from Richard Alpe.
30) Per-queue statistics for qed driver, from Sudarsana Reddy Kalluru.
31) Support BQL in thunderx driver, from Sunil Goutham.
32) TSO support in alx driver, from Tobias Regnery.
33) Add stream parser engine and use it in kcm.
34) Support async DHCP replies in ipconfig module, from Uwe Kleine-König.
35) DSA port fast aging for mv88e6xxx driver, from Vivien Didelot.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1715 commits)
  mlxsw: switchx2: Fix misuse of hard_header_len
  mlxsw: spectrum: Fix misuse of hard_header_len
  net/faraday: Stop NCSI device on shutdown
  net/ncsi: Introduce ncsi_stop_dev()
  net/ncsi: Rework the channel monitoring
  net/ncsi: Allow to extend NCSI request properties
  net/ncsi: Rework request index allocation
  net/ncsi: Don't probe on the reserved channel ID (0x1f)
  net/ncsi: Introduce NCSI_RESERVED_CHANNEL
  net/ncsi: Avoid unused-value build warning from ia64-linux-gcc
  net: Add netdev all_adj_list refcnt propagation to fix panic
  net: phy: Add Edge-rate driver for Microsemi PHYs.
  vmxnet3: Wake queue from reset work
  i40e: avoid NULL pointer dereference and recursive errors on early PCI error
  qed: Add RoCE ll2 & GSI support
  qed: Add support for memory registeration verbs
  qed: Add support for QP verbs
  qed: PD,PKEY and CQ verb support
  qed: Add support for RoCE hw init
  qede: Add qedr framework
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/dt-bindings/net/mscc-phy-vsc8531.h  21
-rw-r--r--  include/linux/bcma/bcma.h  3
-rw-r--r--  include/linux/bcma/bcma_regs.h  1
-rw-r--r--  include/linux/bitfield.h  93
-rw-r--r--  include/linux/bpf.h  15
-rw-r--r--  include/linux/bpf_verifier.h  102
-rw-r--r--  include/linux/bug.h  3
-rw-r--r--  include/linux/cgroup.h  23
-rw-r--r--  include/linux/filter.h  64
-rw-r--r--  include/linux/hyperv.h  7
-rw-r--r--  include/linux/if_bridge.h  1
-rw-r--r--  include/linux/if_link.h  1
-rw-r--r--  include/linux/if_vlan.h  34
-rw-r--r--  include/linux/inet_diag.h  4
-rw-r--r--  include/linux/ipv6.h  1
-rw-r--r--  include/linux/ktime.h  5
-rw-r--r--  include/linux/mlx4/cmd.h  3
-rw-r--r--  include/linux/mlx4/device.h  3
-rw-r--r--  include/linux/mlx4/qp.h  2
-rw-r--r--  include/linux/mlx5/cq.h  6
-rw-r--r--  include/linux/mlx5/device.h  441
-rw-r--r--  include/linux/mlx5/driver.h  35
-rw-r--r--  include/linux/mlx5/fs.h  6
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h  297
-rw-r--r--  include/linux/mlx5/port.h  40
-rw-r--r--  include/linux/mlx5/qp.h  128
-rw-r--r--  include/linux/mlx5/vport.h  2
-rw-r--r--  include/linux/mmc/sdio_ids.h  1
-rw-r--r--  include/linux/net.h  6
-rw-r--r--  include/linux/netdevice.h  35
-rw-r--r--  include/linux/netfilter.h  63
-rw-r--r--  include/linux/netfilter/nf_conntrack_common.h  4
-rw-r--r--  include/linux/netfilter/nf_conntrack_proto_gre.h  64
-rw-r--r--  include/linux/netfilter_ingress.h  18
-rw-r--r--  include/linux/perf_event.h  9
-rw-r--r--  include/linux/phy.h  3
-rw-r--r--  include/linux/ptp_clock_kernel.h  5
-rw-r--r--  include/linux/qed/common_hsi.h  359
-rw-r--r--  include/linux/qed/eth_common.h  155
-rw-r--r--  include/linux/qed/iscsi_common.h  28
-rw-r--r--  include/linux/qed/qed_chain.h  13
-rw-r--r--  include/linux/qed/qed_eth_if.h  3
-rw-r--r--  include/linux/qed/qed_if.h  36
-rw-r--r--  include/linux/qed/qed_ll2_if.h  139
-rw-r--r--  include/linux/qed/qed_roce_if.h  604
-rw-r--r--  include/linux/qed/qede_roce.h  88
-rw-r--r--  include/linux/qed/rdma_common.h  1
-rw-r--r--  include/linux/qed/tcp_common.h  16
-rw-r--r--  include/linux/rhashtable.h  543
-rw-r--r--  include/linux/rtnetlink.h  2
-rw-r--r--  include/linux/skbuff.h  73
-rw-r--r--  include/linux/sysctl.h  4
-rw-r--r--  include/linux/tcp.h  21
-rw-r--r--  include/linux/win_minmax.h  37
-rw-r--r--  include/net/addrconf.h  3
-rw-r--r--  include/net/af_rxrpc.h  53
-rw-r--r--  include/net/bluetooth/bluetooth.h  4
-rw-r--r--  include/net/bluetooth/hci.h  7
-rw-r--r--  include/net/bluetooth/hci_core.h  11
-rw-r--r--  include/net/bluetooth/hci_mon.h  4
-rw-r--r--  include/net/bluetooth/mgmt.h  24
-rw-r--r--  include/net/cfg80211.h  259
-rw-r--r--  include/net/devlink.h  1
-rw-r--r--  include/net/dsa.h  53
-rw-r--r--  include/net/dst_metadata.h  52
-rw-r--r--  include/net/flow.h  3
-rw-r--r--  include/net/flow_dissector.h  14
-rw-r--r--  include/net/fq.h  3
-rw-r--r--  include/net/fq_impl.h  7
-rw-r--r--  include/net/gre.h  10
-rw-r--r--  include/net/ieee80211_radiotap.h  21
-rw-r--r--  include/net/if_inet6.h  1
-rw-r--r--  include/net/inet_connection_sock.h  4
-rw-r--r--  include/net/ip.h  23
-rw-r--r--  include/net/ip6_route.h  3
-rw-r--r--  include/net/ip6_tunnel.h  1
-rw-r--r--  include/net/ip_fib.h  49
-rw-r--r--  include/net/ip_tunnels.h  21
-rw-r--r--  include/net/kcm.h  37
-rw-r--r--  include/net/l3mdev.h  153
-rw-r--r--  include/net/lwtunnel.h  44
-rw-r--r--  include/net/mac80211.h  108
-rw-r--r--  include/net/mpls.h  15
-rw-r--r--  include/net/ncsi.h  5
-rw-r--r--  include/net/netfilter/br_netfilter.h  6
-rw-r--r--  include/net/netfilter/nf_conntrack.h  56
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h  3
-rw-r--r--  include/net/netfilter/nf_conntrack_ecache.h  17
-rw-r--r--  include/net/netfilter/nf_conntrack_l3proto.h  4
-rw-r--r--  include/net/netfilter/nf_conntrack_l4proto.h  8
-rw-r--r--  include/net/netfilter/nf_log.h  14
-rw-r--r--  include/net/netfilter/nf_queue.h  69
-rw-r--r--  include/net/netfilter/nf_tables.h  22
-rw-r--r--  include/net/netfilter/nf_tables_bridge.h  7
-rw-r--r--  include/net/netfilter/nf_tables_core.h  3
-rw-r--r--  include/net/netfilter/nf_tables_ipv4.h  43
-rw-r--r--  include/net/netfilter/nf_tables_ipv6.h  53
-rw-r--r--  include/net/netns/conntrack.h  8
-rw-r--r--  include/net/netns/ipv4.h  1
-rw-r--r--  include/net/netns/netfilter.h  2
-rw-r--r--  include/net/netns/xfrm.h  12
-rw-r--r--  include/net/pkt_cls.h  24
-rw-r--r--  include/net/pkt_sched.h  4
-rw-r--r--  include/net/pptp.h  23
-rw-r--r--  include/net/route.h  10
-rw-r--r--  include/net/sch_generic.h  76
-rw-r--r--  include/net/sctp/sctp.h  10
-rw-r--r--  include/net/sctp/sm.h  94
-rw-r--r--  include/net/sctp/structs.h  5
-rw-r--r--  include/net/sock.h  13
-rw-r--r--  include/net/strparser.h  142
-rw-r--r--  include/net/switchdev.h  52
-rw-r--r--  include/net/tc_act/tc_ife.h  2
-rw-r--r--  include/net/tc_act/tc_skbmod.h  30
-rw-r--r--  include/net/tc_act/tc_tunnel_key.h  30
-rw-r--r--  include/net/tc_act/tc_vlan.h  26
-rw-r--r--  include/net/tcp.h  63
-rw-r--r--  include/net/udp.h  1
-rw-r--r--  include/net/vxlan.h  18
-rw-r--r--  include/net/xfrm.h  2
-rw-r--r--  include/rxrpc/packet.h  17
-rw-r--r--  include/trace/events/rxrpc.h  625
-rw-r--r--  include/uapi/linux/Kbuild  1
-rw-r--r--  include/uapi/linux/batman_adv.h  94
-rw-r--r--  include/uapi/linux/bpf.h  51
-rw-r--r--  include/uapi/linux/bpf_perf_event.h  18
-rw-r--r--  include/uapi/linux/ethtool.h  11
-rw-r--r--  include/uapi/linux/if_bridge.h  2
-rw-r--r--  include/uapi/linux/if_link.h  30
-rw-r--r--  include/uapi/linux/if_tunnel.h  17
-rw-r--r--  include/uapi/linux/inet_diag.h  20
-rw-r--r--  include/uapi/linux/ipv6.h  1
-rw-r--r--  include/uapi/linux/mii.h  1
-rw-r--r--  include/uapi/linux/netfilter/nf_log.h  12
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h  106
-rw-r--r--  include/uapi/linux/netfilter/nfnetlink_conntrack.h  8
-rw-r--r--  include/uapi/linux/netfilter/xt_hashlimit.h  23
-rw-r--r--  include/uapi/linux/nl80211.h  270
-rw-r--r--  include/uapi/linux/openvswitch.h  17
-rw-r--r--  include/uapi/linux/pkt_cls.h  19
-rw-r--r--  include/uapi/linux/pkt_sched.h  4
-rw-r--r--  include/uapi/linux/snmp.h  1
-rw-r--r--  include/uapi/linux/tc_act/tc_ife.h  3
-rw-r--r--  include/uapi/linux/tc_act/tc_skbmod.h  39
-rw-r--r--  include/uapi/linux/tc_act/tc_tunnel_key.h  41
-rw-r--r--  include/uapi/linux/tc_act/tc_vlan.h  2
-rw-r--r--  include/uapi/linux/tcp.h  3
-rw-r--r--  include/uapi/linux/tipc_netlink.h  4
-rw-r--r--  include/uapi/linux/xfrm.h  2
149 files changed, 5539 insertions, 1520 deletions
diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h
new file mode 100644
index 000000000000..2383dd20ff43
--- /dev/null
+++ b/include/dt-bindings/net/mscc-phy-vsc8531.h
@@ -0,0 +1,21 @@
+/*
+ * Device Tree constants for Microsemi VSC8531 PHY
+ *
+ * Author: Nagaraju Lakkaraju
+ *
+ * License: Dual MIT/GPL
+ * Copyright (c) 2016 Microsemi Corporation
+ */
+
+#ifndef _DT_BINDINGS_MSCC_VSC8531_H
+#define _DT_BINDINGS_MSCC_VSC8531_H
+
+/* MAC interface Edge rate control VDDMAC in milli Volts */
+#define MSCC_VDDMAC_3300 3300
+#define MSCC_VDDMAC_2500 2500
+#define MSCC_VDDMAC_1800 1800
+#define MSCC_VDDMAC_1500 1500
+#define MSCC_VDDMAC_MAX 4
+#define MSCC_SLOWDOWN_MAX 8
+
+#endif
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 3db25df396cb..8eeedb2db924 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -205,6 +205,9 @@ struct bcma_host_ops {
#define BCMA_PKG_ID_BCM4709 0
#define BCMA_CHIP_ID_BCM47094 53030
#define BCMA_CHIP_ID_BCM53018 53018
+#define BCMA_CHIP_ID_BCM53573 53573
+#define BCMA_PKG_ID_BCM53573 0
+#define BCMA_PKG_ID_BCM47189 1
/* Board types (on PCI usually equals to the subsystem dev id) */
/* BCM4313 */
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index 4901fb358b07..9986f8288d01 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -24,6 +24,7 @@
#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */
/* Agent registers (common for every core) */
+#define BCMA_OOB_SEL_OUT_A30 0x0100
#define BCMA_IOCTL 0x0408 /* IO control */
#define BCMA_IOCTL_CLK 0x0001
#define BCMA_IOCTL_FGC 0x0002
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
new file mode 100644
index 000000000000..f6505d83069d
--- /dev/null
+++ b/include/linux/bitfield.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BITFIELD_H
+#define _LINUX_BITFIELD_H
+
+#include <linux/bug.h>
+
+/*
+ * Bitfield access macros
+ *
+ * FIELD_{GET,PREP} macros take as first parameter shifted mask
+ * from which they extract the base mask and shift amount.
+ * Mask must be a compilation time constant.
+ *
+ * Example:
+ *
+ * #define REG_FIELD_A GENMASK(6, 0)
+ * #define REG_FIELD_B BIT(7)
+ * #define REG_FIELD_C GENMASK(15, 8)
+ * #define REG_FIELD_D GENMASK(31, 16)
+ *
+ * Get:
+ * a = FIELD_GET(REG_FIELD_A, reg);
+ * b = FIELD_GET(REG_FIELD_B, reg);
+ *
+ * Set:
+ * reg = FIELD_PREP(REG_FIELD_A, 1) |
+ * FIELD_PREP(REG_FIELD_B, 0) |
+ * FIELD_PREP(REG_FIELD_C, c) |
+ * FIELD_PREP(REG_FIELD_D, 0x40);
+ *
+ * Modify:
+ * reg &= ~REG_FIELD_C;
+ * reg |= FIELD_PREP(REG_FIELD_C, c);
+ */
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
+ ({ \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
+ _pfx "mask is not constant"); \
+ BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \
+ BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
+ ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+ _pfx "value too large for the field"); \
+ BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
+ _pfx "type of reg too small for mask"); \
+ __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
+ (1ULL << __bf_shf(_mask))); \
+ })
+
+/**
+ * FIELD_PREP() - prepare a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ */
+#define FIELD_PREP(_mask, _val) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
+ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ })
+
+/**
+ * FIELD_GET() - extract a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg: 32bit value of entire bitfield
+ *
+ * FIELD_GET() extracts the field specified by @_mask from the
+ * bitfield passed in as @_reg by masking and shifting it down.
+ */
+#define FIELD_GET(_mask, _reg) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
+ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ })
+
+#endif
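
For context, a minimal usage sketch of the FIELD_PREP()/FIELD_GET() helpers introduced above. CTRL_MODE and CTRL_EN are invented register fields, not part of this patch; only the new macros plus GENMASK()/BIT() from <linux/bitops.h> are assumed.

	#include <linux/bitfield.h>
	#include <linux/bitops.h>

	/* hypothetical register layout, for illustration only */
	#define CTRL_MODE	GENMASK(3, 0)	/* bits 3:0 */
	#define CTRL_EN		BIT(4)		/* bit 4 */

	static u32 ctrl_build(u32 mode)
	{
		/* compose the register value from its fields */
		return FIELD_PREP(CTRL_MODE, mode) | FIELD_PREP(CTRL_EN, 1);
	}

	static u32 ctrl_mode(u32 reg)
	{
		/* extract the mode field, shifted back down to bit 0 */
		return FIELD_GET(CTRL_MODE, reg);
	}
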
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 11134238417d..c201017b5730 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -96,6 +96,7 @@ enum bpf_return_type {
struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
+ bool pkt_access;
enum bpf_return_type ret_type;
enum bpf_arg_type arg1_type;
enum bpf_arg_type arg2_type;
@@ -138,6 +139,13 @@ enum bpf_reg_type {
*/
PTR_TO_PACKET,
PTR_TO_PACKET_END, /* skb->data + headlen */
+
+ /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
+ * elem value. We only allow this if we can statically verify that
+ * access from this register are going to fall within the size of the
+ * map element.
+ */
+ PTR_TO_MAP_VALUE_ADJ,
};
struct bpf_prog;
@@ -151,7 +159,8 @@ struct bpf_verifier_ops {
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
enum bpf_reg_type *reg_type);
-
+ int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
+ const struct bpf_prog *prog);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
struct bpf_insn *insn, struct bpf_prog *prog);
@@ -297,6 +306,10 @@ static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
+static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
new file mode 100644
index 000000000000..7035b997aaa5
--- /dev/null
+++ b/include/linux/bpf_verifier.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_BPF_VERIFIER_H
+#define _LINUX_BPF_VERIFIER_H 1
+
+#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/filter.h> /* for MAX_BPF_STACK */
+
+ /* Just some arbitrary values so we can safely do math without overflowing and
+ * are obviously wrong for any sort of memory access.
+ */
+#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
+#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)
+
+struct bpf_reg_state {
+ enum bpf_reg_type type;
+ /*
+ * Used to determine if any memory access using this register will
+ * result in a bad access.
+ */
+ u64 min_value, max_value;
+ union {
+ /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
+ s64 imm;
+
+ /* valid when type == PTR_TO_PACKET* */
+ struct {
+ u32 id;
+ u16 off;
+ u16 range;
+ };
+
+ /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
+ * PTR_TO_MAP_VALUE_OR_NULL
+ */
+ struct bpf_map *map_ptr;
+ };
+};
+
+enum bpf_stack_slot_type {
+ STACK_INVALID, /* nothing was stored in this stack slot */
+ STACK_SPILL, /* register spilled into stack */
+ STACK_MISC /* BPF program wrote some data into this slot */
+};
+
+#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+
+/* state of the program:
+ * type of all registers and stack info
+ */
+struct bpf_verifier_state {
+ struct bpf_reg_state regs[MAX_BPF_REG];
+ u8 stack_slot_type[MAX_BPF_STACK];
+ struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
+};
+
+/* linked list of verifier states used to prune search */
+struct bpf_verifier_state_list {
+ struct bpf_verifier_state state;
+ struct bpf_verifier_state_list *next;
+};
+
+struct bpf_insn_aux_data {
+ enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
+};
+
+#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+
+struct bpf_verifier_env;
+struct bpf_ext_analyzer_ops {
+ int (*insn_hook)(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
+};
+
+/* single container for all structs
+ * one verifier_env per bpf_check() call
+ */
+struct bpf_verifier_env {
+ struct bpf_prog *prog; /* eBPF program being verified */
+ struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
+ int stack_size; /* number of states to be processed */
+ struct bpf_verifier_state cur_state; /* current verifier state */
+ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
+ const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
+ void *analyzer_priv; /* pointer to external analyzer's private data */
+ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+ u32 used_map_cnt; /* number of used maps */
+ u32 id_gen; /* used to generate unique reg IDs */
+ bool allow_ptr_leaks;
+ bool seen_direct_write;
+ bool varlen_map_value_access;
+ struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+};
+
+int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
+ void *priv);
+
+#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/bug.h b/include/linux/bug.h
index e51b0709e78d..292d6a10b0c2 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -13,6 +13,7 @@ enum bug_trap_type {
struct pt_regs;
#ifdef __CHECKER__
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void*)0)
@@ -24,6 +25,8 @@ struct pt_regs;
#else /* __CHECKER__ */
/* Force a compilation error if a constant expression is not a power of 2 */
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 984f73b719a9..a4414a11eea7 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -497,6 +497,23 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}
+/**
+ * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
+ * @task: the task to be tested
+ * @ancestor: possible ancestor of @task's cgroup
+ *
+ * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
+ * It follows all the same rules as cgroup_is_descendant, and only applies
+ * to the default hierarchy.
+ */
+static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
+ struct cgroup *ancestor)
+{
+ struct css_set *cset = task_css_set(task);
+
+ return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
+}
+
/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
@@ -557,6 +574,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
+struct cgroup;
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
@@ -574,6 +592,11 @@ static inline void cgroup_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
+static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
+ struct cgroup *ancestor)
+{
+ return true;
+}
#endif /* !CONFIG_CGROUPS */
/*
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a16439b99fd9..1f09c521adfe 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -314,6 +314,70 @@ struct bpf_prog_aux;
bpf_size; \
})
+#define BPF_SIZEOF(type) \
+ ({ \
+ const int __size = bytes_to_bpf_size(sizeof(type)); \
+ BUILD_BUG_ON(__size < 0); \
+ __size; \
+ })
+
+#define BPF_FIELD_SIZEOF(type, field) \
+ ({ \
+ const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+ BUILD_BUG_ON(__size < 0); \
+ __size; \
+ })
+
+#define __BPF_MAP_0(m, v, ...) v
+#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
+#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
+#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
+#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
+#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
+
+#define __BPF_REG_0(...) __BPF_PAD(5)
+#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
+#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
+#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
+#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
+#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
+
+#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
+#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
+
+#define __BPF_CAST(t, a) \
+ (__force t) \
+ (__force \
+ typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
+ (unsigned long)0, (t)0))) a
+#define __BPF_V void
+#define __BPF_N
+
+#define __BPF_DECL_ARGS(t, a) t a
+#define __BPF_DECL_REGS(t, a) u64 a
+
+#define __BPF_PAD(n) \
+ __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
+ u64, __ur_3, u64, __ur_4, u64, __ur_5)
+
+#define BPF_CALL_x(x, name, ...) \
+ static __always_inline \
+ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ { \
+ return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+ } \
+ static __always_inline \
+ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
+
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
+
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
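
For context, a sketch of how the new BPF_CALL_x() wrappers are meant to be used when defining eBPF helpers: the helper body is written with typed arguments, while the generated wrapper keeps the five-u64-register calling convention expected by struct bpf_func_proto. The helper name below is invented for illustration; RET_INTEGER and ARG_ANYTHING are existing enum values from <linux/bpf.h>.

	/* illustrative helper, not part of this patch */
	BPF_CALL_2(bpf_example_add, u32, a, u32, b)
	{
		/* arguments arrive as u64 registers; __BPF_CAST narrows
		 * them to the declared types before this body runs
		 */
		return a + b;
	}

	static const struct bpf_func_proto bpf_example_add_proto = {
		.func		= bpf_example_add,
		.gpl_only	= false,
		.ret_type	= RET_INTEGER,
		.arg1_type	= ARG_ANYTHING,
		.arg2_type	= ARG_ANYTHING,
	};
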
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index cd184bdca58f..6824556d37ed 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1169,6 +1169,13 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
+static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
+{
+ const struct kobject *kobj = &device_obj->device.kobj;
+
+ return kobj->name;
+}
+
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index dcb89e3515db..c6587c01d951 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -45,6 +45,7 @@ struct br_ip_list {
#define BR_PROXYARP BIT(8)
#define BR_LEARNING_SYNC BIT(9)
#define BR_PROXYARP_WIFI BIT(10)
+#define BR_MCAST_FLOOD BIT(11)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index f923d15b432c..0b17c585b5cd 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -25,5 +25,6 @@ struct ifla_vf_info {
__u32 max_tx_rate;
__u32 rss_query_en;
__u32 trusted;
+ __be16 vlan_proto;
};
#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a5f6ce6b578c..3319d97d789d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -81,6 +81,7 @@ static inline bool is_vlan_dev(const struct net_device *dev)
#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
+#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -271,6 +272,23 @@ static inline int vlan_get_encap_level(struct net_device *dev)
}
#endif
+/**
+ * eth_type_vlan - check for valid vlan ether type.
+ * @ethertype: ether type to check
+ *
+ * Returns true if the ether type is a vlan ether type.
+ */
+static inline bool eth_type_vlan(__be16 ethertype)
+{
+ switch (ethertype) {
+ case htons(ETH_P_8021Q):
+ case htons(ETH_P_8021AD):
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline bool vlan_hw_offload_capable(netdev_features_t features,
__be16 proto)
{
@@ -424,8 +442,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
- if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
- veth->h_vlan_proto != htons(ETH_P_8021AD))
+ if (!eth_type_vlan(veth->h_vlan_proto))
return -EINVAL;
*vlan_tci = ntohs(veth->h_vlan_TCI);
@@ -487,7 +504,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
* present at mac_len - VLAN_HLEN (if mac_len > 0), or at
* ETH_HLEN otherwise
*/
- if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+ if (eth_type_vlan(type)) {
if (vlan_depth) {
if (WARN_ON(vlan_depth < VLAN_HLEN))
return 0;
@@ -505,8 +522,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
vh = (struct vlan_hdr *)(skb->data + vlan_depth);
type = vh->h_vlan_encapsulated_proto;
vlan_depth += VLAN_HLEN;
- } while (type == htons(ETH_P_8021Q) ||
- type == htons(ETH_P_8021AD));
+ } while (eth_type_vlan(type));
}
if (depth)
@@ -571,8 +587,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
if (!skb_vlan_tag_present(skb) &&
- likely(skb->protocol != htons(ETH_P_8021Q) &&
- skb->protocol != htons(ETH_P_8021AD)))
+ likely(!eth_type_vlan(skb->protocol)))
return false;
return true;
@@ -592,15 +607,14 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
if (!skb_vlan_tag_present(skb)) {
struct vlan_ethhdr *veh;
- if (likely(protocol != htons(ETH_P_8021Q) &&
- protocol != htons(ETH_P_8021AD)))
+ if (likely(!eth_type_vlan(protocol)))
return false;
veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}
- if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
+ if (!eth_type_vlan(protocol))
return false;
return true;
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index feb04ea20f11..65da430e260f 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -37,7 +37,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh);
+ const struct nlmsghdr *unlh, bool net_admin);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
@@ -56,7 +56,7 @@ void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
- struct user_namespace *user_ns);
+ struct user_namespace *user_ns, bool net_admin);
extern int inet_diag_register(const struct inet_diag_handler *handler);
extern void inet_diag_unregister(const struct inet_diag_handler *handler);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index c6dbcd84a2c7..7e9a789be5e0 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -18,6 +18,7 @@ struct ipv6_devconf {
__s32 dad_transmits;
__s32 rtr_solicits;
__s32 rtr_solicit_interval;
+ __s32 rtr_solicit_max_interval;
__s32 rtr_solicit_delay;
__s32 force_mld_version;
__s32 mldv1_unsolicited_report_interval;
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 3ffc69ebe967..0fb7ffb1775f 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -238,6 +238,11 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}
+static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
+{
+ return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
+}
+
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
/**
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 116b284bc4ce..1f3568694a57 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -309,7 +309,8 @@ int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
struct ifla_vf_stats *vf_stats);
u32 mlx4_comm_get_version(void);
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
-int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan,
+ u8 qos, __be16 proto);
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
int max_tx_rate);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 42da3552f7cb..59b50d3eedb4 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -221,6 +221,7 @@ enum {
MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
+ MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
};
enum {
@@ -1371,6 +1372,8 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
+int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
+ bool *vlan_offload_disabled);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index deaa2217214d..b4ee8f62ce8d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -160,6 +160,7 @@ struct mlx4_qp_path {
enum { /* fl */
MLX4_FL_CV = 1 << 6,
+ MLX4_FL_SV = 1 << 5,
MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,
MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,
MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,
@@ -267,6 +268,7 @@ enum {
MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32,
+ MLX4_UPD_QP_PATH_MASK_SV = 22 + 32,
};
enum { /* param3 */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2566f6d6444f..7c3c0d3aca37 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_create_cq_mbox_in *in, int inlen);
+ u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_query_cq_mbox_out *out);
+ u32 *out, int outlen);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_modify_cq_mbox_in *in, int in_sz);
+ u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0b6d15cddb2f..77c141797152 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -198,19 +198,6 @@ enum {
};
enum {
- MLX5_ACCESS_MODE_PA = 0,
- MLX5_ACCESS_MODE_MTT = 1,
- MLX5_ACCESS_MODE_KLM = 2
-};
-
-enum {
- MLX5_MKEY_REMOTE_INVAL = 1 << 24,
- MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
- MLX5_MKEY_BSF_EN = 1 << 30,
- MLX5_MKEY_LEN64 = 1 << 31,
-};
-
-enum {
MLX5_EN_RD = (u64)1,
MLX5_EN_WR = (u64)2
};
@@ -411,33 +398,6 @@ enum {
MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
};
-struct mlx5_inbox_hdr {
- __be16 opcode;
- u8 rsvd[4];
- __be16 opmod;
-};
-
-struct mlx5_outbox_hdr {
- u8 status;
- u8 rsvd[3];
- __be32 syndrome;
-};
-
-struct mlx5_cmd_query_adapter_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cmd_query_adapter_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[24];
- u8 intapin;
- u8 rsvd1[13];
- __be16 vsd_vendor_id;
- u8 vsd[208];
- u8 vsd_psid[16];
-};
-
enum mlx5_odp_transport_cap_bits {
MLX5_ODP_SUPPORT_SEND = 1 << 31,
MLX5_ODP_SUPPORT_RECV = 1 << 30,
@@ -455,30 +415,6 @@ struct mlx5_odp_caps {
char reserved2[0xe4];
};
-struct mlx5_cmd_init_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 profile;
- u8 rsvd1[4];
-};
-
-struct mlx5_cmd_init_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 profile;
- u8 rsvd1[4];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
struct mlx5_cmd_layout {
u8 type;
u8 rsvd0[3];
@@ -494,7 +430,6 @@ struct mlx5_cmd_layout {
u8 status_own;
};
-
struct health_buffer {
__be32 assert_var[5];
__be32 rsvd0[3];
@@ -856,245 +791,15 @@ struct mlx5_cqe128 {
struct mlx5_cqe64 cqe64;
};
-struct mlx5_srq_ctx {
- u8 state_log_sz;
- u8 rsvd0[3];
- __be32 flags_xrcd;
- __be32 pgoff_cqn;
- u8 rsvd1[4];
- u8 log_pg_sz;
- u8 rsvd2[7];
- __be32 pd;
- __be16 lwm;
- __be16 wqe_cnt;
- u8 rsvd3[8];
- __be64 db_record;
-};
-
-struct mlx5_create_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_srqn;
- u8 rsvd0[4];
- struct mlx5_srq_ctx ctx;
- u8 rsvd1[208];
- __be64 pas[0];
-};
-
-struct mlx5_create_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 srqn;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_query_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
- struct mlx5_srq_ctx ctx;
- u8 rsvd1[32];
- __be64 pas[0];
-};
-
-struct mlx5_arm_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- __be16 rsvd;
- __be16 lwm;
-};
-
-struct mlx5_arm_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cq_context {
- u8 status;
- u8 cqe_sz_flags;
- u8 st;
- u8 rsvd3;
- u8 rsvd4[6];
- __be16 page_offset;
- __be32 log_sz_usr_page;
- __be16 cq_period;
- __be16 cq_max_count;
- __be16 rsvd20;
- __be16 c_eqn;
- u8 log_pg_sz;
- u8 rsvd25[7];
- __be32 last_notified_index;
- __be32 solicit_producer_index;
- __be32 consumer_counter;
- __be32 producer_counter;
- u8 rsvd48[8];
- __be64 db_record_addr;
-};
-
-struct mlx5_create_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_cqn;
- u8 rsvdx[4];
- struct mlx5_cq_context ctx;
- u8 rsvd6[192];
- __be64 pas[0];
-};
-
-struct mlx5_create_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_query_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_query_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
- struct mlx5_cq_context ctx;
- u8 rsvd6[16];
- __be64 pas[0];
-};
-
-struct mlx5_modify_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- __be32 field_select;
- struct mlx5_cq_context ctx;
- u8 rsvd[192];
- __be64 pas[0];
-};
-
-struct mlx5_modify_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_eq_context {
- u8 status;
- u8 ec_oi;
- u8 st;
- u8 rsvd2[7];
- __be16 page_pffset;
- __be32 log_sz_usr_page;
- u8 rsvd3[7];
- u8 intr;
- u8 log_page_size;
- u8 rsvd4[15];
- __be32 consumer_counter;
- __be32 produser_counter;
- u8 rsvd5[16];
-};
-
-struct mlx5_create_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 input_eqn;
- u8 rsvd1[4];
- struct mlx5_eq_context ctx;
- u8 rsvd2[8];
- __be64 events_mask;
- u8 rsvd3[176];
- __be64 pas[0];
-};
-
-struct mlx5_create_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[3];
- u8 eq_number;
- u8 rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 eqn;
- u8 rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_map_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be64 mask;
- u8 mu;
- u8 rsvd0[2];
- u8 eqn;
- u8 rsvd1[24];
-};
-
-struct mlx5_map_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 eqn;
- u8 rsvd1[4];
-};
-
-struct mlx5_query_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- struct mlx5_eq_context ctx;
+enum {
+ MLX5_MKEY_STATUS_FREE = 1 << 6,
};
enum {
- MLX5_MKEY_STATUS_FREE = 1 << 6,
+ MLX5_MKEY_REMOTE_INVAL = 1 << 24,
+ MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
+ MLX5_MKEY_BSF_EN = 1 << 30,
+ MLX5_MKEY_LEN64 = 1 << 31,
};
struct mlx5_mkey_seg {
@@ -1119,134 +824,12 @@ struct mlx5_mkey_seg {
u8 rsvd4[4];
};
-struct mlx5_query_special_ctxs_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_special_ctxs_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 dump_fill_mkey;
- __be32 reserved_lkey;
-};
-
-struct mlx5_create_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_mkey_index;
- __be32 flags;
- struct mlx5_mkey_seg seg;
- u8 rsvd1[16];
- __be32 xlat_oct_act_size;
- __be32 rsvd2;
- u8 rsvd3[168];
- __be64 pas[0];
-};
-
-struct mlx5_create_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 mkey;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
-};
-
-struct mlx5_query_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be64 pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
- __be64 pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_dump_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
-};
-
-struct mlx5_dump_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 mkey;
-};
-
-struct mlx5_mad_ifc_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be16 remote_lid;
- u8 rsvd0;
- u8 port;
- u8 rsvd1[4];
- u8 data[256];
-};
-
-struct mlx5_mad_ifc_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- u8 data[256];
-};
-
-struct mlx5_access_reg_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 register_id;
- __be32 arg;
- __be32 data[0];
-};
-
-struct mlx5_access_reg_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- __be32 data[0];
-};
-
#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
enum {
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
};
-struct mlx5_allocate_psv_in {
- struct mlx5_inbox_hdr hdr;
- __be32 npsv_pd;
- __be32 rsvd_psv0;
-};
-
-struct mlx5_allocate_psv_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- __be32 psv_idx[4];
-};
-
-struct mlx5_destroy_psv_in {
- struct mlx5_inbox_hdr hdr;
- __be32 psv_number;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_psv_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
enum {
VPORT_STATE_DOWN = 0x0,
VPORT_STATE_UP = 0x1,
@@ -1381,6 +964,18 @@ enum mlx5_cap_type {
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ccea6fb16482..85c4786427e4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -49,10 +49,6 @@
#include <linux/mlx5/srq.h>
enum {
- MLX5_RQ_BITMASK_VSD = 1 << 1,
-};
-
-enum {
MLX5_BOARD_ID_LEN = 64,
MLX5_MAX_NAME_LEN = 16,
};
@@ -481,6 +477,7 @@ struct mlx5_fc_stats {
};
struct mlx5_eswitch;
+struct mlx5_lag;
struct mlx5_rl_entry {
u32 rate;
@@ -554,6 +551,7 @@ struct mlx5_priv {
struct mlx5_flow_steering *steering;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
+ struct mlx5_lag *lag;
unsigned long pci_dev_data;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
@@ -771,14 +769,15 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
-int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
+
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size, mlx5_cmd_cbk_t callback,
void *context);
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -807,15 +806,18 @@ int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen,
+ u32 *out, int outlen,
+ mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
- struct mlx5_create_mkey_mbox_in *in, int inlen,
- mlx5_cmd_cbk_t callback, void *context,
- struct mlx5_create_mkey_mbox_out *out);
+ u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- struct mlx5_query_mkey_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
@@ -826,8 +828,6 @@ void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
-int mlx5_sriov_init(struct mlx5_core_dev *dev);
-int mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
@@ -865,7 +865,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- struct mlx5_query_eq_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
@@ -930,6 +930,8 @@ enum {
struct mlx5_interface {
void * (*add)(struct mlx5_core_dev *dev);
void (*remove)(struct mlx5_core_dev *dev, void *context);
+ int (*attach)(struct mlx5_core_dev *dev, void *context);
+ void (*detach)(struct mlx5_core_dev *dev, void *context);
void (*event)(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param);
void * (*get_dev)(void *context);
@@ -942,6 +944,11 @@ int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
+int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
+int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+
struct mlx5_profile {
u64 mask;
u8 log_max_qp;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index e036d6030867..93ebc5e21334 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -54,6 +54,7 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_LAG,
MLX5_FLOW_NAMESPACE_OFFLOADS,
MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL,
@@ -62,6 +63,8 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ MLX5_FLOW_NAMESPACE_SNIFFER_RX,
+ MLX5_FLOW_NAMESPACE_SNIFFER_TX,
};
struct mlx5_flow_table;
@@ -106,6 +109,9 @@ mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
u32 level, u16 vport);
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+ struct mlx5_flow_namespace *ns,
+ int prio, u32 level);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values:
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d1f9a581aca8..6045d4d58065 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -152,7 +152,7 @@ enum {
MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
MLX5_CMD_OP_ACCESS_REG = 0x805,
MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
- MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
MLX5_CMD_OP_MAD_IFC = 0x50d,
MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -174,6 +174,12 @@ enum {
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
MLX5_CMD_OP_SET_WOL_ROL = 0x830,
MLX5_CMD_OP_QUERY_WOL_ROL = 0x831,
+ MLX5_CMD_OP_CREATE_LAG = 0x840,
+ MLX5_CMD_OP_MODIFY_LAG = 0x841,
+ MLX5_CMD_OP_QUERY_LAG = 0x842,
+ MLX5_CMD_OP_DESTROY_LAG = 0x843,
+ MLX5_CMD_OP_CREATE_VPORT_LAG = 0x844,
+ MLX5_CMD_OP_DESTROY_VPORT_LAG = 0x845,
MLX5_CMD_OP_CREATE_TIR = 0x900,
MLX5_CMD_OP_MODIFY_TIR = 0x901,
MLX5_CMD_OP_DESTROY_TIR = 0x902,
@@ -212,6 +218,8 @@ enum {
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
+ MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
MLX5_CMD_OP_MAX
};
@@ -281,7 +289,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1];
- u8 reserved_at_7[0x19];
+ u8 encap[0x1];
+ u8 decap[0x1];
+ u8 reserved_at_9[0x17];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
@@ -473,7 +483,9 @@ struct mlx5_ifc_ads_bits {
struct mlx5_ifc_flow_table_nic_cap_bits {
u8 nic_rx_multi_path_tirs[0x1];
- u8 reserved_at_1[0x1ff];
+ u8 nic_rx_multi_path_tirs_fts[0x1];
+ u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
+ u8 reserved_at_3[0x1fd];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
@@ -512,7 +524,15 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 nic_vport_node_guid_modify[0x1];
u8 nic_vport_port_guid_modify[0x1];
- u8 reserved_at_20[0x7e0];
+ u8 vxlan_encap_decap[0x1];
+ u8 nvgre_encap_decap[0x1];
+ u8 reserved_at_22[0x9];
+ u8 log_max_encap_headers[0x5];
+ u8 reserved_2b[0x6];
+ u8 max_encap_header_size[0xa];
+
+ u8 reserved_40[0x7c0];
+
};
struct mlx5_ifc_qos_cap_bits {
@@ -767,7 +787,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 out_of_seq_cnt[0x1];
u8 vport_counters[0x1];
u8 retransmission_q_counters[0x1];
- u8 reserved_at_183[0x3];
+ u8 reserved_at_183[0x1];
+ u8 modify_rq_counter_set_id[0x1];
+ u8 reserved_at_185[0x1];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
@@ -870,7 +892,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pad_tx_eth_packet[0x1];
u8 reserved_at_263[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0x10];
+
+ u8 reserved_at_270[0xb];
+ u8 lag_master[0x1];
+ u8 num_lag_ports[0x4];
u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
@@ -1904,7 +1929,7 @@ enum {
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
- u8 reserved_at_4[0x4];
+ u8 lag_tx_port_affinity[0x4];
u8 st[0x8];
u8 reserved_at_10[0x3];
u8 pm_state[0x2];
@@ -1966,7 +1991,10 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_at_3e0[0x8];
u8 cqn_snd[0x18];
- u8 reserved_at_400[0x40];
+ u8 reserved_at_400[0x8];
+ u8 deth_sqpn[0x18];
+
+ u8 reserved_at_420[0x20];
u8 reserved_at_440[0x8];
u8 last_acked_psn[0x18];
@@ -2064,6 +2092,8 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
+ MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
+ MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
};
struct mlx5_ifc_flow_context_bits {
@@ -2083,7 +2113,9 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
- u8 reserved_at_c0[0x140];
+ u8 encap_id[0x20];
+
+ u8 reserved_at_e0[0x120];
struct mlx5_ifc_fte_match_param_bits match_value;
@@ -2146,7 +2178,11 @@ struct mlx5_ifc_traffic_counter_bits {
};
struct mlx5_ifc_tisc_bits {
- u8 reserved_at_0[0xc];
+ u8 strict_lag_tx_port_affinity[0x1];
+ u8 reserved_at_1[0x3];
+ u8 lag_tx_port_affinity[0x04];
+
+ u8 reserved_at_8[0x4];
u8 prio[0x4];
u8 reserved_at_10[0x10];
@@ -2808,7 +2844,7 @@ struct mlx5_ifc_xrqc_bits {
struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
- u8 reserved_at_180[0x180];
+ u8 reserved_at_180[0x200];
struct mlx5_ifc_wq_bits wq;
};
@@ -3489,7 +3525,7 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x20];
+ u8 dump_fill_mkey[0x20];
u8 resd_lkey[0x20];
};
@@ -4213,6 +4249,85 @@ struct mlx5_ifc_query_eq_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_encap_header_in_bits {
+ u8 reserved_at_0[0x5];
+ u8 header_type[0x3];
+ u8 reserved_at_8[0xe];
+ u8 encap_header_size[0xa];
+
+ u8 reserved_at_20[0x10];
+ u8 encap_header[2][0x8];
+
+ u8 more_encap_header[0][0x8];
+};
+
+struct mlx5_ifc_query_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0xa0];
+
+ struct mlx5_ifc_encap_header_in_bits encap_header[0];
+};
+
+struct mlx5_ifc_query_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_at_60[0xa0];
+};
+
+struct mlx5_ifc_alloc_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xa0];
+
+ struct mlx5_ifc_encap_header_in_bits encap_header;
+};
+
+struct mlx5_ifc_dealloc_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_60[0x20];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4517,7 +4632,9 @@ struct mlx5_ifc_modify_tis_out_bits {
struct mlx5_ifc_modify_tis_bitmask_bits {
u8 reserved_at_0[0x20];
- u8 reserved_at_20[0x1f];
+ u8 reserved_at_20[0x1d];
+ u8 lag_tx_port_affinity[0x1];
+ u8 strict_lag_tx_port_affinity[0x1];
u8 prio[0x1];
};
@@ -4652,6 +4769,11 @@ struct mlx5_ifc_modify_rq_out_bits {
u8 reserved_at_40[0x40];
};
+enum {
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3,
+};
+
struct mlx5_ifc_modify_rq_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -4721,7 +4843,7 @@ struct mlx5_ifc_modify_nic_vport_field_select_bits {
u8 reserved_at_0[0x16];
u8 node_guid[0x1];
u8 port_guid[0x1];
- u8 reserved_at_18[0x1];
+ u8 min_inline[0x1];
u8 mtu[0x1];
u8 change_event[0x1];
u8 promisc[0x1];
@@ -6099,7 +6221,9 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 reserved_at_a0[0x20];
- u8 reserved_at_c0[0x4];
+ u8 encap_en[0x1];
+ u8 decap_en[0x1];
+ u8 reserved_at_c2[0x2];
u8 table_miss_mode[0x4];
u8 level[0x8];
u8 reserved_at_d0[0x8];
@@ -6108,7 +6232,10 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 reserved_at_e0[0x8];
u8 table_miss_id[0x18];
- u8 reserved_at_100[0x100];
+ u8 reserved_at_100[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_120[0x80];
};
struct mlx5_ifc_create_flow_group_out_bits {
@@ -7563,7 +7690,8 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
};
enum {
- MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = 0x1,
+ MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = (1UL << 0),
+ MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15),
};
struct mlx5_ifc_modify_flow_table_out_bits {
@@ -7602,7 +7730,10 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 reserved_at_e0[0x8];
u8 table_miss_id[0x18];
- u8 reserved_at_100[0x100];
+ u8 reserved_at_100[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_120[0x80];
};
struct mlx5_ifc_ets_tcn_config_reg_bits {
@@ -7710,4 +7841,134 @@ struct mlx5_ifc_dcbx_param_bits {
u8 error[0x8];
u8 reserved_at_a0[0x160];
};
+
+struct mlx5_ifc_lagc_bits {
+ u8 reserved_at_0[0x1d];
+ u8 lag_state[0x3];
+
+ u8 reserved_at_20[0x14];
+ u8 tx_remap_affinity_2[0x4];
+ u8 reserved_at_38[0x4];
+ u8 tx_remap_affinity_1[0x4];
+};
+
+struct mlx5_ifc_create_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_modify_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+ u8 field_select[0x20];
+
+ struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
#endif /* MLX5_IFC_H */
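Editorial note: the LAG command layouts added above (create/modify/query/destroy plus the shared lagc context) are driven the same way. A hedged sketch of programming both tx affinities through CREATE_LAG follows; the opcode constant is an assumption here.

/* Illustrative only: map LAG tx affinity 1/2 to two physical ports. */
static int create_lag_example(struct mlx5_core_dev *dev, u8 port1, u8 port2)
{
        u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
        void *ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

        MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
        MLX5_SET(lagc, ctx, tx_remap_affinity_1, port1);
        MLX5_SET(lagc, ctx, tx_remap_affinity_2, port2);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}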
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index e3012cc64b8a..b3065acd20b4 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -61,6 +61,39 @@ enum mlx5_an_status {
#define MLX5_I2C_ADDR_HIGH 0x51
#define MLX5_EEPROM_PAGE_LENGTH 256
+enum mlx5e_link_mode {
+ MLX5E_1000BASE_CX_SGMII = 0,
+ MLX5E_1000BASE_KX = 1,
+ MLX5E_10GBASE_CX4 = 2,
+ MLX5E_10GBASE_KX4 = 3,
+ MLX5E_10GBASE_KR = 4,
+ MLX5E_20GBASE_KR2 = 5,
+ MLX5E_40GBASE_CR4 = 6,
+ MLX5E_40GBASE_KR4 = 7,
+ MLX5E_56GBASE_R4 = 8,
+ MLX5E_10GBASE_CR = 12,
+ MLX5E_10GBASE_SR = 13,
+ MLX5E_10GBASE_ER = 14,
+ MLX5E_40GBASE_SR4 = 15,
+ MLX5E_40GBASE_LR4 = 16,
+ MLX5E_50GBASE_SR2 = 18,
+ MLX5E_100GBASE_CR4 = 20,
+ MLX5E_100GBASE_SR4 = 21,
+ MLX5E_100GBASE_KR4 = 22,
+ MLX5E_100GBASE_LR4 = 23,
+ MLX5E_100BASE_TX = 24,
+ MLX5E_1000BASE_T = 25,
+ MLX5E_10GBASE_T = 26,
+ MLX5E_25GBASE_CR = 27,
+ MLX5E_25GBASE_KR = 28,
+ MLX5E_25GBASE_SR = 29,
+ MLX5E_50GBASE_CR2 = 30,
+ MLX5E_50GBASE_KR2 = 31,
+ MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
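Editorial note: each enum value above is a bit position in the PTYS protocol word, and MLX5E_PROT_MASK() turns a mode into its bit, so capability words are built or tested by OR-ing masks. A short sketch, assuming eth_proto_cap was read elsewhere (e.g. via mlx5_query_port_ptys()):

/* Is 25G CR or 100G CR4 advertised in the capability word? */
u32 wanted = MLX5E_PROT_MASK(MLX5E_25GBASE_CR) |
             MLX5E_PROT_MASK(MLX5E_100GBASE_CR4);
bool supported = !!(eth_proto_cap & wanted);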
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
@@ -70,9 +103,10 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, int proto_mask,
- u8 local_port);
+int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, u8 local_port);
+int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
+ u32 *proto_oper, u8 local_port);
int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, int proto_mask);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 7879bf411891..0aacb2a7480d 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -123,12 +123,13 @@ enum {
};
enum {
- MLX5_NON_ZERO_RQ = 0 << 24,
- MLX5_SRQ_RQ = 1 << 24,
- MLX5_CRQ_RQ = 2 << 24,
- MLX5_ZERO_LEN_RQ = 3 << 24
+ MLX5_NON_ZERO_RQ = 0x0,
+ MLX5_SRQ_RQ = 0x1,
+ MLX5_CRQ_RQ = 0x2,
+ MLX5_ZERO_LEN_RQ = 0x3
};
+/* TODO REM */
enum {
/* params1 */
MLX5_QP_BIT_SRE = 1 << 15,
@@ -178,12 +179,6 @@ enum {
};
enum {
- MLX5_QP_LAT_SENSITIVE = 1 << 28,
- MLX5_QP_BLOCK_MCAST = 1 << 30,
- MLX5_QP_ENABLE_SIG = 1 << 31,
-};
-
-enum {
MLX5_RCV_DBR = 0,
MLX5_SND_DBR = 1,
};
@@ -484,6 +479,7 @@ struct mlx5_qp_path {
u8 rmac[6];
};
+/* FIXME: use mlx5_ifc.h qpc */
struct mlx5_qp_context {
__be32 flags;
__be32 flags_pd;
@@ -525,99 +521,6 @@ struct mlx5_qp_context {
u8 rsvd1[24];
};
-struct mlx5_create_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_qpn;
- u8 rsvd0[4];
- __be32 opt_param_mask;
- u8 rsvd1[4];
- struct mlx5_qp_context ctx;
- u8 rsvd3[16];
- __be64 pas[0];
-};
-
-struct mlx5_create_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_modify_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
- __be32 optparam;
- u8 rsvd1[4];
- struct mlx5_qp_context ctx;
- u8 rsvd2[16];
-};
-
-struct mlx5_modify_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_query_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd[4];
-};
-
-struct mlx5_query_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd1[8];
- __be32 optparam;
- u8 rsvd0[4];
- struct mlx5_qp_context ctx;
- u8 rsvd2[16];
- __be64 pas[0];
-};
-
-struct mlx5_conf_sqp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd[3];
- u8 type;
-};
-
-struct mlx5_conf_sqp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_xrcd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_xrcd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 xrcdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_xrcd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 xrcdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_xrcd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
@@ -628,28 +531,17 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}
-struct mlx5_page_fault_resume_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 flags_qpn;
- u8 reserved[4];
-};
-
-struct mlx5_page_fault_resume_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
- struct mlx5_create_qp_mbox_in *in,
+ u32 *in,
int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
- struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
+ u32 opt_param_mask, void *qpc,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- struct mlx5_query_qp_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
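Editorial note: with the mailbox structs removed, callers of mlx5_core_qp_modify() now pass the command opcode, an optional-parameter mask and a raw qpc laid out per mlx5_ifc.h. A hedged sketch of the new calling convention, using a RST-to-INIT transition as the example:

/* Sketch only: qpc field names come from the mlx5_ifc.h qpc layout,
 * and MLX5_CMD_OP_RST2INIT_QP is the assumed transition opcode.
 */
static int qp_to_init_example(struct mlx5_core_dev *dev,
                              struct mlx5_core_qp *qp)
{
        u32 qpc[MLX5_ST_SZ_DW(qpc)] = {0};

        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, rre, 1);
        MLX5_SET(qpc, qpc, rwe, 1);

        return mlx5_core_qp_modify(dev, MLX5_CMD_OP_RST2INIT_QP,
                                   0 /* opt_param_mask */, qpc, qp);
}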
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index e087b7d047ac..451b0bde9083 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u8 *min_inline);
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
u16 vport, u8 *addr);
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 0d126aeb3ec0..d43ef96bf075 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -32,6 +32,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
+#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
diff --git a/include/linux/net.h b/include/linux/net.h
index b9f0ff4d489c..cd0c8bd0a1de 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -25,6 +25,7 @@
#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
#include <linux/once.h>
+#include <linux/fs.h>
#include <uapi/linux/net.h>
@@ -128,6 +129,9 @@ struct page;
struct sockaddr;
struct msghdr;
struct module;
+struct sk_buff;
+typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
+ unsigned int, size_t);
struct proto_ops {
int family;
@@ -186,6 +190,8 @@ struct proto_ops {
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
int (*set_peek_off)(struct sock *sk, int val);
int (*peek_len)(struct socket *sock);
+ int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
};
#define DECLARE_SOCKADDR(type, dst, src) \
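Editorial note: the new read_sock op lets an in-kernel consumer (such as the stream parser used by kcm) drain a socket's receive queue without building a msghdr; the protocol hands each skb to the caller's recv_actor, whose read_descriptor_t argument is presumably why <linux/fs.h> is now included here. A minimal, hedged sketch of an actor:

/* Hypothetical recv_actor: count the bytes offered by ->read_sock(). */
static int count_bytes_actor(read_descriptor_t *desc, struct sk_buff *skb,
                             unsigned int offset, size_t len)
{
        size_t *total = desc->arg.data;

        *total += len;
        return len;     /* report everything offered as consumed */
}

/* usage (sk is a socket whose proto_ops provides read_sock):
 *      read_descriptor_t desc = { .arg.data = &total, .count = 1 };
 *      sock->ops->read_sock(sk, &desc, count_bytes_actor);
 */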
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e8d79d4ebcfe..136ae6bbe81e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -52,6 +52,7 @@
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
+#include <linux/hashtable.h>
struct netpoll_info;
struct device;
@@ -788,6 +789,7 @@ enum {
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
TC_SETUP_MATCHALL,
+ TC_SETUP_CLSBPF,
};
struct tc_cls_u32_offload;
@@ -799,6 +801,7 @@ struct tc_to_netdev {
struct tc_cls_u32_offload *cls_u32;
struct tc_cls_flower_offload *cls_flower;
struct tc_cls_matchall_offload *cls_mall;
+ struct tc_cls_bpf_offload *cls_bpf;
};
};
@@ -923,6 +926,14 @@ struct netdev_xdp {
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
+ * bool (*ndo_has_offload_stats)(int attr_id)
+ * Return true if this device supports offload stats of this attr_id.
+ *
+ * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
+ * void *attr_data)
+ * Get statistics for offload operations by attr_id. Write it into the
+ * attr_data pointer.
+ *
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device supports VLAN filtering this function is called when a
* VLAN id is registered.
@@ -935,7 +946,8 @@ struct netdev_xdp {
*
* SR-IOV management functions.
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
- * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
+ * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
+ * u8 qos, __be16 proto);
* int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
* int max_tx_rate);
* int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
@@ -1030,7 +1042,7 @@ struct netdev_xdp {
 * Deletes the FDB entry from dev corresponding to addr.
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
- * int idx)
+ * int *idx)
* Used to add FDB entries to dump requests. Implementers should add
* entries to skb and update idx with the number of entries.
*
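Editorial note: a driver that can report hardware-forwarded counters implements the new pair of ops: ndo_has_offload_stats() says whether an attr_id is supported, and ndo_get_offload_stats() fills the caller-provided attr_data. A hedged sketch; the IFLA_OFFLOAD_XSTATS_CPU_HIT attribute and its struct rtnl_link_stats64 payload are assumptions of this example.

static bool foo_has_offload_stats(int attr_id)
{
        return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int foo_get_offload_stats(int attr_id, const struct net_device *dev,
                                 void *attr_data)
{
        if (attr_id != IFLA_OFFLOAD_XSTATS_CPU_HIT)
                return -EINVAL;

        /* fill the caller-provided buffer; contents are device specific */
        memset(attr_data, 0, sizeof(struct rtnl_link_stats64));
        return 0;
}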
@@ -1154,6 +1166,10 @@ struct net_device_ops {
struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
+ bool (*ndo_has_offload_stats)(int attr_id);
+ int (*ndo_get_offload_stats)(int attr_id,
+ const struct net_device *dev,
+ void *attr_data);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
@@ -1172,7 +1188,8 @@ struct net_device_ops {
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
int (*ndo_set_vf_vlan)(struct net_device *dev,
- int queue, u16 vlan, u8 qos);
+ int queue, u16 vlan,
+ u8 qos, __be16 proto);
int (*ndo_set_vf_rate)(struct net_device *dev,
int vf, int min_tx_rate,
int max_tx_rate);
@@ -1262,7 +1279,7 @@ struct net_device_ops {
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
- int idx);
+ int *idx);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
@@ -1561,8 +1578,6 @@ enum netdev_priv_flags {
*
* @xps_maps: XXX: need comments on this one
*
- * @offload_fwd_mark: Offload device fwding mark
- *
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
@@ -1784,7 +1799,7 @@ struct net_device {
#endif
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
- struct list_head nf_hooks_ingress;
+ struct nf_hook_entry __rcu *nf_hooks_ingress;
#endif
unsigned char broadcast[MAX_ADDR_LEN];
@@ -1800,6 +1815,9 @@ struct net_device {
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
struct Qdisc *qdisc;
+#ifdef CONFIG_NET_SCHED
+ DECLARE_HASHTABLE (qdisc_hash, 4);
+#endif
unsigned long tx_queue_len;
spinlock_t tx_global_lock;
int watchdog_timeo;
@@ -1810,9 +1828,6 @@ struct net_device {
#ifdef CONFIG_NET_CLS_ACT
struct tcf_proto __rcu *egress_cl_list;
#endif
-#ifdef CONFIG_NET_SWITCHDEV
- u32 offload_fwd_mark;
-#endif
/* These may be needed for future network-power-down code. */
struct timer_list watchdog_timer;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 9230f9aee896..abc7fdcb9eb1 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -55,12 +55,34 @@ struct nf_hook_state {
struct net_device *out;
struct sock *sk;
struct net *net;
- struct list_head *hook_list;
+ struct nf_hook_entry __rcu *hook_entries;
int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};
+typedef unsigned int nf_hookfn(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
+struct nf_hook_ops {
+ struct list_head list;
+
+ /* User fills in from here down. */
+ nf_hookfn *hook;
+ struct net_device *dev;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
+ /* Hooks are ordered in ascending priority. */
+ int priority;
+};
+
+struct nf_hook_entry {
+ struct nf_hook_entry __rcu *next;
+ struct nf_hook_ops ops;
+ const struct nf_hook_ops *orig_ops;
+};
+
static inline void nf_hook_state_init(struct nf_hook_state *p,
- struct list_head *hook_list,
+ struct nf_hook_entry *hook_entry,
unsigned int hook,
int thresh, u_int8_t pf,
struct net_device *indev,
@@ -76,26 +98,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
p->out = outdev;
p->sk = sk;
p->net = net;
- p->hook_list = hook_list;
+ RCU_INIT_POINTER(p->hook_entries, hook_entry);
p->okfn = okfn;
}
-typedef unsigned int nf_hookfn(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state);
-
-struct nf_hook_ops {
- struct list_head list;
- /* User fills in from here down. */
- nf_hookfn *hook;
- struct net_device *dev;
- void *priv;
- u_int8_t pf;
- unsigned int hooknum;
- /* Hooks are ordered in ascending priority. */
- int priority;
-};
struct nf_sockopt_ops {
struct list_head list;
@@ -133,6 +140,8 @@ int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
+int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
+void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
/* Functions to register get/setsockopt ranges (non-inclusive). You
need to check permissions yourself! */
@@ -161,7 +170,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
int (*okfn)(struct net *, struct sock *, struct sk_buff *),
int thresh)
{
- struct list_head *hook_list;
+ struct nf_hook_entry *hook_head;
+ int ret = 1;
#ifdef HAVE_JUMP_LABEL
if (__builtin_constant_p(pf) &&
@@ -170,16 +180,19 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
return 1;
#endif
- hook_list = &net->nf.hooks[pf][hook];
-
- if (!list_empty(hook_list)) {
+ rcu_read_lock();
+ hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
+ if (hook_head) {
struct nf_hook_state state;
- nf_hook_state_init(&state, hook_list, hook, thresh,
+ nf_hook_state_init(&state, hook_head, hook, thresh,
pf, indev, outdev, sk, net, okfn);
- return nf_hook_slow(skb, &state);
+
+ ret = nf_hook_slow(skb, &state);
}
- return 1;
+ rcu_read_unlock();
+
+ return ret;
}
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
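Editorial note: the per-hook list_head is replaced by a singly linked, RCU-protected chain of nf_hook_entry; nf_hook_thresh() now snapshots the head under rcu_read_lock() and nf_hook_slow() walks the ->next pointers. An illustrative (not the real nf_hook_slow) walk of such a chain, caller holding rcu_read_lock():

/* Simplified verdict handling compared to the real slow path. */
static unsigned int walk_hooks_example(struct nf_hook_entry *entry,
                                       struct sk_buff *skb,
                                       const struct nf_hook_state *state)
{
        unsigned int verdict = NF_ACCEPT;

        while (entry) {
                verdict = entry->ops.hook(entry->ops.priv, skb, state);
                if (verdict != NF_ACCEPT)
                        break;
                entry = rcu_dereference(entry->next);
        }
        return verdict;
}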
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 275505792664..1d1ef4e20512 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -4,13 +4,9 @@
#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct ip_conntrack_stat {
- unsigned int searched;
unsigned int found;
- unsigned int new;
unsigned int invalid;
unsigned int ignore;
- unsigned int delete;
- unsigned int delete_list;
unsigned int insert;
unsigned int insert_failed;
unsigned int drop;
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index df78dc2b5524..dee0acd0dd31 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -1,68 +1,8 @@
#ifndef _CONNTRACK_PROTO_GRE_H
#define _CONNTRACK_PROTO_GRE_H
#include <asm/byteorder.h>
-
-/* GRE PROTOCOL HEADER */
-
-/* GRE Version field */
-#define GRE_VERSION_1701 0x0
-#define GRE_VERSION_PPTP 0x1
-
-/* GRE Protocol field */
-#define GRE_PROTOCOL_PPTP 0x880B
-
-/* GRE Flags */
-#define GRE_FLAG_C 0x80
-#define GRE_FLAG_R 0x40
-#define GRE_FLAG_K 0x20
-#define GRE_FLAG_S 0x10
-#define GRE_FLAG_A 0x80
-
-#define GRE_IS_C(f) ((f)&GRE_FLAG_C)
-#define GRE_IS_R(f) ((f)&GRE_FLAG_R)
-#define GRE_IS_K(f) ((f)&GRE_FLAG_K)
-#define GRE_IS_S(f) ((f)&GRE_FLAG_S)
-#define GRE_IS_A(f) ((f)&GRE_FLAG_A)
-
-/* GRE is a mess: Four different standards */
-struct gre_hdr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u16 rec:3,
- srr:1,
- seq:1,
- key:1,
- routing:1,
- csum:1,
- version:3,
- reserved:4,
- ack:1;
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u16 csum:1,
- routing:1,
- key:1,
- seq:1,
- srr:1,
- rec:3,
- ack:1,
- reserved:4,
- version:3;
-#else
-#error "Adjust your <asm/byteorder.h> defines"
-#endif
- __be16 protocol;
-};
-
-/* modified GRE header for PPTP */
-struct gre_hdr_pptp {
- __u8 flags; /* bitfield */
- __u8 version; /* should be GRE_VERSION_PPTP */
- __be16 protocol; /* should be GRE_PROTOCOL_PPTP */
- __be16 payload_len; /* size of ppp payload, not inc. gre header */
- __be16 call_id; /* peer's call_id for this session */
- __be32 seq; /* sequence number. Present if S==1 */
- __be32 ack; /* seq number of highest packet received by */
- /* sender in this session */
-};
+#include <net/gre.h>
+#include <net/pptp.h>
struct nf_ct_gre {
unsigned int stream_timeout;
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 5fcd375ef175..33e37fb41d5d 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -11,22 +11,30 @@ static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
return false;
#endif
- return !list_empty(&skb->dev->nf_hooks_ingress);
+ return rcu_access_pointer(skb->dev->nf_hooks_ingress);
}
+/* caller must hold rcu_read_lock */
static inline int nf_hook_ingress(struct sk_buff *skb)
{
+ struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
struct nf_hook_state state;
- nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
- NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
- skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
+ /* Must recheck the ingress hook head, in the event it became NULL
+ * after the check in nf_hook_ingress_active evaluated to true.
+ */
+ if (unlikely(!e))
+ return 0;
+
+ nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN,
+ NFPROTO_NETDEV, skb->dev, NULL, NULL,
+ dev_net(skb->dev), NULL);
return nf_hook_slow(skb, &state);
}
static inline void nf_hook_ingress_init(struct net_device *dev)
{
- INIT_LIST_HEAD(&dev->nf_hooks_ingress);
+ RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
}
#else /* CONFIG_NETFILTER_INGRESS */
static inline int nf_hook_ingress_active(struct sk_buff *skb)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5c5362584aba..060d0ede88df 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -690,6 +690,10 @@ struct perf_event {
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
+#ifdef CONFIG_BPF_SYSCALL
+ perf_overflow_handler_t orig_overflow_handler;
+ struct bpf_prog *prog;
+#endif
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call *tp_event;
@@ -802,6 +806,11 @@ struct perf_output_handle {
int page;
};
+struct bpf_perf_event_data_kern {
+ struct pt_regs *regs;
+ struct perf_sample_data *data;
+};
+
#ifdef CONFIG_CGROUP_PERF
/*
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2d24b283aa2d..e25f1830fbcf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -80,6 +80,7 @@ typedef enum {
PHY_INTERFACE_MODE_XGMII,
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_TRGMII,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -123,6 +124,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "moca";
case PHY_INTERFACE_MODE_QSGMII:
return "qsgmii";
+ case PHY_INTERFACE_MODE_TRGMII:
+ return "trgmii";
default:
return "unknown";
}
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 6b15e168148a..5ad54fc66cf0 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -127,6 +127,11 @@ struct ptp_clock;
*
* @info: Structure describing the new clock.
* @parent: Pointer to the parent device of the new clock.
+ *
+ * Returns a valid pointer on success or PTR_ERR on failure. If PHC
+ * support is missing at the configuration level, this function
+ * returns NULL, and drivers are expected to gracefully handle that
+ * case separately.
*/
extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
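Editorial note: the added comment documents three possible outcomes, so callers need both an IS_ERR() check and a NULL check. A short sketch of the expected calling pattern; 'my_ptp_info' and the use of a pdev here are assumptions, not part of the patch.

struct ptp_clock *clock = ptp_clock_register(&my_ptp_info, &pdev->dev);

if (IS_ERR(clock))
        return PTR_ERR(clock);          /* registration failed */
if (!clock)
        dev_info(&pdev->dev, "PHC support disabled in config, continuing\n");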
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 40c0ada01806..734deb094618 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -5,28 +5,77 @@
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
+#ifndef _COMMON_HSI_H
+#define _COMMON_HSI_H
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+/* dma_addr_t manip */
+#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
+#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
+#define DMA_REGPAIR_LE(x, val) do { \
+ (x).hi = DMA_HI_LE((val)); \
+ (x).lo = DMA_LO_LE((val)); \
+ } while (0)
+
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
+#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
+#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
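Editorial note: these helpers move here from qed_chain.h; they split a dma_addr_t into the little-endian hi/lo halves of a firmware regpair and recombine it. A short usage sketch, assuming 'mapping' came from a prior DMA mapping call:

struct regpair addr;                            /* { __le32 lo; __le32 hi; } */

DMA_REGPAIR_LE(addr, mapping);                  /* split into LE hi/lo */
dma_addr_t readback = HILO_DMA_REGPAIR(addr);   /* recombine */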
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
-#define CORE_SPQE_PAGE_SIZE_BYTES 4096
#define X_FINAL_CLEANUP_AGG_INT 1
+
+#define EVENT_RING_PAGE_SIZE_BYTES 4096
+
#define NUM_OF_GLOBAL_QUEUES 128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
+
+#define ISCSI_CDU_TASK_SEG_TYPE 0
+#define RDMA_CDU_TASK_SEG_TYPE 1
+
+#define FW_ASSERT_GENERAL_ATTN_IDX 32
+
+#define MAX_PINNED_CCFC 32
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
-#define MSTORM_QZONE_SIZE 0
+#define MSTORM_QZONE_SIZE 16
#define USTORM_QZONE_SIZE 8
#define XSTORM_QZONE_SIZE 8
#define YSTORM_QZONE_SIZE 0
#define PSTORM_QZONE_SIZE 0
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+
+/********************************/
+/* CORE (LIGHT L2) FW CONSTANTS */
+/********************************/
+
+#define CORE_LL2_MAX_RAMROD_PER_CON 8
+#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1
+
+#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12
+
+#define CORE_SPQE_PAGE_SIZE_BYTES 4096
+
+#define MAX_NUM_LL2_RX_QUEUES 32
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 10
-#define FW_REVISION_VERSION 5
+#define FW_REVISION_VERSION 10
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -83,6 +132,20 @@
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
+/* Clock values */
+#define MASTER_CLK_FREQ_E4 (375e6)
+#define STORM_CLK_FREQ_E4 (1000e6)
+#define CLK25M_CLK_FREQ_E4 (25e6)
+
+/* Global PXP windows (GTT) */
+#define NUM_OF_GTT 19
+#define GTT_DWORD_SIZE_BITS 10
+#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
+#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
+
+/* Tools Version */
+#define TOOLS_VERSION 10
+
/*****************/
/* CDU CONSTANTS */
/*****************/
@@ -90,6 +153,8 @@
#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
/*****************/
/* DQ CONSTANTS */
/*****************/
@@ -115,6 +180,11 @@
#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -159,13 +229,16 @@
#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
/* XCM agg counter flag selection */
-#define DQ_XCM_CORE_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
/* UCM agg counter flag selection (HW) */
#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -178,9 +251,45 @@
#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7
/* UCM agg counter flag selection (FW) */
-#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
-#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
-
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+/* TCM agg counter flag selection (HW) */
+#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
+#define DQ_TCM_AGG_FLG_SHIFT_CF1 1
+#define DQ_TCM_AGG_FLG_SHIFT_CF2 2
+#define DQ_TCM_AGG_FLG_SHIFT_CF3 3
+#define DQ_TCM_AGG_FLG_SHIFT_CF4 4
+#define DQ_TCM_AGG_FLG_SHIFT_CF5 5
+#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
+#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
+/* TCM agg counter flag selection (FW) */
+#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+
+/* PWM address mapping */
+#define DQ_PWM_OFFSET_DPM_BASE 0x0
+#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM16_BASE 0x40
+#define DQ_PWM_OFFSET_XCM32_BASE 0x44
+#define DQ_PWM_OFFSET_UCM16_BASE 0x48
+#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
+#define DQ_PWM_OFFSET_UCM16_4 0x50
+#define DQ_PWM_OFFSET_TCM16_BASE 0x58
+#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
+#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
+
+#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4)
+#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
+#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
+#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
#define DQ_REGION_SHIFT (12)
/* DPM */
@@ -214,15 +323,17 @@
*/
#define CM_TX_PQ_BASE 0x200
+/* number of global Vport/QCN rate limiters */
+#define MAX_QM_GLOBAL_RLS 256
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
-#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1))
#define QM_BYTE_CRD_REG_WIDTH 24
-#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_SIGN_BIT BIT((QM_BYTE_CRD_REG_WIDTH - 1))
#define QM_WFQ_CRD_REG_WIDTH 32
-#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_SIGN_BIT BIT((QM_WFQ_CRD_REG_WIDTH - 1))
#define QM_RL_CRD_REG_WIDTH 32
-#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_SIGN_BIT BIT((QM_RL_CRD_REG_WIDTH - 1))
/*****************/
/* CAU CONSTANTS */
@@ -287,6 +398,17 @@
/* PXP CONSTANTS */
/*****************/
+/* Bars for Blocks */
+#define PXP_BAR_GRC 0
+#define PXP_BAR_TSDM 0
+#define PXP_BAR_USDM 0
+#define PXP_BAR_XSDM 0
+#define PXP_BAR_MSDM 0
+#define PXP_BAR_YSDM 0
+#define PXP_BAR_PSDM 0
+#define PXP_BAR_IGU 0
+#define PXP_BAR_DQ 1
+
/* PTT and GTT */
#define PXP_NUM_PF_WINDOWS 12
#define PXP_PER_PF_ENTRY_SIZE 8
@@ -334,6 +456,52 @@
(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+/* PF BAR */
+#define PXP_BAR0_START_GRC 0x0000
+#define PXP_BAR0_GRC_LENGTH 0x1C00000
+#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
+ PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU 0x1C00000
+#define PXP_BAR0_IGU_LENGTH 0x10000
+#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
+ PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM 0x1C80000
+#define PXP_BAR0_SDM_LENGTH 0x40000
+#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
+#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_MSDM 0x1D00000
+#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_USDM 0x1D80000
+#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_XSDM 0x1E00000
+#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_YSDM 0x1E80000
+#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_PSDM 0x1F00000
+#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1)
+
+/* VF BAR */
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_GRC 0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
+ PXP_VF_BAR0_GRC_LENGTH - 1)
#define PXP_VF_BAR0_START_IGU 0
#define PXP_VF_BAR0_IGU_LENGTH 0x3000
@@ -399,6 +567,20 @@
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+#define PXP_QUEUES_ZONE_MAX_NUM 320
+/*****************/
+/* PRM CONSTANTS */
+/*****************/
+#define PRM_DMA_PAD_BYTES_NUM 2
+/******************/
+/* SDMs CONSTANTS */
+/******************/
+#define SDM_OP_GEN_TRIG_NONE 0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
+#define SDM_OP_GEN_TRIG_AGG_INT 2
+#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
+#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
#define SDM_COMP_TYPE_NONE 0
#define SDM_COMP_TYPE_WAKE_THREAD 1
@@ -424,6 +606,8 @@
/* PRS CONSTANTS */
/*****************/
+#define PRS_GFT_CAM_LINES_NO_MATCH 31
+
/* Async data KCQ CQE */
struct async_data {
__le32 cid;
@@ -440,20 +624,6 @@ struct coalescing_timeset {
#define COALESCING_TIMESET_VALID_SHIFT 7
};
-struct common_prs_pf_msg_info {
- __le32 value;
-#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT 0
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT 1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT 2
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT 3
-#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK 0xFFFFFFF
-#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT 4
-};
-
struct common_queue_zone {
__le16 ring_drv_data_consumer;
__le16 reserved;
@@ -473,6 +643,19 @@ struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
+struct iscsi_eqe_data {
+ __le32 cid;
+ __le16 conn_id;
+ u8 error_code;
+ u8 error_pdu_opcode_reserved;
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
+#define ISCSI_EQE_DATA_RESERVED0_MASK 0x1
+#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
+};
+
struct malicious_vf_eqe_data {
u8 vf_id;
u8 err_id;
@@ -488,8 +671,10 @@ struct initial_cleanup_eqe_data {
union event_ring_data {
u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
+ struct iscsi_eqe_data iscsi_info;
struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
+ struct regpair roce_handle;
};
/* Event Ring Entry */
@@ -616,6 +801,52 @@ enum db_dest {
MAX_DB_DEST
};
+/* Enum of doorbell DPM types */
+enum db_dpm_type {
+ DPM_LEGACY,
+ DPM_ROCE,
+ DPM_L2_INLINE,
+ DPM_L2_BD,
+ MAX_DB_DPM_TYPE
+};
+
+/* Structure for doorbell data, in L2 DPM mode, for 1st db in a DPM burst */
+struct db_l2_dpm_data {
+ __le16 icid;
+ __le16 bd_prod;
+ __le32 params;
+#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
+#define DB_L2_DPM_DATA_SIZE_SHIFT 0
+#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
+#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
+#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF
+#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
+#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
+#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
+#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
+#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
+#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
+#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+};
+
+/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
+struct db_l2_dpm_sge {
+ struct regpair addr;
+ __le16 nbytes;
+ __le16 bitfields;
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
+#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
+#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
+#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
+ __le32 reserved2;
+};
+
/* Structure for doorbell address, in legacy mode */
struct db_legacy_addr {
__le32 addr;
@@ -627,6 +858,49 @@ struct db_legacy_addr {
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
+/* Structure for doorbell address, in PWM mode */
+struct db_pwm_addr {
+ __le32 addr;
+#define DB_PWM_ADDR_RESERVED0_MASK 0x7
+#define DB_PWM_ADDR_RESERVED0_SHIFT 0
+#define DB_PWM_ADDR_OFFSET_MASK 0x7F
+#define DB_PWM_ADDR_OFFSET_SHIFT 3
+#define DB_PWM_ADDR_WID_MASK 0x3
+#define DB_PWM_ADDR_WID_SHIFT 10
+#define DB_PWM_ADDR_DPI_MASK 0xFFFF
+#define DB_PWM_ADDR_DPI_SHIFT 12
+#define DB_PWM_ADDR_RESERVED1_MASK 0xF
+#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+};
+
+/* Parameters to RoCE firmware, passed in EDPM doorbell */
+struct db_roce_dpm_params {
+ __le32 params;
+#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30
+};
+
+/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
+struct db_roce_dpm_data {
+ __le16 icid;
+ __le16 prod_val;
+ struct db_roce_dpm_params params;
+};
+
/* Igu interrupt command */
enum igu_int_cmd {
IGU_INT_ENABLE = 0,
@@ -764,6 +1038,19 @@ struct pxp_ptt_entry {
struct pxp_pretend_cmd pretend;
};
+/* VF Zone A Permission Register. */
+struct pxp_vf_zone_a_permission {
+ __le32 control;
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+};
+
/* RSS hash type */
struct rdif_task_context {
__le32 initial_ref_tag;
@@ -831,6 +1118,7 @@ struct rdif_task_context {
__le32 reserved2;
};
+/* RSS hash type */
enum rss_hash_type {
RSS_HASH_TYPE_DEFAULT = 0,
RSS_HASH_TYPE_IPV4 = 1,
@@ -942,7 +1230,7 @@ struct tdif_task_context {
};
struct timers_context {
- __le32 logical_client0;
+ __le32 logical_client_0;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
@@ -951,7 +1239,7 @@ struct timers_context {
#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
- __le32 logical_client1;
+ __le32 logical_client_1;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
@@ -960,7 +1248,7 @@ struct timers_context {
#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
- __le32 logical_client2;
+ __le32 logical_client_2;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
@@ -978,3 +1266,4 @@ struct timers_context {
#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
};
#endif /* __COMMON_HSI__ */
+#endif
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index b5ebc697d05f..1aa0727c4136 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -13,9 +13,12 @@
/* ETH FW CONSTANTS */
/********************/
#define ETH_HSI_VER_MAJOR 3
-#define ETH_HSI_VER_MINOR 0
-#define ETH_CACHE_LINE_SIZE 64
+#define ETH_HSI_VER_MINOR 10
+
+#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
+#define ETH_CACHE_LINE_SIZE 64
+#define ETH_RX_CQE_GAP 32
#define ETH_MAX_RAMROD_PER_CON 8
#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
@@ -24,15 +27,25 @@
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
#define ETH_TX_MAX_LSO_HDR_NBD 4
#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
-#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510
+#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
+#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
+#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
+#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
@@ -59,6 +72,8 @@
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
+/* Control frame check constants */
+#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
struct eth_tx_1st_bd_flags {
u8 bitfields;
@@ -82,10 +97,10 @@ struct eth_tx_1st_bd_flags {
/* The parsing information data for the first tx bd of a given packet. */
struct eth_tx_data_1st_bd {
- __le16 vlan;
- u8 nbds;
- struct eth_tx_1st_bd_flags bd_flags;
- __le16 bitfields;
+ __le16 vlan;
+ u8 nbds;
+ struct eth_tx_1st_bd_flags bd_flags;
+ __le16 bitfields;
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
@@ -96,7 +111,7 @@ struct eth_tx_data_1st_bd {
/* The parsing information data for the second tx bd of a given packet. */
struct eth_tx_data_2nd_bd {
- __le16 tunn_ip_size;
+ __le16 tunn_ip_size;
__le16 bitfields1;
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
@@ -125,9 +140,14 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
+/* Firmware data for L2-EDPM packet. */
+struct eth_edpm_fw_data {
+ struct eth_tx_data_1st_bd data_1st_bd;
+ struct eth_tx_data_2nd_bd data_2nd_bd;
+ __le32 reserved;
+};
+
struct eth_fast_path_cqe_fw_debug {
- u8 reserved0;
- u8 reserved1;
__le16 reserved2;
};
@@ -148,6 +168,17 @@ struct eth_tunnel_parsing_flags {
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
};
+/* PMD flow control bits */
+struct eth_pmd_flow_flags {
+ u8 flags;
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+};
+
/* Regular ETH Rx FP CQE. */
struct eth_fast_path_rx_reg_cqe {
u8 type;
@@ -166,64 +197,63 @@ struct eth_fast_path_rx_reg_cqe {
u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num;
- u8 reserved[7];
+ u8 reserved[9];
struct eth_fast_path_cqe_fw_debug fw_debug;
u8 reserved1[3];
- u8 flags;
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
+ struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-continue ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_cont_cqe {
- u8 type;
- u8 tpa_agg_index;
- __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
- u8 reserved[5];
- u8 reserved1;
- __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 type;
+ u8 tpa_agg_index;
+ __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved;
+ u8 reserved1;
+ __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved3[3];
+ struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-end ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_end_cqe {
- u8 type;
- u8 tpa_agg_index;
- __le16 total_packet_len;
- u8 num_of_bds;
- u8 end_reason;
- __le16 num_of_coalesced_segs;
- __le32 ts_delta;
- __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
- u8 reserved1[3];
- u8 reserved2;
- __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ u8 type;
+ u8 tpa_agg_index;
+ __le16 total_packet_len;
+ u8 num_of_bds;
+ u8 end_reason;
+ __le16 num_of_coalesced_segs;
+ __le32 ts_delta;
+ __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ __le16 reserved1;
+ u8 reserved2;
+ struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-start ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_start_cqe {
- u8 type;
- u8 bitfields;
+ u8 type;
+ u8 bitfields;
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
- __le16 seg_len;
+ __le16 seg_len;
struct parsing_and_err_flags pars_flags;
- __le16 vlan_tag;
- __le32 rss_hash;
- __le16 len_on_first_bd;
- u8 placement_offset;
+ __le16 vlan_tag;
+ __le32 rss_hash;
+ __le16 len_on_first_bd;
+ u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags;
- u8 tpa_agg_index;
- u8 header_len;
- __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
+ u8 tpa_agg_index;
+ u8 header_len;
+ __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
struct eth_fast_path_cqe_fw_debug fw_debug;
+ u8 reserved;
+ struct eth_pmd_flow_flags pmd_flags;
};
/* The L4 pseudo checksum mode for Ethernet */
@@ -245,15 +275,7 @@ struct eth_slow_path_rx_cqe {
u8 reserved[25];
__le16 echo;
u8 reserved1;
- u8 flags;
-/* for PMD mode - valid indication */
-#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
-#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
-/* for PMD mode - valid toggle indication */
-#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
-#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
-#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
-#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
+ struct eth_pmd_flow_flags pmd_flags;
};
/* union for all ETH Rx CQE types */
@@ -276,6 +298,11 @@ enum eth_rx_cqe_type {
MAX_ETH_RX_CQE_TYPE
};
+struct eth_rx_pmd_cqe {
+ union eth_rx_cqe cqe;
+ u8 reserved[ETH_RX_CQE_GAP];
+};
+
enum eth_rx_tunn_type {
ETH_RX_NO_TUNN,
ETH_RX_TUNN_GENEVE,
@@ -313,8 +340,8 @@ struct eth_tx_2nd_bd {
/* The parsing information data for the third tx bd of a given packet. */
struct eth_tx_data_3rd_bd {
- __le16 lso_mss;
- __le16 bitfields;
+ __le16 lso_mss;
+ __le16 bitfields;
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
@@ -323,8 +350,8 @@ struct eth_tx_data_3rd_bd {
#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
- u8 tunn_l4_hdr_start_offset_w;
- u8 tunn_hdr_size_w;
+ u8 tunn_l4_hdr_start_offset_w;
+ u8 tunn_hdr_size_w;
};
/* The third tx bd of a given packet */
@@ -355,10 +382,10 @@ struct eth_tx_bd {
};
union eth_tx_bd_types {
- struct eth_tx_1st_bd first_bd;
- struct eth_tx_2nd_bd second_bd;
- struct eth_tx_3rd_bd third_bd;
- struct eth_tx_bd reg_bd;
+ struct eth_tx_1st_bd first_bd;
+ struct eth_tx_2nd_bd second_bd;
+ struct eth_tx_3rd_bd third_bd;
+ struct eth_tx_bd reg_bd;
};
/* Mstorm Queue Zone */
@@ -389,8 +416,8 @@ struct eth_db_data {
#define ETH_DB_DATA_RESERVED_SHIFT 5
#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
- u8 agg_flags;
- __le16 bd_prod;
+ u8 agg_flags;
+ __le16 bd_prod;
};
#endif /* __ETH_COMMON__ */
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index b3c0feb15ae9..8f64b1223c2f 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -311,7 +311,7 @@ struct iscsi_login_req_hdr {
#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
- __le32 isid_TABC;
+ __le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
__le32 itt;
@@ -464,7 +464,7 @@ struct iscsi_login_response_hdr {
#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
- __le32 isid_TABC;
+ __le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
__le32 itt;
@@ -688,8 +688,7 @@ union iscsi_cqe {
enum iscsi_cqes_type {
ISCSI_CQE_TYPE_SOLICITED = 1,
ISCSI_CQE_TYPE_UNSOLICITED,
- ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE
- ,
+ ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
ISCSI_CQE_TYPE_TASK_CLEANUP,
ISCSI_CQE_TYPE_DUMMY,
MAX_ISCSI_CQES_TYPE
@@ -769,9 +768,9 @@ enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_UPDATE_CONN,
ISCSI_EVENT_TYPE_CLEAR_SQ,
ISCSI_EVENT_TYPE_TERMINATE_CONN,
+ ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
- RESERVED8,
RESERVED9,
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
@@ -867,6 +866,7 @@ enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
+ ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
MAX_ISCSI_RAMROD_CMD_ID
};
@@ -883,6 +883,16 @@ union iscsi_seq_num {
__le16 r2t_sn;
};
+struct iscsi_spe_conn_mac_update {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ __le16 remote_mac_addr_lo;
+ __le16 remote_mac_addr_mid;
+ __le16 remote_mac_addr_hi;
+ u8 reserved0[2];
+};
+
struct iscsi_spe_conn_offload {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
@@ -1302,14 +1312,6 @@ struct mstorm_iscsi_stats_drv {
struct regpair iscsi_rx_dropped_pdus_task_not_valid;
};
-struct ooo_opaque {
- __le32 cid;
- u8 drop_isle;
- u8 drop_size;
- u8 ooo_opcode;
- u8 ooo_isle;
-};
-
struct pstorm_iscsi_stats_drv {
struct regpair iscsi_tx_bytes_cnt;
struct regpair iscsi_tx_packet_cnt;
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 7e441bdeabdc..72d88cf3ca25 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -16,19 +16,6 @@
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
-/* dma_addr_t manip */
-#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
-#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
-#define DMA_REGPAIR_LE(x, val) do { \
- (x).hi = DMA_HI_LE((val)); \
- (x).lo = DMA_LO_LE((val)); \
- } while (0)
-
-#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
-#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
-#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
-
enum qed_chain_mode {
/* Each Page contains a next pointer at its end */
QED_CHAIN_MODE_NEXT_PTR,
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 4475a9d8ae15..33c24ebc9b7f 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -23,6 +23,9 @@ struct qed_dev_eth_info {
u8 port_mac[ETH_ALEN];
u8 num_vlan_filters;
+
+ /* Legacy VF - this affects the datapath, so qede has to know */
+ bool is_legacy;
};
struct qed_update_vport_rss_params {
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index d6c4177df7cb..f9ae903bbb84 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -34,6 +34,8 @@ enum dcbx_protocol_type {
DCBX_MAX_PROTOCOL_TYPE
};
+#define QED_ROCE_PROTOCOL_INDEX (3)
+
#ifdef CONFIG_DCB
#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
@@ -260,15 +262,15 @@ struct qed_dev_info {
/* MFW version */
u32 mfw_rev;
- bool rdma_supported;
-
u32 flash_size;
u8 mf_mode;
bool tx_switching;
+ bool rdma_supported;
};
enum qed_sb_type {
QED_SB_TYPE_L2_QUEUE,
+ QED_SB_TYPE_CNQ,
};
enum qed_protocol {
@@ -276,6 +278,21 @@ enum qed_protocol {
QED_PROTOCOL_ISCSI,
};
+enum qed_link_mode_bits {
+ QED_LM_FIBRE_BIT = BIT(0),
+ QED_LM_Autoneg_BIT = BIT(1),
+ QED_LM_Asym_Pause_BIT = BIT(2),
+ QED_LM_Pause_BIT = BIT(3),
+ QED_LM_1000baseT_Half_BIT = BIT(4),
+ QED_LM_1000baseT_Full_BIT = BIT(5),
+ QED_LM_10000baseKR_Full_BIT = BIT(6),
+ QED_LM_25000baseKR_Full_BIT = BIT(7),
+ QED_LM_40000baseLR4_Full_BIT = BIT(8),
+ QED_LM_50000baseKR2_Full_BIT = BIT(9),
+ QED_LM_100000baseKR4_Full_BIT = BIT(10),
+ QED_LM_COUNT = 11
+};
+
struct qed_link_params {
bool link_up;
@@ -303,9 +320,11 @@ struct qed_link_params {
struct qed_link_output {
bool link_up;
- u32 supported_caps; /* In SUPPORTED defs */
- u32 advertised_caps; /* In ADVERTISED defs */
- u32 lp_caps; /* In ADVERTISED defs */
+ /* In QED_LM_* defs */
+ u32 supported_caps;
+ u32 advertised_caps;
+ u32 lp_caps;
+
u32 speed; /* In Mb/s */
u8 duplex; /* In DUPLEX defs */
u8 port; /* In PORT defs */
@@ -438,6 +457,10 @@ struct qed_common_ops {
void (*simd_handler_clean)(struct qed_dev *cdev,
int index);
+ int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
+
+ int (*dbg_all_data_size) (struct qed_dev *cdev);
+
/**
* @brief can_link_change - can the instance change the link or not
*
@@ -606,8 +629,9 @@ enum DP_MODULE {
QED_MSG_SP = 0x100000,
QED_MSG_STORAGE = 0x200000,
QED_MSG_CXT = 0x800000,
+ QED_MSG_LL2 = 0x1000000,
QED_MSG_ILT = 0x2000000,
- QED_MSG_ROCE = 0x4000000,
+ QED_MSG_RDMA = 0x4000000,
QED_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
new file mode 100644
index 000000000000..fd75c265dba3
--- /dev/null
+++ b/include/linux/qed/qed_ll2_if.h
@@ -0,0 +1,139 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_LL2_IF_H
+#define _QED_LL2_IF_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+
+struct qed_ll2_stats {
+ u64 gsi_invalid_hdr;
+ u64 gsi_invalid_pkt_length;
+ u64 gsi_unsupported_pkt_typ;
+ u64 gsi_crcchksm_error;
+
+ u64 packet_too_big_discard;
+ u64 no_buff_discard;
+
+ u64 rcv_ucast_bytes;
+ u64 rcv_mcast_bytes;
+ u64 rcv_bcast_bytes;
+ u64 rcv_ucast_pkts;
+ u64 rcv_mcast_pkts;
+ u64 rcv_bcast_pkts;
+
+ u64 sent_ucast_bytes;
+ u64 sent_mcast_bytes;
+ u64 sent_bcast_bytes;
+ u64 sent_ucast_pkts;
+ u64 sent_mcast_pkts;
+ u64 sent_bcast_pkts;
+};
+
+#define QED_LL2_UNUSED_HANDLE (0xff)
+
+struct qed_ll2_cb_ops {
+ int (*rx_cb)(void *, struct sk_buff *, u32, u32);
+ int (*tx_cb)(void *, struct sk_buff *, bool);
+};
+
+struct qed_ll2_params {
+ u16 mtu;
+ bool drop_ttl0_packets;
+ bool rx_vlan_stripping;
+ u8 tx_tc;
+ bool frags_mapped;
+ u8 ll2_mac_address[ETH_ALEN];
+};
+
+struct qed_ll2_ops {
+/**
+ * @brief start - initializes ll2
+ *
+ * @param cdev
+ * @param params - protocol driver configuration for the ll2.
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
+
+/**
+ * @brief stop - stops the ll2
+ *
+ * @param cdev
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*stop)(struct qed_dev *cdev);
+
+/**
+ * @brief start_xmit - transmits an skb over the ll2 interface
+ *
+ * @param cdev
+ * @param skb
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb);
+
+/**
+ * @brief register_cb_ops - registers the protocol driver's Rx/Tx callbacks.
+ * Should be called before `start'.
+ *
+ * @param cdev
+ * @param cookie - to be passed to the callback functions.
+ * @param ops - the callback functions to register for Rx / Tx.
+ */
+ void (*register_cb_ops)(struct qed_dev *cdev,
+ const struct qed_ll2_cb_ops *ops,
+ void *cookie);
+
+/**
+ * @brief get LL2 related statistics
+ *
+ * @param cdev
+ * @param stats - pointer to the struct that will be filled with the stats.
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
+};
+
+#ifdef CONFIG_QED_LL2
+int qed_ll2_alloc_if(struct qed_dev *);
+void qed_ll2_dealloc_if(struct qed_dev *);
+#else
+static const struct qed_ll2_ops qed_ll2_ops_pass = {
+ .start = NULL,
+ .stop = NULL,
+ .start_xmit = NULL,
+ .register_cb_ops = NULL,
+ .get_stats = NULL,
+};
+
+static inline int qed_ll2_alloc_if(struct qed_dev *cdev)
+{
+ return 0;
+}
+
+static inline void qed_ll2_dealloc_if(struct qed_dev *cdev)
+{
+}
+#endif
+#endif
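
The qed_ll2_ops declared in this new header are meant to be driven by an upper protocol driver (the RoCE code uses the light-L2 path for its GSI traffic). A minimal sketch of how such a driver might register its callbacks and bring the interface up; the my_* names are illustrative and the qed_ll2_ops pointer is assumed to have been obtained from the qed core module:

#include <linux/etherdevice.h>
#include <linux/qed/qed_ll2_if.h>

/* Hypothetical protocol-driver context, not part of this patch */
struct my_ll2_ctx {
	struct qed_dev *cdev;
	const struct qed_ll2_ops *ll2_ops;
};

static int my_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
	/* Hand the received buffer to the protocol layer, then release it */
	dev_kfree_skb_any(skb);
	return 0;
}

static int my_ll2_tx_done(void *cookie, struct sk_buff *skb, bool success)
{
	dev_kfree_skb_any(skb);
	return 0;
}

static const struct qed_ll2_cb_ops my_ll2_cbs = {
	.rx_cb = my_ll2_rx,
	.tx_cb = my_ll2_tx_done,
};

static int my_ll2_start(struct my_ll2_ctx *ctx, const u8 *mac)
{
	struct qed_ll2_params params = {
		.mtu = 1500,
		.drop_ttl0_packets = true,
		.rx_vlan_stripping = true,
	};

	ether_addr_copy(params.ll2_mac_address, mac);

	/* Callbacks must be registered before start() */
	ctx->ll2_ops->register_cb_ops(ctx->cdev, &my_ll2_cbs, ctx);

	return ctx->ll2_ops->start(ctx->cdev, &params);
}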
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
new file mode 100644
index 000000000000..53047d3fa678
--- /dev/null
+++ b/include/linux/qed/qed_roce_if.h
@@ -0,0 +1,604 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_ROCE_IF_H
+#define _QED_ROCE_IF_H
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include <linux/qed/rdma_common.h>
+
+enum qed_roce_ll2_tx_dest {
+ /* Light L2 TX Destination to the Network */
+ QED_ROCE_LL2_TX_DEST_NW,
+
+ /* Light L2 TX Destination to the Loopback */
+ QED_ROCE_LL2_TX_DEST_LB,
+ QED_ROCE_LL2_TX_DEST_MAX
+};
+
+#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
+
+/* rdma interface */
+
+enum qed_roce_qp_state {
+ QED_ROCE_QP_STATE_RESET,
+ QED_ROCE_QP_STATE_INIT,
+ QED_ROCE_QP_STATE_RTR,
+ QED_ROCE_QP_STATE_RTS,
+ QED_ROCE_QP_STATE_SQD,
+ QED_ROCE_QP_STATE_ERR,
+ QED_ROCE_QP_STATE_SQE
+};
+
+enum qed_rdma_tid_type {
+ QED_RDMA_TID_REGISTERED_MR,
+ QED_RDMA_TID_FMR,
+ QED_RDMA_TID_MW_TYPE1,
+ QED_RDMA_TID_MW_TYPE2A
+};
+
+struct qed_rdma_events {
+ void *context;
+ void (*affiliated_event)(void *context, u8 fw_event_code,
+ void *fw_handle);
+ void (*unaffiliated_event)(void *context, u8 event_code);
+};
+
+struct qed_rdma_device {
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u64 fw_ver;
+
+ u64 node_guid;
+ u64 sys_image_guid;
+
+ u8 max_cnq;
+ u8 max_sge;
+ u8 max_srq_sge;
+ u16 max_inline;
+ u32 max_wqe;
+ u32 max_srq_wqe;
+ u8 max_qp_resp_rd_atomic_resc;
+ u8 max_qp_req_rd_atomic_resc;
+ u64 max_dev_resp_rd_atomic_resc;
+ u32 max_cq;
+ u32 max_qp;
+ u32 max_srq;
+ u32 max_mr;
+ u64 max_mr_size;
+ u32 max_cqe;
+ u32 max_mw;
+ u32 max_fmr;
+ u32 max_mr_mw_fmr_pbl;
+ u64 max_mr_mw_fmr_size;
+ u32 max_pd;
+ u32 max_ah;
+ u8 max_pkey;
+ u16 max_srq_wr;
+ u8 max_stats_queues;
+ u32 dev_caps;
+
+ /* Ability to support RNR-NAK generation */
+
+#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1
+#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0
+ /* Ability to support shutdown port */
+#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
+#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
+ /* Ability to support port active event */
+#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
+#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
+ /* Ability to support port change event */
+#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
+#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
+ /* Ability to support system image GUID */
+#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1
+#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4
+ /* Ability to support a bad P_Key counter */
+#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
+ /* Ability to support atomic operations */
+#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1
+#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6
+#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1
+#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7
+ /* Ability to support modifying the maximum number
+ * outstanding work requests per QP
+ */
+#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
+#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
+ /* Ability to support automatic path migration */
+#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
+#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
+ /* Ability to support the base memory management extensions */
+#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
+#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
+ /* Ability to support multiple page sizes per memory region */
+#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
+ /* Ability to support block list physical buffer list */
+#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1
+#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13
+ /* Ability to support zero-based virtual addresses */
+#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1
+#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14
+ /* Ability to support local invalidate fencing */
+#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
+#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
+ /* Ability to support loopback on QP */
+#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1
+#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16
+ u64 page_size_caps;
+ u8 dev_ack_delay;
+ u32 reserved_lkey;
+ u32 bad_pkey_counter;
+ struct qed_rdma_events events;
+};
+
+enum qed_port_state {
+ QED_RDMA_PORT_UP,
+ QED_RDMA_PORT_DOWN,
+};
+
+enum qed_roce_capability {
+ QED_ROCE_V1 = 1 << 0,
+ QED_ROCE_V2 = 1 << 1,
+};
+
+struct qed_rdma_port {
+ enum qed_port_state port_state;
+ int link_speed;
+ u64 max_msg_size;
+ u8 source_gid_table_len;
+ void *source_gid_table_ptr;
+ u8 pkey_table_len;
+ void *pkey_table_ptr;
+ u32 pkey_bad_counter;
+ enum qed_roce_capability capability;
+};
+
+struct qed_rdma_cnq_params {
+ u8 num_pbl_pages;
+ u64 pbl_ptr;
+};
+
+/* The CQ Mode affects the CQ doorbell transaction size.
+ * 64/32 bit machines should configure to 32/16 bits respectively.
+ */
+enum qed_rdma_cq_mode {
+ QED_RDMA_CQ_MODE_16_BITS,
+ QED_RDMA_CQ_MODE_32_BITS,
+};
+
+struct qed_roce_dcqcn_params {
+ u8 notification_point;
+ u8 reaction_point;
+
+ /* fields for notification point */
+ u32 cnp_send_timeout;
+
+ /* fields for reaction point */
+ u32 rl_bc_rate;
+ u16 rl_max_rate;
+ u16 rl_r_ai;
+ u16 rl_r_hai;
+ u16 dcqcn_g;
+ u32 dcqcn_k_us;
+ u32 dcqcn_timeout_us;
+};
+
+struct qed_rdma_start_in_params {
+ struct qed_rdma_events *events;
+ struct qed_rdma_cnq_params cnq_pbl_list[128];
+ u8 desired_cnq;
+ enum qed_rdma_cq_mode cq_mode;
+ struct qed_roce_dcqcn_params dcqcn_params;
+ u16 max_mtu;
+ u8 mac_addr[ETH_ALEN];
+ u8 iwarp_flags;
+};
+
+struct qed_rdma_add_user_out_params {
+ u16 dpi;
+ u64 dpi_addr;
+ u64 dpi_phys_addr;
+ u32 dpi_size;
+};
+
+enum roce_mode {
+ ROCE_V1,
+ ROCE_V2_IPV4,
+ ROCE_V2_IPV6,
+ MAX_ROCE_MODE
+};
+
+union qed_gid {
+ u8 bytes[16];
+ u16 words[8];
+ u32 dwords[4];
+ u64 qwords[2];
+ u32 ipv4_addr;
+};
+
+struct qed_rdma_register_tid_in_params {
+ u32 itid;
+ enum qed_rdma_tid_type tid_type;
+ u8 key;
+ u16 pd;
+ bool local_read;
+ bool local_write;
+ bool remote_read;
+ bool remote_write;
+ bool remote_atomic;
+ bool mw_bind;
+ u64 pbl_ptr;
+ bool pbl_two_level;
+ u8 pbl_page_size_log;
+ u8 page_size_log;
+ u32 fbo;
+ u64 length;
+ u64 vaddr;
+ bool zbva;
+ bool phy_mr;
+ bool dma_mr;
+
+ bool dif_enabled;
+ u64 dif_error_addr;
+ u64 dif_runt_addr;
+};
+
+struct qed_rdma_create_cq_in_params {
+ u32 cq_handle_lo;
+ u32 cq_handle_hi;
+ u32 cq_size;
+ u16 dpi;
+ bool pbl_two_level;
+ u64 pbl_ptr;
+ u16 pbl_num_pages;
+ u8 pbl_page_size_log;
+ u8 cnq_id;
+ u16 int_timeout;
+};
+
+struct qed_rdma_create_srq_in_params {
+ u64 pbl_base_addr;
+ u64 prod_pair_addr;
+ u16 num_pages;
+ u16 pd_id;
+ u16 page_size;
+};
+
+struct qed_rdma_destroy_cq_in_params {
+ u16 icid;
+};
+
+struct qed_rdma_destroy_cq_out_params {
+ u16 num_cq_notif;
+};
+
+struct qed_rdma_create_qp_in_params {
+ u32 qp_handle_lo;
+ u32 qp_handle_hi;
+ u32 qp_handle_async_lo;
+ u32 qp_handle_async_hi;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+ u16 pd;
+ u16 dpi;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ u64 sq_pbl_ptr;
+ u8 max_sq_sges;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ u64 rq_pbl_ptr;
+ u16 srq_id;
+ u8 stats_queue;
+};
+
+struct qed_rdma_create_qp_out_params {
+ u32 qp_id;
+ u16 icid;
+ void *rq_pbl_virt;
+ dma_addr_t rq_pbl_phys;
+ void *sq_pbl_virt;
+ dma_addr_t sq_pbl_phys;
+};
+
+struct qed_rdma_modify_qp_in_params {
+ u32 modify_flags;
+#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
+#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
+#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
+#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
+#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
+#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
+#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
+#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
+#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
+#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
+#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
+#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
+#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14
+
+ enum qed_roce_qp_state new_state;
+ u16 pkey;
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+ u32 dest_qp;
+ bool lb_indication;
+ u16 mtu;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u32 flow_label;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ u16 udp_src_port;
+
+ u16 vlan_id;
+
+ u32 rq_psn;
+ u32 sq_psn;
+ u8 max_rd_atomic_resp;
+ u8 max_rd_atomic_req;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+ bool use_local_mac;
+ enum roce_mode roce_mode;
+};
+
+struct qed_rdma_query_qp_out_params {
+ enum qed_roce_qp_state state;
+ u32 rq_psn;
+ u32 sq_psn;
+ bool draining;
+ u16 mtu;
+ u32 dest_qp;
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ u32 flow_label;
+ u8 hop_limit_ttl;
+ u8 traffic_class_tos;
+ u32 timeout;
+ u8 rnr_retry;
+ u8 retry_cnt;
+ u8 min_rnr_nak_timer;
+ u16 pkey_index;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ bool sqd_async;
+};
+
+struct qed_rdma_create_srq_out_params {
+ u16 srq_id;
+};
+
+struct qed_rdma_destroy_srq_in_params {
+ u16 srq_id;
+};
+
+struct qed_rdma_modify_srq_in_params {
+ u32 wqe_limit;
+ u16 srq_id;
+};
+
+struct qed_rdma_stats_out_params {
+ u64 sent_bytes;
+ u64 sent_pkts;
+ u64 rcv_bytes;
+ u64 rcv_pkts;
+};
+
+struct qed_rdma_counters_out_params {
+ u64 pd_count;
+ u64 max_pd;
+ u64 dpi_count;
+ u64 max_dpi;
+ u64 cq_count;
+ u64 max_cq;
+ u64 qp_count;
+ u64 max_qp;
+ u64 tid_count;
+ u64 max_tid;
+};
+
+#define QED_ROCE_TX_HEAD_FAILURE (1)
+#define QED_ROCE_TX_FRAG_FAILURE (2)
+
+struct qed_roce_ll2_header {
+ void *vaddr;
+ dma_addr_t baddr;
+ size_t len;
+};
+
+struct qed_roce_ll2_buffer {
+ dma_addr_t baddr;
+ size_t len;
+};
+
+struct qed_roce_ll2_packet {
+ struct qed_roce_ll2_header header;
+ int n_seg;
+ struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
+ int roce_mode;
+ enum qed_roce_ll2_tx_dest tx_dest;
+};
+
+struct qed_roce_ll2_tx_params {
+ int reserved;
+};
+
+struct qed_roce_ll2_rx_params {
+ u16 vlan_id;
+ u8 smac[ETH_ALEN];
+ int rc;
+};
+
+struct qed_roce_ll2_cbs {
+ void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
+
+ void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_rx_params *params);
+};
+
+struct qed_roce_ll2_params {
+ u16 max_rx_buffers;
+ u16 max_tx_buffers;
+ u16 mtu;
+ u8 mac_address[ETH_ALEN];
+ struct qed_roce_ll2_cbs cbs;
+ void *cb_cookie;
+};
+
+struct qed_roce_ll2_info {
+ u8 handle;
+ struct qed_roce_ll2_cbs cbs;
+ u8 mac_address[ETH_ALEN];
+ void *cb_cookie;
+
+ /* Lock to protect ll2 */
+ struct mutex lock;
+};
+
+enum qed_rdma_type {
+ QED_RDMA_TYPE_ROCE,
+};
+
+struct qed_dev_rdma_info {
+ struct qed_dev_info common;
+ enum qed_rdma_type rdma_type;
+};
+
+struct qed_rdma_ops {
+ const struct qed_common_ops *common;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info);
+ void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);
+
+ int (*rdma_init)(struct qed_dev *dev,
+ struct qed_rdma_start_in_params *iparams);
+
+ int (*rdma_add_user)(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *oparams);
+
+ void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
+ int (*rdma_stop)(void *rdma_cxt);
+ struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
+ struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
+ int (*rdma_get_start_sb)(struct qed_dev *cdev);
+ int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
+ void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
+ int (*rdma_get_rdma_int)(struct qed_dev *cdev,
+ struct qed_int_info *info);
+ int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
+ int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
+ void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
+ int (*rdma_create_cq)(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid);
+ int (*rdma_destroy_cq)(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *iparams,
+ struct qed_rdma_destroy_cq_out_params *oparams);
+ struct qed_rdma_qp *
+ (*rdma_create_qp)(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *iparams,
+ struct qed_rdma_create_qp_out_params *oparams);
+
+ int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *iparams);
+
+ int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *oparams);
+ int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
+ int
+ (*rdma_register_tid)(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *iparams);
+ int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
+ int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
+ void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
+ int (*roce_ll2_start)(struct qed_dev *cdev,
+ struct qed_roce_ll2_params *params);
+ int (*roce_ll2_stop)(struct qed_dev *cdev);
+ int (*roce_ll2_tx)(struct qed_dev *cdev,
+ struct qed_roce_ll2_packet *packet,
+ struct qed_roce_ll2_tx_params *params);
+ int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
+ struct qed_roce_ll2_buffer *buf,
+ u64 cookie, u8 notify_fw);
+ int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ u8 *new_mac_address);
+ int (*roce_ll2_stats)(struct qed_dev *cdev,
+ struct qed_ll2_stats *stats);
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void);
+
+#endif
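
qed_get_rdma_ops() is the single entry point through which the qedr driver reaches the ops table above. A minimal probe-time sketch built only from the declarations in this header; my_rdma_probe() and its error handling are illustrative rather than lifted from qedr:

#include <linux/kernel.h>
#include <linux/qed/qed_roce_if.h>

static int my_rdma_probe(struct qed_dev *cdev)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	struct qed_dev_rdma_info info;
	struct qed_rdma_device *qdev;
	void *rdma_cxt;
	int rc;

	if (!ops)
		return -ENODEV;

	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		return rc;
	if (info.rdma_type != QED_RDMA_TYPE_ROCE)
		return -ENODEV;

	rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
	qdev = ops->rdma_query_device(rdma_cxt);
	if (!qdev)
		return -EINVAL;

	pr_info("qed rdma: up to %u QPs, %u CQs, %u CNQs\n",
		qdev->max_qp, qdev->max_cq, qdev->max_cnq);
	return 0;
}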
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
new file mode 100644
index 000000000000..99fbe6d55acb
--- /dev/null
+++ b/include/linux/qed/qede_roce.h
@@ -0,0 +1,88 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef QEDE_ROCE_H
+#define QEDE_ROCE_H
+
+struct qedr_dev;
+struct qed_dev;
+struct qede_dev;
+
+enum qede_roce_event {
+ QEDE_UP,
+ QEDE_DOWN,
+ QEDE_CHANGE_ADDR,
+ QEDE_CLOSE
+};
+
+struct qede_roce_event_work {
+ struct list_head list;
+ struct work_struct work;
+ void *ptr;
+ enum qede_roce_event event;
+};
+
+struct qedr_driver {
+ unsigned char name[32];
+
+ struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *,
+ struct net_device *);
+
+ void (*remove)(struct qedr_dev *);
+ void (*notify)(struct qedr_dev *, enum qede_roce_event);
+};
+
+/* APIs for the RoCE driver to register callback handlers,
+ * which are invoked when the device is added or removed, or on ifup/ifdown.
+ */
+int qede_roce_register_driver(struct qedr_driver *drv);
+void qede_roce_unregister_driver(struct qedr_driver *drv);
+
+bool qede_roce_supported(struct qede_dev *dev);
+
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+int qede_roce_dev_add(struct qede_dev *dev);
+void qede_roce_dev_event_open(struct qede_dev *dev);
+void qede_roce_dev_event_close(struct qede_dev *dev);
+void qede_roce_dev_remove(struct qede_dev *dev);
+void qede_roce_event_changeaddr(struct qede_dev *qedr);
+#else
+static inline int qede_roce_dev_add(struct qede_dev *dev)
+{
+ return 0;
+}
+
+static inline void qede_roce_dev_event_open(struct qede_dev *dev) {}
+static inline void qede_roce_dev_event_close(struct qede_dev *dev) {}
+static inline void qede_roce_dev_remove(struct qede_dev *dev) {}
+static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {}
+#endif
+#endif
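
qede_roce_register_driver()/qede_roce_unregister_driver() are how a qedr-style RoCE module attaches itself to qede. A skeleton registration with stubbed-out callbacks; the my_qedr_* names are illustrative:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/qed/qede_roce.h>

static struct qedr_dev *my_qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				    struct net_device *ndev)
{
	/* Allocate and initialise the RoCE device here; NULL means failure */
	return NULL;
}

static void my_qedr_remove(struct qedr_dev *dev)
{
}

static void my_qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
	/* React to QEDE_UP / QEDE_DOWN / QEDE_CHANGE_ADDR / QEDE_CLOSE */
}

static struct qedr_driver my_qedr_driver = {
	.name	= "my_qedr",
	.add	= my_qedr_add,
	.remove	= my_qedr_remove,
	.notify	= my_qedr_notify,
};

static int __init my_qedr_init(void)
{
	return qede_roce_register_driver(&my_qedr_driver);
}

static void __exit my_qedr_exit(void)
{
	qede_roce_unregister_driver(&my_qedr_driver);
}

module_init(my_qedr_init);
module_exit(my_qedr_exit);
MODULE_LICENSE("GPL");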
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 187991c1f439..7663725faa94 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -28,6 +28,7 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index accba0e6b704..dc3889d1bbe6 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -11,6 +11,14 @@
#define TCP_INVALID_TIMEOUT_VAL -1
+struct ooo_opaque {
+ __le32 cid;
+ u8 drop_isle;
+ u8 drop_size;
+ u8 ooo_opcode;
+ u8 ooo_isle;
+};
+
enum tcp_connect_mode {
TCP_CONNECT_ACTIVE,
TCP_CONNECT_PASSIVE,
@@ -18,14 +26,10 @@ enum tcp_connect_mode {
};
struct tcp_init_params {
- __le32 max_cwnd;
- __le16 dup_ack_threshold;
+ __le32 two_msl_timer;
__le16 tx_sws_timer;
- __le16 min_rto;
- __le16 min_rto_rt;
- __le16 max_rto;
u8 maxfinrt;
- u8 reserved[1];
+ u8 reserved[9];
};
enum tcp_ip_version {
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 3eef0802a0cd..5c132d3188be 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,7 +1,7 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
- * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
@@ -53,6 +53,11 @@ struct rhash_head {
struct rhash_head __rcu *next;
};
+struct rhlist_head {
+ struct rhash_head rhead;
+ struct rhlist_head __rcu *next;
+};
+
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
@@ -137,6 +142,7 @@ struct rhashtable_params {
* @key_len: Key length for hashfn
* @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list
@@ -147,12 +153,21 @@ struct rhashtable {
unsigned int key_len;
unsigned int elasticity;
struct rhashtable_params p;
+ bool rhlist;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
};
/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhashtable
+ */
+struct rhltable {
+ struct rhashtable ht;
+};
+
+/**
* struct rhashtable_walker - Hash table walker
* @list: List entry on list of walkers
* @tbl: The table that we were walking over
@@ -163,9 +178,10 @@ struct rhashtable_walker {
};
/**
- * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * struct rhashtable_iter - Hash table iterator
* @ht: Table to iterate through
* @p: Current pointer
+ * @list: Current hash list pointer
* @walker: Associated rhashtable walker
* @slot: Current slot
* @skip: Number of entries to skip in slot
@@ -173,7 +189,8 @@ struct rhashtable_walker {
struct rhashtable_iter {
struct rhashtable *ht;
struct rhash_head *p;
- struct rhashtable_walker *walker;
+ struct rhlist_head *list;
+ struct rhashtable_walker walker;
unsigned int slot;
unsigned int skip;
};
@@ -339,15 +356,14 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+ const struct rhashtable_params *params);
-struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
- const void *key,
- struct rhash_head *obj,
- struct bucket_table *old_tbl);
-int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
+void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj);
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
- gfp_t gfp);
+void rhashtable_walk_enter(struct rhashtable *ht,
+ struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
@@ -506,6 +522,31 @@ void rhashtable_destroy(struct rhashtable *ht);
rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
tbl, hash, member)
+/**
+ * rhl_for_each_rcu - iterate over rcu hash table list
+ * @pos: the &struct rhlist_head to use as a loop cursor.
+ * @list: the head of the list
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_rcu(pos, list) \
+ for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
+
+/**
+ * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhlist_head to use as a loop cursor.
+ * @list: the head of the list
+ * @member: name of the &struct rhlist_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
+ for (pos = list; pos && rht_entry(tpos, pos, member); \
+ pos = rcu_dereference_raw(pos->next))
+
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
const void *obj)
{
@@ -515,18 +556,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
-/**
- * rhashtable_lookup_fast - search hash table, inlined version
- * @ht: hash table
- * @key: the pointer to the key
- * @params: hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * Returns the first entry on which the compare function returned true.
- */
-static inline void *rhashtable_lookup_fast(
+/* Internal function, do not use. */
+static inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
@@ -538,8 +569,6 @@ static inline void *rhashtable_lookup_fast(
struct rhash_head *he;
unsigned int hash;
- rcu_read_lock();
-
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
@@ -548,8 +577,7 @@ restart:
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
continue;
- rcu_read_unlock();
- return rht_obj(ht, he);
+ return he;
}
/* Ensure we see any new tables. */
@@ -558,89 +586,165 @@ restart:
tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(tbl))
goto restart;
- rcu_read_unlock();
return NULL;
}
-/* Internal function, please use rhashtable_insert_fast() instead */
-static inline int __rhashtable_insert_fast(
- struct rhashtable *ht, const void *key, struct rhash_head *obj,
+/**
+ * rhashtable_lookup - search hash table
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup(
+ struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+
+ return he ? rht_obj(ht, he) : NULL;
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, without RCU read lock
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Only use this function when you have other mechanisms guaranteeing
+ * that the object won't go away after the RCU read lock is released.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ void *obj;
+
+ rcu_read_lock();
+ obj = rhashtable_lookup(ht, key, params);
+ rcu_read_unlock();
+
+ return obj;
+}
+
+/**
+ * rhltable_lookup - search hash list table
+ * @hlt: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. All matching entries are returned
+ * in a list.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the list of entries that match the given key.
+ */
+static inline struct rhlist_head *rhltable_lookup(
+ struct rhltable *hlt, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+
+ return he ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
+
+/* Internal function, please use rhashtable_insert_fast() instead. This
+ * function returns the existing element already in the hash table if there
+ * is a clash, otherwise it returns an error via ERR_PTR().
+ */
+static inline void *__rhashtable_insert_fast(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params, bool rhlist)
+{
struct rhashtable_compare_arg arg = {
.ht = ht,
.key = key,
};
- struct bucket_table *tbl, *new_tbl;
+ struct rhash_head __rcu **pprev;
+ struct bucket_table *tbl;
struct rhash_head *head;
spinlock_t *lock;
- unsigned int elasticity;
unsigned int hash;
- int err;
+ int elasticity;
+ void *data;
-restart:
rcu_read_lock();
tbl = rht_dereference_rcu(ht->tbl, ht);
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+ spin_lock_bh(lock);
- /* All insertions must grab the oldest table containing
- * the hashed bucket that is yet to be rehashed.
- */
- for (;;) {
- hash = rht_head_hashfn(ht, tbl, obj, params);
- lock = rht_bucket_lock(tbl, hash);
- spin_lock_bh(lock);
-
- if (tbl->rehash <= hash)
- break;
-
- spin_unlock_bh(lock);
- tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- }
-
- new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (unlikely(new_tbl)) {
- tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
- if (!IS_ERR_OR_NULL(tbl))
- goto slow_path;
-
- err = PTR_ERR(tbl);
- goto out;
- }
-
- err = -E2BIG;
- if (unlikely(rht_grow_above_max(ht, tbl)))
- goto out;
-
- if (unlikely(rht_grow_above_100(ht, tbl))) {
+ if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
slow_path:
spin_unlock_bh(lock);
- err = rhashtable_insert_rehash(ht, tbl);
rcu_read_unlock();
- if (err)
- return err;
-
- goto restart;
+ return rhashtable_insert_slow(ht, key, obj);
}
- err = -EEXIST;
elasticity = ht->elasticity;
+ pprev = &tbl->buckets[hash];
rht_for_each(head, tbl, hash) {
- if (key &&
- unlikely(!(params.obj_cmpfn ?
- params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head)))))
+ struct rhlist_head *plist;
+ struct rhlist_head *list;
+
+ elasticity--;
+ if (!key ||
+ (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head))))
+ continue;
+
+ data = rht_obj(ht, head);
+
+ if (!rhlist)
goto out;
- if (!--elasticity)
- goto slow_path;
+
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ plist = container_of(head, struct rhlist_head, rhead);
+
+ RCU_INIT_POINTER(list->next, plist);
+ head = rht_dereference_bucket(head->next, tbl, hash);
+ RCU_INIT_POINTER(list->rhead.next, head);
+ rcu_assign_pointer(*pprev, obj);
+
+ goto good;
}
- err = 0;
+ if (elasticity <= 0)
+ goto slow_path;
+
+ data = ERR_PTR(-E2BIG);
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ goto out;
+
+ if (unlikely(rht_grow_above_100(ht, tbl)))
+ goto slow_path;
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
RCU_INIT_POINTER(obj->next, head);
+ if (rhlist) {
+ struct rhlist_head *list;
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ RCU_INIT_POINTER(list->next, NULL);
+ }
rcu_assign_pointer(tbl->buckets[hash], obj);
@@ -648,11 +752,14 @@ slow_path:
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
+good:
+ data = NULL;
+
out:
spin_unlock_bh(lock);
rcu_read_unlock();
- return err;
+ return data;
}
/**
@@ -675,7 +782,65 @@ static inline int rhashtable_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
- return __rhashtable_insert_fast(ht, NULL, obj, params);
+ void *ret;
+
+ ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
+}
+
+/**
+ * rhltable_insert_key - insert object into hash list table
+ * @hlt: hash list table
+ * @key: the pointer to the key
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert_key(
+ struct rhltable *hlt, const void *key, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
+ params, true));
+}
+
+/**
+ * rhltable_insert - insert object into hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert(
+ struct rhltable *hlt, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(&hlt->ht, &list->rhead);
+
+ key += params.key_offset;
+
+ return rhltable_insert_key(hlt, key, list, params);
}
/**
@@ -704,11 +869,16 @@ static inline int rhashtable_lookup_insert_fast(
const struct rhashtable_params params)
{
const char *key = rht_obj(ht, obj);
+ void *ret;
BUG_ON(ht->p.obj_hashfn);
- return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
- params);
+ ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
+ false);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
}
/**
@@ -737,15 +907,42 @@ static inline int rhashtable_lookup_insert_key(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
+ void *ret;
+
+ BUG_ON(!ht->p.obj_hashfn || !key);
+
+ ret = __rhashtable_insert_fast(ht, key, obj, params, false);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
+}
+
+/**
+ * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
+ * @ht: hash table
+ * @key: pointer to the key
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Just like rhashtable_lookup_insert_key(), but this function returns the
+ * object if it exists, NULL if it does not and the insertion was successful,
+ * and an ERR_PTR otherwise.
+ */
+static inline void *rhashtable_lookup_get_insert_key(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
BUG_ON(!ht->p.obj_hashfn || !key);
- return __rhashtable_insert_fast(ht, key, obj, params);
+ return __rhashtable_insert_fast(ht, key, obj, params, false);
}
/* Internal function, please use rhashtable_remove_fast() instead */
-static inline int __rhashtable_remove_fast(
+static inline int __rhashtable_remove_fast_one(
struct rhashtable *ht, struct bucket_table *tbl,
- struct rhash_head *obj, const struct rhashtable_params params)
+ struct rhash_head *obj, const struct rhashtable_params params,
+ bool rhlist)
{
struct rhash_head __rcu **pprev;
struct rhash_head *he;
@@ -760,39 +957,66 @@ static inline int __rhashtable_remove_fast(
pprev = &tbl->buckets[hash];
rht_for_each(he, tbl, hash) {
+ struct rhlist_head *list;
+
+ list = container_of(he, struct rhlist_head, rhead);
+
if (he != obj) {
+ struct rhlist_head __rcu **lpprev;
+
pprev = &he->next;
- continue;
+
+ if (!rhlist)
+ continue;
+
+ do {
+ lpprev = &list->next;
+ list = rht_dereference_bucket(list->next,
+ tbl, hash);
+ } while (list && obj != &list->rhead);
+
+ if (!list)
+ continue;
+
+ list = rht_dereference_bucket(list->next, tbl, hash);
+ RCU_INIT_POINTER(*lpprev, list);
+ err = 0;
+ break;
}
- rcu_assign_pointer(*pprev, obj->next);
- err = 0;
+ obj = rht_dereference_bucket(obj->next, tbl, hash);
+ err = 1;
+
+ if (rhlist) {
+ list = rht_dereference_bucket(list->next, tbl, hash);
+ if (list) {
+ RCU_INIT_POINTER(list->rhead.next, obj);
+ obj = &list->rhead;
+ err = 0;
+ }
+ }
+
+ rcu_assign_pointer(*pprev, obj);
break;
}
spin_unlock_bh(lock);
+ if (err > 0) {
+ atomic_dec(&ht->nelems);
+ if (unlikely(ht->p.automatic_shrinking &&
+ rht_shrink_below_30(ht, tbl)))
+ schedule_work(&ht->run_work);
+ err = 0;
+ }
+
return err;
}
-/**
- * rhashtable_remove_fast - remove object from hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * Returns zero on success, -ENOENT if the entry could not be found.
- */
-static inline int rhashtable_remove_fast(
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
- const struct rhashtable_params params)
+ const struct rhashtable_params params, bool rhlist)
{
struct bucket_table *tbl;
int err;
@@ -806,24 +1030,60 @@ static inline int rhashtable_remove_fast(
* visible then that guarantees the entry to still be in
* the old tbl if it exists.
*/
- while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+ while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
+ rhlist)) &&
(tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
;
- if (err)
- goto out;
-
- atomic_dec(&ht->nelems);
- if (unlikely(ht->p.automatic_shrinking &&
- rht_shrink_below_30(ht, tbl)))
- schedule_work(&ht->run_work);
-
-out:
rcu_read_unlock();
return err;
}
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is single linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_remove_fast(ht, obj, params, false);
+}
+
+/**
+ * rhltable_remove - remove object from hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is single linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhltable_remove(
+ struct rhltable *hlt, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
+}
+
/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
struct rhashtable *ht, struct bucket_table *tbl,
@@ -906,4 +1166,59 @@ static inline int rhashtable_replace_fast(
return err;
}
+/* Obsolete function, do not use in new code. */
+static inline int rhashtable_walk_init(struct rhashtable *ht,
+ struct rhashtable_iter *iter, gfp_t gfp)
+{
+ rhashtable_walk_enter(ht, iter);
+ return 0;
+}
+
+/**
+ * rhltable_walk_enter - Initialise an iterator
+ * @hlt: Table to walk over
+ * @iter: Hash table Iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice. Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit after this function returns.
+ */
+static inline void rhltable_walk_enter(struct rhltable *hlt,
+ struct rhashtable_iter *iter)
+{
+ return rhashtable_walk_enter(&hlt->ht, iter);
+}
+
+/**
+ * rhltable_free_and_destroy - free elements and destroy hash list table
+ * @hlt: the hash list table to destroy
+ * @free_fn: callback to release resources of element
+ * @arg: pointer passed to free_fn
+ *
+ * See documentation for rhashtable_free_and_destroy.
+ */
+static inline void rhltable_free_and_destroy(struct rhltable *hlt,
+ void (*free_fn)(void *ptr,
+ void *arg),
+ void *arg)
+{
+ return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+}
+
+static inline void rhltable_destroy(struct rhltable *hlt)
+{
+ return rhltable_free_and_destroy(hlt, NULL, NULL);
+}
+
#endif /* _LINUX_RHASHTABLE_H */
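
For readers new to the rhltable ("hash list table") variant added above: it stores objects with duplicate keys by chaining them on a struct rhlist_head and returning the whole chain from rhltable_lookup(). A minimal usage sketch; the my_obj type, the u32 id key and the helper names are illustrative, not taken from an in-tree user:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

struct my_obj {
	u32 id;				/* hash key */
	struct rhlist_head node;	/* chains duplicates of the same id */
};

static const struct rhashtable_params my_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct my_obj, id),
	.head_offset	= offsetof(struct my_obj, node),
	.automatic_shrinking = true,
};

static struct rhltable my_table;

static int my_table_init(void)
{
	return rhltable_init(&my_table, &my_params);
}

static int my_add(u32 id)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;
	obj->id = id;
	return rhltable_insert(&my_table, &obj->node, my_params);
}

static void my_dump(u32 id)
{
	struct rhlist_head *list, *pos;
	struct my_obj *obj;

	rcu_read_lock();
	list = rhltable_lookup(&my_table, &id, my_params);
	rhl_for_each_entry_rcu(obj, pos, list, node)
		pr_info("id %u at %p\n", obj->id, obj);
	rcu_read_unlock();
}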
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2daece8979f7..57e54847b0b9 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -105,7 +105,7 @@ extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
- int idx);
+ int *idx);
extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0f665cb26b50..9bf60b556bd2 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -612,7 +612,6 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
- * @offload_fwd_mark: fwding offload mark
* @mark: Generic packet mark
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
@@ -677,13 +676,23 @@ struct sk_buff {
*/
kmemcheck_bitfield_begin(flags1);
__u16 queue_mapping;
+
+/* if you move the cloned bit around you also must adapt these constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define CLONED_MASK (1 << 7)
+#else
+#define CLONED_MASK 1
+#endif
+#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
+
+ __u8 __cloned_offset[0];
__u8 cloned:1,
nohdr:1,
fclone:2,
peeked:1,
head_frag:1,
- xmit_more:1;
- /* one bit hole */
+ xmit_more:1,
+ __unused:1; /* one bit hole */
kmemcheck_bitfield_end(flags1);
/* fields enclosed in headers_start/headers_end are copied
@@ -730,7 +739,10 @@ struct sk_buff {
__u8 ipvs_property:1;
__u8 inner_protocol_type:1;
__u8 remcsum_offload:1;
- /* 3 or 5 bit hole */
+#ifdef CONFIG_NET_SWITCHDEV
+ __u8 offload_fwd_mark:1;
+#endif
+ /* 2, 4 or 5 bit hole */
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@@ -757,14 +769,9 @@ struct sk_buff {
unsigned int sender_cpu;
};
#endif
- union {
#ifdef CONFIG_NETWORK_SECMARK
- __u32 secmark;
-#endif
-#ifdef CONFIG_NET_SWITCHDEV
- __u32 offload_fwd_mark;
+ __u32 secmark;
#endif
- };
union {
__u32 mark;
@@ -2295,7 +2302,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
int ___pskb_trim(struct sk_buff *skb, unsigned int len);
-static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
if (unlikely(skb_is_nonlinear(skb))) {
WARN_ON(1);
@@ -2305,6 +2312,11 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
skb_set_tail_pointer(skb, len);
}
+static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+{
+ __skb_set_length(skb, len);
+}
+
void skb_trim(struct sk_buff *skb, unsigned int len);
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
@@ -2335,6 +2347,20 @@ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
BUG_ON(err);
}
+static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int diff = len - skb->len;
+
+ if (skb_tailroom(skb) < diff) {
+ int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
+ GFP_ATOMIC);
+ if (ret)
+ return ret;
+ }
+ __skb_set_length(skb, len);
+ return 0;
+}
+
/**
* skb_orphan - orphan a buffer
* @skb: buffer to orphan
@@ -2386,6 +2412,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
+void skb_rbtree_purge(struct rb_root *root);
+
void *netdev_alloc_frag(unsigned int fragsz);
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2938,6 +2966,21 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
return __pskb_trim(skb, len);
}
+static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+ __skb_trim(skb, len);
+ return 0;
+}
+
+static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+ return __skb_grow(skb, len);
+}
+
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
@@ -3042,6 +3085,7 @@ bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
@@ -3726,6 +3770,13 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
+static inline void skb_gso_reset(struct sk_buff *skb)
+{
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_segs = 0;
+ skb_shinfo(skb)->gso_type = 0;
+}
+
void __skb_warn_lro_forwarding(const struct sk_buff *skb);
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
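
The __skb_trim_rcsum() and __skb_grow_rcsum() helpers added above pair a length change with invalidating a CHECKSUM_COMPLETE value that the change would make stale. A minimal sketch of a caller that resizes a linear skb to an exact length; the helper name is illustrative:

#include <linux/skbuff.h>

/* Illustrative: make a linear skb exactly new_len bytes long */
static int my_skb_set_exact_len(struct sk_buff *skb, unsigned int new_len)
{
	if (new_len > skb->len)
		return __skb_grow_rcsum(skb, new_len);

	return __skb_trim_rcsum(skb, new_len);
}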
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index a4f7203a9017..ecc3e07c6e63 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -25,6 +25,7 @@
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
+#include <linux/uidgid.h>
#include <uapi/linux/sysctl.h>
/* For the /proc/sys support */
@@ -159,6 +160,9 @@ struct ctl_table_root {
struct ctl_table_set default_set;
struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
struct nsproxy *namespaces);
+ void (*set_ownership)(struct ctl_table_header *head,
+ struct ctl_table *table,
+ kuid_t *uid, kgid_t *gid);
int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
};
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7be9b1242354..a17ae7b85218 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,6 +19,7 @@
#include <linux/skbuff.h>
+#include <linux/win_minmax.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -212,7 +213,8 @@ struct tcp_sock {
u8 reord; /* reordering detected */
} rack;
u16 advmss; /* Advertised MSS */
- u8 unused;
+ u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
+ unused:7;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
thin_dupack : 1,/* Fast retransmit on first dupack */
@@ -234,9 +236,7 @@ struct tcp_sock {
u32 mdev_max_us; /* maximal mdev for the last rtt period */
u32 rttvar_us; /* smoothed mdev_max */
u32 rtt_seq; /* sequence number to update rttvar */
- struct rtt_meas {
- u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */
- } rtt_min[3];
+ struct minmax rtt_min;
u32 packets_out; /* Packets which are "in flight" */
u32 retrans_out; /* Retransmitted packets out */
@@ -268,6 +268,12 @@ struct tcp_sock {
* receiver in Recovery. */
u32 prr_out; /* Total number of pkts sent during Recovery. */
u32 delivered; /* Total data packets delivered incl. rexmits */
+ u32 lost; /* Total data packets lost incl. rexmits */
+ u32 app_limited; /* limited until "delivered" reaches this val */
+ struct skb_mstamp first_tx_mstamp; /* start of window send phase */
+ struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
+ u32 rate_delivered; /* saved rate sample: packets delivered */
+ u32 rate_interval_us; /* saved rate sample: time elapsed */
u32 rcv_wnd; /* Current receiver window */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
@@ -281,10 +287,9 @@ struct tcp_sock {
struct sk_buff* lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
- /* OOO segments go in this list. Note that socket lock must be held,
- * as we do not use sk_buff_head lock.
- */
- struct sk_buff_head out_of_order_queue;
+ /* OOO segments go in this rbtree. Socket lock must be held. */
+ struct rb_root out_of_order_queue;
+ struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
/* SACKs data, these 2 need to be together (see tcp_options_write) */
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
diff --git a/include/linux/win_minmax.h b/include/linux/win_minmax.h
new file mode 100644
index 000000000000..56569604278f
--- /dev/null
+++ b/include/linux/win_minmax.h
@@ -0,0 +1,37 @@
+/**
+ * lib/minmax.c: windowed min/max tracker by Kathleen Nichols.
+ *
+ */
+#ifndef MINMAX_H
+#define MINMAX_H
+
+#include <linux/types.h>
+
+/* A single data point for our parameterized min-max tracker */
+struct minmax_sample {
+ u32 t; /* time measurement was taken */
+ u32 v; /* value measured */
+};
+
+/* State for the parameterized min-max tracker */
+struct minmax {
+ struct minmax_sample s[3];
+};
+
+static inline u32 minmax_get(const struct minmax *m)
+{
+ return m->s[0].v;
+}
+
+static inline u32 minmax_reset(struct minmax *m, u32 t, u32 meas)
+{
+ struct minmax_sample val = { .t = t, .v = meas };
+
+ m->s[2] = m->s[1] = m->s[0] = val;
+ return m->s[0].v;
+}
+
+u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas);
+u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas);
+
+#endif
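
The windowed tracker keeps the best sample plus the second- and third-best seen within a moving time window, which is what lets tcp_sock replace its open-coded rtt_min[3] array with a struct minmax (see the tcp.h hunk below). A minimal sketch of tracking a windowed minimum; the my_* names and the microsecond time base are illustrative:

#include <linux/jiffies.h>
#include <linux/win_minmax.h>

static struct minmax my_min;

static void my_sample(u32 value)
{
	u32 now = jiffies_to_usecs(jiffies);	/* any monotonic u32 clock works */

	/* keep the minimum seen over roughly the last 10 seconds */
	minmax_running_min(&my_min, 10 * 1000 * 1000, now, value);
}

static u32 my_current_min(void)
{
	return minmax_get(&my_min);
}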
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 9826d3a9464c..f2d072787947 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -1,8 +1,9 @@
#ifndef _ADDRCONF_H
#define _ADDRCONF_H
-#define MAX_RTR_SOLICITATIONS 3
+#define MAX_RTR_SOLICITATIONS -1 /* unlimited */
#define RTR_SOLICITATION_INTERVAL (4*HZ)
+#define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 7b0f88699b25..1061a472a3e3 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -12,42 +12,39 @@
#ifndef _NET_RXRPC_H
#define _NET_RXRPC_H
-#include <linux/skbuff.h>
#include <linux/rxrpc.h>
+struct key;
+struct sock;
+struct socket;
struct rxrpc_call;
-/*
- * the mark applied to socket buffers that may be intercepted
- */
-enum rxrpc_skb_mark {
- RXRPC_SKB_MARK_DATA, /* data message */
- RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
- RXRPC_SKB_MARK_BUSY, /* server busy message */
- RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
- RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
- RXRPC_SKB_MARK_NET_ERROR, /* network error message */
- RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
- RXRPC_SKB_MARK_NEW_CALL, /* local error message */
-};
+typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
+ unsigned long);
+typedef void (*rxrpc_notify_new_call_t)(struct sock *, struct rxrpc_call *,
+ unsigned long);
+typedef void (*rxrpc_discard_new_call_t)(struct rxrpc_call *, unsigned long);
+typedef void (*rxrpc_user_attach_call_t)(struct rxrpc_call *, unsigned long);
-typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long,
- struct sk_buff *);
-void rxrpc_kernel_intercept_rx_messages(struct socket *, rxrpc_interceptor_t);
+void rxrpc_kernel_new_call_notification(struct socket *,
+ rxrpc_notify_new_call_t,
+ rxrpc_discard_new_call_t);
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
struct sockaddr_rxrpc *,
struct key *,
unsigned long,
- gfp_t);
-int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
-void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
-void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
-void rxrpc_kernel_end_call(struct rxrpc_call *);
-bool rxrpc_kernel_is_data_last(struct sk_buff *);
-u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
-int rxrpc_kernel_get_error_number(struct sk_buff *);
-void rxrpc_kernel_free_skb(struct sk_buff *);
-struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
-int rxrpc_kernel_reject_call(struct socket *);
+ gfp_t,
+ rxrpc_notify_rx_t);
+int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
+ struct msghdr *, size_t);
+int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
+ void *, size_t, size_t *, bool, u32 *);
+void rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
+ u32, int, const char *);
+void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
+void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
+ struct sockaddr_rxrpc *);
+int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
+ rxrpc_user_attach_call_t, unsigned long, gfp_t);
#endif /* _NET_RXRPC_H */
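
With the interceptor hook gone, kernel users of rxrpc now supply notification callbacks up front and pull data from process context with rxrpc_kernel_recv_data(). A minimal sketch of beginning a client call with the new six-argument rxrpc_kernel_begin_call(); the my_* names are illustrative and error handling is elided:

#include <linux/gfp.h>
#include <net/af_rxrpc.h>

/* Illustrative notify handler: a real user would queue work and later call
 * rxrpc_kernel_recv_data() from process context.
 */
static void my_notify_rx(struct sock *sk, struct rxrpc_call *call,
			 unsigned long call_user_ID)
{
}

static struct rxrpc_call *my_begin_call(struct socket *rxrpc_sock,
					struct sockaddr_rxrpc *srx,
					struct key *key,
					unsigned long user_call_ID)
{
	return rxrpc_kernel_begin_call(rxrpc_sock, srx, key, user_call_ID,
				       GFP_KERNEL, my_notify_rx);
}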
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index bfd1590821d6..0a1e21d7bce1 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -29,7 +29,8 @@
#include <net/sock.h>
#include <linux/seq_file.h>
-#define BT_SUBSYS_VERSION "2.21"
+#define BT_SUBSYS_VERSION 2
+#define BT_SUBSYS_REVISION 22
#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH 31
@@ -371,6 +372,7 @@ void hci_sock_set_flag(struct sock *sk, int nr);
void hci_sock_clear_flag(struct sock *sk, int nr);
int hci_sock_test_flag(struct sock *sk, int nr);
unsigned short hci_sock_get_channel(struct sock *sk);
+u32 hci_sock_get_cookie(struct sock *sk);
int hci_sock_init(void);
void hci_sock_cleanup(void);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 003b25283407..99aa5e5e3100 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -63,6 +63,7 @@
#define HCI_SDIO 6
#define HCI_SPI 7
#define HCI_I2C 8
+#define HCI_SMD 9
/* HCI controller types */
#define HCI_PRIMARY 0x00
@@ -207,7 +208,11 @@ enum {
HCI_MGMT_INDEX_EVENTS,
HCI_MGMT_UNCONF_INDEX_EVENTS,
HCI_MGMT_EXT_INDEX_EVENTS,
- HCI_MGMT_GENERIC_EVENTS,
+ HCI_MGMT_EXT_INFO_EVENTS,
+ HCI_MGMT_OPTION_EVENTS,
+ HCI_MGMT_SETTING_EVENTS,
+ HCI_MGMT_DEV_CLASS_EVENTS,
+ HCI_MGMT_LOCAL_NAME_EVENTS,
HCI_MGMT_OOB_DATA_EVENTS,
};
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ee7fc47680a1..f00bf667ec33 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -211,6 +211,7 @@ struct hci_dev {
__u8 dev_name[HCI_MAX_NAME_LENGTH];
__u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
__u8 eir[HCI_MAX_EIR_LENGTH];
+ __u16 appearance;
__u8 dev_class[3];
__u8 major_class;
__u8 minor_class;
@@ -399,7 +400,9 @@ struct hci_dev {
struct delayed_work rpa_expired;
bdaddr_t rpa;
+#if IS_ENABLED(CONFIG_BT_LEDS)
struct led_trigger *power_led;
+#endif
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
@@ -1026,8 +1029,8 @@ int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
-void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
-void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_do_close(struct hci_dev *hdev);
@@ -1404,6 +1407,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
+ void *data, u16 data_len, ktime_t tstamp,
+ int flag, struct sock *skip_sk);
void hci_sock_dev_event(struct hci_dev *hdev, int event);
@@ -1449,6 +1455,7 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
#define DISCOV_BREDR_INQUIRY_LEN 0x08
#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
+void mgmt_fill_version_info(void *ver);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 587d0131b349..240786b04a46 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -45,6 +45,10 @@ struct hci_mon_hdr {
#define HCI_MON_VENDOR_DIAG 11
#define HCI_MON_SYSTEM_NOTE 12
#define HCI_MON_USER_LOGGING 13
+#define HCI_MON_CTRL_OPEN 14
+#define HCI_MON_CTRL_CLOSE 15
+#define HCI_MON_CTRL_COMMAND 16
+#define HCI_MON_CTRL_EVENT 17
struct hci_mon_new_index {
__u8 type;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 7647964b1efa..72a456bbbcd5 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -586,6 +586,24 @@ struct mgmt_rp_get_adv_size_info {
#define MGMT_OP_START_LIMITED_DISCOVERY 0x0041
+#define MGMT_OP_READ_EXT_INFO 0x0042
+#define MGMT_READ_EXT_INFO_SIZE 0
+struct mgmt_rp_read_ext_info {
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_OP_SET_APPEARANCE 0x0043
+struct mgmt_cp_set_appearance {
+ __u16 appearance;
+} __packed;
+#define MGMT_SET_APPEARANCE_SIZE 2
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -800,3 +818,9 @@ struct mgmt_ev_advertising_added {
struct mgmt_ev_advertising_removed {
__u8 instance;
} __packed;
+
+#define MGMT_EV_EXT_INFO_CHANGED 0x0025
+struct mgmt_ev_ext_info_changed {
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index beb7610d64e9..fe78f02a242e 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5,7 +5,7 @@
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2015 Intel Deutschland GmbH
+ * Copyright 2015-2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -593,6 +593,8 @@ struct survey_info {
s8 noise;
};
+#define CFG80211_MAX_WEP_KEYS 4
+
/**
* struct cfg80211_crypto_settings - Crypto settings
* @wpa_versions: indicates which, if any, WPA versions are enabled
@@ -610,6 +612,9 @@ struct survey_info {
* allowed through even on unauthorized ports
* @control_port_no_encrypt: TRUE to prevent encryption of control port
* protocol frames.
+ * @wep_keys: static WEP keys, if not NULL points to an array of
+ * CFG80211_MAX_WEP_KEYS WEP keys
+ * @wep_tx_key: key index (0..3) of the default TX static WEP key
*/
struct cfg80211_crypto_settings {
u32 wpa_versions;
@@ -621,6 +626,8 @@ struct cfg80211_crypto_settings {
bool control_port;
__be16 control_port_ethertype;
bool control_port_no_encrypt;
+ struct key_params *wep_keys;
+ int wep_tx_key;
};
/**
@@ -676,6 +683,18 @@ struct cfg80211_acl_data {
struct mac_address mac_addrs[];
};
+/*
+ * cfg80211_bitrate_mask - masks for bitrate control
+ */
+struct cfg80211_bitrate_mask {
+ struct {
+ u32 legacy;
+ u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
+ u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ enum nl80211_txrate_gi gi;
+ } control[NUM_NL80211_BANDS];
+};
+
/**
* struct cfg80211_ap_settings - AP configuration
*
@@ -700,6 +719,7 @@ struct cfg80211_acl_data {
* MAC address based access control
* @pbss: If set, start as a PCP instead of AP. Relevant for DMG
* networks.
+ * @beacon_rate: bitrate to be used for beacons
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -719,6 +739,7 @@ struct cfg80211_ap_settings {
bool p2p_opp_ps;
const struct cfg80211_acl_data *acl;
bool pbss;
+ struct cfg80211_bitrate_mask beacon_rate;
};
/**
@@ -1351,6 +1372,7 @@ struct mesh_config {
* @beacon_interval: beacon interval to use
* @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
* @basic_rates: basic rates to use when creating the mesh
+ * @beacon_rate: bitrate to be used for beacons
*
* These parameters are fixed when the mesh is created.
*/
@@ -1371,6 +1393,7 @@ struct mesh_setup {
u16 beacon_interval;
int mcast_rate[NUM_NL80211_BANDS];
u32 basic_rates;
+ struct cfg80211_bitrate_mask beacon_rate;
};
/**
@@ -2010,17 +2033,6 @@ enum wiphy_params_flags {
WIPHY_PARAM_DYN_ACK = 1 << 5,
};
-/*
- * cfg80211_bitrate_mask - masks for bitrate control
- */
-struct cfg80211_bitrate_mask {
- struct {
- u32 legacy;
- u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
- u16 vht_mcs[NL80211_VHT_NSS_MAX];
- enum nl80211_txrate_gi gi;
- } control[NUM_NL80211_BANDS];
-};
/**
* struct cfg80211_pmksa - PMK Security Association
*
@@ -2302,6 +2314,98 @@ struct cfg80211_qos_map {
};
/**
+ * struct cfg80211_nan_conf - NAN configuration
+ *
+ * This struct defines NAN configuration parameters
+ *
+ * @master_pref: master preference (1 - 255)
+ * @dual: dual band operation mode, see &enum nl80211_nan_dual_band_conf
+ */
+struct cfg80211_nan_conf {
+ u8 master_pref;
+ u8 dual;
+};
+
+/**
+ * enum cfg80211_nan_conf_changes - indicates changed fields in NAN
+ * configuration
+ *
+ * @CFG80211_NAN_CONF_CHANGED_PREF: master preference
+ * @CFG80211_NAN_CONF_CHANGED_DUAL: dual band operation
+ */
+enum cfg80211_nan_conf_changes {
+ CFG80211_NAN_CONF_CHANGED_PREF = BIT(0),
+ CFG80211_NAN_CONF_CHANGED_DUAL = BIT(1),
+};
+
+/**
+ * struct cfg80211_nan_func_filter - a NAN function Rx / Tx filter
+ *
+ * @filter: the content of the filter
+ * @len: the length of the filter
+ */
+struct cfg80211_nan_func_filter {
+ const u8 *filter;
+ u8 len;
+};
+
+/**
+ * struct cfg80211_nan_func - a NAN function
+ *
+ * @type: &enum nl80211_nan_function_type
+ * @service_id: the service ID of the function
+ * @publish_type: &nl80211_nan_publish_type
+ * @close_range: if true, the range should be limited. Threshold is
+ * implementation specific.
+ * @publish_bcast: if true, the solicited publish should be broadcasted
+ * @subscribe_active: if true, the subscribe is active
+ * @followup_id: the instance ID for follow up
+ * @followup_reqid: the requestor instance ID for follow up
+ * @followup_dest: MAC address of the recipient of the follow up
+ * @ttl: time to live counter in DW.
+ * @serv_spec_info: Service Specific Info
+ * @serv_spec_info_len: Service Specific Info length
+ * @srf_include: if true, SRF is inclusive
+ * @srf_bf: Bloom Filter
+ * @srf_bf_len: Bloom Filter length
+ * @srf_bf_idx: Bloom Filter index
+ * @srf_macs: SRF MAC addresses
+ * @srf_num_macs: number of MAC addresses in SRF
+ * @rx_filters: rx filters that are matched with corresponding peer's tx_filter
+ * @tx_filters: filters that should be transmitted in the SDF.
+ * @num_rx_filters: length of &rx_filters.
+ * @num_tx_filters: length of &tx_filters.
+ * @instance_id: driver allocated id of the function.
+ * @cookie: unique NAN function identifier.
+ */
+struct cfg80211_nan_func {
+ enum nl80211_nan_function_type type;
+ u8 service_id[NL80211_NAN_FUNC_SERVICE_ID_LEN];
+ u8 publish_type;
+ bool close_range;
+ bool publish_bcast;
+ bool subscribe_active;
+ u8 followup_id;
+ u8 followup_reqid;
+ struct mac_address followup_dest;
+ u32 ttl;
+ const u8 *serv_spec_info;
+ u8 serv_spec_info_len;
+ bool srf_include;
+ const u8 *srf_bf;
+ u8 srf_bf_len;
+ u8 srf_bf_idx;
+ struct mac_address *srf_macs;
+ int srf_num_macs;
+ struct cfg80211_nan_func_filter *rx_filters;
+ struct cfg80211_nan_func_filter *tx_filters;
+ u8 num_tx_filters;
+ u8 num_rx_filters;
+ u8 instance_id;
+ u64 cookie;
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -2432,7 +2536,8 @@ struct cfg80211_qos_map {
* cases, the result of roaming is indicated with a call to
* cfg80211_roamed() or cfg80211_roamed_bss().
* (invoked with the wireless_dev mutex held)
- * @disconnect: Disconnect from the BSS/ESS.
+ * @disconnect: Disconnect from the BSS/ESS. Once done, call
+ * cfg80211_disconnected().
* (invoked with the wireless_dev mutex held)
*
* @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
@@ -2588,6 +2693,19 @@ struct cfg80211_qos_map {
* and returning to the base channel for communication with the AP.
* @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both
* peers must be on the base channel when the call completes.
+ * @start_nan: Start the NAN interface.
+ * @stop_nan: Stop the NAN interface.
+ * @add_nan_func: Add a NAN function. Returns negative value on failure.
+ * On success @nan_func ownership is transferred to the driver and
+ * it may access it outside of the scope of this function. The driver
+ * should free the @nan_func when no longer needed by calling
+ * cfg80211_free_nan_func().
+ * On success the driver should assign an instance_id in the
+ * provided @nan_func.
+ * @del_nan_func: Delete a NAN function.
+ * @nan_change_conf: changes NAN configuration. The changed parameters must
+ * be specified in @changes (using &enum cfg80211_nan_conf_changes);
+ * All other parameters must be ignored.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2853,6 +2971,17 @@ struct cfg80211_ops {
void (*tdls_cancel_channel_switch)(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr);
+ int (*start_nan)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf);
+ void (*stop_nan)(struct wiphy *wiphy, struct wireless_dev *wdev);
+ int (*add_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_nan_func *nan_func);
+ void (*del_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ u64 cookie);
+ int (*nan_change_conf)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf,
+ u32 changes);
};
/*
@@ -2899,6 +3028,8 @@ struct cfg80211_ops {
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
* @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
* beaconing mode (AP, IBSS, Mesh, ...).
+ * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation
+ * before connection.
*/
enum wiphy_flags {
/* use hole at 0 */
@@ -2924,6 +3055,7 @@ enum wiphy_flags {
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
+ WIPHY_FLAG_HAS_STATIC_WEP = BIT(24),
};
/**
@@ -3301,6 +3433,8 @@ struct wiphy_iftype_ext_capab {
* @bss_select_support: bitmask indicating the BSS selection criteria supported
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
+ *
+ * @cookie_counter: unique generic cookie counter, used to identify objects.
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -3430,6 +3564,8 @@ struct wiphy {
u32 bss_select_support;
+ u64 cookie_counter;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -3610,6 +3746,7 @@ struct cfg80211_cached_keys;
* beacons, 0 when not valid
* @address: The address for this device, valid only if @netdev is %NULL
* @p2p_started: true if this is a P2P Device that has been started
+ * @nan_started: true if this is a NAN interface that has been started
* @cac_started: true if DFS channel availability check has been started
* @cac_start_time: timestamp (jiffies) when the dfs state was entered.
* @cac_time_ms: CAC time in ms
@@ -3641,7 +3778,7 @@ struct wireless_dev {
struct mutex mtx;
- bool use_4addr, p2p_started;
+ bool use_4addr, p2p_started, nan_started;
u8 address[ETH_ALEN] __aligned(sizeof(u16));
@@ -3955,6 +4092,34 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
struct cfg80211_qos_map *qos_map);
/**
+ * cfg80211_find_ie_match - match information element and byte array in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ * @match: byte array to match
+ * @match_len: number of bytes in the match array
+ * @match_offset: offset in the IE where the byte array should match.
+ * If match_len is zero, this must also be set to zero.
+ * Otherwise this must be set to 2 or more, because the first
+ * byte is the element id, which is already compared to eid, and
+ * the second byte is the IE length.
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match, or a pointer to the first
+ * byte of the requested element, that is the byte containing the
+ * element ID.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data and being large enough for the
+ * byte array to match.
+ */
+const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
+ const u8 *match, int match_len,
+ int match_offset);
+
+/**
* cfg80211_find_ie - find information element in data
*
* @eid: element ID
@@ -3969,7 +4134,10 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
* Note: There are no checks on the element length other than
* having to fit into the given data.
*/
-const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
+static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
+{
+ return cfg80211_find_ie_match(eid, ies, len, NULL, 0, 0);
+}
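A short usage sketch: since match_offset is counted from the element ID byte, matching on the first payload byte of an element means passing an offset of 2. The wrapper and its element/sub-ID values are arbitrary placeholders; only cfg80211_find_ie_match() is from this header.

static const u8 *my_find_sub_element(const u8 *ies, int ies_len,
				     u8 eid, u8 sub_id)
{
	/* match one byte at offset 2, i.e. right after the ID and length bytes */
	return cfg80211_find_ie_match(eid, ies, ies_len, &sub_id, 1, 2);
}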
/**
* cfg80211_find_vendor_ie - find vendor specific information element in data
@@ -5518,6 +5686,67 @@ wiphy_ext_feature_isset(struct wiphy *wiphy,
return (ft_byte & BIT(ftidx % 8)) != 0;
}
+/**
+ * cfg80211_free_nan_func - free NAN function
+ * @f: NAN function that should be freed
+ *
+ * Frees the NAN function and all of its allocated members.

+ */
+void cfg80211_free_nan_func(struct cfg80211_nan_func *f);
+
+/**
+ * struct cfg80211_nan_match_params - NAN match parameters
+ * @type: the type of the function that triggered a match. If it is
+ * %NL80211_NAN_FUNC_SUBSCRIBE it means that we replied to a subscriber.
+ * If it is %NL80211_NAN_FUNC_PUBLISH, it means that we got a discovery
+ * result.
+ * If it is %NL80211_NAN_FUNC_FOLLOW_UP, we received a follow up.
+ * @inst_id: the local instance id
+ * @peer_inst_id: the instance id of the peer's function
+ * @addr: the MAC address of the peer
+ * @info_len: the length of the &info
+ * @info: the Service Specific Info from the peer (if any)
+ * @cookie: unique identifier of the corresponding function
+ */
+struct cfg80211_nan_match_params {
+ enum nl80211_nan_function_type type;
+ u8 inst_id;
+ u8 peer_inst_id;
+ const u8 *addr;
+ u8 info_len;
+ const u8 *info;
+ u64 cookie;
+};
+
+/**
+ * cfg80211_nan_match - report a match for a NAN function.
+ * @wdev: the wireless device reporting the match
+ * @match: match notification parameters
+ * @gfp: allocation flags
+ *
+ * This function reports that a NAN function had a match. This
+ * can be a subscribe that had a match or a solicited publish that
+ * was sent. It can also be a follow up that was received.
+ */
+void cfg80211_nan_match(struct wireless_dev *wdev,
+ struct cfg80211_nan_match_params *match, gfp_t gfp);
+
+/**
+ * cfg80211_nan_func_terminated - notify about NAN function termination.
+ *
+ * @wdev: the wireless device reporting the match
+ * @inst_id: the local instance id
+ * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*)
+ * @cookie: unique NAN function identifier
+ * @gfp: allocation flags
+ *
+ * This function reports that a NAN function was terminated.
+ */
+void cfg80211_nan_func_terminated(struct wireless_dev *wdev,
+ u8 inst_id,
+ enum nl80211_nan_func_term_reason reason,
+ u64 cookie, gfp_t gfp);
+
/* ethtool helper */
void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
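To illustrate the driver side of the new NAN notifications, a minimal sketch of reporting a discovery result; the wrapper and its arguments are hypothetical, while struct cfg80211_nan_match_params, cfg80211_nan_match() and the NL80211_NAN_FUNC_* types are the ones documented above.

static void my_report_nan_discovery(struct wireless_dev *wdev,
				    const u8 *peer_addr, u8 inst_id,
				    u8 peer_inst_id, const u8 *info,
				    u8 info_len, u64 cookie)
{
	struct cfg80211_nan_match_params match = {
		.type		= NL80211_NAN_FUNC_PUBLISH, /* discovery result */
		.inst_id	= inst_id,
		.peer_inst_id	= peer_inst_id,
		.addr		= peer_addr,
		.info		= info,
		.info_len	= info_len,
		.cookie		= cookie,
	};

	cfg80211_nan_match(wdev, &match, GFP_KERNEL);
}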
diff --git a/include/net/devlink.h b/include/net/devlink.h
index c99ffe8cef3c..211bd3c37028 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -50,7 +50,6 @@ struct devlink_sb_pool_info {
};
struct devlink_ops {
- size_t priv_size;
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
int (*port_split)(struct devlink *devlink, unsigned int port_index,
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 2217a3f817f8..b122196d5a1f 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -26,6 +26,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_TRAILER,
DSA_TAG_PROTO_EDSA,
DSA_TAG_PROTO_BRCM,
+ DSA_TAG_PROTO_QCA,
DSA_TAG_LAST, /* MUST BE LAST */
};
@@ -142,6 +143,7 @@ struct dsa_port {
struct net_device *netdev;
struct device_node *dn;
unsigned int ageing_time;
+ u8 stp_state;
};
struct dsa_switch {
@@ -165,9 +167,9 @@ struct dsa_switch {
struct dsa_chip_data *cd;
/*
- * The used switch driver.
+ * The switch operations.
*/
- struct dsa_switch_driver *drv;
+ struct dsa_switch_ops *ops;
/*
* An array of which element [a] indicates which port on this
@@ -234,19 +236,21 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
struct switchdev_trans;
struct switchdev_obj;
struct switchdev_obj_port_fdb;
+struct switchdev_obj_port_mdb;
struct switchdev_obj_port_vlan;
-struct dsa_switch_driver {
+struct dsa_switch_ops {
struct list_head list;
- enum dsa_tag_protocol tag_protocol;
-
/*
* Probing and setup.
*/
const char *(*probe)(struct device *dsa_dev,
struct device *host_dev, int sw_addr,
void **priv);
+
+ enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds);
+
int (*setup)(struct dsa_switch *ds);
int (*set_addr)(struct dsa_switch *ds, u8 *addr);
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
@@ -336,6 +340,7 @@ struct dsa_switch_driver {
void (*port_bridge_leave)(struct dsa_switch *ds, int port);
void (*port_stp_state_set)(struct dsa_switch *ds, int port,
u8 state);
+ void (*port_fast_age)(struct dsa_switch *ds, int port);
/*
* VLAN support
@@ -368,17 +373,27 @@ struct dsa_switch_driver {
int (*port_fdb_dump)(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
int (*cb)(struct switchdev_obj *obj));
+
+ /*
+ * Multicast database
+ */
+ int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans);
+ void (*port_mdb_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans);
+ int (*port_mdb_del)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+ int (*port_mdb_dump)(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_mdb *mdb,
+ int (*cb)(struct switchdev_obj *obj));
};
-void register_switch_driver(struct dsa_switch_driver *type);
-void unregister_switch_driver(struct dsa_switch_driver *type);
+void register_switch_driver(struct dsa_switch_ops *type);
+void unregister_switch_driver(struct dsa_switch_ops *type);
struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
-static inline void *ds_to_priv(struct dsa_switch *ds)
-{
- return ds->priv;
-}
-
static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
{
return dst->rcv != NULL;
@@ -386,4 +401,18 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds, struct device_node *np);
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds);
+int dsa_switch_resume(struct dsa_switch *ds);
+#else
+static inline int dsa_switch_suspend(struct dsa_switch *ds)
+{
+ return 0;
+}
+static inline int dsa_switch_resume(struct dsa_switch *ds)
+{
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
#endif
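With dsa_switch_driver renamed to dsa_switch_ops and the tag protocol now reported per switch via a callback, a driver skeleton might look like the sketch below; all my_* symbols are hypothetical, DSA_TAG_PROTO_QCA and the ops layout come from this header, and ds->priv is read directly now that ds_to_priv() is gone.

struct my_priv {
	int dummy;			/* hypothetical private state */
};

static enum dsa_tag_protocol my_get_tag_protocol(struct dsa_switch *ds)
{
	return DSA_TAG_PROTO_QCA;	/* whichever tagger the hardware needs */
}

static int my_setup(struct dsa_switch *ds)
{
	struct my_priv *priv = ds->priv;	/* ds_to_priv() is gone */

	return priv ? 0 : -EINVAL;
}

static struct dsa_switch_ops my_switch_ops = {
	.get_tag_protocol	= my_get_tag_protocol,
	.setup			= my_setup,
};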
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 5db9f5910428..6965c8f68ade 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -112,12 +112,13 @@ static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb
return &dst->u.tun_info;
}
-static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
- __be16 flags,
- __be64 tunnel_id,
- int md_size)
+static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
+ __be32 daddr,
+ __u8 tos, __u8 ttl,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
{
- const struct iphdr *iph = ip_hdr(skb);
struct metadata_dst *tun_dst;
tun_dst = tun_rx_dst(md_size);
@@ -125,17 +126,30 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
return NULL;
ip_tunnel_key_init(&tun_dst->u.tun_info.key,
- iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ saddr, daddr, tos, ttl,
0, 0, 0, tunnel_id, flags);
return tun_dst;
}
-static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
__be16 flags,
__be64 tunnel_id,
int md_size)
{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ const struct iphdr *iph = ip_hdr(skb);
+
+ return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ flags, tunnel_id, md_size);
+}
+
+static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u8 tos, __u8 ttl,
+ __be32 label,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
struct metadata_dst *tun_dst;
struct ip_tunnel_info *info;
@@ -150,14 +164,26 @@ static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
info->key.tp_src = 0;
info->key.tp_dst = 0;
- info->key.u.ipv6.src = ip6h->saddr;
- info->key.u.ipv6.dst = ip6h->daddr;
+ info->key.u.ipv6.src = *saddr;
+ info->key.u.ipv6.dst = *daddr;
- info->key.tos = ipv6_get_dsfield(ip6h);
- info->key.ttl = ip6h->hop_limit;
- info->key.label = ip6_flowlabel(ip6h);
+ info->key.tos = tos;
+ info->key.ttl = ttl;
+ info->key.label = label;
return tun_dst;
}
+static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
+ ipv6_get_dsfield(ip6h), ip6h->hop_limit,
+ ip6_flowlabel(ip6h), flags, tunnel_id,
+ md_size);
+}
#endif /* __NET_DST_METADATA_H */
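A hedged sketch of why the split helps: a collect_md receive path that has already parsed its outer IPv4 header and key can build the metadata dst directly with __ip_tun_set_dst() instead of going through ip_tun_rx_dst(). The surrounding function is hypothetical; key32_to_tunnel_id() is the helper added to <net/ip_tunnels.h> in this same series, and TUNNEL_KEY is the existing tunnel flag.

static int my_tunnel_rx(struct sk_buff *skb, const struct iphdr *outer_iph,
			__be32 key)
{
	struct metadata_dst *tun_dst;

	tun_dst = __ip_tun_set_dst(outer_iph->saddr, outer_iph->daddr,
				   outer_iph->tos, outer_iph->ttl,
				   TUNNEL_KEY, key32_to_tunnel_id(key), 0);
	if (!tun_dst)
		return -ENOMEM;

	skb_dst_set(skb, &tun_dst->dst);
	return 0;
}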
diff --git a/include/net/flow.h b/include/net/flow.h
index d47ef4bb5423..035aa7716967 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -34,8 +34,7 @@ struct flowi_common {
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
-#define FLOWI_FLAG_L3MDEV_SRC 0x04
-#define FLOWI_FLAG_SKIP_NH_OIF 0x08
+#define FLOWI_FLAG_SKIP_NH_OIF 0x04
__u32 flowic_secid;
struct flowi_tunnel flowic_tun_key;
};
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index d3d60dccd19f..d9534927d93b 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -32,8 +32,13 @@ struct flow_dissector_key_basic {
};
struct flow_dissector_key_tags {
- u32 vlan_id:12,
- flow_label:20;
+ u32 flow_label;
+};
+
+struct flow_dissector_key_vlan {
+ u16 vlan_id:12,
+ vlan_priority:3;
+ u16 padding;
};
struct flow_dissector_key_keyid {
@@ -119,7 +124,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
- FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */
+ FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */
FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
@@ -148,6 +153,7 @@ struct flow_keys {
#define FLOW_KEYS_HASH_START_FIELD basic
struct flow_dissector_key_basic basic;
struct flow_dissector_key_tags tags;
+ struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_keyid keyid;
struct flow_dissector_key_ports ports;
struct flow_dissector_key_addrs addrs;
@@ -177,7 +183,7 @@ struct flow_keys_digest {
void make_flow_keys_digest(struct flow_keys_digest *digest,
const struct flow_keys *flow);
-static inline bool flow_keys_have_l4(struct flow_keys *keys)
+static inline bool flow_keys_have_l4(const struct flow_keys *keys)
{
return (keys->ports.ports || keys->tags.flow_label);
}
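Splitting the VLAN id out of the tags word into its own key means dissector users declare it explicitly. A hedged sketch, assuming the usual skb_flow_dissector_init() setup; my_flow_keys and the key table are illustrative, FLOW_DISSECTOR_KEY_VLAN and struct flow_dissector_key_vlan come from this header.

struct my_flow_keys {
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_vlan vlan;
};

static const struct flow_dissector_key my_dissector_keys[] = {
	{
		.key_id	= FLOW_DISSECTOR_KEY_CONTROL,
		.offset	= offsetof(struct my_flow_keys, control),
	},
	{
		.key_id	= FLOW_DISSECTOR_KEY_BASIC,
		.offset	= offsetof(struct my_flow_keys, basic),
	},
	{
		.key_id	= FLOW_DISSECTOR_KEY_VLAN,
		.offset	= offsetof(struct my_flow_keys, vlan),
	},
};

/* then: skb_flow_dissector_init(&dissector, my_dissector_keys,
 *				 ARRAY_SIZE(my_dissector_keys)); */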
diff --git a/include/net/fq.h b/include/net/fq.h
index 268b49049c37..6d8521a30c5c 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -72,9 +72,12 @@ struct fq {
u32 flows_cnt;
u32 perturbation;
u32 limit;
+ u32 memory_limit;
+ u32 memory_usage;
u32 quantum;
u32 backlog;
u32 overlimit;
+ u32 overmemory;
u32 collisions;
};
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 163f3ed0f05a..4e6131cd3f43 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -29,6 +29,7 @@ static struct sk_buff *fq_flow_dequeue(struct fq *fq,
tin->backlog_packets--;
flow->backlog -= skb->len;
fq->backlog--;
+ fq->memory_usage -= skb->truesize;
if (flow->backlog == 0) {
list_del_init(&flow->backlogchain);
@@ -154,6 +155,7 @@ static void fq_tin_enqueue(struct fq *fq,
flow->backlog += skb->len;
tin->backlog_bytes += skb->len;
tin->backlog_packets++;
+ fq->memory_usage += skb->truesize;
fq->backlog++;
fq_recalc_backlog(fq, tin, flow);
@@ -166,7 +168,7 @@ static void fq_tin_enqueue(struct fq *fq,
__skb_queue_tail(&flow->queue, skb);
- if (fq->backlog > fq->limit) {
+ if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
flow = list_first_entry_or_null(&fq->backlogs,
struct fq_flow,
backlogchain);
@@ -181,6 +183,8 @@ static void fq_tin_enqueue(struct fq *fq,
flow->tin->overlimit++;
fq->overlimit++;
+ if (fq->memory_usage > fq->memory_limit)
+ fq->overmemory++;
}
}
@@ -251,6 +255,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
fq->perturbation = prandom_u32();
fq->quantum = 300;
fq->limit = 8192;
+ fq->memory_limit = 16 << 20; /* 16 MBytes */
fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
if (!fq->flows)
diff --git a/include/net/gre.h b/include/net/gre.h
index 73ea256eb7d7..d25d836c129b 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -7,7 +7,15 @@
struct gre_base_hdr {
__be16 flags;
__be16 protocol;
-};
+} __packed;
+
+struct gre_full_hdr {
+ struct gre_base_hdr fixed_header;
+ __be16 csum;
+ __be16 reserved1;
+ __be32 key;
+ __be32 seq;
+} __packed;
#define GRE_HEADER_SECTION 4
#define GREPROTO_CISCO 0
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index b0fd9476c538..ba07b9d8ed63 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -190,6 +190,10 @@ struct ieee80211_radiotap_header {
* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16
*
* Contains VHT information about this frame.
+ *
+ * IEEE80211_RADIOTAP_TIMESTAMP u64, u16, u8, u8 variable
+ *
+ * Contains timestamp information for this frame.
*/
enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_TSFT = 0,
@@ -214,6 +218,7 @@ enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_MCS = 19,
IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
IEEE80211_RADIOTAP_VHT = 21,
+ IEEE80211_RADIOTAP_TIMESTAMP = 22,
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -321,6 +326,22 @@ enum ieee80211_radiotap_type {
#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04
#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08
+/* For IEEE80211_RADIOTAP_TIMESTAMP */
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK 0x000F
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS 0x0000
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US 0x0001
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS 0x0003
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK 0x00F0
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU 0x0000
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU 0x0010
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU 0x0020
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ 0x0030
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN 0x00F0
+
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT 0x00
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT 0x01
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY 0x02
+
/* helpers */
static inline int ieee80211_get_radiotap_len(unsigned char *data)
{
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 1c8b6820b694..515352c6280a 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -201,6 +201,7 @@ struct inet6_dev {
struct ipv6_devstat stats;
struct timer_list rs_timer;
+ __s32 rs_interval; /* in jiffies */
__u8 rs_probes;
__u8 addr_gen_mode;
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 49dcad4fe99e..197a30d221e9 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -134,8 +134,8 @@ struct inet_connection_sock {
} icsk_mtup;
u32 icsk_user_timeout;
- u64 icsk_ca_priv[64 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (8 * sizeof(u64))
+ u64 icsk_ca_priv[88 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
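The per-socket congestion-control scratch area grows from 64 to 88 bytes to make room for modules with larger state. A hedged sketch of how a CC module typically guards its private struct against ICSK_CA_PRIV_SIZE; struct my_ca is hypothetical, inet_csk_ca() is the existing accessor.

struct my_ca {
	u64	state[10];	/* 80 bytes, fits in the 88-byte area */
};

static void my_ca_init(struct sock *sk)
{
	struct my_ca *ca = inet_csk_ca(sk);

	BUILD_BUG_ON(sizeof(struct my_ca) > ICSK_CA_PRIV_SIZE);
	memset(ca, 0, sizeof(*ca));
}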
diff --git a/include/net/ip.h b/include/net/ip.h
index 9742b92dc933..bc43c0fcae12 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -219,6 +219,29 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
}
#endif
+#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; stats_list[i].name; i++) \
+ buff64[i] += snmp_get_cpu_field64( \
+ mib_statistic, \
+ c, stats_list[i].entry, \
+ offset); \
+ } \
+}
+
+#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; stats_list[i].name; i++) \
+ buff[i] += snmp_get_cpu_field( \
+ mib_statistic, \
+ c, stats_list[i].entry); \
+ } \
+}
+
void inet_get_local_port_range(struct net *net, int *low, int *high);
#ifdef CONFIG_SYSCTL
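A hedged sketch of how a /proc/net/snmp style dumper would use the new batch helper: a single pass over the possible CPUs fills the whole output buffer instead of one per-counter pass. stats_list is assumed to be one of the usual NULL-terminated struct snmp_mib tables from <net/snmp.h>; the wrapper itself is illustrative.

static void my_dump_tcp_mib(struct net *net, const struct snmp_mib *stats_list,
			    unsigned long *buff)
{
	/* buff must be zeroed and sized to stats_list by the caller */
	snmp_get_cpu_field_batch(buff, stats_list, net->mib.tcp_statistics);
}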
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index d97305d0e71f..e0cd318d5103 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -64,6 +64,9 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
}
void ip6_route_input(struct sk_buff *skb);
+struct dst_entry *ip6_route_input_lookup(struct net *net,
+ struct net_device *dev,
+ struct flowi6 *fl6, int flags);
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags);
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 43a5a0e4524c..20ed9699fcd4 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -23,6 +23,7 @@ struct __ip6_tnl_parm {
__u8 proto; /* tunnel protocol */
__u8 encap_limit; /* encapsulation limit for tunnel */
__u8 hop_limit; /* hop limit for tunnel */
+ bool collect_md;
__be32 flowinfo; /* traffic class and flowlabel for tunnel */
__u32 flags; /* tunnel flags */
struct in6_addr laddr; /* local tunnel end-point address */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 7d4a72e75f33..b9314b48e39f 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -22,6 +22,7 @@
#include <net/fib_rules.h>
#include <net/inetpeer.h>
#include <linux/percpu.h>
+#include <linux/notifier.h>
struct fib_config {
u8 fc_dst_len;
@@ -122,6 +123,7 @@ struct fib_info {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_weight;
#endif
+ unsigned int fib_offload_cnt;
struct rcu_head rcu;
struct fib_nh fib_nh[0];
#define fib_dev fib_nh[0].nh_dev
@@ -173,6 +175,18 @@ struct fib_result_nl {
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+static inline void fib_info_offload_inc(struct fib_info *fi)
+{
+ fi->fib_offload_cnt++;
+ fi->fib_flags |= RTNH_F_OFFLOAD;
+}
+
+static inline void fib_info_offload_dec(struct fib_info *fi)
+{
+ if (--fi->fib_offload_cnt == 0)
+ fi->fib_flags &= ~RTNH_F_OFFLOAD;
+}
+
#define FIB_RES_SADDR(net, res) \
((FIB_RES_NH(res).nh_saddr_genid == \
atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
@@ -185,6 +199,33 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
#define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \
FIB_RES_SADDR(net, res))
+struct fib_notifier_info {
+ struct net *net;
+};
+
+struct fib_entry_notifier_info {
+ struct fib_notifier_info info; /* must be first */
+ u32 dst;
+ int dst_len;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+ u32 tb_id;
+ u32 nlflags;
+};
+
+enum fib_event_type {
+ FIB_EVENT_ENTRY_ADD,
+ FIB_EVENT_ENTRY_DEL,
+ FIB_EVENT_RULE_ADD,
+ FIB_EVENT_RULE_DEL,
+};
+
+int register_fib_notifier(struct notifier_block *nb);
+int unregister_fib_notifier(struct notifier_block *nb);
+int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+
struct fib_table {
struct hlist_node tb_hlist;
u32 tb_id;
@@ -196,13 +237,12 @@ struct fib_table {
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags);
-int fib_table_insert(struct fib_table *, struct fib_config *);
-int fib_table_delete(struct fib_table *, struct fib_config *);
+int fib_table_insert(struct net *, struct fib_table *, struct fib_config *);
+int fib_table_delete(struct net *, struct fib_table *, struct fib_config *);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
-int fib_table_flush(struct fib_table *table);
+int fib_table_flush(struct net *net, struct fib_table *table);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
-void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
#ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -315,7 +355,6 @@ static inline int fib_num_tclassid_users(struct net *net)
}
#endif
int fib_unmerge(struct net *net);
-void fib_flush_external(struct net *net);
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
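A hedged sketch of a forwarding-offload driver subscribing to the new FIB notifications so it can mirror routes into hardware. The event structures, register_fib_notifier() and fib_info_offload_inc()/dec() come from this header; the driver-side handling is hypothetical.

static int my_fib_event(struct notifier_block *nb, unsigned long event,
			void *ptr)
{
	struct fib_entry_notifier_info *fen_info;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
		fen_info = ptr;	/* info is the first member, so this cast is safe */
		/* program fen_info->dst/dst_len/fi into hardware, then
		 * fib_info_offload_inc(fen_info->fi) on success
		 */
		break;
	case FIB_EVENT_ENTRY_DEL:
		fen_info = ptr;
		/* remove the route and fib_info_offload_dec(fen_info->fi) */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_fib_nb = {
	.notifier_call = my_fib_event,
};

/* at probe time: register_fib_notifier(&my_fib_nb); */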
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index a5e7035fb93f..59557c07904b 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -222,6 +222,25 @@ static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}
+static inline __be64 key32_to_tunnel_id(__be32 key)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be64)key;
+#else
+ return (__force __be64)((__force u64)key << 32);
+#endif
+}
+
+/* Returns the least-significant 32 bits of a __be64. */
+static inline __be32 tunnel_id_to_key32(__be64 tun_id)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be32)tun_id;
+#else
+ return (__force __be32)((__force u64)tun_id >> 32);
+#endif
+}
+
#ifdef CONFIG_INET
int ip_tunnel_init(struct net_device *dev);
@@ -236,6 +255,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
+void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ const u8 proto);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
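The two conversion helpers are mutual inverses: a 32-bit key round-trips through the 64-bit tunnel id unchanged on both endiannesses. A small sanity-check sketch (the wrapper is illustrative):

static bool my_key_roundtrip_ok(__be32 key)
{
	__be64 tun_id = key32_to_tunnel_id(key);

	return tunnel_id_to_key32(tun_id) == key;
}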
diff --git a/include/net/kcm.h b/include/net/kcm.h
index 2840b5825dcc..2a8965819db0 100644
--- a/include/net/kcm.h
+++ b/include/net/kcm.h
@@ -13,6 +13,7 @@
#include <linux/skbuff.h>
#include <net/sock.h>
+#include <net/strparser.h>
#include <uapi/linux/kcm.h>
extern unsigned int kcm_net_id;
@@ -21,16 +22,8 @@ extern unsigned int kcm_net_id;
#define KCM_STATS_INCR(stat) ((stat)++)
struct kcm_psock_stats {
- unsigned long long rx_msgs;
- unsigned long long rx_bytes;
unsigned long long tx_msgs;
unsigned long long tx_bytes;
- unsigned int rx_aborts;
- unsigned int rx_mem_fail;
- unsigned int rx_need_more_hdr;
- unsigned int rx_msg_too_big;
- unsigned int rx_msg_timeouts;
- unsigned int rx_bad_hdr_len;
unsigned long long reserved;
unsigned long long unreserved;
unsigned int tx_aborts;
@@ -64,13 +57,6 @@ struct kcm_tx_msg {
struct sk_buff *last_skb;
};
-struct kcm_rx_msg {
- int full_len;
- int accum_len;
- int offset;
- int early_eaten;
-};
-
/* Socket structure for KCM client sockets */
struct kcm_sock {
struct sock sk;
@@ -87,6 +73,7 @@ struct kcm_sock {
struct work_struct tx_work;
struct list_head wait_psock_list;
struct sk_buff *seq_skb;
+ u32 tx_stopped : 1;
/* Don't use bit fields here, these are set under different locks */
bool tx_wait;
@@ -104,11 +91,11 @@ struct bpf_prog;
/* Structure for an attached lower socket */
struct kcm_psock {
struct sock *sk;
+ struct strparser strp;
struct kcm_mux *mux;
int index;
u32 tx_stopped : 1;
- u32 rx_stopped : 1;
u32 done : 1;
u32 unattaching : 1;
@@ -121,18 +108,12 @@ struct kcm_psock {
struct kcm_psock_stats stats;
/* Receive */
- struct sk_buff *rx_skb_head;
- struct sk_buff **rx_skb_nextp;
- struct sk_buff *ready_rx_msg;
struct list_head psock_ready_list;
- struct work_struct rx_work;
- struct delayed_work rx_delayed_work;
struct bpf_prog *bpf_prog;
struct kcm_sock *rx_kcm;
unsigned long long saved_rx_bytes;
unsigned long long saved_rx_msgs;
- struct timer_list rx_msg_timer;
- unsigned int rx_need_bytes;
+ struct sk_buff *ready_rx_msg;
/* Transmit */
struct kcm_sock *tx_kcm;
@@ -146,6 +127,7 @@ struct kcm_net {
struct mutex mutex;
struct kcm_psock_stats aggregate_psock_stats;
struct kcm_mux_stats aggregate_mux_stats;
+ struct strp_aggr_stats aggregate_strp_stats;
struct list_head mux_list;
int count;
};
@@ -163,6 +145,7 @@ struct kcm_mux {
struct kcm_mux_stats stats;
struct kcm_psock_stats aggregate_psock_stats;
+ struct strp_aggr_stats aggregate_strp_stats;
/* Receive */
spinlock_t rx_lock ____cacheline_aligned_in_smp;
@@ -190,14 +173,6 @@ static inline void aggregate_psock_stats(struct kcm_psock_stats *stats,
/* Save psock statistics in the mux when psock is being unattached. */
#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
- SAVE_PSOCK_STATS(rx_msgs);
- SAVE_PSOCK_STATS(rx_bytes);
- SAVE_PSOCK_STATS(rx_aborts);
- SAVE_PSOCK_STATS(rx_mem_fail);
- SAVE_PSOCK_STATS(rx_need_more_hdr);
- SAVE_PSOCK_STATS(rx_msg_too_big);
- SAVE_PSOCK_STATS(rx_msg_timeouts);
- SAVE_PSOCK_STATS(rx_bad_hdr_len);
SAVE_PSOCK_STATS(tx_msgs);
SAVE_PSOCK_STATS(tx_bytes);
SAVE_PSOCK_STATS(reserved);
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index e90095091aa0..b220dabeab45 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -11,6 +11,7 @@
#ifndef _NET_L3MDEV_H_
#define _NET_L3MDEV_H_
+#include <net/dst.h>
#include <net/fib_rules.h>
/**
@@ -18,30 +19,24 @@
*
* @l3mdev_fib_table: Get FIB table id to use for lookups
*
- * @l3mdev_get_rtable: Get cached IPv4 rtable (dst_entry) for device
+ * @l3mdev_l3_rcv: Hook in L3 receive path
*
- * @l3mdev_get_saddr: Get source address for a flow
+ * @l3mdev_l3_out: Hook in L3 output path
*
- * @l3mdev_get_rt6_dst: Get cached IPv6 rt6_info (dst_entry) for device
+ * @l3mdev_link_scope_lookup: IPv6 lookup for linklocal and mcast destinations
*/
struct l3mdev_ops {
u32 (*l3mdev_fib_table)(const struct net_device *dev);
struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev,
struct sk_buff *skb, u16 proto);
-
- /* IPv4 ops */
- struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev,
- const struct flowi4 *fl4);
- int (*l3mdev_get_saddr)(struct net_device *dev,
- struct flowi4 *fl4);
+ struct sk_buff * (*l3mdev_l3_out)(struct net_device *dev,
+ struct sock *sk, struct sk_buff *skb,
+ u16 proto);
/* IPv6 ops */
- struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *dev,
+ struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *dev,
struct flowi6 *fl6);
- int (*l3mdev_get_saddr6)(struct net_device *dev,
- const struct sock *sk,
- struct flowi6 *fl6);
};
#ifdef CONFIG_NET_L3_MASTER_DEV
@@ -49,6 +44,8 @@ struct l3mdev_ops {
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg);
+void l3mdev_update_flow(struct net *net, struct flowi *fl);
+
int l3mdev_master_ifindex_rcu(const struct net_device *dev);
static inline int l3mdev_master_ifindex(struct net_device *dev)
{
@@ -80,7 +77,7 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
}
static inline
-const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
+struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
{
/* netdev_master_upper_dev_get_rcu calls
* list_first_or_null_rcu to walk the upper dev list.
@@ -89,7 +86,7 @@ const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
* typecast to remove the const
*/
struct net_device *dev = (struct net_device *)_dev;
- const struct net_device *master;
+ struct net_device *master;
if (!dev)
return NULL;
@@ -104,26 +101,6 @@ const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
return master;
}
-/* get index of an interface to use for FIB lookups. For devices
- * enslaved to an L3 master device FIB lookups are based on the
- * master index
- */
-static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
-{
- return l3mdev_master_ifindex_rcu(dev) ? : dev->ifindex;
-}
-
-static inline int l3mdev_fib_oif(struct net_device *dev)
-{
- int oif;
-
- rcu_read_lock();
- oif = l3mdev_fib_oif_rcu(dev);
- rcu_read_unlock();
-
- return oif;
-}
-
u32 l3mdev_fib_table_rcu(const struct net_device *dev);
u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
static inline u32 l3mdev_fib_table(const struct net_device *dev)
@@ -137,39 +114,7 @@ static inline u32 l3mdev_fib_table(const struct net_device *dev)
return tb_id;
}
-static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
- const struct flowi4 *fl4)
-{
- if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rtable)
- return dev->l3mdev_ops->l3mdev_get_rtable(dev, fl4);
-
- return NULL;
-}
-
-static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
-{
- struct net_device *dev;
- bool rc = false;
-
- if (ifindex == 0)
- return false;
-
- rcu_read_lock();
-
- dev = dev_get_by_index_rcu(net, ifindex);
- if (dev)
- rc = netif_is_l3_master(dev);
-
- rcu_read_unlock();
-
- return rc;
-}
-
-int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4);
-
-struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6);
-int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
- struct flowi6 *fl6);
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6);
static inline
struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
@@ -199,6 +144,34 @@ struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
return l3mdev_l3_rcv(skb, AF_INET6);
}
+static inline
+struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
+{
+ struct net_device *dev = skb_dst(skb)->dev;
+
+ if (netif_is_l3_slave(dev)) {
+ struct net_device *master;
+
+ master = netdev_master_upper_dev_get_rcu(dev);
+ if (master && master->l3mdev_ops->l3mdev_l3_out)
+ skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
+ skb, proto);
+ }
+
+ return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
+{
+ return l3mdev_l3_out(sk, skb, AF_INET);
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
+{
+ return l3mdev_l3_out(sk, skb, AF_INET6);
+}
#else
static inline int l3mdev_master_ifindex_rcu(const struct net_device *dev)
@@ -216,20 +189,11 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
}
static inline
-const struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
+struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
{
return NULL;
}
-static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
-{
- return dev ? dev->ifindex : 0;
-}
-static inline int l3mdev_fib_oif(struct net_device *dev)
-{
- return dev ? dev->ifindex : 0;
-}
-
static inline u32 l3mdev_fib_table_rcu(const struct net_device *dev)
{
return 0;
@@ -243,43 +207,32 @@ static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
return 0;
}
-static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
- const struct flowi4 *fl4)
+static inline
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6)
{
return NULL;
}
-static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
-{
- return false;
-}
-
-static inline int l3mdev_get_saddr(struct net *net, int ifindex,
- struct flowi4 *fl4)
-{
- return 0;
-}
-
static inline
-struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
{
- return NULL;
+ return skb;
}
-static inline int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
- struct flowi6 *fl6)
+static inline
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
{
- return 0;
+ return skb;
}
static inline
-struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
+struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
{
return skb;
}
static inline
-struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
+struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
{
return skb;
}
@@ -290,6 +243,10 @@ int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
{
return 1;
}
+static inline
+void l3mdev_update_flow(struct net *net, struct flowi *fl)
+{
+}
#endif
#endif /* _NET_L3MDEV_H_ */
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index e9f116e29c22..ea3f80f58fd6 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -13,6 +13,13 @@
/* lw tunnel state flags */
#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0)
#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
+#define LWTUNNEL_STATE_XMIT_REDIRECT BIT(2)
+
+enum {
+ LWTUNNEL_XMIT_DONE,
+ LWTUNNEL_XMIT_CONTINUE,
+};
+
struct lwtunnel_state {
__u16 type;
@@ -21,6 +28,7 @@ struct lwtunnel_state {
int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*orig_input)(struct sk_buff *);
int len;
+ __u16 headroom;
__u8 data[0];
};
@@ -34,6 +42,7 @@ struct lwtunnel_encap_ops {
struct lwtunnel_state *lwtstate);
int (*get_encap_size)(struct lwtunnel_state *lwtstate);
int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+ int (*xmit)(struct sk_buff *skb);
};
#ifdef CONFIG_LWTUNNEL
@@ -75,6 +84,24 @@ static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
return false;
}
+
+static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_XMIT_REDIRECT))
+ return true;
+
+ return false;
+}
+
+static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
+ unsigned int mtu)
+{
+ if (lwtunnel_xmit_redirect(lwtstate) && lwtstate->headroom < mtu)
+ return lwtstate->headroom;
+
+ return 0;
+}
+
int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
unsigned int num);
int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
@@ -90,6 +117,7 @@ struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int lwtunnel_input(struct sk_buff *skb);
+int lwtunnel_xmit(struct sk_buff *skb);
#else
@@ -117,6 +145,17 @@ static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
return false;
}
+static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
+static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
+ unsigned int mtu)
+{
+ return 0;
+}
+
static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
unsigned int num)
{
@@ -170,6 +209,11 @@ static inline int lwtunnel_input(struct sk_buff *skb)
return -EOPNOTSUPP;
}
+static inline int lwtunnel_xmit(struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_LWTUNNEL */
#define MODULE_ALIAS_RTNL_LWT(encap_type) MODULE_ALIAS("rtnl-lwt-" __stringify(encap_type))
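A hedged sketch of how an output path is expected to use the new xmit redirect: if the dst's lwtunnel state asks for it, the skb is handed to lwtunnel_xmit() first and the normal path continues only on LWTUNNEL_XMIT_CONTINUE. The function name is hypothetical; lwtunnel_xmit_redirect(), lwtunnel_xmit() and the return codes come from this header.

static int my_finish_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct lwtunnel_state *lwtstate = skb_dst(skb)->lwtstate;

	if (lwtunnel_xmit_redirect(lwtstate)) {
		int ret = lwtunnel_xmit(skb);

		if (ret != LWTUNNEL_XMIT_CONTINUE)
			return ret;
	}

	/* regular neighbour output would follow here */
	return 0;
}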
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cca510a585c3..a810dfcb83c2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -715,6 +715,7 @@ enum mac80211_tx_info_flags {
* frame (PS-Poll or uAPSD).
* @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
* @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
+ * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
*
* These flags are used in tx_info->control.flags.
*/
@@ -723,6 +724,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
IEEE80211_TX_CTRL_AMSDU = BIT(3),
+ IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
};
/*
@@ -1735,6 +1737,9 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @max_rx_aggregation_subframes: maximum number of frames in a single AMPDU
+ * that this station is allowed to transmit to us.
+ * Can be modified by the driver.
* @wme: indicates whether the STA supports QoS/WME (if local devices does,
* otherwise always false)
* @drv_priv: data area for driver use, will always be aligned to
@@ -1775,6 +1780,7 @@ struct ieee80211_sta {
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
+ u8 max_rx_aggregation_subframes;
bool wme;
u8 uapsd_queues;
u8 max_sp;
@@ -2014,6 +2020,11 @@ struct ieee80211_txq {
* @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list
* skbs, needed for zero-copy software A-MSDU.
*
+ * @IEEE80211_HW_REPORTS_LOW_ACK: The driver (or firmware) reports low-ack events
+ * via ieee80211_report_low_ack() based on its own algorithm. For such
+ * drivers, the mac80211 packet-loss mechanism is not triggered and the
+ * driver relies entirely on firmware events for station kickout.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2054,6 +2065,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_USES_RSS,
IEEE80211_HW_TX_AMSDU,
IEEE80211_HW_TX_FRAG_LIST,
+ IEEE80211_HW_REPORTS_LOW_ACK,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2141,6 +2153,14 @@ enum ieee80211_hw_flags {
* the default is _GI | _BANDWIDTH.
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
*
+ * @radiotap_timestamp: Information for the radiotap timestamp field; if the
+ * 'units_pos' member is set to a non-negative value it must be set to
+ * a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
+ * IEEE80211_RADIOTAP_TIMESTAMP_SPOS_* value, and then the timestamp
+ * field will be added and populated from the &struct ieee80211_rx_status
+ * device_timestamp. If the 'accuracy' member is non-negative, it's put
+ * into the accuracy radiotap field and the accuracy known flag is set.
+ *
* @netdev_features: netdev features to be set in each netdev created
* from this HW. Note that not all features are usable with mac80211,
* other features will be rejected during HW registration.
@@ -2159,6 +2179,8 @@ enum ieee80211_hw_flags {
* @n_cipher_schemes: a size of an array of cipher schemes definitions.
* @cipher_schemes: a pointer to an array of cipher scheme definitions
* supported by HW.
+ * @max_nan_de_entries: maximum number of NAN DE functions supported by the
+ * device.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -2184,11 +2206,16 @@ struct ieee80211_hw {
u8 offchannel_tx_hw_queue;
u8 radiotap_mcs_details;
u16 radiotap_vht_details;
+ struct {
+ int units_pos;
+ s16 accuracy;
+ } radiotap_timestamp;
netdev_features_t netdev_features;
u8 uapsd_queues;
u8 uapsd_max_sp_len;
u8 n_cipher_schemes;
const struct ieee80211_cipher_scheme *cipher_schemes;
+ u8 max_nan_de_entries;
};
static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
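A hedged sketch of a driver advertising the new radiotap timestamp field at hw allocation time, for hardware that timestamps frames in microseconds at the end of the MPDU; the helper is hypothetical, the field names and IEEE80211_RADIOTAP_TIMESTAMP_* values come from this series.

static void my_setup_radiotap_timestamp(struct ieee80211_hw *hw)
{
	hw->radiotap_timestamp.units_pos =
		IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
		IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU;
	hw->radiotap_timestamp.accuracy = 22;	/* +/- 22 us, hypothetical value */
}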
@@ -3085,11 +3112,8 @@ enum ieee80211_reconfig_type {
*
* @sta_add_debugfs: Drivers can use this callback to add debugfs files
* when a station is added to mac80211's station list. This callback
- * and @sta_remove_debugfs should be within a CONFIG_MAC80211_DEBUGFS
- * conditional. This callback can sleep.
- *
- * @sta_remove_debugfs: Remove the debugfs files which were added using
- * @sta_add_debugfs. This callback can sleep.
+ * should be within a CONFIG_MAC80211_DEBUGFS conditional. This
+ * callback can sleep.
*
* @sta_notify: Notifies low level driver about power state transition of an
* associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating
@@ -3147,6 +3171,12 @@ enum ieee80211_reconfig_type {
* required function.
* The callback can sleep.
*
+ * @offset_tsf: Offset the TSF timer by the specified value in the
+ * firmware/hardware. Preferred to set_tsf as it avoids delay between
+ * calling set_tsf() and hardware getting programmed, which will show up
+ * as TSF delay. Is not a required function.
+ * The callback can sleep.
+ *
* @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize
* with other STAs in the IBSS. This is only used in IBSS mode. This
* function is optional if the firmware/hardware takes full care of
@@ -3401,6 +3431,21 @@ enum ieee80211_reconfig_type {
* synchronization which is needed in case driver has in its RSS queues
* pending frames that were received prior to the control path action
* currently taken (e.g. disassociation) but are not processed yet.
+ *
+ * @start_nan: join an existing NAN cluster, or create a new one.
+ * @stop_nan: leave the NAN cluster.
+ * @nan_change_conf: change NAN configuration. The data in cfg80211_nan_conf
+ * contains full new configuration and changes specify which parameters
+ * are changed with respect to the last NAN config.
+ * The driver gets both full configuration and the changed parameters since
+ * some devices may need the full configuration while others need only the
+ * changed parameters.
+ * @add_nan_func: Add a NAN function. Returns 0 on success. The data in
+ * cfg80211_nan_func must not be referenced outside the scope of
+ * this call.
+ * @del_nan_func: Remove a NAN function. The driver must call
+ * ieee80211_nan_func_terminated() with
+ * NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST reason code upon removal.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3485,10 +3530,6 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
- void (*sta_remove_debugfs)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct dentry *dir);
#endif
void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd, struct ieee80211_sta *sta);
@@ -3516,6 +3557,8 @@ struct ieee80211_ops {
u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u64 tsf);
+ void (*offset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ s64 offset);
void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int (*tx_last_beacon)(struct ieee80211_hw *hw);
int (*ampdu_action)(struct ieee80211_hw *hw,
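[Editor's sketch of the new @offset_tsf hook, hedged: the example_* names are hypothetical driver internals, not anything defined by this patch. The point is that the firmware applies the signed delta in one step instead of the driver doing a read-modify-write through get_tsf()/set_tsf().]

static void example_offset_tsf(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif, s64 offset)
{
        struct example_priv *priv = hw->priv;   /* hypothetical driver state */

        /* ask firmware to add 'offset' to the running TSF atomically, so the
         * command round-trip time does not accumulate as TSF error
         */
        example_fw_adjust_tsf(priv, offset);    /* hypothetical firmware call */
}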
@@ -3640,6 +3683,21 @@ struct ieee80211_ops {
void (*wake_tx_queue)(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
void (*sync_rx_queues)(struct ieee80211_hw *hw);
+
+ int (*start_nan)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf);
+ int (*stop_nan)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ int (*nan_change_conf)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf, u32 changes);
+ int (*add_nan_func)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_nan_func *nan_func);
+ void (*del_nan_func)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 instance_id);
};
/**
@@ -5713,4 +5771,36 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
unsigned long *frame_cnt,
unsigned long *byte_cnt);
+
+/**
+ * ieee80211_nan_func_terminated - notify about NAN function termination.
+ *
+ * This function is used to notify mac80211 about NAN function termination.
+ * Note that this function can't be called from hard irq.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @inst_id: the local instance id
+ * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*)
+ * @gfp: allocation flags
+ */
+void ieee80211_nan_func_terminated(struct ieee80211_vif *vif,
+ u8 inst_id,
+ enum nl80211_nan_func_term_reason reason,
+ gfp_t gfp);
+
+/**
+ * ieee80211_nan_func_match - notify about NAN function match event.
+ *
+ * This function is used to notify mac80211 about NAN function match. The
+ * cookie inside the match struct will be assigned by mac80211.
+ * Note that this function can't be called from hard irq.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @match: match event information
+ * @gfp: allocation flags
+ */
+void ieee80211_nan_func_match(struct ieee80211_vif *vif,
+ struct cfg80211_nan_match_params *match,
+ gfp_t gfp);
+
#endif /* MAC80211_H */
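[Editor's sketch of how a driver ties @del_nan_func to ieee80211_nan_func_terminated(), as required by the kerneldoc above. The example_* firmware call is hypothetical; the reason code is the one named in the @del_nan_func documentation.]

static void example_del_nan_func(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif, u8 instance_id)
{
        /* remove the function from firmware (hypothetical call) ... */
        example_fw_remove_nan_func(hw->priv, instance_id);

        /* ... then report the termination back to mac80211 */
        ieee80211_nan_func_terminated(vif, instance_id,
                                      NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST,
                                      GFP_KERNEL);
}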
diff --git a/include/net/mpls.h b/include/net/mpls.h
index 5b3b5addfb08..1dbc669b770e 100644
--- a/include/net/mpls.h
+++ b/include/net/mpls.h
@@ -19,21 +19,18 @@
#define MPLS_HLEN 4
+struct mpls_shim_hdr {
+ __be32 label_stack_entry;
+};
+
static inline bool eth_p_mpls(__be16 eth_type)
{
return eth_type == htons(ETH_P_MPLS_UC) ||
eth_type == htons(ETH_P_MPLS_MC);
}
-/*
- * For non-MPLS skbs this will correspond to the network header.
- * For MPLS skbs it will be before the network_header as the MPLS
- * label stack lies between the end of the mac header and the network
- * header. That is, for MPLS skbs the end of the mac header
- * is the top of the MPLS label stack.
- */
-static inline unsigned char *skb_mpls_header(struct sk_buff *skb)
+static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
{
- return skb_mac_header(skb) + skb->mac_len;
+ return (struct mpls_shim_hdr *)skb_network_header(skb);
}
#endif
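[Editor's note: with the new accessor the shim entry fields can be decoded with the MPLS_LS_* masks and shifts from the uapi MPLS header. A minimal sketch, assuming skb_network_header() points at the top of the label stack as mpls_hdr() now requires:]

static inline u32 example_mpls_top_label(const struct sk_buff *skb)
{
        u32 entry = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);

        /* 20-bit label; TC, bottom-of-stack and TTL use the other MPLS_LS_* masks */
        return (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
}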
diff --git a/include/net/ncsi.h b/include/net/ncsi.h
index 1dbf42f79750..68680baac0fd 100644
--- a/include/net/ncsi.h
+++ b/include/net/ncsi.h
@@ -31,6 +31,7 @@ struct ncsi_dev {
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
void (*notifier)(struct ncsi_dev *nd));
int ncsi_start_dev(struct ncsi_dev *nd);
+void ncsi_stop_dev(struct ncsi_dev *nd);
void ncsi_unregister_dev(struct ncsi_dev *nd);
#else /* !CONFIG_NET_NCSI */
static inline struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
@@ -44,6 +45,10 @@ static inline int ncsi_start_dev(struct ncsi_dev *nd)
return -ENOTTY;
}
+static inline void ncsi_stop_dev(struct ncsi_dev *nd)
+{
+}
+
static inline void ncsi_unregister_dev(struct ncsi_dev *nd)
{
}
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index e8d1448425a7..0b0c35c37125 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -15,6 +15,12 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
void nf_bridge_update_protocol(struct sk_buff *skb);
+int br_nf_hook_thresh(unsigned int hook, struct net *net, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct net *, struct sock *,
+ struct sk_buff *));
+
static inline struct nf_bridge_info *
nf_bridge_info_get(const struct sk_buff *skb)
{
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 445b019c2078..50418052a520 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -42,7 +42,6 @@ union nf_conntrack_expect_proto {
#include <linux/types.h>
#include <linux/skbuff.h>
-#include <linux/timer.h>
#ifdef CONFIG_NETFILTER_DEBUG
#define NF_CT_ASSERT(x) WARN_ON(!(x))
@@ -73,7 +72,7 @@ struct nf_conn_help {
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
struct nf_conn {
- /* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
+ /* Usage count in here is 1 for hash table, 1 per skb,
* plus 1 for any connection(s) we are `master' for
*
* Hint, SKB address this struct and refcnt via skb->nfct and
@@ -96,8 +95,8 @@ struct nf_conn {
/* Have we seen traffic both ways yet? (bitset) */
unsigned long status;
- /* Timer function; drops refcnt when it goes off. */
- struct timer_list timeout;
+ /* jiffies32 when this ct is considered dead */
+ u32 timeout;
possible_net_t ct_net;
@@ -220,21 +219,14 @@ static inline void nf_ct_refresh(struct nf_conn *ct,
__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
}
-bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb, int do_acct);
-
/* kill conntrack and do accounting */
-static inline bool nf_ct_kill_acct(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb)
-{
- return __nf_ct_kill_acct(ct, ctinfo, skb, 1);
-}
+bool nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb);
/* kill conntrack without accounting */
static inline bool nf_ct_kill(struct nf_conn *ct)
{
- return __nf_ct_kill_acct(ct, 0, NULL, 0);
+ return nf_ct_delete(ct, 0, 0);
}
/* These are for NAT. Icky. */
@@ -291,21 +283,55 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}
+#define nfct_time_stamp ((u32)(jiffies))
+
/* jiffies until ct expires, 0 if already expired */
static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
{
- long timeout = (long)ct->timeout.expires - (long)jiffies;
+ s32 timeout = ct->timeout - nfct_time_stamp;
return timeout > 0 ? timeout : 0;
}
+static inline bool nf_ct_is_expired(const struct nf_conn *ct)
+{
+ return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
+}
+
+/* use after obtaining a reference count */
+static inline bool nf_ct_should_gc(const struct nf_conn *ct)
+{
+ return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
+ !nf_ct_is_dying(ct);
+}
+
struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
int nf_conntrack_hash_resize(unsigned int hashsize);
+
+extern struct hlist_nulls_head *nf_conntrack_hash;
extern unsigned int nf_conntrack_htable_size;
+extern seqcount_t nf_conntrack_generation;
extern unsigned int nf_conntrack_max;
+/* must be called with rcu read lock held */
+static inline void
+nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
+{
+ struct hlist_nulls_head *hptr;
+ unsigned int sequence, hsz;
+
+ do {
+ sequence = read_seqcount_begin(&nf_conntrack_generation);
+ hsz = nf_conntrack_htable_size;
+ hptr = nf_conntrack_hash;
+ } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+ *hash = hptr;
+ *hsize = hsz;
+}
+
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
gfp_t flags);
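[Editor's sketch of the intended reader pattern for the new nf_conntrack_get_ht() helper: because the hash table can be resized concurrently, the pointer and size must be snapshotted together under the seqcount and used inside an RCU read-side section. The walker body is only outlined in a comment.]

static void example_walk_conntrack_hash(void)
{
        struct hlist_nulls_head *hash;
        unsigned int hsize, i;

        rcu_read_lock();
        nf_conntrack_get_ht(&hash, &hsize);     /* consistent pointer + size */
        for (i = 0; i < hsize; i++) {
                /* walk hash[i] with hlist_nulls_for_each_entry_rcu(), and
                 * skip (or reap) entries for which nf_ct_should_gc() is true
                 */
        }
        rcu_read_unlock();
}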
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 79d7ac5c9740..62e17d1319ff 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -51,8 +51,6 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);
-void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize);
-
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net,
@@ -83,7 +81,6 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
#define CONNTRACK_LOCKS 1024
-extern struct hlist_nulls_head *nf_conntrack_hash;
extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
void nf_conntrack_lock(spinlock_t *lock);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index fa36447371c6..12d967b58726 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -12,12 +12,19 @@
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/netfilter/nf_conntrack_extend.h>
+enum nf_ct_ecache_state {
+ NFCT_ECACHE_UNKNOWN, /* destroy event not sent */
+ NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */
+ NFCT_ECACHE_DESTROY_SENT, /* sent destroy event after failure */
+};
+
struct nf_conntrack_ecache {
- unsigned long cache; /* bitops want long */
- unsigned long missed; /* missed events */
- u16 ctmask; /* bitmask of ct events to be delivered */
- u16 expmask; /* bitmask of expect events to be delivered */
- u32 portid; /* netlink portid of destroyer */
+ unsigned long cache; /* bitops want long */
+ unsigned long missed; /* missed events */
+ u16 ctmask; /* bitmask of ct events to be delivered */
+ u16 expmask; /* bitmask of expect events to be delivered */
+ u32 portid; /* netlink portid of destroyer */
+ enum nf_ct_ecache_state state; /* ecache state */
};
static inline struct nf_conntrack_ecache *
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index cdc920b4c4c2..8992e4229da9 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -63,10 +63,6 @@ struct nf_conntrack_l3proto {
size_t nla_size;
-#ifdef CONFIG_SYSCTL
- const char *ctl_table_path;
-#endif /* CONFIG_SYSCTL */
-
/* Init l3proto pernet data */
int (*init_net)(struct net *net);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 1a5fb36f165f..de629f1520df 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -134,14 +134,6 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
-static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
-{
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- kfree(pn->ctl_compat_table);
- pn->ctl_compat_table = NULL;
-#endif
-}
-
/* Generic netlink helpers */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple);
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 83d855ba6af1..309cd267be4f 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -2,15 +2,10 @@
#define _NF_LOG_H
#include <linux/netfilter.h>
+#include <linux/netfilter/nf_log.h>
-/* those NF_LOG_* defines and struct nf_loginfo are legacy definitios that will
- * disappear once iptables is replaced with pkttables. Please DO NOT use them
- * for any new code! */
-#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
-#define NF_LOG_TCPOPT 0x02 /* Log TCP options */
-#define NF_LOG_IPOPT 0x04 /* Log IP options */
-#define NF_LOG_UID 0x08 /* Log UID owning local socket */
-#define NF_LOG_MASK 0x0f
+/* Log TCP sequence numbers, TCP options, IP options and UID owning local socket */
+#define NF_LOG_DEFAULT_MASK 0x0f
/* This flag indicates that copy_len field in nf_loginfo is set */
#define NF_LOG_F_COPY_LEN 0x1
@@ -60,8 +55,7 @@ struct nf_logger {
int nf_log_register(u_int8_t pf, struct nf_logger *logger);
void nf_log_unregister(struct nf_logger *logger);
-void nf_log_set(struct net *net, u_int8_t pf,
- const struct nf_logger *logger);
+int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger);
void nf_log_unset(struct net *net, const struct nf_logger *logger);
int nf_log_bind_pf(struct net *net, u_int8_t pf,
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 0dbce55437f2..2280cfe86c56 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -11,7 +11,6 @@ struct nf_queue_entry {
struct sk_buff *skb;
unsigned int id;
- struct nf_hook_ops *elem;
struct nf_hook_state state;
u16 size; /* sizeof(entry) + saved route keys */
@@ -22,10 +21,10 @@ struct nf_queue_entry {
/* Packet queuing */
struct nf_queue_handler {
- int (*outfn)(struct nf_queue_entry *entry,
- unsigned int queuenum);
- void (*nf_hook_drop)(struct net *net,
- struct nf_hook_ops *ops);
+ int (*outfn)(struct nf_queue_entry *entry,
+ unsigned int queuenum);
+ void (*nf_hook_drop)(struct net *net,
+ const struct nf_hook_entry *hooks);
};
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
@@ -41,23 +40,19 @@ static inline void init_hashrandom(u32 *jhash_initval)
*jhash_initval = prandom_u32();
}
-static inline u32 hash_v4(const struct sk_buff *skb, u32 jhash_initval)
+static inline u32 hash_v4(const struct iphdr *iph, u32 initval)
{
- const struct iphdr *iph = ip_hdr(skb);
-
/* packets in either direction go into same queue */
if ((__force u32)iph->saddr < (__force u32)iph->daddr)
return jhash_3words((__force u32)iph->saddr,
- (__force u32)iph->daddr, iph->protocol, jhash_initval);
+ (__force u32)iph->daddr, iph->protocol, initval);
return jhash_3words((__force u32)iph->daddr,
- (__force u32)iph->saddr, iph->protocol, jhash_initval);
+ (__force u32)iph->saddr, iph->protocol, initval);
}
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
+static inline u32 hash_v6(const struct ipv6hdr *ip6h, u32 initval)
{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
u32 a, b, c;
if ((__force u32)ip6h->saddr.s6_addr32[3] <
@@ -75,20 +70,50 @@ static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
else
c = (__force u32) ip6h->daddr.s6_addr32[1];
- return jhash_3words(a, b, c, jhash_initval);
+ return jhash_3words(a, b, c, initval);
+}
+
+static inline u32 hash_bridge(const struct sk_buff *skb, u32 initval)
+{
+ struct ipv6hdr *ip6h, _ip6h;
+ struct iphdr *iph, _iph;
+
+ switch (eth_hdr(skb)->h_proto) {
+ case htons(ETH_P_IP):
+ iph = skb_header_pointer(skb, skb_network_offset(skb),
+ sizeof(*iph), &_iph);
+ if (iph)
+ return hash_v4(iph, initval);
+ break;
+ case htons(ETH_P_IPV6):
+ ip6h = skb_header_pointer(skb, skb_network_offset(skb),
+ sizeof(*ip6h), &_ip6h);
+ if (ip6h)
+ return hash_v6(ip6h, initval);
+ break;
+ }
+
+ return 0;
}
-#endif
static inline u32
nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
- u32 jhash_initval)
+ u32 initval)
{
- if (family == NFPROTO_IPV4)
- queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32;
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
- else if (family == NFPROTO_IPV6)
- queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32;
-#endif
+ switch (family) {
+ case NFPROTO_IPV4:
+ queue += reciprocal_scale(hash_v4(ip_hdr(skb), initval),
+ queues_total);
+ break;
+ case NFPROTO_IPV6:
+ queue += reciprocal_scale(hash_v6(ipv6_hdr(skb), initval),
+ queues_total);
+ break;
+ case NFPROTO_BRIDGE:
+ queue += reciprocal_scale(hash_bridge(skb, initval),
+ queues_total);
+ break;
+ }
return queue;
}
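[Editor's note: reciprocal_scale() (from linux/kernel.h) maps a 32-bit hash uniformly onto [0, queues_total) without a division; it is equivalent to the open-coded ((u64)hash * n) >> 32 that this helper used before. For reference only:]

/* what reciprocal_scale(val, ep_ro) computes */
static inline u32 example_reciprocal_scale(u32 val, u32 ep_ro)
{
        return (u32)(((u64)val * ep_ro) >> 32);
}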
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index f2f13399ce44..5031e072567b 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -19,6 +19,7 @@ struct nft_pktinfo {
const struct net_device *out;
u8 pf;
u8 hook;
+ bool tprot_set;
u8 tprot;
/* for x_tables compatibility */
struct xt_action_param xt;
@@ -36,6 +37,23 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
pkt->pf = pkt->xt.family = state->pf;
}
+static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
+{
+ pkt->tprot_set = false;
+ pkt->tprot = 0;
+ pkt->xt.thoff = 0;
+ pkt->xt.fragoff = 0;
+}
+
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+}
+
/**
* struct nft_verdict - nf_tables verdict
*
@@ -127,6 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
}
+unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
unsigned int nft_parse_register(const struct nlattr *attr);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
@@ -251,7 +270,8 @@ struct nft_set_ops {
int (*insert)(const struct net *net,
const struct nft_set *set,
- const struct nft_set_elem *elem);
+ const struct nft_set_elem *elem,
+ struct nft_set_ext **ext);
void (*activate)(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem);
diff --git a/include/net/netfilter/nf_tables_bridge.h b/include/net/netfilter/nf_tables_bridge.h
deleted file mode 100644
index 511fb79f6dad..000000000000
--- a/include/net/netfilter/nf_tables_bridge.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _NET_NF_TABLES_BRIDGE_H
-#define _NET_NF_TABLES_BRIDGE_H
-
-int nft_bridge_iphdr_validate(struct sk_buff *skb);
-int nft_bridge_ip6hdr_validate(struct sk_buff *skb);
-
-#endif /* _NET_NF_TABLES_BRIDGE_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index a9060dd99db7..00f4f6b1b1ba 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -28,6 +28,9 @@ extern const struct nft_expr_ops nft_cmp_fast_ops;
int nft_cmp_module_init(void);
void nft_cmp_module_exit(void);
+int nft_range_module_init(void);
+void nft_range_module_exit(void);
+
int nft_lookup_module_init(void);
void nft_lookup_module_exit(void);
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index ca6ef6bf775e..968f00b82fb5 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -14,11 +14,54 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
nft_set_pktinfo(pkt, skb, state);
ip = ip_hdr(pkt->skb);
+ pkt->tprot_set = true;
pkt->tprot = ip->protocol;
pkt->xt.thoff = ip_hdrlen(pkt->skb);
pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
}
+static inline int
+__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct iphdr *iph, _iph;
+ u32 len, thoff;
+
+ iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
+ &_iph);
+ if (!iph)
+ return -1;
+
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ return -1;
+
+ len = ntohs(iph->tot_len);
+ thoff = iph->ihl * 4;
+ if (skb->len < len)
+ return -1;
+ else if (len < thoff)
+ return -1;
+
+ pkt->tprot_set = true;
+ pkt->tprot = iph->protocol;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+}
+
+static inline void
+nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0)
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+}
+
extern struct nft_af_info nft_af_ipv4;
#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 8ad39a6a5fe1..d150b5066201 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -4,7 +4,7 @@
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/ipv6.h>
-static inline int
+static inline void
nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -15,15 +15,64 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
nft_set_pktinfo(pkt, skb, state);
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
- /* If malformed, drop it */
+ if (protohdr < 0) {
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+ return;
+ }
+
+ pkt->tprot_set = true;
+ pkt->tprot = protohdr;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = frag_off;
+}
+
+static inline int
+__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ip6h, _ip6h;
+ unsigned int thoff = 0;
+ unsigned short frag_off;
+ int protohdr;
+ u32 pkt_len;
+
+ ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
+ &_ip6h);
+ if (!ip6h)
+ return -1;
+
+ if (ip6h->version != 6)
+ return -1;
+
+ pkt_len = ntohs(ip6h->payload_len);
+ if (pkt_len + sizeof(*ip6h) > skb->len)
+ return -1;
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
if (protohdr < 0)
return -1;
+ pkt->tprot_set = true;
pkt->tprot = protohdr;
pkt->xt.thoff = thoff;
pkt->xt.fragoff = frag_off;
return 0;
+#else
+ return -1;
+#endif
+}
+
+static inline void
+nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0)
+ nft_set_pktinfo_proto_unspec(pkt, skb);
}
extern struct nft_af_info nft_af_ipv6;
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 38b1a80517f0..e469e85de3f9 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -15,10 +15,6 @@ struct nf_proto_net {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *ctl_table_header;
struct ctl_table *ctl_table;
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- struct ctl_table_header *ctl_compat_header;
- struct ctl_table *ctl_compat_table;
-#endif
#endif
unsigned int users;
};
@@ -58,10 +54,6 @@ struct nf_ip_net {
struct nf_udp_net udp;
struct nf_icmp_net icmp;
struct nf_icmp_net icmpv6;
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- struct ctl_table_header *ctl_table_header;
- struct ctl_table *ctl_table;
-#endif
};
struct ct_pcpu {
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d061ffeb1e71..7adf4386ac8f 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,7 +40,6 @@ struct netns_ipv4 {
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops;
bool fib_has_custom_rules;
- struct fib_table __rcu *fib_local;
struct fib_table __rcu *fib_main;
struct fib_table __rcu *fib_default;
#endif
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 36d723579af2..58487b1cc99a 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -16,6 +16,6 @@ struct netns_nf {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
#endif
- struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+ struct nf_hook_entry __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
};
#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 24cd3949a9a4..27bb9633c69d 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -11,7 +11,7 @@
struct ctl_table_header;
struct xfrm_policy_hash {
- struct hlist_head *table;
+ struct hlist_head __rcu *table;
unsigned int hmask;
u8 dbits4;
u8 sbits4;
@@ -38,14 +38,12 @@ struct netns_xfrm {
* mode. Also, it can be used by ah/esp icmp error handler to find
* offending SA.
*/
- struct hlist_head *state_bydst;
- struct hlist_head *state_bysrc;
- struct hlist_head *state_byspi;
+ struct hlist_head __rcu *state_bydst;
+ struct hlist_head __rcu *state_bysrc;
+ struct hlist_head __rcu *state_byspi;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;
- struct hlist_head state_gc_list;
- struct work_struct state_gc_work;
struct list_head policy_all;
struct hlist_head *policy_byidx;
@@ -73,7 +71,7 @@ struct netns_xfrm {
struct dst_ops xfrm6_dst_ops;
#endif
spinlock_t xfrm_state_lock;
- rwlock_t xfrm_policy_lock;
+ spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
/* flow cache part */
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index c99508d426cc..767b03a3fe67 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -69,17 +69,19 @@ struct tcf_exts {
int police;
};
-static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police)
+static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
exts->type = 0;
exts->nr_actions = 0;
exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
GFP_KERNEL);
- WARN_ON(!exts->actions); /* TODO: propagate the error to callers */
+ if (!exts->actions)
+ return -ENOMEM;
#endif
exts->action = action;
exts->police = police;
+ return 0;
}
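[Editor's sketch: since tcf_exts_init() can now fail with -ENOMEM, classifier code is expected to check the return value instead of relying on the old WARN_ON. The attribute id below is cls_matchall's, used purely as an example.]

static int example_init_exts(struct tcf_exts *exts)
{
        int err;

        err = tcf_exts_init(exts, TCA_MATCHALL_ACT, 0);
        if (err < 0)
                return err;     /* propagate -ENOMEM to the caller */
        return 0;
}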
/**
@@ -121,7 +123,7 @@ static inline void tcf_exts_to_list(const struct tcf_exts *exts,
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
- list_add(&a->list, actions);
+ list_add_tail(&a->list, actions);
}
#endif
}
@@ -484,4 +486,20 @@ struct tc_cls_matchall_offload {
unsigned long cookie;
};
+enum tc_clsbpf_command {
+ TC_CLSBPF_ADD,
+ TC_CLSBPF_REPLACE,
+ TC_CLSBPF_DESTROY,
+ TC_CLSBPF_STATS,
+};
+
+struct tc_cls_bpf_offload {
+ enum tc_clsbpf_command command;
+ struct tcf_exts *exts;
+ struct bpf_prog *prog;
+ const char *name;
+ bool exts_integrated;
+ u32 gen_flags;
+};
+
#endif
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 7caa99b482c6..cd334c9584e9 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -90,8 +90,8 @@ int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
-void qdisc_list_add(struct Qdisc *q);
-void qdisc_list_del(struct Qdisc *q);
+void qdisc_hash_add(struct Qdisc *q);
+void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
diff --git a/include/net/pptp.h b/include/net/pptp.h
new file mode 100644
index 000000000000..92e9f1fe2628
--- /dev/null
+++ b/include/net/pptp.h
@@ -0,0 +1,23 @@
+#ifndef _NET_PPTP_H
+#define _NET_PPTP_H
+
+#define PPP_LCP_ECHOREQ 0x09
+#define PPP_LCP_ECHOREP 0x0A
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+#define MISSING_WINDOW 20
+#define WRAPPED(curseq, lastseq)\
+ ((((curseq) & 0xffffff00) == 0) &&\
+ (((lastseq) & 0xffffff00) == 0xffffff00))
+
+#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
+struct pptp_gre_header {
+ struct gre_base_hdr gre_hd;
+ __be16 payload_len;
+ __be16 call_id;
+ __be32 seq;
+ __be32 ack;
+} __packed;
+
+
+#endif
diff --git a/include/net/route.h b/include/net/route.h
index ad777d79af94..0429d47cad25 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -29,7 +29,6 @@
#include <net/flow.h>
#include <net/inet_sock.h>
#include <net/ip_fib.h>
-#include <net/l3mdev.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
@@ -285,15 +284,6 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
sport, dport, sk);
- if (!src && oif) {
- int rc;
-
- rc = l3mdev_get_saddr(net, oif, fl4);
- if (rc < 0)
- return ERR_PTR(rc);
-
- src = fl4->saddr;
- }
if (!dst || !src) {
rt = __ip_route_output_key(net, fl4);
if (IS_ERR(rt))
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 909aff2db2b3..e6aa0a249672 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -36,6 +36,14 @@ struct qdisc_size_table {
u16 data[];
};
+/* similar to sk_buff_head, but skb->prev pointer is undefined. */
+struct qdisc_skb_head {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ __u32 qlen;
+ spinlock_t lock;
+};
+
struct Qdisc {
int (*enqueue)(struct sk_buff *skb,
struct Qdisc *sch,
@@ -61,7 +69,7 @@ struct Qdisc {
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
- struct list_head list;
+ struct hlist_node hash;
u32 handle;
u32 parent;
void *u32_node;
@@ -76,7 +84,7 @@ struct Qdisc {
* For performance sake on SMP, we put highly modified fields at the end
*/
struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
- struct sk_buff_head q;
+ struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
@@ -592,7 +600,7 @@ static inline void qdisc_qstats_drop(struct Qdisc *sch)
static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
- qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
+ this_cpu_inc(sch->cpu_qstats->drops);
}
static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
@@ -600,10 +608,27 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
sch->qstats.overlimits++;
}
+static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
+{
+ qh->head = NULL;
+ qh->tail = NULL;
+ qh->qlen = 0;
+}
+
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff_head *list)
+ struct qdisc_skb_head *qh)
{
- __skb_queue_tail(list, skb);
+ struct sk_buff *last = qh->tail;
+
+ if (last) {
+ skb->next = NULL;
+ last->next = skb;
+ qh->tail = skb;
+ } else {
+ qh->tail = skb;
+ qh->head = skb;
+ }
+ qh->qlen++;
qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
@@ -614,14 +639,16 @@ static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
return __qdisc_enqueue_tail(skb, sch, &sch->q);
}
-static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
- struct sk_buff_head *list)
+static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
- struct sk_buff *skb = __skb_dequeue(list);
+ struct sk_buff *skb = qh->head;
if (likely(skb != NULL)) {
- qdisc_qstats_backlog_dec(sch, skb);
- qdisc_bstats_update(sch, skb);
+ qh->head = skb->next;
+ qh->qlen--;
+ if (qh->head == NULL)
+ qh->tail = NULL;
+ skb->next = NULL;
}
return skb;
@@ -629,7 +656,14 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
- return __qdisc_dequeue_head(sch, &sch->q);
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+
+ if (likely(skb != NULL)) {
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ }
+
+ return skb;
}
/* Instead of calling kfree_skb() while root qdisc lock is held,
@@ -642,10 +676,10 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
}
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
- struct sk_buff_head *list,
+ struct qdisc_skb_head *qh,
struct sk_buff **to_free)
{
- struct sk_buff *skb = __skb_dequeue(list);
+ struct sk_buff *skb = __qdisc_dequeue_head(qh);
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
@@ -666,7 +700,9 @@ static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
- return skb_peek(&sch->q);
+ const struct qdisc_skb_head *qh = &sch->q;
+
+ return qh->head;
}
/* generic pseudo peek method for non-work-conserving qdisc */
@@ -701,15 +737,19 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
return skb;
}
-static inline void __qdisc_reset_queue(struct sk_buff_head *list)
+static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
/*
* We do not know the backlog in bytes of this list, it
* is up to the caller to correct it
*/
- if (!skb_queue_empty(list)) {
- rtnl_kfree_skbs(list->next, list->prev);
- __skb_queue_head_init(list);
+ ASSERT_RTNL();
+ if (qh->qlen) {
+ rtnl_kfree_skbs(qh->head, qh->tail);
+
+ qh->head = NULL;
+ qh->tail = NULL;
+ qh->qlen = 0;
}
}
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 632e205ca54b..87a7f42e7639 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -83,9 +83,9 @@
#endif
/* Round an int up to the next multiple of 4. */
-#define WORD_ROUND(s) (((s)+3)&~3)
+#define SCTP_PAD4(s) (((s)+3)&~3)
/* Truncate to the previous multiple of 4. */
-#define WORD_TRUNC(s) ((s)&~3)
+#define SCTP_TRUNC4(s) ((s)&~3)
/*
* Function declarations.
@@ -433,7 +433,7 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
- frag = WORD_TRUNC(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
+ frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
return frag;
}
@@ -462,7 +462,7 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
for (pos.v = chunk->member;\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
- pos.v += WORD_ROUND(ntohs(pos.p->length)))
+ pos.v += SCTP_PAD4(ntohs(pos.p->length)))
#define sctp_walk_errors(err, chunk_hdr)\
_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
@@ -472,7 +472,7 @@ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(sctp_chunkhdr_t));\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
- err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
+ err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
#define sctp_walk_fwdtsn(pos, chunk)\
_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index bafe2a0ab908..ca6c971dd74a 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -307,85 +307,27 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
}
/* Compare two TSNs */
+#define TSN_lt(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) < 0))
-/* RFC 1982 - Serial Number Arithmetic
- *
- * 2. Comparison
- * Then, s1 is said to be equal to s2 if and only if i1 is equal to i2,
- * in all other cases, s1 is not equal to s2.
- *
- * s1 is said to be less than s2 if, and only if, s1 is not equal to s2,
- * and
- *
- * (i1 < i2 and i2 - i1 < 2^(SERIAL_BITS - 1)) or
- * (i1 > i2 and i1 - i2 > 2^(SERIAL_BITS - 1))
- *
- * s1 is said to be greater than s2 if, and only if, s1 is not equal to
- * s2, and
- *
- * (i1 < i2 and i2 - i1 > 2^(SERIAL_BITS - 1)) or
- * (i1 > i2 and i1 - i2 < 2^(SERIAL_BITS - 1))
- */
-
-/*
- * RFC 2960
- * 1.6 Serial Number Arithmetic
- *
- * Comparisons and arithmetic on TSNs in this document SHOULD use Serial
- * Number Arithmetic as defined in [RFC1982] where SERIAL_BITS = 32.
- */
-
-enum {
- TSN_SIGN_BIT = (1<<31)
-};
-
-static inline int TSN_lt(__u32 s, __u32 t)
-{
- return ((s) - (t)) & TSN_SIGN_BIT;
-}
-
-static inline int TSN_lte(__u32 s, __u32 t)
-{
- return ((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT);
-}
+#define TSN_lte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) <= 0))
/* Compare two SSNs */
-
-/*
- * RFC 2960
- * 1.6 Serial Number Arithmetic
- *
- * Comparisons and arithmetic on Stream Sequence Numbers in this document
- * SHOULD use Serial Number Arithmetic as defined in [RFC1982] where
- * SERIAL_BITS = 16.
- */
-enum {
- SSN_SIGN_BIT = (1<<15)
-};
-
-static inline int SSN_lt(__u16 s, __u16 t)
-{
- return ((s) - (t)) & SSN_SIGN_BIT;
-}
-
-static inline int SSN_lte(__u16 s, __u16 t)
-{
- return ((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT);
-}
-
-/*
- * ADDIP 3.1.1
- * The valid range of Serial Number is from 0 to 4294967295 (2**32 - 1). Serial
- * Numbers wrap back to 0 after reaching 4294967295.
- */
-enum {
- ADDIP_SERIAL_SIGN_BIT = (1<<31)
-};
-
-static inline int ADDIP_SERIAL_gte(__u32 s, __u32 t)
-{
- return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT);
-}
+#define SSN_lt(a,b) \
+ (typecheck(__u16, a) && \
+ typecheck(__u16, b) && \
+ ((__s16)((a) - (b)) < 0))
+
+/* ADDIP 3.1.1 */
+#define ADDIP_SERIAL_gte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((b) - (a)) <= 0))
/* Check VTAG of the packet matches the sender's own tag. */
static inline int
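[Editor's illustration of the wraparound behaviour the new macros rely on: the signed interpretation of the 32-bit (or 16-bit) difference preserves RFC 1982 serial-number ordering across a wrap.]

static void example_tsn_compare(void)
{
        __u32 a = 0xfffffffe, b = 0x00000001;

        /* 'a' precedes 'b' even though it is numerically larger,
         * because (__s32)(a - b) == -3 < 0
         */
        WARN_ON(!TSN_lt(a, b));
        WARN_ON(TSN_lte(b, a));
}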
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index ced0df374e60..11c3bf262a85 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -537,6 +537,7 @@ struct sctp_datamsg {
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
struct sctp_sndrcvinfo *,
struct iov_iter *);
+void sctp_datamsg_free(struct sctp_datamsg *);
void sctp_datamsg_put(struct sctp_datamsg *);
void sctp_chunk_fail(struct sctp_chunk *, int error);
int sctp_chunk_abandoned(struct sctp_chunk *);
@@ -1069,7 +1070,7 @@ struct sctp_outq {
void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
void sctp_outq_teardown(struct sctp_outq *);
void sctp_outq_free(struct sctp_outq*);
-int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
+void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
int sctp_outq_is_empty(const struct sctp_outq *);
void sctp_outq_restart(struct sctp_outq *);
@@ -1077,7 +1078,7 @@ void sctp_outq_restart(struct sctp_outq *);
void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
sctp_retransmit_reason_t);
void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
-int sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
+void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
void sctp_prsctp_prune(struct sctp_association *asoc,
struct sctp_sndrcvinfo *sinfo, int msg_len);
/* Uncork and flush an outqueue. */
diff --git a/include/net/sock.h b/include/net/sock.h
index 8741988e6880..ebf75db08e06 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1020,7 +1020,6 @@ struct proto {
void (*unhash)(struct sock *sk);
void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
- void (*clear_sk)(struct sock *sk, int size);
/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
@@ -1114,6 +1113,16 @@ static inline bool sk_stream_is_writeable(const struct sock *sk)
sk_stream_memory_free(sk);
}
+static inline int sk_under_cgroup_hierarchy(struct sock *sk,
+ struct cgroup *ancestor)
+{
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
+ ancestor);
+#else
+ return -ENOTSUPP;
+#endif
+}
static inline bool sk_has_memory_pressure(const struct sock *sk)
{
@@ -1232,8 +1241,6 @@ static inline int __sk_prot_rehash(struct sock *sk)
return sk->sk_prot->hash(sk);
}
-void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
-
/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)
diff --git a/include/net/strparser.h b/include/net/strparser.h
new file mode 100644
index 000000000000..0c28ad97c52f
--- /dev/null
+++ b/include/net/strparser.h
@@ -0,0 +1,142 @@
+/*
+ * Stream Parser
+ *
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __NET_STRPARSER_H_
+#define __NET_STRPARSER_H_
+
+#include <linux/skbuff.h>
+#include <net/sock.h>
+
+#define STRP_STATS_ADD(stat, count) ((stat) += (count))
+#define STRP_STATS_INCR(stat) ((stat)++)
+
+struct strp_stats {
+ unsigned long long rx_msgs;
+ unsigned long long rx_bytes;
+ unsigned int rx_mem_fail;
+ unsigned int rx_need_more_hdr;
+ unsigned int rx_msg_too_big;
+ unsigned int rx_msg_timeouts;
+ unsigned int rx_bad_hdr_len;
+};
+
+struct strp_aggr_stats {
+ unsigned long long rx_msgs;
+ unsigned long long rx_bytes;
+ unsigned int rx_mem_fail;
+ unsigned int rx_need_more_hdr;
+ unsigned int rx_msg_too_big;
+ unsigned int rx_msg_timeouts;
+ unsigned int rx_bad_hdr_len;
+ unsigned int rx_aborts;
+ unsigned int rx_interrupted;
+ unsigned int rx_unrecov_intr;
+};
+
+struct strparser;
+
+/* Callbacks are called with lock held for the attached socket */
+struct strp_callbacks {
+ int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
+ void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
+ int (*read_sock_done)(struct strparser *strp, int err);
+ void (*abort_parser)(struct strparser *strp, int err);
+};
+
+struct strp_rx_msg {
+ int full_len;
+ int offset;
+};
+
+static inline struct strp_rx_msg *strp_rx_msg(struct sk_buff *skb)
+{
+ return (struct strp_rx_msg *)((void *)skb->cb +
+ offsetof(struct qdisc_skb_cb, data));
+}
+
+/* Structure for an attached lower socket */
+struct strparser {
+ struct sock *sk;
+
+ u32 rx_stopped : 1;
+ u32 rx_paused : 1;
+ u32 rx_aborted : 1;
+ u32 rx_interrupted : 1;
+ u32 rx_unrecov_intr : 1;
+
+ struct sk_buff **rx_skb_nextp;
+ struct timer_list rx_msg_timer;
+ struct sk_buff *rx_skb_head;
+ unsigned int rx_need_bytes;
+ struct delayed_work rx_delayed_work;
+ struct work_struct rx_work;
+ struct strp_stats stats;
+ struct strp_callbacks cb;
+};
+
+/* Must be called with lock held for attached socket */
+static inline void strp_pause(struct strparser *strp)
+{
+ strp->rx_paused = 1;
+}
+
+/* May be called without holding lock for attached socket */
+void strp_unpause(struct strparser *strp);
+
+static inline void save_strp_stats(struct strparser *strp,
+ struct strp_aggr_stats *agg_stats)
+{
+ /* Save parser statistics into the aggregate stats when the parser is detached. */
+
+#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += \
+ strp->stats._stat)
+ SAVE_PSOCK_STATS(rx_msgs);
+ SAVE_PSOCK_STATS(rx_bytes);
+ SAVE_PSOCK_STATS(rx_mem_fail);
+ SAVE_PSOCK_STATS(rx_need_more_hdr);
+ SAVE_PSOCK_STATS(rx_msg_too_big);
+ SAVE_PSOCK_STATS(rx_msg_timeouts);
+ SAVE_PSOCK_STATS(rx_bad_hdr_len);
+#undef SAVE_PSOCK_STATS
+
+ if (strp->rx_aborted)
+ agg_stats->rx_aborts++;
+ if (strp->rx_interrupted)
+ agg_stats->rx_interrupted++;
+ if (strp->rx_unrecov_intr)
+ agg_stats->rx_unrecov_intr++;
+}
+
+static inline void aggregate_strp_stats(struct strp_aggr_stats *stats,
+ struct strp_aggr_stats *agg_stats)
+{
+#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
+ SAVE_PSOCK_STATS(rx_msgs);
+ SAVE_PSOCK_STATS(rx_bytes);
+ SAVE_PSOCK_STATS(rx_mem_fail);
+ SAVE_PSOCK_STATS(rx_need_more_hdr);
+ SAVE_PSOCK_STATS(rx_msg_too_big);
+ SAVE_PSOCK_STATS(rx_msg_timeouts);
+ SAVE_PSOCK_STATS(rx_bad_hdr_len);
+ SAVE_PSOCK_STATS(rx_aborts);
+ SAVE_PSOCK_STATS(rx_interrupted);
+ SAVE_PSOCK_STATS(rx_unrecov_intr);
+#undef SAVE_PSOCK_STATS
+
+}
+
+void strp_done(struct strparser *strp);
+void strp_stop(struct strparser *strp);
+void strp_check_rcv(struct strparser *strp);
+int strp_init(struct strparser *strp, struct sock *csk,
+ struct strp_callbacks *cb);
+void strp_data_ready(struct strparser *strp);
+
+#endif /* __NET_STRPARSER_H_ */
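[Editor's sketch of the intended usage of the new stream parser API, hedged: the expected parse_msg() contract (return the full message length once it can be determined, 0 if more data is needed, negative on error) comes from the strparser documentation rather than this header, and the 4-byte big-endian length prefix is a hypothetical message format.]

static int example_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
        struct strp_rx_msg *rxm = strp_rx_msg(skb);
        __be32 len_field;

        /* need at least the 4-byte length prefix before the message can be sized */
        if (skb_copy_bits(skb, rxm->offset, &len_field, sizeof(len_field)))
                return 0;       /* not enough data yet */

        return sizeof(len_field) + ntohl(len_field);    /* full message length */
}

static void example_rcv_msg(struct strparser *strp, struct sk_buff *skb)
{
        consume_skb(skb);       /* a real user would queue or process the message */
}

static int example_attach(struct strparser *strp, struct sock *csk)
{
        struct strp_callbacks cb = {
                .parse_msg = example_parse_msg,
                .rcv_msg   = example_rcv_msg,
        };
        int err;

        err = strp_init(strp, csk, &cb);
        if (err)
                return err;
        strp_check_rcv(strp);   /* kick parsing of any already-queued data */
        return 0;
}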
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 62f6a967a1b7..eba80c4fc56f 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -68,7 +68,6 @@ struct switchdev_attr {
enum switchdev_obj_id {
SWITCHDEV_OBJ_ID_UNDEFINED,
SWITCHDEV_OBJ_ID_PORT_VLAN,
- SWITCHDEV_OBJ_ID_IPV4_FIB,
SWITCHDEV_OBJ_ID_PORT_FDB,
SWITCHDEV_OBJ_ID_PORT_MDB,
};
@@ -92,21 +91,6 @@ struct switchdev_obj_port_vlan {
#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
container_of(obj, struct switchdev_obj_port_vlan, obj)
-/* SWITCHDEV_OBJ_ID_IPV4_FIB */
-struct switchdev_obj_ipv4_fib {
- struct switchdev_obj obj;
- u32 dst;
- int dst_len;
- struct fib_info *fi;
- u8 tos;
- u8 type;
- u32 nlflags;
- u32 tb_id;
-};
-
-#define SWITCHDEV_OBJ_IPV4_FIB(obj) \
- container_of(obj, struct switchdev_obj_ipv4_fib, obj)
-
/* SWITCHDEV_OBJ_ID_PORT_FDB */
struct switchdev_obj_port_fdb {
struct switchdev_obj obj;
@@ -209,11 +193,6 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags);
int switchdev_port_bridge_dellink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags);
-int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 nlflags, u32 tb_id);
-int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id);
-void switchdev_fib_ipv4_abort(struct fib_info *fi);
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
u16 vid, u16 nlm_flags);
@@ -222,7 +201,7 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
u16 vid);
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
- struct net_device *filter_dev, int idx);
+ struct net_device *filter_dev, int *idx);
void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
@@ -304,25 +283,6 @@ static inline int switchdev_port_bridge_dellink(struct net_device *dev,
return -EOPNOTSUPP;
}
-static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type,
- u32 nlflags, u32 tb_id)
-{
- return 0;
-}
-
-static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id)
-{
- return 0;
-}
-
-static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
-{
-}
-
static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
@@ -342,15 +302,9 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
- int idx)
-{
- return idx;
-}
-
-static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
- struct net_device *group_dev,
- bool joining)
+ int *idx)
{
+ return *idx;
}
static inline bool switchdev_port_same_parent_id(struct net_device *a,
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index 5164bd7a38fb..9fd2bea0a6e0 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -50,9 +50,11 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
+int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi);
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
int ife_validate_meta_u32(void *val, int len);
int ife_validate_meta_u16(void *val, int len);
+int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi);
void ife_release_meta_gen(struct tcf_meta_info *mi);
int register_ife_op(struct tcf_meta_ops *mops);
int unregister_ife_op(struct tcf_meta_ops *mops);
diff --git a/include/net/tc_act/tc_skbmod.h b/include/net/tc_act/tc_skbmod.h
new file mode 100644
index 000000000000..644a2116b47b
--- /dev/null
+++ b/include/net/tc_act/tc_skbmod.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Jamal Hadi Salim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#ifndef __NET_TC_SKBMOD_H
+#define __NET_TC_SKBMOD_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_skbmod.h>
+
+struct tcf_skbmod_params {
+ struct rcu_head rcu;
+ u64 flags; /*up to 64 types of operations; extend if needed */
+ u8 eth_dst[ETH_ALEN];
+ u16 eth_type;
+ u8 eth_src[ETH_ALEN];
+};
+
+struct tcf_skbmod {
+ struct tc_action common;
+ struct tcf_skbmod_params __rcu *skbmod_p;
+};
+#define to_skbmod(a) ((struct tcf_skbmod *)a)
+
+#endif /* __NET_TC_SKBMOD_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
new file mode 100644
index 000000000000..253f8da6c2a6
--- /dev/null
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __NET_TC_TUNNEL_KEY_H
+#define __NET_TC_TUNNEL_KEY_H
+
+#include <net/act_api.h>
+
+struct tcf_tunnel_key_params {
+ struct rcu_head rcu;
+ int tcft_action;
+ int action;
+ struct metadata_dst *tcft_enc_metadata;
+};
+
+struct tcf_tunnel_key {
+ struct tc_action common;
+ struct tcf_tunnel_key_params __rcu *params;
+};
+
+#define to_tunnel_key(a) ((struct tcf_tunnel_key *)a)
+
+#endif /* __NET_TC_TUNNEL_KEY_H */
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index e29f52e8bdf1..48cca321ee6c 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -11,6 +11,7 @@
#define __NET_TC_VLAN_H
#include <net/act_api.h>
+#include <linux/tc_act/tc_vlan.h>
#define VLAN_F_POP 0x1
#define VLAN_F_PUSH 0x2
@@ -20,7 +21,32 @@ struct tcf_vlan {
int tcfv_action;
u16 tcfv_push_vid;
__be16 tcfv_push_proto;
+ u8 tcfv_push_prio;
};
#define to_vlan(a) ((struct tcf_vlan *)a)
+static inline bool is_tcf_vlan(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->type == TCA_ACT_VLAN)
+ return true;
+#endif
+ return false;
+}
+
+static inline u32 tcf_vlan_action(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_action;
+}
+
+static inline u16 tcf_vlan_push_vid(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_vid;
+}
+
+static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_proto;
+}
+
#endif /* __NET_TC_VLAN_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7717302cab91..f83b7f220a65 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -227,10 +227,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TFO_SERVER_COOKIE_NOT_REQD 0x200
/* Force enable TFO on all listeners, i.e., not requiring the
- * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
+ * TCP_FASTOPEN socket option.
*/
#define TFO_SERVER_WO_SOCKOPT1 0x400
-#define TFO_SERVER_WO_SOCKOPT2 0x800
extern struct inet_timewait_death_row tcp_death_row;
@@ -534,6 +533,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
+u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
+ int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
bool tcp_may_send_now(struct sock *sk);
@@ -604,8 +605,6 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
-typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
- unsigned int, size_t);
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
@@ -643,7 +642,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (skb_queue_empty(&tp->out_of_order_queue) &&
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
tp->rcv_wnd &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
!tp->urg_data)
@@ -674,7 +673,7 @@ static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
- return tp->rtt_min[0].rtt;
+ return minmax_get(&tp->rtt_min);
}
/* Compute the actual receive window we are currently advertising.
@@ -766,8 +765,16 @@ struct tcp_skb_cb {
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
- /* There is space for up to 20 bytes */
- __u32 in_flight;/* Bytes in flight when packet sent */
+ /* There is space for up to 24 bytes */
+ __u32 in_flight:30,/* Bytes in flight at transmit */
+ is_app_limited:1, /* cwnd not fully used? */
+ unused:1;
+ /* pkts S/ACKed so far upon tx of skb, incl retrans: */
+ __u32 delivered;
+ /* start of send pipeline phase */
+ struct skb_mstamp first_tx_mstamp;
+ /* when we reached the "delivered" count */
+ struct skb_mstamp delivered_mstamp;
} tx; /* only used for outgoing skbs */
union {
struct inet_skb_parm h4;
@@ -863,6 +870,27 @@ struct ack_sample {
u32 in_flight;
};
+/* A rate sample measures the number of (original/retransmitted) data
+ * packets delivered "delivered" over an interval of time "interval_us".
+ * The tcp_rate.c code fills in the rate sample, and congestion
+ * control modules that define a cong_control function to run at the end
+ * of ACK processing can optionally chose to consult this sample when
+ * setting cwnd and pacing rate.
+ * A sample is invalid if "delivered" or "interval_us" is negative.
+ */
+struct rate_sample {
+ struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
+ u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
+ s32 delivered; /* number of packets delivered over interval */
+ long interval_us; /* time for tp->delivered to incr "delivered" */
+ long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
+ int losses; /* number of packets marked lost upon ACK */
+ u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
+ u32 prior_in_flight; /* in flight before this ACK */
+ bool is_app_limited; /* is sample from packet with bubble in pipe? */
+ bool is_retrans; /* is sample from retransmission? */
+};
+
struct tcp_congestion_ops {
struct list_head list;
u32 key;
@@ -887,6 +915,14 @@ struct tcp_congestion_ops {
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
+ /* suggest number of segments for each skb to transmit (optional) */
+ u32 (*tso_segs_goal)(struct sock *sk);
+ /* returns the multiplier used in tcp_sndbuf_expand (optional) */
+ u32 (*sndbuf_expand)(struct sock *sk);
+ /* call when packets are delivered to update cwnd and pacing rate,
+ * after all the ca_state processing. (optional)
+ */
+ void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
/* get info for inet_diag (optional) */
size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info);
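[Editor's sketch: a congestion control module that opts into the rate-sample interface provides the new cong_control() hook instead of the classic cwnd handling. This is a toy additive-increase policy for illustration only, not BBR and not anything defined by this patch.]

static void example_cong_control(struct sock *sk, const struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (rs->delivered < 0 || rs->interval_us <= 0)
                return;         /* invalid sample, nothing to do */

        /* crude additive increase by the packets newly (S)ACKed in this sample */
        tp->snd_cwnd = min(tp->snd_cwnd + rs->acked_sacked,
                           tp->snd_cwnd_clamp);
}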
@@ -949,6 +985,14 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
icsk->icsk_ca_ops->cwnd_event(sk, event);
}
+/* From tcp_rate.c */
+void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
+void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+ struct rate_sample *rs);
+void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
+ struct skb_mstamp *now, struct rate_sample *rs);
+void tcp_rate_check_app_limited(struct sock *sk);
+
/* These functions determine how the current flow behaves in respect of SACK
* handling. SACK is negotiated with the peer, and therefore it can vary
* between different flows.
@@ -1164,6 +1208,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
}
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
#undef STATE_TRACE
@@ -1853,6 +1898,8 @@ static inline int tcp_inq(struct sock *sk)
return answ;
}
+int tcp_peek_len(struct socket *sock);
+
static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
u16 segs_in;
diff --git a/include/net/udp.h b/include/net/udp.h
index 8894d7144189..ea53a87d880f 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -251,6 +251,7 @@ int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));
void udp_err(struct sk_buff *, u32);
+int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index b96d0360c095..0255613a54a4 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -350,24 +350,6 @@ static inline __be32 vxlan_vni_field(__be32 vni)
#endif
}
-static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id)
-{
-#if defined(__BIG_ENDIAN)
- return (__force __be32)tun_id;
-#else
- return (__force __be32)((__force u64)tun_id >> 32);
-#endif
-}
-
-static inline __be64 vxlan_vni_to_tun_id(__be32 vni)
-{
-#if defined(__BIG_ENDIAN)
- return (__force __be64)vni;
-#else
- return (__force __be64)((u64)(__force u32)vni << 32);
-#endif
-}
-
static inline size_t vxlan_rco_start(__be32 vni_field)
{
return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 17934312eecb..31947b9c21d6 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -187,7 +187,7 @@ struct xfrm_state {
struct xfrm_replay_state_esn *preplay_esn;
/* The functions for replay detection. */
- struct xfrm_replay *repl;
+ const struct xfrm_replay *repl;
/* internal flag that only holds state for delayed aevent at the
* moment
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index b2017440b765..703a64b4681a 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -24,6 +24,7 @@ typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
*/
struct rxrpc_wire_header {
__be32 epoch; /* client boot timestamp */
+#define RXRPC_RANDOM_EPOCH 0x80000000 /* Random if set, date-based if not */
__be32 cid; /* connection and channel ID */
#define RXRPC_MAXCALLS 4 /* max active calls per conn */
@@ -33,8 +34,6 @@ struct rxrpc_wire_header {
#define RXRPC_CID_INC (1 << RXRPC_CIDSHIFT) /* connection ID increment */
__be32 callNumber; /* call ID (0 for connection-level packets) */
-#define RXRPC_PROCESS_MAXCALLS (1<<2) /* maximum number of active calls per conn (power of 2) */
-
__be32 seq; /* sequence number of pkt in call stream */
__be32 serial; /* serial number of pkt sent to network */
@@ -92,10 +91,14 @@ struct rxrpc_wire_header {
struct rxrpc_jumbo_header {
uint8_t flags; /* packet flags (as per rxrpc_header) */
uint8_t pad;
- __be16 _rsvd; /* reserved (used by kerberos security as cksum) */
+ union {
+ __be16 _rsvd; /* reserved */
+ __be16 cksum; /* kerberos security checksum */
+ };
};
#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
+#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
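For example (an illustrative helper, not part of this patch), the data of the n-th subpacket in a jumbo DATA packet sits at a fixed stride, since each non-terminal subpacket is RXRPC_JUMBO_DATALEN bytes of data followed by the 4-byte jumbo header:

	static inline unsigned int rxrpc_jumbo_data_offset(unsigned int n)
	{
		/* n * (1412 + sizeof(struct rxrpc_jumbo_header)) == n * 1416 */
		return n * RXRPC_JUMBO_SUBPKTLEN;
	}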
/*****************************************************************************/
/*
@@ -120,6 +123,7 @@ struct rxrpc_ackpacket {
#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */
#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */
#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */
+#define RXRPC_ACK__INVALID 10 /* Representation of invalid ACK reason */
uint8_t nAcks; /* number of ACKs */
#define RXRPC_MAXACKS 255
@@ -130,6 +134,13 @@ struct rxrpc_ackpacket {
} __packed;
+/* Some ACKs refer to specific packets and some are general and can be updated. */
+#define RXRPC_ACK_UPDATEABLE ((1 << RXRPC_ACK_REQUESTED) | \
+ (1 << RXRPC_ACK_PING_RESPONSE) | \
+ (1 << RXRPC_ACK_DELAY) | \
+ (1 << RXRPC_ACK_IDLE))
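The mask is meant to be tested against the reason byte of a received ACK, e.g. (a sketch with a hypothetical helper name):

	static inline bool rxrpc_ack_is_updateable(u8 reason)
	{
		/* Guard the shift: the reason code comes straight off the wire. */
		return reason < 32 && (RXRPC_ACK_UPDATEABLE & (1U << reason));
	}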
+
+
/*
* ACK packets can have a further piece of information tagged on the end
*/
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
new file mode 100644
index 000000000000..0383e5e9a0f3
--- /dev/null
+++ b/include/trace/events/rxrpc.h
@@ -0,0 +1,625 @@
+/* AF_RXRPC tracepoints
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rxrpc
+
+#if !defined(_TRACE_RXRPC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RXRPC_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rxrpc_conn,
+ TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
+ int usage, const void *where),
+
+ TP_ARGS(conn, op, usage, where),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_connection *, conn )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+ ),
+
+ TP_printk("C=%p %s u=%d sp=%pSR",
+ __entry->conn,
+ rxrpc_conn_traces[__entry->op],
+ __entry->usage,
+ __entry->where)
+ );
+
+TRACE_EVENT(rxrpc_client,
+ TP_PROTO(struct rxrpc_connection *conn, int channel,
+ enum rxrpc_client_trace op),
+
+ TP_ARGS(conn, channel, op),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_connection *, conn )
+ __field(u32, cid )
+ __field(int, channel )
+ __field(int, usage )
+ __field(enum rxrpc_client_trace, op )
+ __field(enum rxrpc_conn_cache_state, cs )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn;
+ __entry->channel = channel;
+ __entry->usage = atomic_read(&conn->usage);
+ __entry->op = op;
+ __entry->cid = conn->proto.cid;
+ __entry->cs = conn->cache_state;
+ ),
+
+ TP_printk("C=%p h=%2d %s %s i=%08x u=%d",
+ __entry->conn,
+ __entry->channel,
+ rxrpc_client_traces[__entry->op],
+ rxrpc_conn_cache_states[__entry->cs],
+ __entry->cid,
+ __entry->usage)
+ );
+
+TRACE_EVENT(rxrpc_call,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
+ int usage, const void *where, const void *aux),
+
+ TP_ARGS(call, op, usage, where, aux),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
+ __field(const void *, aux )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+ __entry->aux = aux;
+ ),
+
+ TP_printk("c=%p %s u=%d sp=%pSR a=%p",
+ __entry->call,
+ rxrpc_call_traces[__entry->op],
+ __entry->usage,
+ __entry->where,
+ __entry->aux)
+ );
+
+TRACE_EVENT(rxrpc_skb,
+ TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
+ int usage, int mod_count, const void *where),
+
+ TP_ARGS(skb, op, usage, mod_count, where),
+
+ TP_STRUCT__entry(
+ __field(struct sk_buff *, skb )
+ __field(enum rxrpc_skb_trace, op )
+ __field(int, usage )
+ __field(int, mod_count )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->skb = skb;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->mod_count = mod_count;
+ __entry->where = where;
+ ),
+
+ TP_printk("s=%p %s u=%d m=%d p=%pSR",
+ __entry->skb,
+ rxrpc_skb_traces[__entry->op],
+ __entry->usage,
+ __entry->mod_count,
+ __entry->where)
+ );
+
+TRACE_EVENT(rxrpc_rx_packet,
+ TP_PROTO(struct rxrpc_skb_priv *sp),
+
+ TP_ARGS(sp),
+
+ TP_STRUCT__entry(
+ __field_struct(struct rxrpc_host_header, hdr )
+ ),
+
+ TP_fast_assign(
+ memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
+ ),
+
+ TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s",
+ __entry->hdr.epoch, __entry->hdr.cid,
+ __entry->hdr.callNumber, __entry->hdr.serviceId,
+ __entry->hdr.serial, __entry->hdr.seq,
+ __entry->hdr.type, __entry->hdr.flags,
+ __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+ );
+
+TRACE_EVENT(rxrpc_rx_done,
+ TP_PROTO(int result, int abort_code),
+
+ TP_ARGS(result, abort_code),
+
+ TP_STRUCT__entry(
+ __field(int, result )
+ __field(int, abort_code )
+ ),
+
+ TP_fast_assign(
+ __entry->result = result;
+ __entry->abort_code = abort_code;
+ ),
+
+ TP_printk("r=%d a=%d", __entry->result, __entry->abort_code)
+ );
+
+TRACE_EVENT(rxrpc_abort,
+ TP_PROTO(const char *why, u32 cid, u32 call_id, rxrpc_seq_t seq,
+ int abort_code, int error),
+
+ TP_ARGS(why, cid, call_id, seq, abort_code, error),
+
+ TP_STRUCT__entry(
+ __array(char, why, 4 )
+ __field(u32, cid )
+ __field(u32, call_id )
+ __field(rxrpc_seq_t, seq )
+ __field(int, abort_code )
+ __field(int, error )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->why, why, 4);
+ __entry->cid = cid;
+ __entry->call_id = call_id;
+ __entry->abort_code = abort_code;
+ __entry->error = error;
+ __entry->seq = seq;
+ ),
+
+ TP_printk("%08x:%08x s=%u a=%d e=%d %s",
+ __entry->cid, __entry->call_id, __entry->seq,
+ __entry->abort_code, __entry->error, __entry->why)
+ );
+
+TRACE_EVENT(rxrpc_transmit,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_transmit_trace why),
+
+ TP_ARGS(call, why),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_transmit_trace, why )
+ __field(rxrpc_seq_t, tx_hard_ack )
+ __field(rxrpc_seq_t, tx_top )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->tx_hard_ack = call->tx_hard_ack;
+ __entry->tx_top = call->tx_top;
+ ),
+
+ TP_printk("c=%p %s f=%08x n=%u",
+ __entry->call,
+ rxrpc_transmit_traces[__entry->why],
+ __entry->tx_hard_ack + 1,
+ __entry->tx_top - __entry->tx_hard_ack)
+ );
+
+TRACE_EVENT(rxrpc_rx_ack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t first, u8 reason, u8 n_acks),
+
+ TP_ARGS(call, first, reason, n_acks),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, first )
+ __field(u8, reason )
+ __field(u8, n_acks )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->first = first;
+ __entry->reason = reason;
+ __entry->n_acks = n_acks;
+ ),
+
+ TP_printk("c=%p %s f=%08x n=%u",
+ __entry->call,
+ rxrpc_ack_names[__entry->reason],
+ __entry->first,
+ __entry->n_acks)
+ );
+
+TRACE_EVENT(rxrpc_tx_data,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
+
+ TP_ARGS(call, seq, serial, flags, retrans, lose),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, seq )
+ __field(rxrpc_serial_t, serial )
+ __field(u8, flags )
+ __field(bool, retrans )
+ __field(bool, lose )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->seq = seq;
+ __entry->serial = serial;
+ __entry->flags = flags;
+ __entry->retrans = retrans;
+ __entry->lose = lose;
+ ),
+
+ TP_printk("c=%p DATA %08x q=%08x fl=%02x%s%s",
+ __entry->call,
+ __entry->serial,
+ __entry->seq,
+ __entry->flags,
+ __entry->retrans ? " *RETRANS*" : "",
+ __entry->lose ? " *LOSE*" : "")
+ );
+
+TRACE_EVENT(rxrpc_tx_ack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
+ u8 reason, u8 n_acks),
+
+ TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, ack_first )
+ __field(rxrpc_serial_t, ack_serial )
+ __field(u8, reason )
+ __field(u8, n_acks )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->serial = serial;
+ __entry->ack_first = ack_first;
+ __entry->ack_serial = ack_serial;
+ __entry->reason = reason;
+ __entry->n_acks = n_acks;
+ ),
+
+ TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u",
+ __entry->call,
+ __entry->serial,
+ rxrpc_ack_names[__entry->reason],
+ __entry->ack_first,
+ __entry->ack_serial,
+ __entry->n_acks)
+ );
+
+TRACE_EVENT(rxrpc_receive,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_receive_trace why,
+ rxrpc_serial_t serial, rxrpc_seq_t seq),
+
+ TP_ARGS(call, why, serial, seq),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_receive_trace, why )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, seq )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, top )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->seq = seq;
+ __entry->hard_ack = call->rx_hard_ack;
+ __entry->top = call->rx_top;
+ ),
+
+ TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x",
+ __entry->call,
+ rxrpc_receive_traces[__entry->why],
+ __entry->serial,
+ __entry->seq,
+ __entry->hard_ack,
+ __entry->top)
+ );
+
+TRACE_EVENT(rxrpc_recvmsg,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_recvmsg_trace why,
+ rxrpc_seq_t seq, unsigned int offset, unsigned int len,
+ int ret),
+
+ TP_ARGS(call, why, seq, offset, len, ret),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_recvmsg_trace, why )
+ __field(rxrpc_seq_t, seq )
+ __field(unsigned int, offset )
+ __field(unsigned int, len )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->seq = seq;
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d",
+ __entry->call,
+ rxrpc_recvmsg_traces[__entry->why],
+ __entry->seq,
+ __entry->offset,
+ __entry->len,
+ __entry->ret)
+ );
+
+TRACE_EVENT(rxrpc_rtt_tx,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
+ rxrpc_serial_t send_serial),
+
+ TP_ARGS(call, why, send_serial),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_rtt_tx_trace, why )
+ __field(rxrpc_serial_t, send_serial )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->send_serial = send_serial;
+ ),
+
+ TP_printk("c=%p %s sr=%08x",
+ __entry->call,
+ rxrpc_rtt_tx_traces[__entry->why],
+ __entry->send_serial)
+ );
+
+TRACE_EVENT(rxrpc_rtt_rx,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
+ s64 rtt, u8 nr, s64 avg),
+
+ TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_rtt_rx_trace, why )
+ __field(u8, nr )
+ __field(rxrpc_serial_t, send_serial )
+ __field(rxrpc_serial_t, resp_serial )
+ __field(s64, rtt )
+ __field(u64, avg )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->send_serial = send_serial;
+ __entry->resp_serial = resp_serial;
+ __entry->rtt = rtt;
+ __entry->nr = nr;
+ __entry->avg = avg;
+ ),
+
+ TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
+ __entry->call,
+ rxrpc_rtt_rx_traces[__entry->why],
+ __entry->send_serial,
+ __entry->resp_serial,
+ __entry->rtt,
+ __entry->nr,
+ __entry->avg)
+ );
+
+TRACE_EVENT(rxrpc_timer,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
+ ktime_t now, unsigned long now_j),
+
+ TP_ARGS(call, why, now, now_j),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_timer_trace, why )
+ __field_struct(ktime_t, now )
+ __field_struct(ktime_t, expire_at )
+ __field_struct(ktime_t, ack_at )
+ __field_struct(ktime_t, resend_at )
+ __field(unsigned long, now_j )
+ __field(unsigned long, timer )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->now = now;
+ __entry->expire_at = call->expire_at;
+ __entry->ack_at = call->ack_at;
+ __entry->resend_at = call->resend_at;
+ __entry->now_j = now_j;
+ __entry->timer = call->timer.expires;
+ ),
+
+ TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+ __entry->call,
+ rxrpc_timer_traces[__entry->why],
+ ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
+ ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
+ ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
+ __entry->timer - __entry->now_j)
+ );
+
+TRACE_EVENT(rxrpc_rx_lose,
+ TP_PROTO(struct rxrpc_skb_priv *sp),
+
+ TP_ARGS(sp),
+
+ TP_STRUCT__entry(
+ __field_struct(struct rxrpc_host_header, hdr )
+ ),
+
+ TP_fast_assign(
+ memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
+ ),
+
+ TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s *LOSE*",
+ __entry->hdr.epoch, __entry->hdr.cid,
+ __entry->hdr.callNumber, __entry->hdr.serviceId,
+ __entry->hdr.serial, __entry->hdr.seq,
+ __entry->hdr.type, __entry->hdr.flags,
+ __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+ );
+
+TRACE_EVENT(rxrpc_propose_ack,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
+ u8 ack_reason, rxrpc_serial_t serial, bool immediate,
+ bool background, enum rxrpc_propose_ack_outcome outcome),
+
+ TP_ARGS(call, why, ack_reason, serial, immediate, background,
+ outcome),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_propose_ack_trace, why )
+ __field(rxrpc_serial_t, serial )
+ __field(u8, ack_reason )
+ __field(bool, immediate )
+ __field(bool, background )
+ __field(enum rxrpc_propose_ack_outcome, outcome )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->ack_reason = ack_reason;
+ __entry->immediate = immediate;
+ __entry->background = background;
+ __entry->outcome = outcome;
+ ),
+
+ TP_printk("c=%p %s %s r=%08x i=%u b=%u%s",
+ __entry->call,
+ rxrpc_propose_ack_traces[__entry->why],
+ rxrpc_ack_names[__entry->ack_reason],
+ __entry->serial,
+ __entry->immediate,
+ __entry->background,
+ rxrpc_propose_ack_outcomes[__entry->outcome])
+ );
+
+TRACE_EVENT(rxrpc_retransmit,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, u8 annotation,
+ s64 expiry),
+
+ TP_ARGS(call, seq, annotation, expiry),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, seq )
+ __field(u8, annotation )
+ __field(s64, expiry )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->seq = seq;
+ __entry->annotation = annotation;
+ __entry->expiry = expiry;
+ ),
+
+ TP_printk("c=%p q=%x a=%02x xp=%lld",
+ __entry->call,
+ __entry->seq,
+ __entry->annotation,
+ __entry->expiry)
+ );
+
+TRACE_EVENT(rxrpc_congest,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
+ rxrpc_serial_t ack_serial, enum rxrpc_congest_change change),
+
+ TP_ARGS(call, summary, ack_serial, change),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_congest_change, change )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, top )
+ __field(rxrpc_seq_t, lowest_nak )
+ __field(rxrpc_serial_t, ack_serial )
+ __field_struct(struct rxrpc_ack_summary, sum )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->change = change;
+ __entry->hard_ack = call->tx_hard_ack;
+ __entry->top = call->tx_top;
+ __entry->lowest_nak = call->acks_lowest_nak;
+ __entry->ack_serial = ack_serial;
+ memcpy(&__entry->sum, summary, sizeof(__entry->sum));
+ ),
+
+ TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ __entry->call,
+ __entry->ack_serial,
+ rxrpc_ack_names[__entry->sum.ack_reason],
+ __entry->hard_ack,
+ rxrpc_congest_modes[__entry->sum.mode],
+ __entry->sum.cwnd,
+ __entry->sum.ssthresh,
+ __entry->sum.nr_acks, __entry->sum.nr_nacks,
+ __entry->sum.nr_new_acks, __entry->sum.nr_new_nacks,
+ __entry->sum.nr_rot_new_acks,
+ __entry->top - __entry->hard_ack,
+ __entry->sum.cumulative_acks,
+ __entry->sum.dup_acks,
+ __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "",
+ rxrpc_congest_changes[__entry->change],
+ __entry->sum.retrans_timeo ? " rTxTo" : "")
+ );
+
+#endif /* _TRACE_RXRPC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 185f8ea2702f..d0352a971ebd 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -71,6 +71,7 @@ header-y += binfmts.h
header-y += blkpg.h
header-y += blktrace_api.h
header-y += bpf_common.h
+header-y += bpf_perf_event.h
header-y += bpf.h
header-y += bpqether.h
header-y += bsg.h
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index 0fbf6fd4711b..734fe83ab645 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -23,6 +23,42 @@
#define BATADV_NL_MCAST_GROUP_TPMETER "tpmeter"
/**
+ * enum batadv_tt_client_flags - TT client specific flags
+ * @BATADV_TT_CLIENT_DEL: the client has to be deleted from the table
+ * @BATADV_TT_CLIENT_ROAM: the client roamed to/from another node and the new
+ * update telling its new real location has not been received/sent yet
+ * @BATADV_TT_CLIENT_WIFI: this client is connected through a wifi interface.
+ * This information is used by the "AP Isolation" feature
+ * @BATADV_TT_CLIENT_ISOLA: this client is considered "isolated". This
+ * information is used by the Extended Isolation feature
+ * @BATADV_TT_CLIENT_NOPURGE: this client should never be removed from the table
+ * @BATADV_TT_CLIENT_NEW: this client has been added to the local table but has
+ * not been announced yet
+ * @BATADV_TT_CLIENT_PENDING: this client is marked for removal but it is kept
+ * in the table for one more originator interval for consistency purposes
+ * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be part of
+ * the network but no node has announced it yet
+ *
+ * Bits from 0 to 7 are called _remote flags_ because they are sent on the wire.
+ * Bits from 8 to 15 are called _local flags_ because they are used for local
+ * computations only.
+ *
+ * Bits from 4 to 7 - a subset of remote flags - are ensured to be in sync with
+ * the other nodes in the network. To achieve this goal these flags are included
+ * in the TT CRC computation.
+ */
+enum batadv_tt_client_flags {
+ BATADV_TT_CLIENT_DEL = (1 << 0),
+ BATADV_TT_CLIENT_ROAM = (1 << 1),
+ BATADV_TT_CLIENT_WIFI = (1 << 4),
+ BATADV_TT_CLIENT_ISOLA = (1 << 5),
+ BATADV_TT_CLIENT_NOPURGE = (1 << 8),
+ BATADV_TT_CLIENT_NEW = (1 << 9),
+ BATADV_TT_CLIENT_PENDING = (1 << 10),
+ BATADV_TT_CLIENT_TEMP = (1 << 11),
+};
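To illustrate the bit ranges described above (a sketch; the mask names are made up and not part of this header), the wire-visible flags and the CRC-covered subset can be selected like this:

	#define EXAMPLE_TT_REMOTE_MASK	0x00ffu	/* bits 0-7: carried on the wire */
	#define EXAMPLE_TT_LOCAL_MASK	0xff00u	/* bits 8-15: local bookkeeping only */
	#define EXAMPLE_TT_SYNC_MASK	0x00f0u	/* bits 4-7: covered by the TT CRC */

	static inline __u16 example_tt_wire_flags(__u16 flags)
	{
		return flags & EXAMPLE_TT_REMOTE_MASK;	/* e.g. before sending flags on the wire */
	}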
+
+/**
* enum batadv_nl_attrs - batman-adv netlink attributes
*
* @BATADV_ATTR_UNSPEC: unspecified attribute to catch errors
@@ -40,6 +76,26 @@
* @BATADV_ATTR_TPMETER_BYTES: amount of acked bytes during run
* @BATADV_ATTR_TPMETER_COOKIE: session cookie to match tp_meter session
* @BATADV_ATTR_PAD: attribute used for padding for 64-bit alignment
+ * @BATADV_ATTR_ACTIVE: Flag indicating if the hard interface is active
+ * @BATADV_ATTR_TT_ADDRESS: Client MAC address
+ * @BATADV_ATTR_TT_TTVN: Translation table version
+ * @BATADV_ATTR_TT_LAST_TTVN: Previous translation table version
+ * @BATADV_ATTR_TT_CRC32: CRC32 over translation table
+ * @BATADV_ATTR_TT_VID: VLAN ID
+ * @BATADV_ATTR_TT_FLAGS: Translation table client flags
+ * @BATADV_ATTR_FLAG_BEST: Flags indicating entry is the best
+ * @BATADV_ATTR_LAST_SEEN_MSECS: Time in milliseconds since last seen
+ * @BATADV_ATTR_NEIGH_ADDRESS: Neighbour MAC address
+ * @BATADV_ATTR_TQ: TQ to neighbour
+ * @BATADV_ATTR_THROUGHPUT: Estimated throughput to Neighbour
+ * @BATADV_ATTR_BANDWIDTH_UP: Reported uplink bandwidth
+ * @BATADV_ATTR_BANDWIDTH_DOWN: Reported downlink bandwidth
+ * @BATADV_ATTR_ROUTER: Gateway router MAC address
+ * @BATADV_ATTR_BLA_OWN: Flag indicating own originator
+ * @BATADV_ATTR_BLA_ADDRESS: Bridge loop avoidance claim MAC address
+ * @BATADV_ATTR_BLA_VID: BLA VLAN ID
+ * @BATADV_ATTR_BLA_BACKBONE: BLA gateway originator MAC address
+ * @BATADV_ATTR_BLA_CRC: BLA CRC
* @__BATADV_ATTR_AFTER_LAST: internal use
* @NUM_BATADV_ATTR: total number of batadv_nl_attrs available
* @BATADV_ATTR_MAX: highest attribute number currently defined
@@ -60,6 +116,26 @@ enum batadv_nl_attrs {
BATADV_ATTR_TPMETER_BYTES,
BATADV_ATTR_TPMETER_COOKIE,
BATADV_ATTR_PAD,
+ BATADV_ATTR_ACTIVE,
+ BATADV_ATTR_TT_ADDRESS,
+ BATADV_ATTR_TT_TTVN,
+ BATADV_ATTR_TT_LAST_TTVN,
+ BATADV_ATTR_TT_CRC32,
+ BATADV_ATTR_TT_VID,
+ BATADV_ATTR_TT_FLAGS,
+ BATADV_ATTR_FLAG_BEST,
+ BATADV_ATTR_LAST_SEEN_MSECS,
+ BATADV_ATTR_NEIGH_ADDRESS,
+ BATADV_ATTR_TQ,
+ BATADV_ATTR_THROUGHPUT,
+ BATADV_ATTR_BANDWIDTH_UP,
+ BATADV_ATTR_BANDWIDTH_DOWN,
+ BATADV_ATTR_ROUTER,
+ BATADV_ATTR_BLA_OWN,
+ BATADV_ATTR_BLA_ADDRESS,
+ BATADV_ATTR_BLA_VID,
+ BATADV_ATTR_BLA_BACKBONE,
+ BATADV_ATTR_BLA_CRC,
/* add attributes above here, update the policy in netlink.c */
__BATADV_ATTR_AFTER_LAST,
NUM_BATADV_ATTR = __BATADV_ATTR_AFTER_LAST,
@@ -73,6 +149,15 @@ enum batadv_nl_attrs {
* @BATADV_CMD_GET_MESH_INFO: Query basic information about batman-adv device
* @BATADV_CMD_TP_METER: Start a tp meter session
* @BATADV_CMD_TP_METER_CANCEL: Cancel a tp meter session
+ * @BATADV_CMD_GET_ROUTING_ALGOS: Query the list of routing algorithms.
+ * @BATADV_CMD_GET_HARDIFS: Query list of hard interfaces
+ * @BATADV_CMD_GET_TRANSTABLE_LOCAL: Query list of local translations
+ * @BATADV_CMD_GET_TRANSTABLE_GLOBAL: Query list of global translations
+ * @BATADV_CMD_GET_ORIGINATORS: Query list of originators
+ * @BATADV_CMD_GET_NEIGHBORS: Query list of neighbours
+ * @BATADV_CMD_GET_GATEWAYS: Query list of gateways
+ * @BATADV_CMD_GET_BLA_CLAIM: Query list of bridge loop avoidance claims
+ * @BATADV_CMD_GET_BLA_BACKBONE: Query list of bridge loop avoidance backbones
* @__BATADV_CMD_AFTER_LAST: internal use
* @BATADV_CMD_MAX: highest used command number
*/
@@ -81,6 +166,15 @@ enum batadv_nl_commands {
BATADV_CMD_GET_MESH_INFO,
BATADV_CMD_TP_METER,
BATADV_CMD_TP_METER_CANCEL,
+ BATADV_CMD_GET_ROUTING_ALGOS,
+ BATADV_CMD_GET_HARDIFS,
+ BATADV_CMD_GET_TRANSTABLE_LOCAL,
+ BATADV_CMD_GET_TRANSTABLE_GLOBAL,
+ BATADV_CMD_GET_ORIGINATORS,
+ BATADV_CMD_GET_NEIGHBORS,
+ BATADV_CMD_GET_GATEWAYS,
+ BATADV_CMD_GET_BLA_CLAIM,
+ BATADV_CMD_GET_BLA_BACKBONE,
/* add new commands above here */
__BATADV_CMD_AFTER_LAST,
BATADV_CMD_MAX = __BATADV_CMD_AFTER_LAST - 1
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 9e5fc168c8a3..f09c70b97eca 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -95,6 +95,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SCHED_ACT,
BPF_PROG_TYPE_TRACEPOINT,
BPF_PROG_TYPE_XDP,
+ BPF_PROG_TYPE_PERF_EVENT,
};
#define BPF_PSEUDO_MAP_FD 1
@@ -375,6 +376,56 @@ enum bpf_func_id {
*/
BPF_FUNC_probe_write_user,
+ /**
+ * bpf_current_task_under_cgroup(map, index) - Check cgroup2 membership of current task
+ * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+ * @index: index of the cgroup in the bpf_map
+ * Return:
+ * == 0 current failed the cgroup2 descendant test
+ * == 1 current succeeded the cgroup2 descendant test
+ * < 0 error
+ */
+ BPF_FUNC_current_task_under_cgroup,
+
+ /**
+ * bpf_skb_change_tail(skb, len, flags)
+ * The helper will resize the skb to the given new size,
+ * to be used f.e. with control messages.
+ * @skb: pointer to skb
+ * @len: new skb length
+ * @flags: reserved
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_change_tail,
+
+ /**
+ * bpf_skb_pull_data(skb, len)
+ * The helper will pull in non-linear data in case the
+ * skb is non-linear and not all of len is part of the
+ * linear section. Only needed for read/write with direct
+ * packet access.
+ * @skb: pointer to skb
+ * @len: length of data to make readable/writable
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_pull_data,
+
+ /**
+ * bpf_csum_update(skb, csum)
+ * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+ * @skb: pointer to skb
+ * @csum: csum to add
+ * Return: csum on success or negative error
+ */
+ BPF_FUNC_csum_update,
+
+ /**
+ * bpf_set_hash_invalid(skb)
+ * Invalidate current skb->hash.
+ * @skb: pointer to skb
+ */
+ BPF_FUNC_set_hash_invalid,
+
__BPF_FUNC_MAX_ID,
};
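As a usage illustration of the new bpf_skb_pull_data() helper (a hedged sketch using the samples/bpf-style helper binding; not code from this patch), a tc classifier that needs direct packet access to data that may be non-linear would pull the bytes in first and then re-validate the packet pointers:

	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>

	/* samples/bpf-style binding of the helper to its function ID */
	static int (*bpf_skb_pull_data)(void *skb, int len) =
		(void *)BPF_FUNC_skb_pull_data;

	int cls_example(struct __sk_buff *skb)
	{
		void *data, *data_end;

		if (bpf_skb_pull_data(skb, 64))		/* linearise the first 64 bytes */
			return TC_ACT_OK;

		/* Pointers must be re-read and re-checked after any helper that
		 * may move packet data.
		 */
		data	 = (void *)(long)skb->data;
		data_end = (void *)(long)skb->data_end;
		if (data + 64 > data_end)
			return TC_ACT_OK;

		/* ... parse headers via direct packet access ... */
		return TC_ACT_OK;
	}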
diff --git a/include/uapi/linux/bpf_perf_event.h b/include/uapi/linux/bpf_perf_event.h
new file mode 100644
index 000000000000..067427259820
--- /dev/null
+++ b/include/uapi/linux/bpf_perf_event.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
+#define _UAPI__LINUX_BPF_PERF_EVENT_H__
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+struct bpf_perf_event_data {
+ struct pt_regs regs;
+ __u64 sample_period;
+};
+
+#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
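A BPF_PROG_TYPE_PERF_EVENT program receives a pointer to this structure as its context; a minimal sketch of such a program body (illustrative only, the function name is made up):

	#include <linux/bpf_perf_event.h>

	int on_perf_sample(struct bpf_perf_event_data *ctx)
	{
		struct pt_regs *regs = &ctx->regs;	/* registers at the sampled instruction */
		__u64 period = ctx->sample_period;	/* current perf sampling period */

		/* e.g. key a histogram map on an arch-specific register such as
		 * regs->ip and weight each sample by "period".
		 */
		(void)regs;
		(void)period;
		return 0;
	}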
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index b8f38e84d93a..099a4200732c 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1362,7 +1362,14 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
- ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
+ ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
+
/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
* 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1371,7 +1378,7 @@ enum ethtool_link_mode_bit_indices {
*/
__ETHTOOL_LINK_MODE_LAST
- = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};
#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index c186f64fffca..ab92bca6d448 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -140,7 +140,7 @@ struct bridge_vlan_xstats {
__u64 tx_bytes;
__u64 tx_packets;
__u16 vid;
- __u16 pad1;
+ __u16 flags;
__u32 pad2;
};
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index a1b5202c5f6b..b4fba662cd32 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -318,6 +318,7 @@ enum {
IFLA_BRPORT_FLUSH,
IFLA_BRPORT_MULTICAST_ROUTER,
IFLA_BRPORT_PAD,
+ IFLA_BRPORT_MCAST_FLOOD,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -463,6 +464,7 @@ enum {
enum ipvlan_mode {
IPVLAN_MODE_L2 = 0,
IPVLAN_MODE_L3,
+ IPVLAN_MODE_L3S,
IPVLAN_MODE_MAX
};
@@ -617,7 +619,7 @@ enum {
enum {
IFLA_VF_UNSPEC,
IFLA_VF_MAC, /* Hardware queue specific attributes */
- IFLA_VF_VLAN,
+ IFLA_VF_VLAN, /* VLAN ID and QoS */
IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */
IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
@@ -629,6 +631,7 @@ enum {
IFLA_VF_TRUST, /* Trust VF */
IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */
IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */
+ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */
__IFLA_VF_MAX,
};
@@ -645,6 +648,22 @@ struct ifla_vf_vlan {
__u32 qos;
};
+enum {
+ IFLA_VF_VLAN_INFO_UNSPEC,
+ IFLA_VF_VLAN_INFO, /* VLAN ID, QoS and VLAN protocol */
+ __IFLA_VF_VLAN_INFO_MAX,
+};
+
+#define IFLA_VF_VLAN_INFO_MAX (__IFLA_VF_VLAN_INFO_MAX - 1)
+#define MAX_VLAN_LIST_LEN 1
+
+struct ifla_vf_vlan_info {
+ __u32 vf;
+ __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
+ __u32 qos;
+ __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */
+};
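Userspace requesting an 802.1ad (QinQ) VLAN for a VF would fill this structure and nest it as an IFLA_VF_VLAN_INFO attribute inside IFLA_VF_VLAN_LIST (at most MAX_VLAN_LIST_LEN entries); a minimal sketch of the structure set-up, with the netlink plumbing omitted and the values chosen purely for illustration:

	#include <linux/if_link.h>
	#include <linux/if_ether.h>
	#include <arpa/inet.h>
	#include <string.h>

	static void fill_qinq_vlan(struct ifla_vf_vlan_info *info)
	{
		memset(info, 0, sizeof(*info));
		info->vf	 = 0;				/* VF index */
		info->vlan	 = 100;				/* VLAN ID; 0 disables filtering */
		info->qos	 = 0;
		info->vlan_proto = htons(ETH_P_8021AD);		/* ETH_P_8021Q for plain 802.1Q */
	}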
+
struct ifla_vf_tx_rate {
__u32 vf;
__u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
@@ -825,6 +844,7 @@ enum {
IFLA_STATS_LINK_64,
IFLA_STATS_LINK_XSTATS,
IFLA_STATS_LINK_XSTATS_SLAVE,
+ IFLA_STATS_LINK_OFFLOAD_XSTATS,
__IFLA_STATS_MAX,
};
@@ -844,6 +864,14 @@ enum {
};
#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
+/* These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS */
+enum {
+ IFLA_OFFLOAD_XSTATS_UNSPEC,
+ IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */
+ __IFLA_OFFLOAD_XSTATS_MAX
+};
+#define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1)
+
/* XDP section */
enum {
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 777b6cdb1b7b..92f3c8677523 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -27,9 +27,23 @@
#define GRE_SEQ __cpu_to_be16(0x1000)
#define GRE_STRICT __cpu_to_be16(0x0800)
#define GRE_REC __cpu_to_be16(0x0700)
-#define GRE_FLAGS __cpu_to_be16(0x00F8)
+#define GRE_ACK __cpu_to_be16(0x0080)
+#define GRE_FLAGS __cpu_to_be16(0x0078)
#define GRE_VERSION __cpu_to_be16(0x0007)
+#define GRE_IS_CSUM(f) ((f) & GRE_CSUM)
+#define GRE_IS_ROUTING(f) ((f) & GRE_ROUTING)
+#define GRE_IS_KEY(f) ((f) & GRE_KEY)
+#define GRE_IS_SEQ(f) ((f) & GRE_SEQ)
+#define GRE_IS_STRICT(f) ((f) & GRE_STRICT)
+#define GRE_IS_REC(f) ((f) & GRE_REC)
+#define GRE_IS_ACK(f) ((f) & GRE_ACK)
+
+#define GRE_VERSION_0 __cpu_to_be16(0x0000)
+#define GRE_VERSION_1 __cpu_to_be16(0x0001)
+#define GRE_PROTO_PPP __cpu_to_be16(0x880b)
+#define GRE_PPTP_KEY_MASK __cpu_to_be32(0xffff)
+
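With the new accessors and constants, a version-1 PPTP GRE header can be recognised without open-coding the masks; for example (an illustrative helper, not part of this patch):

	#include <linux/if_tunnel.h>
	#include <linux/types.h>

	/* "flags" is the first 16-bit word of the GRE header (flags + version) and
	 * "proto" the protocol field that follows, both still in network byte order.
	 */
	static inline int gre_is_pptp(__be16 flags, __be16 proto)
	{
		return (flags & GRE_VERSION) == GRE_VERSION_1 &&
		       proto == GRE_PROTO_PPP &&
		       GRE_IS_KEY(flags);	/* PPTP's enhanced GRE always carries a key */
	}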
struct ip_tunnel_parm {
char name[IFNAMSIZ];
int link;
@@ -60,6 +74,7 @@ enum {
IFLA_IPTUN_ENCAP_FLAGS,
IFLA_IPTUN_ENCAP_SPORT,
IFLA_IPTUN_ENCAP_DPORT,
+ IFLA_IPTUN_COLLECT_METADATA,
__IFLA_IPTUN_MAX,
};
#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index abbd1dc5d683..509cd961068d 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -73,6 +73,7 @@ enum {
INET_DIAG_BC_S_COND,
INET_DIAG_BC_D_COND,
INET_DIAG_BC_DEV_COND, /* u32 ifindex */
+ INET_DIAG_BC_MARK_COND,
};
struct inet_diag_hostcond {
@@ -82,6 +83,11 @@ struct inet_diag_hostcond {
__be32 addr[0];
};
+struct inet_diag_markcond {
+ __u32 mark;
+ __u32 mask;
+};
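The intended use is a masked comparison against the socket's fwmark; a sketch of the expected check (not the literal kernel bytecode interpreter):

	static inline int example_mark_matches(__u32 sk_mark,
					       const struct inet_diag_markcond *cond)
	{
		return (sk_mark & cond->mask) == cond->mark;
	}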
+
/* Base info structure. It contains socket identity (addrs/ports/cookie)
* and, alas, the information shown by netstat. */
struct inet_diag_msg {
@@ -117,6 +123,8 @@ enum {
INET_DIAG_LOCALS,
INET_DIAG_PEERS,
INET_DIAG_PAD,
+ INET_DIAG_MARK,
+ INET_DIAG_BBRINFO,
__INET_DIAG_MAX,
};
@@ -150,8 +158,20 @@ struct tcp_dctcp_info {
__u32 dctcp_ab_tot;
};
+/* INET_DIAG_BBRINFO */
+
+struct tcp_bbr_info {
+	/* u64 bw: max-filtered BW (app throughput) estimate in bytes per second: */
+ __u32 bbr_bw_lo; /* lower 32 bits of bw */
+ __u32 bbr_bw_hi; /* upper 32 bits of bw */
+ __u32 bbr_min_rtt; /* min-filtered RTT in uSec */
+ __u32 bbr_pacing_gain; /* pacing gain shifted left 8 bits */
+ __u32 bbr_cwnd_gain; /* cwnd gain shifted left 8 bits */
+};
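A diagnostic tool reading INET_DIAG_BBRINFO would reassemble the 64-bit bandwidth from the two halves and unscale the gains, which are fixed-point with 8 fractional bits; an illustrative userspace sketch:

	#include <linux/inet_diag.h>
	#include <stdint.h>
	#include <stdio.h>

	static void print_bbr_info(const struct tcp_bbr_info *bbr)
	{
		uint64_t bw = ((uint64_t)bbr->bbr_bw_hi << 32) | bbr->bbr_bw_lo;

		printf("bw %llu B/s min_rtt %u us pacing_gain %.2f cwnd_gain %.2f\n",
		       (unsigned long long)bw, bbr->bbr_min_rtt,
		       bbr->bbr_pacing_gain / 256.0,	/* gains are shifted left 8 bits */
		       bbr->bbr_cwnd_gain / 256.0);
	}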
+
union tcp_cc_info {
struct tcpvegas_info vegas;
struct tcp_dctcp_info dctcp;
+ struct tcp_bbr_info bbr;
};
#endif /* _UAPI_INET_DIAG_H_ */
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 395876060f50..8c2772340c3f 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -177,6 +177,7 @@ enum {
DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
DEVCONF_DROP_UNSOLICITED_NA,
DEVCONF_KEEP_ADDR_ON_DOWN,
+ DEVCONF_RTR_SOLICIT_MAX_INTERVAL,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index 237fac4bc17b..15d8510cdae0 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -48,6 +48,7 @@
#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
#define BMCR_RESET 0x8000 /* Reset to default state */
+#define BMCR_SPEED10 0x0000 /* Select 10Mbps */
/* Basic mode status register. */
#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
new file mode 100644
index 000000000000..8be21e02387d
--- /dev/null
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -0,0 +1,12 @@
+#ifndef _NETFILTER_NF_LOG_H
+#define _NETFILTER_NF_LOG_H
+
+#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
+#define NF_LOG_TCPOPT 0x02 /* Log TCP options */
+#define NF_LOG_IPOPT 0x04 /* Log IP options */
+#define NF_LOG_UID 0x08 /* Log UID owning local socket */
+#define NF_LOG_NFLOG 0x10 /* Unsupported, don't reuse */
+#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
+#define NF_LOG_MASK 0x2f
+
+#endif /* _NETFILTER_NF_LOG_H */
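Code accepting logger flags from userspace is expected to reject anything outside NF_LOG_MASK, which deliberately excludes the unsupported NF_LOG_NFLOG bit; a sketch of such a check:

	#include <linux/netfilter/nf_log.h>
	#include <errno.h>

	static inline int example_check_log_flags(unsigned int flags)
	{
		return (flags & ~NF_LOG_MASK) ? -EINVAL : 0;
	}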
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index c674ba2563b7..c6c4477c136b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -546,6 +546,35 @@ enum nft_cmp_attributes {
};
#define NFTA_CMP_MAX (__NFTA_CMP_MAX - 1)
+/**
+ * enum nft_range_ops - nf_tables range operator
+ *
+ * @NFT_RANGE_EQ: equal
+ * @NFT_RANGE_NEQ: not equal
+ */
+enum nft_range_ops {
+ NFT_RANGE_EQ,
+ NFT_RANGE_NEQ,
+};
+
+/**
+ * enum nft_range_attributes - nf_tables range expression netlink attributes
+ *
+ * @NFTA_RANGE_SREG: source register of data to compare (NLA_U32: nft_registers)
+ * @NFTA_RANGE_OP: cmp operation (NLA_U32: nft_cmp_ops)
+ * @NFTA_RANGE_FROM_DATA: data range from (NLA_NESTED: nft_data_attributes)
+ * @NFTA_RANGE_TO_DATA: data range to (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_range_attributes {
+ NFTA_RANGE_UNSPEC,
+ NFTA_RANGE_SREG,
+ NFTA_RANGE_OP,
+ NFTA_RANGE_FROM_DATA,
+ NFTA_RANGE_TO_DATA,
+ __NFTA_RANGE_MAX
+};
+#define NFTA_RANGE_MAX (__NFTA_RANGE_MAX - 1)
+
enum nft_lookup_flags {
NFT_LOOKUP_F_INV = (1 << 0),
};
@@ -575,6 +604,10 @@ enum nft_dynset_ops {
NFT_DYNSET_OP_UPDATE,
};
+enum nft_dynset_flags {
+ NFT_DYNSET_F_INV = (1 << 0),
+};
+
/**
* enum nft_dynset_attributes - dynset expression attributes
*
@@ -585,6 +618,7 @@ enum nft_dynset_ops {
* @NFTA_DYNSET_SREG_DATA: source register of the data (NLA_U32)
* @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64)
* @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes)
+ * @NFTA_DYNSET_FLAGS: flags (NLA_U32)
*/
enum nft_dynset_attributes {
NFTA_DYNSET_UNSPEC,
@@ -596,6 +630,7 @@ enum nft_dynset_attributes {
NFTA_DYNSET_TIMEOUT,
NFTA_DYNSET_EXPR,
NFTA_DYNSET_PAD,
+ NFTA_DYNSET_FLAGS,
__NFTA_DYNSET_MAX,
};
#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
@@ -724,6 +759,28 @@ enum nft_meta_keys {
};
/**
+ * enum nft_hash_attributes - nf_tables hash expression netlink attributes
+ *
+ * @NFTA_HASH_SREG: source register (NLA_U32)
+ * @NFTA_HASH_DREG: destination register (NLA_U32)
+ * @NFTA_HASH_LEN: source data length (NLA_U32)
+ * @NFTA_HASH_MODULUS: modulus value (NLA_U32)
+ * @NFTA_HASH_SEED: seed value (NLA_U32)
+ * @NFTA_HASH_OFFSET: add this offset value to hash result (NLA_U32)
+ */
+enum nft_hash_attributes {
+ NFTA_HASH_UNSPEC,
+ NFTA_HASH_SREG,
+ NFTA_HASH_DREG,
+ NFTA_HASH_LEN,
+ NFTA_HASH_MODULUS,
+ NFTA_HASH_SEED,
+ NFTA_HASH_OFFSET,
+ __NFTA_HASH_MAX,
+};
+#define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1)
+
+/**
* enum nft_meta_attributes - nf_tables meta expression netlink attributes
*
* @NFTA_META_DREG: destination register (NLA_U32)
@@ -866,12 +923,14 @@ enum nft_log_attributes {
* @NFTA_QUEUE_NUM: netlink queue to send messages to (NLA_U16)
* @NFTA_QUEUE_TOTAL: number of queues to load balance packets on (NLA_U16)
* @NFTA_QUEUE_FLAGS: various flags (NLA_U16)
+ * @NFTA_QUEUE_SREG_QNUM: source register of queue number (NLA_U32: nft_registers)
*/
enum nft_queue_attributes {
NFTA_QUEUE_UNSPEC,
NFTA_QUEUE_NUM,
NFTA_QUEUE_TOTAL,
NFTA_QUEUE_FLAGS,
+ NFTA_QUEUE_SREG_QNUM,
__NFTA_QUEUE_MAX
};
#define NFTA_QUEUE_MAX (__NFTA_QUEUE_MAX - 1)
@@ -880,6 +939,25 @@ enum nft_queue_attributes {
#define NFT_QUEUE_FLAG_CPU_FANOUT 0x02 /* use current CPU (no hashing) */
#define NFT_QUEUE_FLAG_MASK 0x03
+enum nft_quota_flags {
+ NFT_QUOTA_F_INV = (1 << 0),
+};
+
+/**
+ * enum nft_quota_attributes - nf_tables quota expression netlink attributes
+ *
+ * @NFTA_QUOTA_BYTES: quota in bytes (NLA_U16)
+ * @NFTA_QUOTA_FLAGS: flags (NLA_U32)
+ */
+enum nft_quota_attributes {
+ NFTA_QUOTA_UNSPEC,
+ NFTA_QUOTA_BYTES,
+ NFTA_QUOTA_FLAGS,
+ NFTA_QUOTA_PAD,
+ __NFTA_QUOTA_MAX
+};
+#define NFTA_QUOTA_MAX (__NFTA_QUOTA_MAX - 1)
+
/**
* enum nft_reject_types - nf_tables reject expression reject types
*
@@ -1051,7 +1129,7 @@ enum nft_gen_attributes {
* @NFTA_TRACE_NFPROTO: nf protocol processed (NLA_U32)
* @NFTA_TRACE_POLICY: policy that decided fate of packet (NLA_U32)
*/
-enum nft_trace_attibutes {
+enum nft_trace_attributes {
NFTA_TRACE_UNSPEC,
NFTA_TRACE_TABLE,
NFTA_TRACE_CHAIN,
@@ -1082,4 +1160,30 @@ enum nft_trace_types {
__NFT_TRACETYPE_MAX
};
#define NFT_TRACETYPE_MAX (__NFT_TRACETYPE_MAX - 1)
+
+/**
+ * enum nft_ng_attributes - nf_tables number generator expression netlink attributes
+ *
+ * @NFTA_NG_DREG: destination register (NLA_U32)
+ * @NFTA_NG_MODULUS: maximum counter value (NLA_U32)
+ * @NFTA_NG_TYPE: operation type (NLA_U32)
+ * @NFTA_NG_OFFSET: offset to be added to the counter (NLA_U32)
+ */
+enum nft_ng_attributes {
+ NFTA_NG_UNSPEC,
+ NFTA_NG_DREG,
+ NFTA_NG_MODULUS,
+ NFTA_NG_TYPE,
+ NFTA_NG_OFFSET,
+ __NFTA_NG_MAX
+};
+#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
+
+enum nft_ng_types {
+ NFT_NG_INCREMENTAL,
+ NFT_NG_RANDOM,
+ __NFT_NG_MAX
+};
+#define NFT_NG_MAX (__NFT_NG_MAX - 1)
+
#endif /* _LINUX_NF_TABLES_H */
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 9df789709abe..6deb8867c5fc 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -231,13 +231,13 @@ enum ctattr_secctx {
enum ctattr_stats_cpu {
CTA_STATS_UNSPEC,
- CTA_STATS_SEARCHED,
+ CTA_STATS_SEARCHED, /* no longer used */
CTA_STATS_FOUND,
- CTA_STATS_NEW,
+ CTA_STATS_NEW, /* no longer used */
CTA_STATS_INVALID,
CTA_STATS_IGNORE,
- CTA_STATS_DELETE,
- CTA_STATS_DELETE_LIST,
+ CTA_STATS_DELETE, /* no longer used */
+ CTA_STATS_DELETE_LIST, /* no longer used */
CTA_STATS_INSERT,
CTA_STATS_INSERT_FAILED,
CTA_STATS_DROP,
diff --git a/include/uapi/linux/netfilter/xt_hashlimit.h b/include/uapi/linux/netfilter/xt_hashlimit.h
index 6db90372f09c..3efc0ca18345 100644
--- a/include/uapi/linux/netfilter/xt_hashlimit.h
+++ b/include/uapi/linux/netfilter/xt_hashlimit.h
@@ -6,6 +6,7 @@
/* timings are in milliseconds. */
#define XT_HASHLIMIT_SCALE 10000
+#define XT_HASHLIMIT_SCALE_v2 1000000llu
/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
* seconds, or one packet every 59 hours.
*/
@@ -63,6 +64,20 @@ struct hashlimit_cfg1 {
__u8 srcmask, dstmask;
};
+struct hashlimit_cfg2 {
+ __u64 avg; /* Average secs between packets * scale */
+ __u64 burst; /* Period multiplier for upper limit. */
+ __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */
+
+ /* user specified */
+ __u32 size; /* how many buckets */
+ __u32 max; /* max number of entries */
+ __u32 gc_interval; /* gc interval */
+ __u32 expire; /* when do entries expire? */
+
+ __u8 srcmask, dstmask;
+};
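The v2 average is expressed in XT_HASHLIMIT_SCALE_v2 units of time between packets, so a packets-per-second limit converts as in this sketch (illustrative helper; the caller must pass a non-zero rate):

	#include <linux/netfilter/xt_hashlimit.h>

	static inline __u64 example_pps_to_avg(__u64 packets_per_sec)
	{
		/* e.g. 10000 pkt/s -> 1000000 / 10000 = 100 scale units between packets */
		return XT_HASHLIMIT_SCALE_v2 / packets_per_sec;
	}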
+
struct xt_hashlimit_mtinfo1 {
char name[IFNAMSIZ];
struct hashlimit_cfg1 cfg;
@@ -71,4 +86,12 @@ struct xt_hashlimit_mtinfo1 {
struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
};
+struct xt_hashlimit_mtinfo2 {
+ char name[NAME_MAX];
+ struct hashlimit_cfg2 cfg;
+
+ /* Used internally by the kernel */
+ struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
+};
+
#endif /* _UAPI_XT_HASHLIMIT_H */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 220694151434..56368e9b4622 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -48,6 +48,7 @@
#define NL80211_MULTICAST_GROUP_REG "regulatory"
#define NL80211_MULTICAST_GROUP_MLME "mlme"
#define NL80211_MULTICAST_GROUP_VENDOR "vendor"
+#define NL80211_MULTICAST_GROUP_NAN "nan"
#define NL80211_MULTICAST_GROUP_TESTMODE "testmode"
/**
@@ -838,6 +839,41 @@
* not running. The driver indicates the status of the scan through
* cfg80211_scan_done().
*
+ * @NL80211_CMD_START_NAN: Start NAN operation, identified by its
+ * %NL80211_ATTR_WDEV interface. This interface must have been previously
+ * created with %NL80211_CMD_NEW_INTERFACE. After it has been started, the
+ * NAN interface will create or join a cluster. This command must have a
+ * valid %NL80211_ATTR_NAN_MASTER_PREF attribute and optional
+ * %NL80211_ATTR_NAN_DUAL attributes.
+ * After this command NAN functions can be added.
+ * @NL80211_CMD_STOP_NAN: Stop the NAN operation, identified by
+ * its %NL80211_ATTR_WDEV interface.
+ * @NL80211_CMD_ADD_NAN_FUNCTION: Add a NAN function. The function is defined
+ * with %NL80211_ATTR_NAN_FUNC nested attribute. When called, this
+ * operation returns the strictly positive and unique instance id
+ * (%NL80211_ATTR_NAN_FUNC_INST_ID) and a cookie (%NL80211_ATTR_COOKIE)
+ * of the function upon success.
+ * Since instance IDs can be re-used, this cookie is the right
+ * way to identify the function. This will avoid races when a termination
+ * event is handled by the user space after it has already added a new
+ * function that got the same instance id from the kernel as the one
+ * which just terminated.
+ * This cookie may be used in NAN events even before the command
+ * returns, so userspace shouldn't process NAN events until it processes
+ * the response to this command.
+ * Look at %NL80211_ATTR_SOCKET_OWNER as well.
+ * @NL80211_CMD_DEL_NAN_FUNCTION: Delete a NAN function by cookie.
+ * This command is also used as a notification sent when a NAN function is
+ * terminated. This will contain a %NL80211_ATTR_NAN_FUNC_INST_ID
+ * and %NL80211_ATTR_COOKIE attributes.
+ * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN configuration. NAN
+ * must be operational (%NL80211_CMD_START_NAN was executed).
+ * It must contain at least one of the following attributes:
+ * %NL80211_ATTR_NAN_MASTER_PREF, %NL80211_ATTR_NAN_DUAL.
+ * @NL80211_CMD_NAN_FUNC_MATCH: Notification sent when a match is reported.
+ * This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and
+ * %NL80211_ATTR_COOKIE.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1026,6 +1062,13 @@ enum nl80211_commands {
NL80211_CMD_ABORT_SCAN,
+ NL80211_CMD_START_NAN,
+ NL80211_CMD_STOP_NAN,
+ NL80211_CMD_ADD_NAN_FUNCTION,
+ NL80211_CMD_DEL_NAN_FUNCTION,
+ NL80211_CMD_CHANGE_NAN_CONFIG,
+ NL80211_CMD_NAN_MATCH,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1343,7 +1386,13 @@ enum nl80211_commands {
* enum nl80211_band value is used as the index (nla_type() of the nested
* data. If a band is not included, it will be configured to allow all
* rates based on negotiated supported rates information. This attribute
- * is used with %NL80211_CMD_SET_TX_BITRATE_MASK.
+ * is used with %NL80211_CMD_SET_TX_BITRATE_MASK and with starting AP,
+ * and joining mesh networks (not IBSS yet). In the latter case, it must
+ * specify just a single bitrate, which is to be used for the beacon.
+ * The driver must also specify support for this with the extended
+ * features NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
+ * NL80211_EXT_FEATURE_BEACON_RATE_HT and
+ * NL80211_EXT_FEATURE_BEACON_RATE_VHT.
*
* @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
* at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
@@ -1733,6 +1782,12 @@ enum nl80211_commands {
* regulatory indoor configuration would be owned by the netlink socket
* that configured the indoor setting, and the indoor operation would be
* cleared when the socket is closed.
+ * If set during NAN interface creation, the interface will be destroyed
+ * if the socket is closed just like any other interface. Moreover, only
+ * the netlink socket that created the interface will be allowed to add
+ * and remove functions. NAN notifications will be sent in unicast to that
+ * socket. Without this attribute, any socket can add functions and the
+ * notifications will be sent to the %NL80211_MCGRP_NAN multicast group.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
@@ -1867,6 +1922,21 @@ enum nl80211_commands {
* @NL80211_ATTR_MESH_PEER_AID: Association ID for the mesh peer (u16). This is
* used to pull the stored data for mesh peer in power save state.
*
+ * @NL80211_ATTR_NAN_MASTER_PREF: the master preference to be used by
+ * %NL80211_CMD_START_NAN and optionally with
+ * %NL80211_CMD_CHANGE_NAN_CONFIG. Its type is u8 and it can't be 0.
+ * Also, values 1 and 255 are reserved for certification purposes and
+ * should not be used during normal device operation.
+ * @NL80211_ATTR_NAN_DUAL: NAN dual band operation config (see
+ * &enum nl80211_nan_dual_band_conf). This attribute is used with
+ * %NL80211_CMD_START_NAN and optionally with
+ * %NL80211_CMD_CHANGE_NAN_CONFIG.
+ * @NL80211_ATTR_NAN_FUNC: a function that can be added to NAN. See
+ * &enum nl80211_nan_func_attributes for description of this nested
+ * attribute.
+ * @NL80211_ATTR_NAN_MATCH: used to report a match. This is a nested attribute.
+ * See &enum nl80211_nan_match_attributes.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2261,6 +2331,11 @@ enum nl80211_attrs {
NL80211_ATTR_MESH_PEER_AID,
+ NL80211_ATTR_NAN_MASTER_PREF,
+ NL80211_ATTR_NAN_DUAL,
+ NL80211_ATTR_NAN_FUNC,
+ NL80211_ATTR_NAN_MATCH,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2339,6 +2414,7 @@ enum nl80211_attrs {
* commands to create and destroy one
* @NL80211_IF_TYPE_OCB: Outside Context of a BSS
* This mode corresponds to the MIB variable dot11OCBActivated=true
+ * @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev)
* @NL80211_IFTYPE_MAX: highest interface type number currently defined
* @NUM_NL80211_IFTYPES: number of defined interface types
*
@@ -2359,6 +2435,7 @@ enum nl80211_iftype {
NL80211_IFTYPE_P2P_GO,
NL80211_IFTYPE_P2P_DEVICE,
NL80211_IFTYPE_OCB,
+ NL80211_IFTYPE_NAN,
/* keep last */
NUM_NL80211_IFTYPES,
@@ -4551,6 +4628,12 @@ enum nl80211_feature_flags {
* (if available).
* @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of
* channel dwell time.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_LEGACY: Driver supports beacon rate
+ * configuration (AP/mesh), supporting a legacy (non HT/VHT) rate.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_HT: Driver supports beacon rate
+ * configuration (AP/mesh) with HT rates.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
+ * configuration (AP/mesh) with VHT rates.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4562,6 +4645,9 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_SCAN_START_TIME,
NL80211_EXT_FEATURE_BSS_PARENT_TSF,
NL80211_EXT_FEATURE_SET_SCAN_DWELL,
+ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
+ NL80211_EXT_FEATURE_BEACON_RATE_HT,
+ NL80211_EXT_FEATURE_BEACON_RATE_VHT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -4855,4 +4941,186 @@ enum nl80211_bss_select_attr {
NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1
};
+/**
+ * enum nl80211_nan_dual_band_conf - NAN dual band configuration
+ *
+ * Defines the NAN dual band mode of operation
+ *
+ * @NL80211_NAN_BAND_DEFAULT: device default mode
+ * @NL80211_NAN_BAND_2GHZ: 2.4GHz mode
+ * @NL80211_NAN_BAND_5GHZ: 5GHz mode
+ */
+enum nl80211_nan_dual_band_conf {
+ NL80211_NAN_BAND_DEFAULT = 1 << 0,
+ NL80211_NAN_BAND_2GHZ = 1 << 1,
+ NL80211_NAN_BAND_5GHZ = 1 << 2,
+};
+
+/**
+ * enum nl80211_nan_function_type - NAN function type
+ *
+ * Defines the function type of a NAN function
+ *
+ * @NL80211_NAN_FUNC_PUBLISH: function is publish
+ * @NL80211_NAN_FUNC_SUBSCRIBE: function is subscribe
+ * @NL80211_NAN_FUNC_FOLLOW_UP: function is follow-up
+ */
+enum nl80211_nan_function_type {
+ NL80211_NAN_FUNC_PUBLISH,
+ NL80211_NAN_FUNC_SUBSCRIBE,
+ NL80211_NAN_FUNC_FOLLOW_UP,
+
+ /* keep last */
+ __NL80211_NAN_FUNC_TYPE_AFTER_LAST,
+ NL80211_NAN_FUNC_MAX_TYPE = __NL80211_NAN_FUNC_TYPE_AFTER_LAST - 1,
+};
+
+/**
+ * enum nl80211_nan_publish_type - NAN publish tx type
+ *
+ * Defines how to send publish Service Discovery Frames
+ *
+ * @NL80211_NAN_SOLICITED_PUBLISH: publish function is solicited
+ * @NL80211_NAN_UNSOLICITED_PUBLISH: publish function is unsolicited
+ */
+enum nl80211_nan_publish_type {
+ NL80211_NAN_SOLICITED_PUBLISH = 1 << 0,
+ NL80211_NAN_UNSOLICITED_PUBLISH = 1 << 1,
+};
+
+/**
+ * enum nl80211_nan_func_term_reason - NAN functions termination reason
+ *
+ * Defines termination reasons of a NAN function
+ *
+ * @NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST: requested by user
+ * @NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED: timeout
+ * @NL80211_NAN_FUNC_TERM_REASON_ERROR: errored
+ */
+enum nl80211_nan_func_term_reason {
+ NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST,
+ NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED,
+ NL80211_NAN_FUNC_TERM_REASON_ERROR,
+};
+
+#define NL80211_NAN_FUNC_SERVICE_ID_LEN 6
+#define NL80211_NAN_FUNC_SERVICE_SPEC_INFO_MAX_LEN 0xff
+#define NL80211_NAN_FUNC_SRF_MAX_LEN 0xff
+
+/**
+ * enum nl80211_nan_func_attributes - NAN function attributes
+ * @__NL80211_NAN_FUNC_INVALID: invalid
+ * @NL80211_NAN_FUNC_TYPE: &enum nl80211_nan_function_type (u8).
+ * @NL80211_NAN_FUNC_SERVICE_ID: 6 bytes of the service ID hash as
+ * specified in NAN spec. This is a binary attribute.
+ * @NL80211_NAN_FUNC_PUBLISH_TYPE: relevant if the function's type is
+ * publish. Defines the transmission type for the publish Service Discovery
+ * Frame, see &enum nl80211_nan_publish_type. Its type is u8.
+ * @NL80211_NAN_FUNC_PUBLISH_BCAST: relevant if the function is a solicited
+ * publish. If set, the solicited publish Service Discovery Frame is sent to
+ * the NAN Broadcast address. This is a flag.
+ * @NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE: relevant if the function's type is
+ * subscribe. Is the subscribe active. This is a flag.
+ * @NL80211_NAN_FUNC_FOLLOW_UP_ID: relevant if the function's type is follow up.
+ * The instance ID for the follow up Service Discovery Frame. This is u8.
+ * @NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID: relevant if the function's type
+ * is follow up. The requestor instance ID for the follow up Service
+ * Discovery Frame. This is a u8.
+ * @NL80211_NAN_FUNC_FOLLOW_UP_DEST: the MAC address of the recipient of the
+ * follow up Service Discovery Frame. This is a binary attribute.
+ * @NL80211_NAN_FUNC_CLOSE_RANGE: is this function limited for devices in a
+ * close range. The range itself (RSSI) is defined by the device.
+ * This is a flag.
+ * @NL80211_NAN_FUNC_TTL: strictly positive number of DWs this function should
+ * stay active. If not present infinite TTL is assumed. This is a u32.
+ * @NL80211_NAN_FUNC_SERVICE_INFO: array of bytes describing the service
+ * specific info. This is a binary attribute.
+ * @NL80211_NAN_FUNC_SRF: Service Receive Filter. This is a nested attribute.
+ * See &enum nl80211_nan_srf_attributes.
+ * @NL80211_NAN_FUNC_RX_MATCH_FILTER: Receive Matching filter. This is a nested
+ * attribute. It is a list of binary values.
+ * @NL80211_NAN_FUNC_TX_MATCH_FILTER: Transmit Matching filter. This is a
+ * nested attribute. It is a list of binary values.
+ * @NL80211_NAN_FUNC_INSTANCE_ID: The instance ID of the function.
+ * Its type is u8 and it cannot be 0.
+ * @NL80211_NAN_FUNC_TERM_REASON: NAN function termination reason.
+ * See &enum nl80211_nan_func_term_reason.
+ *
+ * @NUM_NL80211_NAN_FUNC_ATTR: internal
+ * @NL80211_NAN_FUNC_ATTR_MAX: highest NAN function attribute
+ */
+enum nl80211_nan_func_attributes {
+ __NL80211_NAN_FUNC_INVALID,
+ NL80211_NAN_FUNC_TYPE,
+ NL80211_NAN_FUNC_SERVICE_ID,
+ NL80211_NAN_FUNC_PUBLISH_TYPE,
+ NL80211_NAN_FUNC_PUBLISH_BCAST,
+ NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE,
+ NL80211_NAN_FUNC_FOLLOW_UP_ID,
+ NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID,
+ NL80211_NAN_FUNC_FOLLOW_UP_DEST,
+ NL80211_NAN_FUNC_CLOSE_RANGE,
+ NL80211_NAN_FUNC_TTL,
+ NL80211_NAN_FUNC_SERVICE_INFO,
+ NL80211_NAN_FUNC_SRF,
+ NL80211_NAN_FUNC_RX_MATCH_FILTER,
+ NL80211_NAN_FUNC_TX_MATCH_FILTER,
+ NL80211_NAN_FUNC_INSTANCE_ID,
+ NL80211_NAN_FUNC_TERM_REASON,
+
+ /* keep last */
+ NUM_NL80211_NAN_FUNC_ATTR,
+ NL80211_NAN_FUNC_ATTR_MAX = NUM_NL80211_NAN_FUNC_ATTR - 1
+};
+
+/**
+ * enum nl80211_nan_srf_attributes - NAN Service Response filter attributes
+ * @__NL80211_NAN_SRF_INVALID: invalid
+ * @NL80211_NAN_SRF_INCLUDE: present if the include bit of the SRF set.
+ * This is a flag.
+ * @NL80211_NAN_SRF_BF: Bloom Filter. Present if and only if
+ * &NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary.
+ * @NL80211_NAN_SRF_BF_IDX: index of the Bloom Filter. Mandatory if
+ * &NL80211_NAN_SRF_BF is present. This is a u8.
+ * @NL80211_NAN_SRF_MAC_ADDRS: list of MAC addresses for the SRF. Present if
+ * and only if &NL80211_NAN_SRF_BF isn't present. This is a nested
+ * attribute. Each nested attribute is a MAC address.
+ * @NUM_NL80211_NAN_SRF_ATTR: internal
+ * @NL80211_NAN_SRF_ATTR_MAX: highest NAN SRF attribute
+ */
+enum nl80211_nan_srf_attributes {
+ __NL80211_NAN_SRF_INVALID,
+ NL80211_NAN_SRF_INCLUDE,
+ NL80211_NAN_SRF_BF,
+ NL80211_NAN_SRF_BF_IDX,
+ NL80211_NAN_SRF_MAC_ADDRS,
+
+ /* keep last */
+ NUM_NL80211_NAN_SRF_ATTR,
+ NL80211_NAN_SRF_ATTR_MAX = NUM_NL80211_NAN_SRF_ATTR - 1,
+};
+
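
A similarly hedged sketch of the SRF nest defined above: the include flag plus an explicit MAC address list, which per the comments above must not be combined with the Bloom filter attributes.

#include <errno.h>
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* Hypothetical sketch: an SRF carrying an explicit MAC address list. */
static int put_nan_srf_macs(struct nl_msg *msg,
			    const uint8_t (*addrs)[6], int n_addrs)
{
	struct nlattr *srf, *list;
	int i;

	srf = nla_nest_start(msg, NL80211_NAN_FUNC_SRF);
	if (!srf)
		return -ENOBUFS;

	NLA_PUT_FLAG(msg, NL80211_NAN_SRF_INCLUDE);	/* include bit set */

	/* NL80211_NAN_SRF_BF must not be used together with this list. */
	list = nla_nest_start(msg, NL80211_NAN_SRF_MAC_ADDRS);
	if (!list)
		goto nla_put_failure;
	for (i = 0; i < n_addrs; i++)
		NLA_PUT(msg, i + 1, 6, addrs[i]);	/* each nested attr is one MAC */
	nla_nest_end(msg, list);

	nla_nest_end(msg, srf);
	return 0;

nla_put_failure:
	nla_nest_cancel(msg, srf);
	return -ENOBUFS;
}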
+/**
+ * enum nl80211_nan_match_attributes - NAN match attributes
+ * @__NL80211_NAN_MATCH_INVALID: invalid
+ * @NL80211_NAN_MATCH_FUNC_LOCAL: the local function that had the
+ * match. This is a nested attribute.
+ * See &enum nl80211_nan_func_attributes.
+ * @NL80211_NAN_MATCH_FUNC_PEER: the peer function
+ * that caused the match. This is a nested attribute.
+ * See &enum nl80211_nan_func_attributes.
+ *
+ * @NUM_NL80211_NAN_MATCH_ATTR: internal
+ * @NL80211_NAN_MATCH_ATTR_MAX: highest NAN match attribute
+ */
+enum nl80211_nan_match_attributes {
+ __NL80211_NAN_MATCH_INVALID,
+ NL80211_NAN_MATCH_FUNC_LOCAL,
+ NL80211_NAN_MATCH_FUNC_PEER,
+
+ /* keep last */
+ NUM_NL80211_NAN_MATCH_ATTR,
+ NL80211_NAN_MATCH_ATTR_MAX = NUM_NL80211_NAN_MATCH_ATTR - 1
+};
+
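
On the event side, a match notification carries the two nests above; a hedged sketch of unpacking them with libnl and reading the peer's instance ID.

#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/*
 * Hypothetical sketch: split a match notification into the local and peer
 * function nests documented above.  @match is the nested NAN-match
 * attribute taken from the event message.
 */
static void handle_nan_match(struct nlattr *match)
{
	struct nlattr *tb[NUM_NL80211_NAN_MATCH_ATTR];
	struct nlattr *func[NUM_NL80211_NAN_FUNC_ATTR];

	if (nla_parse_nested(tb, NL80211_NAN_MATCH_ATTR_MAX, match, NULL))
		return;

	if (tb[NL80211_NAN_MATCH_FUNC_PEER] &&
	    !nla_parse_nested(func, NL80211_NAN_FUNC_ATTR_MAX,
			      tb[NL80211_NAN_MATCH_FUNC_PEER], NULL) &&
	    func[NL80211_NAN_FUNC_INSTANCE_ID])
		printf("peer instance id: %u\n",
		       nla_get_u8(func[NL80211_NAN_FUNC_INSTANCE_ID]));
}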
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 54c3b4f4aceb..59ed3992c760 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -605,13 +605,13 @@ struct ovs_action_push_mpls {
* @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set
* (but it will not be set in the 802.1Q header that is pushed).
*
- * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID
- * values are those that the kernel module also parses as 802.1Q headers, to
- * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN
- * from having surprising results.
+ * The @vlan_tpid value is typically %ETH_P_8021Q or %ETH_P_8021AD.
+ * The only acceptable TPID values are those that the kernel module also parses
+ * as 802.1Q or 802.1AD headers, to prevent %OVS_ACTION_ATTR_PUSH_VLAN followed
+ * by %OVS_ACTION_ATTR_POP_VLAN from having surprising results.
*/
struct ovs_action_push_vlan {
- __be16 vlan_tpid; /* 802.1Q TPID. */
+ __be16 vlan_tpid; /* 802.1Q or 802.1ad TPID. */
__be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */
};
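
A small sketch of what an 802.1ad (QinQ) push looks like with the updated struct; the TCI packing follows the CFI note above.

#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/openvswitch.h>

/* Hypothetical sketch: push an S-tag (802.1ad), VLAN 100, priority 3. */
static struct ovs_action_push_vlan qinq_push_example(void)
{
	struct ovs_action_push_vlan push = {
		.vlan_tpid = htons(ETH_P_8021AD),
		/* PCP (3 bits) | CFI | VID; the CFI bit must be set here. */
		.vlan_tci  = htons((3 << 13) | 0x1000 | 100),
	};

	return push;
}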
@@ -721,9 +721,10 @@ enum ovs_nat_attr {
* is copied from the value to the packet header field, rest of the bits are
* left unchanged. The non-masked value bits must be passed in as zeroes.
* Masking is not supported for the %OVS_KEY_ATTR_TUNNEL attribute.
- * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
- * packet.
- * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
+ * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q or 802.1ad header
+ * onto the packet.
+ * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q or 802.1ad header
+ * from the packet.
* @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
* the nested %OVS_SAMPLE_ATTR_* attributes.
* @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index d1c1ccaba787..8fd715f806a2 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -396,6 +396,7 @@ enum {
TCA_BPF_FD,
TCA_BPF_NAME,
TCA_BPF_FLAGS,
+ TCA_BPF_FLAGS_GEN,
__TCA_BPF_MAX,
};
@@ -428,6 +429,24 @@ enum {
TCA_FLOWER_KEY_UDP_DST, /* be16 */
TCA_FLOWER_FLAGS,
+ TCA_FLOWER_KEY_VLAN_ID, /* be16 */
+ TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
+
+ TCA_FLOWER_KEY_ENC_KEY_ID, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */
+ TCA_FLOWER_KEY_ENC_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
+
+ TCA_FLOWER_KEY_TCP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST_MASK, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST_MASK, /* be16 */
__TCA_FLOWER_MAX,
};
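
A hedged sketch of filling the new flower VLAN keys inside a classifier's TCA_OPTIONS nest; the encodings follow the per-attribute type comments above, and the enclosing RTM_NEWTFILTER message is not shown.

#include <errno.h>
#include <arpa/inet.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/if_ether.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>

/*
 * Hypothetical sketch: match outer VLAN 100, priority 3, inner IPv4.
 * Encodings follow the type comments in the enum above.
 */
static int put_flower_vlan_keys(struct nl_msg *msg)
{
	struct nlattr *opts = nla_nest_start(msg, TCA_OPTIONS);

	if (!opts)
		return -ENOBUFS;

	NLA_PUT_U16(msg, TCA_FLOWER_KEY_VLAN_ID, htons(100));		/* be16 */
	NLA_PUT_U8(msg, TCA_FLOWER_KEY_VLAN_PRIO, 3);			/* u8 */
	NLA_PUT_U16(msg, TCA_FLOWER_KEY_VLAN_ETH_TYPE, htons(ETH_P_IP));

	nla_nest_end(msg, opts);
	return 0;

nla_put_failure:
	nla_nest_cancel(msg, opts);
	return -ENOBUFS;
}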
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 2382eed50278..df7451d35131 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -792,6 +792,8 @@ enum {
TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
+ TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+
__TCA_FQ_MAX
};
@@ -809,7 +811,7 @@ struct tc_fq_qd_stats {
__u32 flows;
__u32 inactive_flows;
__u32 throttled_flows;
- __u32 pad;
+ __u32 unthrottle_latency_ns;
};
/* Heavy-Hitter Filter */
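
A hedged one-attribute sketch for the new FQ knob; the value is assumed to be a u32 rate in bytes per second, in line with the other FQ rate attributes, and the enclosing RTM_NEWQDISC message is not shown.

#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>

/*
 * Hypothetical sketch: set the low-rate threshold inside the fq qdisc's
 * TCA_OPTIONS nest.  The unit (bytes per second) is an assumption.
 */
static int put_fq_low_rate(struct nl_msg *msg)
{
	struct nlattr *opts = nla_nest_start(msg, TCA_OPTIONS);

	if (!opts)
		return -ENOBUFS;
	NLA_PUT_U32(msg, TCA_FQ_LOW_RATE_THRESHOLD, 550000);
	nla_nest_end(msg, opts);
	return 0;

nla_put_failure:
	nla_nest_cancel(msg, opts);
	return -ENOBUFS;
}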
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 25a9ad8bcef1..e7a31f830690 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -235,6 +235,7 @@ enum
LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */
LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */
LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */
+ LINUX_MIB_TCPMD5FAILURE, /* TCPMD5Failure */
LINUX_MIB_SACKSHIFTED,
LINUX_MIB_SACKMERGED,
LINUX_MIB_SACKSHIFTFALLBACK,
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
index 4ece02a77b9a..cd18360eca24 100644
--- a/include/uapi/linux/tc_act/tc_ife.h
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -32,8 +32,9 @@ enum {
#define IFE_META_HASHID 2
#define IFE_META_PRIO 3
#define IFE_META_QMAP 4
+#define IFE_META_TCINDEX 5
/*Can be overridden at runtime by module option*/
-#define __IFE_META_MAX 5
+#define __IFE_META_MAX 6
#define IFE_META_MAX (__IFE_META_MAX - 1)
#endif
diff --git a/include/uapi/linux/tc_act/tc_skbmod.h b/include/uapi/linux/tc_act/tc_skbmod.h
new file mode 100644
index 000000000000..10fc07da6c69
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_skbmod.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, Jamal Hadi Salim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#ifndef __LINUX_TC_SKBMOD_H
+#define __LINUX_TC_SKBMOD_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_SKBMOD 15
+
+#define SKBMOD_F_DMAC 0x1
+#define SKBMOD_F_SMAC 0x2
+#define SKBMOD_F_ETYPE 0x4
+#define SKBMOD_F_SWAPMAC 0x8
+
+struct tc_skbmod {
+ tc_gen;
+ __u64 flags;
+};
+
+enum {
+ TCA_SKBMOD_UNSPEC,
+ TCA_SKBMOD_TM,
+ TCA_SKBMOD_PARMS,
+ TCA_SKBMOD_DMAC,
+ TCA_SKBMOD_SMAC,
+ TCA_SKBMOD_ETYPE,
+ TCA_SKBMOD_PAD,
+ __TCA_SKBMOD_MAX
+};
+#define TCA_SKBMOD_MAX (__TCA_SKBMOD_MAX - 1)
+
+#endif
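
A hedged sketch of configuring the new skbmod action: the tc_gen-based parameter block plus a destination MAC rewrite. The enclosing action nest is not shown, and the 6-byte DMAC payload is an assumption based on the SKBMOD_F_DMAC flag.

#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/pkt_cls.h>
#include <linux/tc_act/tc_skbmod.h>

/*
 * Hypothetical sketch: a skbmod action that rewrites the destination MAC
 * and then pipes to the next action (TC_ACT_PIPE is from pkt_cls.h).
 */
static int put_skbmod_dmac(struct nl_msg *msg, const unsigned char dmac[6])
{
	struct tc_skbmod parm = {
		.action = TC_ACT_PIPE,
		.flags  = SKBMOD_F_DMAC,
	};

	NLA_PUT(msg, TCA_SKBMOD_PARMS, sizeof(parm), &parm);
	NLA_PUT(msg, TCA_SKBMOD_DMAC, 6, dmac);	/* ETH_ALEN bytes, assumed */
	return 0;

nla_put_failure:
	return -ENOBUFS;
}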
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
new file mode 100644
index 000000000000..890106ff16e6
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TC_TUNNEL_KEY_H
+#define __LINUX_TC_TUNNEL_KEY_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_TUNNEL_KEY 17
+
+#define TCA_TUNNEL_KEY_ACT_SET 1
+#define TCA_TUNNEL_KEY_ACT_RELEASE 2
+
+struct tc_tunnel_key {
+ tc_gen;
+ int t_action;
+};
+
+enum {
+ TCA_TUNNEL_KEY_UNSPEC,
+ TCA_TUNNEL_KEY_TM,
+ TCA_TUNNEL_KEY_PARMS,
+ TCA_TUNNEL_KEY_ENC_IPV4_SRC, /* be32 */
+ TCA_TUNNEL_KEY_ENC_IPV4_DST, /* be32 */
+ TCA_TUNNEL_KEY_ENC_IPV6_SRC, /* struct in6_addr */
+ TCA_TUNNEL_KEY_ENC_IPV6_DST, /* struct in6_addr */
+ TCA_TUNNEL_KEY_ENC_KEY_ID, /* be64 */
+ TCA_TUNNEL_KEY_PAD,
+ __TCA_TUNNEL_KEY_MAX,
+};
+
+#define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1)
+
+#endif
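
A hedged sketch of a tunnel_key "set" action with IPv4 endpoints; the enclosing action nest is not shown, and the be64 key-id attribute is left out of the sketch.

#include <errno.h>
#include <arpa/inet.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/pkt_cls.h>
#include <linux/tc_act/tc_tunnel_key.h>

/*
 * Hypothetical sketch: describe an IPv4 tunnel to set on matching packets.
 * inet_addr() already returns the addresses in network byte order (be32).
 */
static int put_tunnel_key_set(struct nl_msg *msg)
{
	struct tc_tunnel_key parm = {
		.action   = TC_ACT_PIPE,
		.t_action = TCA_TUNNEL_KEY_ACT_SET,
	};

	NLA_PUT(msg, TCA_TUNNEL_KEY_PARMS, sizeof(parm), &parm);
	NLA_PUT_U32(msg, TCA_TUNNEL_KEY_ENC_IPV4_SRC, inet_addr("192.0.2.1"));
	NLA_PUT_U32(msg, TCA_TUNNEL_KEY_ENC_IPV4_DST, inet_addr("192.0.2.2"));
	return 0;

nla_put_failure:
	return -ENOBUFS;
}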
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index 31151ff6264f..bddb272b843f 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -16,6 +16,7 @@
#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
+#define TCA_VLAN_ACT_MODIFY 3
struct tc_vlan {
tc_gen;
@@ -29,6 +30,7 @@ enum {
TCA_VLAN_PUSH_VLAN_ID,
TCA_VLAN_PUSH_VLAN_PROTOCOL,
TCA_VLAN_PAD,
+ TCA_VLAN_PUSH_VLAN_PRIORITY,
__TCA_VLAN_MAX,
};
#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
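
A hedged sketch of the new TCA_VLAN_ACT_MODIFY together with the pushed priority; the payload widths (u16 id, u8 priority, be16 protocol) and TCA_VLAN_PARMS come from the existing act_vlan attributes and are assumptions here, and the enclosing action nest is not shown.

#include <errno.h>
#include <arpa/inet.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <linux/tc_act/tc_vlan.h>

/*
 * Hypothetical sketch: rewrite the existing 802.1Q tag in place to
 * VLAN 5, priority 7, using the new modify action.
 */
static int put_vlan_modify(struct nl_msg *msg)
{
	struct tc_vlan parm = {
		.action   = TC_ACT_PIPE,
		.v_action = TCA_VLAN_ACT_MODIFY,
	};

	NLA_PUT(msg, TCA_VLAN_PARMS, sizeof(parm), &parm);
	NLA_PUT_U16(msg, TCA_VLAN_PUSH_VLAN_ID, 5);
	NLA_PUT_U8(msg, TCA_VLAN_PUSH_VLAN_PRIORITY, 7);
	NLA_PUT_U16(msg, TCA_VLAN_PUSH_VLAN_PROTOCOL, htons(ETH_P_8021Q));
	return 0;

nla_put_failure:
	return -ENOBUFS;
}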
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 482898fc433a..73ac0db487f8 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -167,6 +167,7 @@ struct tcp_info {
__u8 tcpi_backoff;
__u8 tcpi_options;
__u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
+ __u8 tcpi_delivery_rate_app_limited:1;
__u32 tcpi_rto;
__u32 tcpi_ato;
@@ -211,6 +212,8 @@ struct tcp_info {
__u32 tcpi_min_rtt;
__u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */
__u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */
+
+ __u64 tcpi_delivery_rate;
};
/* for TCP_MD5SIG socket option */
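
The new delivery-rate fields are read back through the existing TCP_INFO socket option; a short sketch, with the rate assumed to be in bytes per second.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

/*
 * Hypothetical sketch: print the delivery rate of a connected TCP socket.
 * Older kernels return a shorter tcp_info, so the zeroed tail reads as 0.
 */
static void print_delivery_rate(int sock)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	memset(&ti, 0, sizeof(ti));
	if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("delivery rate: %llu B/s%s\n",
		       (unsigned long long)ti.tcpi_delivery_rate,
		       ti.tcpi_delivery_rate_app_limited ? " (app limited)" : "");
}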
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 5f3f6d09fb79..f9edd20fe9ba 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -59,6 +59,9 @@ enum {
TIPC_NL_MON_SET,
TIPC_NL_MON_GET,
TIPC_NL_MON_PEER_GET,
+ TIPC_NL_PEER_REMOVE,
+ TIPC_NL_BEARER_ADD,
+ TIPC_NL_UDP_GET_REMOTEIP,
__TIPC_NL_CMD_MAX,
TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1
@@ -98,6 +101,7 @@ enum {
TIPC_NLA_UDP_UNSPEC,
TIPC_NLA_UDP_LOCAL, /* sockaddr_storage */
TIPC_NLA_UDP_REMOTE, /* sockaddr_storage */
+ TIPC_NLA_UDP_MULTI_REMOTEIP, /* flag */
__TIPC_NLA_UDP_MAX,
TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 143338978b48..1fc62b239f1b 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -298,7 +298,7 @@ enum xfrm_attr_type_t {
XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */
XFRMA_MARK, /* struct xfrm_mark */
XFRMA_TFCPAD, /* __u32 */
- XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_esn */
+ XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_state_esn */
XFRMA_SA_EXTRA_FLAGS, /* __u32 */
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */