author     Linus Torvalds <torvalds@linux-foundation.org>   2017-02-22 21:15:09 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-02-22 21:15:09 +0300
commit     3051bf36c25d5153051704291782f8d44e744d36 (patch)
tree       72dfc8a1d12675c6f2981d13102df954b678f11b /include/linux
parent     1e74a2eb1f5cc7f2f2b5aa9c9eeecbcf352220a3 (diff)
parent     005c3490e9db23738d91e02788606c0fe4734723 (diff)
download   linux-3051bf36c25d5153051704291782f8d44e744d36.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

   1) Support TX_RING in AF_PACKET TPACKET_V3 mode, from Sowmini Varadhan.

   2) Simplify classifier state on sk_buff in order to shrink it a bit. From Willem de Bruijn.

   3) Introduce SIPHASH and its usage for secure sequence numbers and syncookies. From Jason A. Donenfeld.

   4) Reduce CPU usage for ICMP replies we are going to limit or suppress, from Jesper Dangaard Brouer.

   5) Introduce Shared Memory Communications socket layer, from Ursula Braun.

   6) Add RACK loss detection and allow it to actually trigger fast recovery instead of just assisting after other algorithms have triggered it. From Yuchung Cheng.

   7) Add xmit_more and BQL support to mvneta driver, from Simon Guinot.

   8) skb_cow_data avoidance in esp4 and esp6, from Steffen Klassert.

   9) Export MPLS packet stats via netlink, from Robert Shearman.

  10) Significantly improve inet port bind conflict handling, especially when an application is restarted and changes its setting of reuseport. From Josef Bacik.

  11) Implement TX batching in vhost_net, from Jason Wang.

  12) Extend the dummy device so that VF (virtual function) features, such as configuration, can be more easily tested. From Phil Sutter.

  13) Avoid two atomic ops per page on x86 in bnx2x driver, from Eric Dumazet.

  14) Add new bpf MAP, implementing a longest prefix match trie. From Daniel Mack.

  15) Packet sample offloading support in mlxsw driver, from Yotam Gigi.

  16) Add new aquantia driver, from David VomLehn.

  17) Add bpf tracepoints, from Daniel Borkmann.

  18) Add support for port mirroring to b53 and bcm_sf2 drivers, from Florian Fainelli.

  19) Remove custom busy polling in many drivers; it has been handled by the core networking code since the 4.5 time frame. From Eric Dumazet.

  20) Support XDP adjust_head in virtio_net, from John Fastabend.

  21) Fix several major holes in neighbour entry confirmation, from Julian Anastasov.

  22) Add XDP support to bnxt_en driver, from Michael Chan.

  23) VXLAN offloads for enic driver, from Govindarajulu Varadarajan.

  24) Add IPVTAP driver (IP-VLAN based tap driver) from Sainath Grandhi.

  25) Support GRO in IPSEC protocols, from Steffen Klassert"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1764 commits)
  Revert "ath10k: Search SMBIOS for OEM board file extension"
  net: socket: fix recvmmsg not returning error from sock_error
  bnxt_en: use eth_hw_addr_random()
  bpf: fix unlocking of jited image when module ronx not set
  arch: add ARCH_HAS_SET_MEMORY config
  net: napi_watchdog() can use napi_schedule_irqoff()
  tcp: Revert "tcp: tcp_probe: use spin_lock_bh()"
  net/hsr: use eth_hw_addr_random()
  net: mvpp2: enable building on 64-bit platforms
  net: mvpp2: switch to build_skb() in the RX path
  net: mvpp2: simplify MVPP2_PRS_RI_* definitions
  net: mvpp2: fix indentation of MVPP2_EXT_GLOBAL_CTRL_DEFAULT
  net: mvpp2: remove unused register definitions
  net: mvpp2: simplify mvpp2_bm_bufs_add()
  net: mvpp2: drop useless fields in mvpp2_bm_pool and related code
  net: mvpp2: remove unused 'tx_skb' field of 'struct mvpp2_tx_queue'
  net: mvpp2: release reference to txq_cpu[] entry after unmapping
  net: mvpp2: handle too large value in mvpp2_rx_time_coal_set()
  net: mvpp2: handle too large value handling in mvpp2_rx_pkts_coal_set()
  net: mvpp2: remove useless arguments in mvpp2_rx_{pkts, time}_coal_set
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/bitfield.h13
-rw-r--r--include/linux/bpf.h23
-rw-r--r--include/linux/bpf_trace.h7
-rw-r--r--include/linux/brcmphy.h19
-rw-r--r--include/linux/can/dev.h8
-rw-r--r--include/linux/can/rx-offload.h59
-rw-r--r--include/linux/device.h11
-rw-r--r--include/linux/etherdevice.h65
-rw-r--r--include/linux/filter.h125
-rw-r--r--include/linux/ieee80211.h51
-rw-r--r--include/linux/if_bridge.h2
-rw-r--r--include/linux/if_frad.h2
-rw-r--r--include/linux/if_macvlan.h17
-rw-r--r--include/linux/if_tap.h75
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/list.h13
-rw-r--r--include/linux/marvell_phy.h7
-rw-r--r--include/linux/mdio.h26
-rw-r--r--include/linux/mlx4/device.h8
-rw-r--r--include/linux/mlx5/cq.h5
-rw-r--r--include/linux/mlx5/device.h111
-rw-r--r--include/linux/mlx5/doorbell.h6
-rw-r--r--include/linux/mlx5/driver.h198
-rw-r--r--include/linux/mlx5/mlx5_ifc.h262
-rw-r--r--include/linux/mlx5/qp.h92
-rw-r--r--include/linux/mlx5/vport.h1
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mod_devicetable.h1
-rw-r--r--include/linux/mroute.h59
-rw-r--r--include/linux/mroute6.h2
-rw-r--r--include/linux/netdev_features.h2
-rw-r--r--include/linux/netdevice.h75
-rw-r--r--include/linux/netfilter/nfnetlink.h1
-rw-r--r--include/linux/netfilter/x_tables.h9
-rw-r--r--include/linux/parman.h76
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/phy.h39
-rw-r--r--include/linux/ptr_ring.h36
-rw-r--r--include/linux/qed/common_hsi.h43
-rw-r--r--include/linux/qed/eth_common.h32
-rw-r--r--include/linux/qed/fcoe_common.h715
-rw-r--r--include/linux/qed/iscsi_common.h32
-rw-r--r--include/linux/qed/qed_chain.h34
-rw-r--r--include/linux/qed/qed_eth_if.h56
-rw-r--r--include/linux/qed/qed_fcoe_if.h145
-rw-r--r--include/linux/qed/qed_if.h76
-rw-r--r--include/linux/qed/qed_iov_if.h34
-rw-r--r--include/linux/qed/qed_iscsi_if.h32
-rw-r--r--include/linux/qed/qed_ll2_if.h31
-rw-r--r--include/linux/qed/qed_roce_if.h2
-rw-r--r--include/linux/qed/qede_roce.h2
-rw-r--r--include/linux/qed/rdma_common.h32
-rw-r--r--include/linux/qed/roce_common.h32
-rw-r--r--include/linux/qed/storage_common.h32
-rw-r--r--include/linux/qed/tcp_common.h32
-rw-r--r--include/linux/rfkill-regulator.h48
-rw-r--r--include/linux/rhashtable.h78
-rw-r--r--include/linux/sctp.h143
-rw-r--r--include/linux/siphash.h140
-rw-r--r--include/linux/skbuff.h64
-rw-r--r--include/linux/soc/qcom/smem_state.h2
-rw-r--r--include/linux/soc/ti/knav_dma.h2
-rw-r--r--include/linux/socket.h13
-rw-r--r--include/linux/stmmac.h8
-rw-r--r--include/linux/tcp.h20
-rw-r--r--include/linux/trace_events.h3
-rw-r--r--include/linux/uuid.h24
-rw-r--r--include/linux/virtio.h4
68 files changed, 2946 insertions, 475 deletions
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index f6505d83069d..8b9d6fff002d 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -63,6 +63,19 @@
})
/**
+ * FIELD_FIT() - check if value fits in the field
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to test against the field
+ *
+ * Return: true if @_val can fit inside @_mask, false if @_val is too big.
+ */
+#define FIELD_FIT(_mask, _val) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
+ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
+ })
+
+/**
* FIELD_PREP() - prepare a bitfield element
* @_mask: shifted mask defining the field's length and position
* @_val: value to put in the field
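[Editor's note] FIELD_FIT() complements the existing FIELD_PREP()/FIELD_GET() helpers by letting callers validate a value against a shifted mask before packing it. A minimal sketch, assuming a made-up register field (EXAMPLE_SPEED_MASK and example_pack_speed() are not from this patch):

  #include <linux/bitfield.h>
  #include <linux/bitops.h>
  #include <linux/errno.h>

  #define EXAMPLE_SPEED_MASK	GENMASK(7, 4)	/* hypothetical 4-bit field */

  static int example_pack_speed(u32 *reg, u32 speed)
  {
  	if (!FIELD_FIT(EXAMPLE_SPEED_MASK, speed))
  		return -EINVAL;		/* value too wide for bits 7:4 */

  	*reg |= FIELD_PREP(EXAMPLE_SPEED_MASK, speed);
  	return 0;
  }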
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3ed1f3b1d594..909fc033173a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -8,10 +8,12 @@
#define _LINUX_BPF_H 1
#include <uapi/linux/bpf.h>
+
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
+#include <linux/rbtree_latch.h>
struct perf_event;
struct bpf_map;
@@ -69,14 +71,14 @@ enum bpf_arg_type {
/* the following constraints used to prototype bpf_memcmp() and other
* functions that access data on eBPF program stack
*/
- ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
- ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not
- * need to be initialized, helper function must fill
- * all bytes or clear them in error case.
+ ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
+ ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
+ * helper function must fill all bytes or clear
+ * them in error case.
*/
- ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
- ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
+ ARG_CONST_SIZE, /* number of bytes accessed from memory */
+ ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
@@ -161,9 +163,10 @@ struct bpf_verifier_ops {
enum bpf_reg_type *reg_type);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
- u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
- struct bpf_insn *insn, struct bpf_prog *prog);
+ u32 (*convert_ctx_access)(enum bpf_access_type type,
+ const struct bpf_insn *src,
+ struct bpf_insn *dst,
+ struct bpf_prog *prog);
};
struct bpf_prog_type_list {
@@ -176,6 +179,8 @@ struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
u32 max_ctx_offset;
+ struct latch_tree_node ksym_tnode;
+ struct list_head ksym_lnode;
const struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h
new file mode 100644
index 000000000000..b22efbdd2eb4
--- /dev/null
+++ b/include/linux/bpf_trace.h
@@ -0,0 +1,7 @@
+#ifndef __LINUX_BPF_TRACE_H__
+#define __LINUX_BPF_TRACE_H__
+
+#include <trace/events/bpf.h>
+#include <trace/events/xdp.h>
+
+#endif /* __LINUX_BPF_TRACE_H__ */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 4f7d8be9ddbf..55e517130311 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -17,6 +17,7 @@
#define PHY_ID_BCM5482 0x0143bcb0
#define PHY_ID_BCM5411 0x00206070
#define PHY_ID_BCM5421 0x002060e0
+#define PHY_ID_BCM54210E 0x600d84a0
#define PHY_ID_BCM5464 0x002060b0
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM54612E 0x03625e60
@@ -24,6 +25,7 @@
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7278 0xae0251a0
#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
#define PHY_ID_BCM7346 0x600d8650
@@ -31,6 +33,7 @@
#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7435 0x600d8750
+#define PHY_ID_BCM74371 0xae0252e0
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
@@ -103,19 +106,17 @@
/*
* AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
*/
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00
#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
-#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
-#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100
-#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
-#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
-#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8)
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4)
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
+#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007
/*
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5f5270941ba0..141b05aade81 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -38,6 +38,13 @@ struct can_priv {
struct can_bittiming bittiming, data_bittiming;
const struct can_bittiming_const *bittiming_const,
*data_bittiming_const;
+ const u16 *termination_const;
+ unsigned int termination_const_cnt;
+ u16 termination;
+ const u32 *bitrate_const;
+ unsigned int bitrate_const_cnt;
+ const u32 *data_bitrate_const;
+ unsigned int data_bitrate_const_cnt;
struct can_clock clock;
enum can_state state;
@@ -53,6 +60,7 @@ struct can_priv {
int (*do_set_bittiming)(struct net_device *dev);
int (*do_set_data_bittiming)(struct net_device *dev);
int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
+ int (*do_set_termination)(struct net_device *dev, u16 term);
int (*do_get_state)(const struct net_device *dev,
enum can_state *state);
int (*do_get_berr_counter)(const struct net_device *dev,
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
new file mode 100644
index 000000000000..cb31683bbe15
--- /dev/null
+++ b/include/linux/can/rx-offload.h
@@ -0,0 +1,59 @@
+/*
+ * linux/can/rx-offload.h
+ *
+ * Copyright (c) 2014 David Jander, Protonic Holland
+ * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAN_RX_OFFLOAD_H
+#define _CAN_RX_OFFLOAD_H
+
+#include <linux/netdevice.h>
+#include <linux/can.h>
+
+struct can_rx_offload {
+ struct net_device *dev;
+
+ unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf,
+ u32 *timestamp, unsigned int mb);
+
+ struct sk_buff_head skb_queue;
+ u32 skb_queue_len_max;
+
+ unsigned int mb_first;
+ unsigned int mb_last;
+
+ struct napi_struct napi;
+
+ bool inc;
+};
+
+int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload);
+int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
+int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
+int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
+void can_rx_offload_reset(struct can_rx_offload *offload);
+void can_rx_offload_del(struct can_rx_offload *offload);
+void can_rx_offload_enable(struct can_rx_offload *offload);
+
+static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
+{
+ napi_schedule(&offload->napi);
+}
+
+static inline void can_rx_offload_disable(struct can_rx_offload *offload)
+{
+ napi_disable(&offload->napi);
+}
+
+#endif /* !_CAN_RX_OFFLOAD_H */
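[Editor's note] A rough sketch of how a CAN driver would be expected to wire these helpers up (all example_* names are hypothetical): register the FIFO offload once, kick it from the interrupt handler, and let the embedded NAPI instance drain skb_queue; can_rx_offload_enable()/can_rx_offload_disable() then bracket the interface up/down path.

  #include <linux/can/rx-offload.h>
  #include <linux/interrupt.h>
  #include <linux/netdevice.h>

  struct example_priv {
  	struct can_rx_offload offload;
  	/* ... further driver state ... */
  };

  static unsigned int example_mailbox_read(struct can_rx_offload *offload,
  					 struct can_frame *cf, u32 *timestamp,
  					 unsigned int mb)
  {
  	/* read mailbox @mb from hardware into @cf, return non-zero if it held a frame */
  	return 0;
  }

  static irqreturn_t example_can_irq(int irq, void *dev_id)
  {
  	struct example_priv *priv = dev_id;

  	can_rx_offload_irq_offload_fifo(&priv->offload);
  	return IRQ_HANDLED;
  }

  static int example_can_probe(struct net_device *ndev, struct example_priv *priv)
  {
  	priv->offload.mailbox_read = example_mailbox_read;
  	return can_rx_offload_add_fifo(ndev, &priv->offload, 64);	/* NAPI weight 64 */
  }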
diff --git a/include/linux/device.h b/include/linux/device.h
index 491b4c0ca633..bd684fc8ec1d 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -88,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
*
* @suspend: Called when a device on this bus wants to go to sleep mode.
* @resume: Called to bring a device on this bus out of sleep mode.
+ * @num_vf: Called to find out how many virtual functions a device on this
+ * bus supports.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
@@ -127,6 +129,8 @@ struct bus_type {
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+ int (*num_vf)(struct device *dev);
+
const struct dev_pm_ops *pm;
const struct iommu_ops *iommu_ops;
@@ -1140,6 +1144,13 @@ extern int device_online(struct device *dev);
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+static inline int dev_num_vf(struct device *dev)
+{
+ if (dev->bus && dev->bus->num_vf)
+ return dev->bus->num_vf(dev);
+ return 0;
+}
+
/*
* Root device objects for grouping under /sys/devices
*/
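[Editor's note] The dev_num_vf() wrapper makes the VF count queryable without bus-specific code; a bus that does not implement ->num_vf() simply reports zero. A hedged sketch of a caller (example_vf_count() is illustrative; the networking core applies the same pattern to a netdev's parent device):

  #include <linux/device.h>
  #include <linux/netdevice.h>

  /* Sketch: ask the parent bus how many VFs sit behind this network device. */
  static int example_vf_count(struct net_device *netdev)
  {
  	struct device *parent = netdev->dev.parent;

  	return parent ? dev_num_vf(parent) : 0;
  }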
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 6fec9e81bd70..c62b709b1ce0 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -54,6 +54,11 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
+struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
+ unsigned int txqs,
+ unsigned int rxqs);
+#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
+
struct sk_buff **eth_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);
@@ -397,6 +402,66 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
}
/**
+ * ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return a u64 value of the address
+ */
+static inline u64 ether_addr_to_u64(const u8 *addr)
+{
+ u64 u = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ u = u << 8 | addr[i];
+
+ return u;
+}
+
+/**
+ * u64_to_ether_addr - Convert a u64 to an Ethernet address.
+ * @u: u64 to convert to an Ethernet MAC address
+ * @addr: Pointer to a six-byte array to contain the Ethernet address
+ */
+static inline void u64_to_ether_addr(u64 u, u8 *addr)
+{
+ int i;
+
+ for (i = ETH_ALEN - 1; i >= 0; i--) {
+ addr[i] = u & 0xff;
+ u = u >> 8;
+ }
+}
+
+/**
+ * eth_addr_dec - Decrement the given MAC address
+ *
+ * @addr: Pointer to a six-byte array containing Ethernet address to decrement
+ */
+static inline void eth_addr_dec(u8 *addr)
+{
+ u64 u = ether_addr_to_u64(addr);
+
+ u--;
+ u64_to_ether_addr(u, addr);
+}
+
+/**
+ * ether_addr_greater - Compare two Ethernet addresses
+ * @addr1: Pointer to a six-byte array containing the Ethernet address
+ * @addr2: Pointer to the other six-byte array containing the Ethernet address
+ *
+ * Compare two Ethernet addresses; returns true if addr1 is greater than addr2
+ */
+static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2)
+{
+ u64 u1 = ether_addr_to_u64(addr1);
+ u64 u2 = ether_addr_to_u64(addr2);
+
+ return u1 > u2;
+}
+
+/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
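[Editor's note] Round-tripping MAC addresses through u64 makes arithmetic and ordering on them trivial; a short sketch, e.g. for managing a contiguous address range (example_* names are illustrative):

  #include <linux/etherdevice.h>

  /* Check whether @addr lies in the inclusive range [@base, @last]. */
  static bool example_mac_in_range(const u8 *addr, const u8 *base, const u8 *last)
  {
  	return !ether_addr_greater(base, addr) && !ether_addr_greater(addr, last);
  }

  /* Step one address back, e.g. 02:00:00:00:01:00 -> 02:00:00:00:00:ff. */
  static void example_prev_mac(u8 *addr)
  {
  	eth_addr_dec(addr);
  }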
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c2d282764d5d..0c167fdee5f7 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -54,6 +54,12 @@ struct bpf_prog_aux;
#define BPF_REG_AX MAX_BPF_REG
#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
+/* As per nm, we expose JITed images as text (code) section for
+ * kallsyms. That way, tools like perf can find it to match
+ * addresses.
+ */
+#define BPF_SYM_ELF_TYPE 't'
+
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
@@ -545,7 +551,7 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-#ifdef CONFIG_STRICT_MODULE_RWX
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
set_memory_ro((unsigned long)fp, fp->pages);
@@ -555,6 +561,16 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
set_memory_rw((unsigned long)fp, fp->pages);
}
+
+static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+{
+ set_memory_ro((unsigned long)hdr, hdr->pages);
+}
+
+static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+{
+ set_memory_rw((unsigned long)hdr, hdr->pages);
+}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
@@ -563,7 +579,24 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}
-#endif /* CONFIG_STRICT_MODULE_RWX */
+
+static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+{
+}
+
+static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
+
+static inline struct bpf_binary_header *
+bpf_jit_binary_hdr(const struct bpf_prog *fp)
+{
+ unsigned long real_start = (unsigned long)fp->bpf_func;
+ unsigned long addr = real_start & PAGE_MASK;
+
+ return (void *)addr;
+}
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
@@ -607,6 +640,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
+void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_pkt_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
@@ -616,6 +650,7 @@ void bpf_warn_invalid_xdp_action(u32 act);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
+extern int bpf_jit_kallsyms;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -625,7 +660,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
-void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
@@ -651,6 +685,11 @@ static inline bool bpf_jit_is_ebpf(void)
# endif
}
+static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
+{
+ return fp->jited && bpf_jit_is_ebpf();
+}
+
static inline bool bpf_jit_blinding_enabled(void)
{
/* These are the prerequisites, should someone ever have the
@@ -668,15 +707,91 @@ static inline bool bpf_jit_blinding_enabled(void)
return true;
}
-#else
-static inline void bpf_jit_compile(struct bpf_prog *fp)
+
+static inline bool bpf_jit_kallsyms_enabled(void)
{
+ /* There are a couple of corner cases where kallsyms should
+ * not be enabled f.e. on hardening.
+ */
+ if (bpf_jit_harden)
+ return false;
+ if (!bpf_jit_kallsyms)
+ return false;
+ if (bpf_jit_kallsyms == 1)
+ return true;
+
+ return false;
+}
+
+const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char *sym);
+bool is_bpf_text_address(unsigned long addr);
+int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *sym);
+
+static inline const char *
+bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char **modname, char *sym)
+{
+ const char *ret = __bpf_address_lookup(addr, size, off, sym);
+
+ if (ret && modname)
+ *modname = NULL;
+ return ret;
+}
+
+void bpf_prog_kallsyms_add(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del(struct bpf_prog *fp);
+
+#else /* CONFIG_BPF_JIT */
+
+static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
+{
+ return false;
}
static inline void bpf_jit_free(struct bpf_prog *fp)
{
bpf_prog_unlock_free(fp);
}
+
+static inline bool bpf_jit_kallsyms_enabled(void)
+{
+ return false;
+}
+
+static inline const char *
+__bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char *sym)
+{
+ return NULL;
+}
+
+static inline bool is_bpf_text_address(unsigned long addr)
+{
+ return false;
+}
+
+static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *sym)
+{
+ return -ERANGE;
+}
+
+static inline const char *
+bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char **modname, char *sym)
+{
+ return NULL;
+}
+
+static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
+{
+}
+
+static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
+{
+}
#endif /* CONFIG_BPF_JIT */
#define BPF_ANC BIT(15)
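[Editor's note] bpf_jit_binary_hdr() recovers the bpf_binary_header backing fp->bpf_func (the JIT image is page aligned, so masking with PAGE_MASK suffices), and the lock/unlock helpers now key off CONFIG_ARCH_HAS_SET_MEMORY instead of CONFIG_STRICT_MODULE_RWX. A hedged sketch of a teardown path built from these helpers (not the exact in-tree code; example_jit_release() is invented for illustration):

  #include <linux/filter.h>

  static void example_jit_release(struct bpf_prog *fp)
  {
  	if (bpf_prog_ebpf_jited(fp)) {
  		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

  		bpf_prog_kallsyms_del(fp);	/* drop the kallsyms entry, if any */
  		bpf_jit_binary_unlock_ro(hdr);	/* make the image writable again */
  		bpf_jit_binary_free(hdr);
  	}
  }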
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index fe849329511a..0dd9498c694f 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -185,6 +185,8 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
/* number of user priorities 802.11 uses */
#define IEEE80211_NUM_UPS 8
+/* number of ACs */
+#define IEEE80211_NUM_ACS 4
#define IEEE80211_QOS_CTL_LEN 2
/* 1d tag mask */
@@ -1041,8 +1043,9 @@ struct ieee80211_mgmt {
} u;
} __packed __aligned(2);
-/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */
+/* Supported rates membership selectors */
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
+#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -2322,31 +2325,33 @@ enum ieee80211_sa_query_action {
};
+#define SUITE(oui, id) (((oui) << 8) | (id))
+
/* cipher suite selectors */
-#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00
-#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01
-#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02
-/* reserved: 0x000FAC03 */
-#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04
-#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
-#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
-#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08
-#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09
-#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A
-#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
-#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
-#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
-
-#define WLAN_CIPHER_SUITE_SMS4 0x00147201
+#define WLAN_CIPHER_SUITE_USE_GROUP SUITE(0x000FAC, 0)
+#define WLAN_CIPHER_SUITE_WEP40 SUITE(0x000FAC, 1)
+#define WLAN_CIPHER_SUITE_TKIP SUITE(0x000FAC, 2)
+/* reserved: SUITE(0x000FAC, 3) */
+#define WLAN_CIPHER_SUITE_CCMP SUITE(0x000FAC, 4)
+#define WLAN_CIPHER_SUITE_WEP104 SUITE(0x000FAC, 5)
+#define WLAN_CIPHER_SUITE_AES_CMAC SUITE(0x000FAC, 6)
+#define WLAN_CIPHER_SUITE_GCMP SUITE(0x000FAC, 8)
+#define WLAN_CIPHER_SUITE_GCMP_256 SUITE(0x000FAC, 9)
+#define WLAN_CIPHER_SUITE_CCMP_256 SUITE(0x000FAC, 10)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128 SUITE(0x000FAC, 11)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256 SUITE(0x000FAC, 12)
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256 SUITE(0x000FAC, 13)
+
+#define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1)
/* AKM suite selectors */
-#define WLAN_AKM_SUITE_8021X 0x000FAC01
-#define WLAN_AKM_SUITE_PSK 0x000FAC02
-#define WLAN_AKM_SUITE_8021X_SHA256 0x000FAC05
-#define WLAN_AKM_SUITE_PSK_SHA256 0x000FAC06
-#define WLAN_AKM_SUITE_TDLS 0x000FAC07
-#define WLAN_AKM_SUITE_SAE 0x000FAC08
-#define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09
+#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1)
+#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2)
+#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5)
+#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6)
+#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
+#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
+#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
#define WLAN_MAX_KEY_LEN 32
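[Editor's note] The SUITE() macro only packs the 24-bit OUI above the 8-bit selector, so the generated constants stay byte-for-byte identical to the removed literals. An illustrative compile-time check (not part of the patch):

  #include <linux/bug.h>
  #include <linux/ieee80211.h>

  static inline void example_suite_encoding_check(void)
  {
  	/* SUITE(0x000FAC, 4) == (0x000FAC << 8) | 4 == 0x000FAC04 */
  	BUILD_BUG_ON(WLAN_CIPHER_SUITE_CCMP != 0x000FAC04);
  	BUILD_BUG_ON(WLAN_AKM_SUITE_PSK != 0x000FAC02);
  }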
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index c6587c01d951..c5847dc75a93 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -46,6 +46,8 @@ struct br_ip_list {
#define BR_LEARNING_SYNC BIT(9)
#define BR_PROXYARP_WIFI BIT(10)
#define BR_MCAST_FLOOD BIT(11)
+#define BR_MULTICAST_TO_UNICAST BIT(12)
+#define BR_VLAN_TUNNEL BIT(13)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
index 4316aa173dde..46df7e565d6f 100644
--- a/include/linux/if_frad.h
+++ b/include/linux/if_frad.h
@@ -66,8 +66,6 @@ struct dlci_local
struct frad_local
{
- struct net_device_stats stats;
-
/* devices which this FRAD is slaved to */
struct net_device *master[CONFIG_DLCI_MAX];
short dlci[CONFIG_DLCI_MAX];
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index a4ccc3122f93..c9ec1343d187 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -9,19 +9,6 @@
#include <net/netlink.h>
#include <linux/u64_stats_sync.h>
-#if IS_ENABLED(CONFIG_MACVTAP)
-struct socket *macvtap_get_socket(struct file *);
-#else
-#include <linux/err.h>
-#include <linux/errno.h>
-struct file;
-struct socket;
-static inline struct socket *macvtap_get_socket(struct file *f)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif /* CONFIG_MACVTAP */
-
struct macvlan_port;
struct macvtap_queue;
@@ -29,7 +16,7 @@ struct macvtap_queue;
* Maximum times a macvtap device can be opened. This can be used to
* configure the number of receive queue, e.g. for multiqueue virtio.
*/
-#define MAX_MACVTAP_QUEUES 256
+#define MAX_TAP_QUEUES 256
#define MACVLAN_MC_FILTER_BITS 8
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
@@ -49,7 +36,7 @@ struct macvlan_dev {
enum macvlan_mode mode;
u16 flags;
/* This array tracks active taps. */
- struct macvtap_queue __rcu *taps[MAX_MACVTAP_QUEUES];
+ struct tap_queue __rcu *taps[MAX_TAP_QUEUES];
/* This list tracks all taps (both enabled and disabled) */
struct list_head queue_list;
int numvtaps;
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
new file mode 100644
index 000000000000..3482c3c2037d
--- /dev/null
+++ b/include/linux/if_tap.h
@@ -0,0 +1,75 @@
+#ifndef _LINUX_IF_TAP_H_
+#define _LINUX_IF_TAP_H_
+
+#if IS_ENABLED(CONFIG_TAP)
+struct socket *tap_get_socket(struct file *);
+#else
+#include <linux/err.h>
+#include <linux/errno.h>
+struct file;
+struct socket;
+static inline struct socket *tap_get_socket(struct file *f)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_TAP */
+
+#include <net/sock.h>
+#include <linux/skb_array.h>
+
+#define MAX_TAP_QUEUES 256
+
+struct tap_queue;
+
+struct tap_dev {
+ struct net_device *dev;
+ u16 flags;
+ /* This array tracks active taps. */
+ struct tap_queue __rcu *taps[MAX_TAP_QUEUES];
+ /* This list tracks all taps (both enabled and disabled) */
+ struct list_head queue_list;
+ int numvtaps;
+ int numqueues;
+ netdev_features_t tap_features;
+ int minor;
+
+ void (*update_features)(struct tap_dev *tap, netdev_features_t features);
+ void (*count_tx_dropped)(struct tap_dev *tap);
+ void (*count_rx_dropped)(struct tap_dev *tap);
+};
+
+/*
+ * A tap queue is the central object of the tap module; it connects
+ * an open character device to a virtual interface. There can be
+ * multiple queues on one interface, which map back to queues
+ * implemented in hardware on the underlying device.
+ *
+ * tap_proto is used to allocate queues through the sock allocation
+ * mechanism.
+ *
+ */
+
+struct tap_queue {
+ struct sock sk;
+ struct socket sock;
+ struct socket_wq wq;
+ int vnet_hdr_sz;
+ struct tap_dev __rcu *tap;
+ struct file *file;
+ unsigned int flags;
+ u16 queue_index;
+ bool enabled;
+ struct list_head next;
+ struct skb_array skb_array;
+};
+
+rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
+void tap_del_queues(struct tap_dev *tap);
+int tap_get_minor(dev_t major, struct tap_dev *tap);
+void tap_free_minor(dev_t major, struct tap_dev *tap);
+int tap_queue_resize(struct tap_dev *tap);
+int tap_create_cdev(struct cdev *tap_cdev,
+ dev_t *tap_major, const char *device_name);
+void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
+
+#endif /*_LINUX_IF_TAP_H_*/
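[Editor's note] A rough sketch of how a driver built on this header might use it (example_* names are hypothetical; macvtap and the new ipvtap are the in-tree consumers): embed a struct tap_dev, point it at the netdev, and deliver received frames to the tap queues by registering tap_handle_frame() as the rx handler.

  #include <linux/if_tap.h>
  #include <linux/netdevice.h>

  struct example_tap {
  	struct tap_dev tap;		/* embedded; taps[] and queue_list live here */
  	/* ... driver-private state ... */
  };

  static int example_tap_newlink(struct net_device *dev, struct example_tap *vtap)
  {
  	INIT_LIST_HEAD(&vtap->tap.queue_list);
  	vtap->tap.dev = dev;

  	/* hand frames received on @dev to the attached tap queues */
  	return netdev_rx_handler_register(dev, tap_handle_frame, &vtap->tap);
  }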
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 671d014e6429..71be5b330d21 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -69,6 +69,7 @@ struct ipv6_devconf {
__s32 seg6_require_hmac;
#endif
__u32 enhanced_dad;
+ __u32 addr_gen_mode;
struct ctl_table_header *sysctl_header;
};
diff --git a/include/linux/list.h b/include/linux/list.h
index d1039ecaf94f..ae537fa46216 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -527,6 +527,19 @@ static inline void list_splice_tail_init(struct list_head *list,
pos = list_next_entry(pos, member))
/**
+ * list_for_each_entry_from_reverse - iterate backwards over list of given type
+ * from the current point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate backwards over list of given type, continuing from current position.
+ */
+#define list_for_each_entry_from_reverse(pos, head, member) \
+ for (; &pos->member != (head); \
+ pos = list_prev_entry(pos, member))
+
+/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
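[Editor's note] A minimal sketch of the new iterator (the item type and undo action are hypothetical): given a cursor somewhere inside the list, walk back towards the head, e.g. to roll back entries already processed when a later step fails.

  #include <linux/list.h>

  struct example_item {
  	struct list_head node;
  	int value;
  };

  /* Undo every entry from @pos back to (but excluding) the list head. */
  static void example_rollback_from(struct example_item *pos, struct list_head *head)
  {
  	list_for_each_entry_from_reverse(pos, head, node)
  		pos->value = 0;		/* stand-in for the real undo action */
  }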
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index a57f0dfb6db7..4055cf8cc978 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -17,8 +17,15 @@
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
#define MARVELL_PHY_ID_88E1540 0x01410eb0
+#define MARVELL_PHY_ID_88E1545 0x01410ea0
#define MARVELL_PHY_ID_88E3016 0x01410e60
+/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
+ * not have a model ID. So the switch driver traps reads to the ID2
+ * register and returns the switch family ID
+ */
+#define MARVELL_PHY_ID_88E6390 0x01410f90
+
/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index bf9d1d750693..ca08ab16ecdc 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -10,6 +10,7 @@
#define __LINUX_MDIO_H__
#include <uapi/linux/mdio.h>
+#include <linux/mod_devicetable.h>
struct mii_bus;
@@ -29,6 +30,7 @@ struct mdio_device {
const struct dev_pm_ops *pm_ops;
struct mii_bus *bus;
+ char modalias[MDIO_NAME_SIZE];
int (*bus_match)(struct device *dev, struct device_driver *drv);
void (*device_free)(struct mdio_device *mdiodev);
@@ -71,6 +73,7 @@ int mdio_device_register(struct mdio_device *mdiodev);
void mdio_device_remove(struct mdio_device *mdiodev);
int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
+int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
static inline bool mdio_phy_id_is_c45(int phy_id)
{
@@ -130,6 +133,10 @@ extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
struct ethtool_cmd *ecmd,
u32 npage_adv, u32 npage_lpa);
+extern void
+mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio,
+ struct ethtool_link_ksettings *cmd,
+ u32 npage_adv, u32 npage_lpa);
/**
* mdio45_ethtool_gset - get settings for ETHTOOL_GSET
@@ -147,6 +154,23 @@ static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
}
+/**
+ * mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS
+ * @mdio: MDIO interface
+ * @cmd: Ethtool request structure
+ *
+ * Since the CSRs for auto-negotiation using next pages are not fully
+ * standardised, this function does not attempt to decode them. Use
+ * mdio45_ethtool_ksettings_get_npage() to specify advertisement bits
+ * from next pages.
+ */
+static inline void
+mdio45_ethtool_ksettings_get(const struct mdio_if_info *mdio,
+ struct ethtool_link_ksettings *cmd)
+{
+ mdio45_ethtool_ksettings_get_npage(mdio, cmd, 0, 0);
+}
+
extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
struct mii_ioctl_data *mii_data, int cmd);
@@ -244,7 +268,7 @@ bool mdiobus_is_registered_device(struct mii_bus *bus, int addr);
struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
/**
- * module_mdio_driver() - Helper macro for registering mdio drivers
+ * mdio_module_driver() - Helper macro for registering mdio drivers
*
* Helper macro for MDIO drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6533c16e27ad..7e66e4f62858 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1374,6 +1374,7 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
@@ -1539,8 +1540,13 @@ enum mlx4_ptys_proto {
MLX4_PTYS_EN = 1<<2,
};
+enum mlx4_ptys_flags {
+ MLX4_PTYS_AN_DISABLE_CAP = 1 << 5,
+ MLX4_PTYS_AN_DISABLE_ADMIN = 1 << 6,
+};
+
struct mlx4_ptys_reg {
- u8 resrvd1;
+ u8 flags;
u8 local_port;
u8 resrvd2;
u8 proto_mask;
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 7c3c0d3aca37..95898847c7d4 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -42,13 +42,13 @@ struct mlx5_core_cq {
int cqe_sz;
__be32 *set_ci_db;
__be32 *arm_db;
+ struct mlx5_uars_page *uar;
atomic_t refcount;
struct completion free;
unsigned vector;
unsigned int irqn;
void (*comp) (struct mlx5_core_cq *);
void (*event) (struct mlx5_core_cq *, enum mlx5_event);
- struct mlx5_uar *uar;
u32 cons_index;
unsigned arm_sn;
struct mlx5_rsc_debug *dbg;
@@ -144,7 +144,6 @@ enum {
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
void __iomem *uar_page,
- spinlock_t *doorbell_lock,
u32 cons_index)
{
__be32 doorbell[2];
@@ -164,7 +163,7 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
doorbell[1] = cpu_to_be32(cq->cqn);
- mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
+ mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
}
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 52b437431c6a..dd9a263ed368 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -67,10 +67,11 @@
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
+ u32 _v = v; \
BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
- (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
+ (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
@@ -212,10 +213,20 @@ enum {
};
enum {
- MLX5_BF_REGS_PER_PAGE = 4,
- MLX5_MAX_UAR_PAGES = 1 << 8,
- MLX5_NON_FP_BF_REGS_PER_PAGE = 2,
- MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
+ MLX5_ADAPTER_PAGE_SHIFT = 12,
+ MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
+};
+
+enum {
+ MLX5_BFREGS_PER_UAR = 4,
+ MLX5_MAX_UARS = 1 << 8,
+ MLX5_NON_FP_BFREGS_PER_UAR = 2,
+ MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR -
+ MLX5_NON_FP_BFREGS_PER_UAR,
+ MLX5_MAX_BFREGS = MLX5_MAX_UARS *
+ MLX5_NON_FP_BFREGS_PER_UAR,
+ MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
+ MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
};
enum {
@@ -279,6 +290,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
+ MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
@@ -389,11 +401,6 @@ enum {
};
enum {
- MLX5_ADAPTER_PAGE_SHIFT = 12,
- MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
-};
-
-enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
@@ -534,7 +541,9 @@ struct mlx5_eqe_page_fault {
__be16 wqe_index;
u16 reserved2;
__be16 packet_length;
- u8 reserved3[12];
+ __be32 token;
+ u8 reserved4[8];
+ __be32 pftype_wq;
} __packed wqe;
struct {
__be32 r_key;
@@ -542,9 +551,9 @@ struct mlx5_eqe_page_fault {
__be16 packet_length;
__be32 rdma_op_len;
__be64 rdma_va;
+ __be32 pftype_token;
} __packed rdma;
} __packed;
- __be32 flags_qpn;
} __packed;
struct mlx5_eqe_vport_change {
@@ -562,6 +571,22 @@ struct mlx5_eqe_port_module {
u8 error_type;
} __packed;
+struct mlx5_eqe_pps {
+ u8 rsvd0[3];
+ u8 pin;
+ u8 rsvd1[4];
+ union {
+ struct {
+ __be32 time_sec;
+ __be32 time_nsec;
+ };
+ struct {
+ __be64 time_stamp;
+ };
+ };
+ u8 rsvd2[12];
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -576,6 +601,7 @@ union ev_data {
struct mlx5_eqe_page_fault page_fault;
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
+ struct mlx5_eqe_pps pps;
} __packed;
struct mlx5_eqe {
@@ -945,38 +971,54 @@ enum mlx5_cap_type {
MLX5_CAP_NUM
};
+enum mlx5_pcam_reg_groups {
+ MLX5_PCAM_REGS_5000_TO_507F = 0x0,
+};
+
+enum mlx5_pcam_feature_groups {
+ MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0,
+};
+
+enum mlx5_mcam_reg_groups {
+ MLX5_MCAM_REGS_FIRST_128 = 0x0,
+};
+
+enum mlx5_mcam_feature_groups {
+ MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
+};
+
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_GEN_MAX(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
#define MLX5_CAP_ETH_MAX(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
#define MLX5_CAP_ROCE(mdev, cap) \
- MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
#define MLX5_CAP_ROCE_MAX(mdev, cap) \
- MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
#define MLX5_CAP_ATOMIC(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
@@ -998,11 +1040,11 @@ enum mlx5_cap_type {
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
@@ -1024,21 +1066,27 @@ enum mlx5_cap_type {
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
#define MLX5_CAP_ESW_MAX(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
#define MLX5_CAP_ODP(mdev, cap)\
- MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
MLX5_GET(vector_calc_cap, \
- mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
+ mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
#define MLX5_CAP_QOS(mdev, cap)\
- MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
+ MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+
+#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
+ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
+
+#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
enum {
MLX5_CMD_STAT_OK = 0x0,
@@ -1068,9 +1116,14 @@ enum {
MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
+ MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
+enum {
+ MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
+};
+
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
index afc78a3f4462..0787de28f2fc 100644
--- a/include/linux/mlx5/doorbell.h
+++ b/include/linux/mlx5/doorbell.h
@@ -68,10 +68,12 @@ static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
{
unsigned long flags;
- spin_lock_irqsave(doorbell_lock, flags);
+ if (doorbell_lock)
+ spin_lock_irqsave(doorbell_lock, flags);
__raw_writel((__force u32) val[0], dest);
__raw_writel((__force u32) val[1], dest + 4);
- spin_unlock_irqrestore(doorbell_lock, flags);
+ if (doorbell_lock)
+ spin_unlock_irqrestore(doorbell_lock, flags);
}
#endif
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 735b36335f29..1bc4641734da 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -42,6 +42,7 @@
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
+#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/mlx5/device.h>
@@ -83,6 +84,7 @@ enum {
MLX5_EQ_VEC_PAGES = 0,
MLX5_EQ_VEC_CMD = 1,
MLX5_EQ_VEC_ASYNC = 2,
+ MLX5_EQ_VEC_PFAULT = 3,
MLX5_EQ_VEC_COMP_BASE,
};
@@ -119,10 +121,15 @@ enum {
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
MLX5_REG_PMLP = 0x5002,
+ MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MPCNT = 0x9051,
+ MLX5_REG_MTPPS = 0x9053,
+ MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MCAM = 0x907f,
};
enum mlx5_dcbx_oper_mode {
@@ -170,6 +177,7 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_PKEY_CHANGE,
MLX5_DEV_EVENT_GUID_CHANGE,
MLX5_DEV_EVENT_CLIENT_REREG,
+ MLX5_DEV_EVENT_PPS,
};
enum mlx5_port_status {
@@ -177,36 +185,26 @@ enum mlx5_port_status {
MLX5_PORT_DOWN = 2,
};
-struct mlx5_uuar_info {
- struct mlx5_uar *uars;
- int num_uars;
- int num_low_latency_uuars;
- unsigned long *bitmap;
+enum mlx5_eq_type {
+ MLX5_EQ_TYPE_COMP,
+ MLX5_EQ_TYPE_ASYNC,
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ MLX5_EQ_TYPE_PF,
+#endif
+};
+
+struct mlx5_bfreg_info {
+ u32 *sys_pages;
+ int num_low_latency_bfregs;
unsigned int *count;
- struct mlx5_bf *bfs;
/*
- * protect uuar allocation data structs
+ * protect bfreg allocation data structs
*/
struct mutex lock;
u32 ver;
-};
-
-struct mlx5_bf {
- void __iomem *reg;
- void __iomem *regreg;
- int buf_size;
- struct mlx5_uar *uar;
- unsigned long offset;
- int need_lock;
- /* protect blue flame buffer selection when needed
- */
- spinlock_t lock;
-
- /* serialize 64 bit writes when done as two 32 bit accesses
- */
- spinlock_t lock32;
- int uuarn;
+ bool lib_uar_4k;
+ u32 num_sys_pages;
};
struct mlx5_cmd_first {
@@ -332,6 +330,14 @@ struct mlx5_eq_tasklet {
spinlock_t lock;
};
+struct mlx5_eq_pagefault {
+ struct work_struct work;
+ /* Pagefaults lock */
+ spinlock_t lock;
+ struct workqueue_struct *wq;
+ mempool_t *pool;
+};
+
struct mlx5_eq {
struct mlx5_core_dev *dev;
__be32 __iomem *doorbell;
@@ -345,7 +351,13 @@ struct mlx5_eq {
struct list_head list;
int index;
struct mlx5_rsc_debug *dbg;
- struct mlx5_eq_tasklet tasklet_ctx;
+ enum mlx5_eq_type type;
+ union {
+ struct mlx5_eq_tasklet tasklet_ctx;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct mlx5_eq_pagefault pf_ctx;
+#endif
+ };
};
struct mlx5_core_psv {
@@ -369,13 +381,21 @@ struct mlx5_core_sig_ctx {
u32 sigerr_count;
};
+enum {
+ MLX5_MKEY_MR = 1,
+ MLX5_MKEY_MW,
+};
+
struct mlx5_core_mkey {
u64 iova;
u64 size;
u32 key;
u32 pd;
+ u32 type;
};
+#define MLX5_24BIT_MASK ((1 << 24) - 1)
+
enum mlx5_res_type {
MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
@@ -410,20 +430,47 @@ struct mlx5_eq_table {
struct mlx5_eq pages_eq;
struct mlx5_eq async_eq;
struct mlx5_eq cmd_eq;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct mlx5_eq pfault_eq;
+#endif
int num_comp_vectors;
/* protect EQs list
*/
spinlock_t lock;
};
-struct mlx5_uar {
- u32 index;
- struct list_head bf_list;
- unsigned free_bf_bmap;
- void __iomem *bf_map;
+struct mlx5_uars_page {
void __iomem *map;
+ bool wc;
+ u32 index;
+ struct list_head list;
+ unsigned int bfregs;
+ unsigned long *reg_bitmap; /* for non fast path bf regs */
+ unsigned long *fp_bitmap;
+ unsigned int reg_avail;
+ unsigned int fp_avail;
+ struct kref ref_count;
+ struct mlx5_core_dev *mdev;
+};
+
+struct mlx5_bfreg_head {
+ /* protect blue flame registers allocations */
+ struct mutex lock;
+ struct list_head list;
+};
+
+struct mlx5_bfreg_data {
+ struct mlx5_bfreg_head reg_head;
+ struct mlx5_bfreg_head wc_head;
};
+struct mlx5_sq_bfreg {
+ void __iomem *map;
+ struct mlx5_uars_page *up;
+ bool wc;
+ u32 index;
+ unsigned int offset;
+};
struct mlx5_core_health {
struct health_buffer __iomem *health;
@@ -496,6 +543,7 @@ struct mlx5_fc_stats {
struct mlx5_eswitch;
struct mlx5_lag;
+struct mlx5_pagefault;
struct mlx5_rl_entry {
u32 rate;
@@ -542,8 +590,6 @@ struct mlx5_priv {
struct mlx5_eq_table eq_table;
struct msix_entry *msix_arr;
struct mlx5_irq_info *irq_info;
- struct mlx5_uuar_info uuari;
- MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
/* pages stuff */
struct workqueue_struct *pg_wq;
@@ -600,6 +646,16 @@ struct mlx5_priv {
struct mlx5_rl_table rl_table;
struct mlx5_port_module_event_stats pme_stats;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ void (*pfault)(struct mlx5_core_dev *dev,
+ void *context,
+ struct mlx5_pagefault *pfault);
+ void *pfault_ctx;
+ struct srcu_struct pfault_srcu;
+#endif
+ struct mlx5_bfreg_data bfregs;
+ struct mlx5_uars_page *uar;
};
enum mlx5_device_state {
@@ -618,13 +674,56 @@ enum mlx5_pci_status {
MLX5_PCI_STATUS_ENABLED,
};
+enum mlx5_pagefault_type_flags {
+ MLX5_PFAULT_REQUESTOR = 1 << 0,
+ MLX5_PFAULT_WRITE = 1 << 1,
+ MLX5_PFAULT_RDMA = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+ u32 bytes_committed;
+ u32 token;
+ u8 event_subtype;
+ u8 type;
+ union {
+ /* Initiator or send message responder pagefault details. */
+ struct {
+ /* Received packet size, only valid for responders. */
+ u32 packet_size;
+ /*
+ * Number of resource holding WQE, depends on type.
+ */
+ u32 wq_num;
+ /*
+ * WQE index. Refers to either the send queue or
+ * receive queue, according to event_subtype.
+ */
+ u16 wqe_index;
+ } wqe;
+ /* RDMA responder pagefault details */
+ struct {
+ u32 r_key;
+ /*
+ * Received packet size, minimal size page fault
+ * resolution required for forward progress.
+ */
+ u32 packet_size;
+ u32 rdma_op_len;
+ u64 rdma_va;
+ } rdma;
+ };
+
+ struct mlx5_eq *eq;
+ struct work_struct work;
+};
+
struct mlx5_td {
struct list_head tirs_list;
u32 tdn;
};
struct mlx5e_resources {
- struct mlx5_uar cq_uar;
u32 pdn;
struct mlx5_td td;
struct mlx5_core_mkey mkey;
@@ -639,8 +738,12 @@ struct mlx5_core_dev {
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
- u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
- u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct {
+ u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
+ u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+ } caps;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
enum mlx5_device_state state;
@@ -814,11 +917,6 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
-int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
- bool map_wc);
-void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
@@ -878,15 +976,13 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
-#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name, struct mlx5_uar *uar);
+ int nent, u64 mask, const char *name,
+ enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
@@ -925,12 +1021,19 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
u8 port_num, void *out, size_t sz);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
+ u32 wq_num, u8 type, int error);
+#endif
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+ bool map_wc, bool fast_path);
+void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
@@ -958,7 +1061,7 @@ enum {
};
enum {
- MAX_MR_CACHE_ENTRIES = 16,
+ MAX_MR_CACHE_ENTRIES = 21,
};
enum {
@@ -973,6 +1076,9 @@ struct mlx5_interface {
void (*detach)(struct mlx5_core_dev *dev, void *context);
void (*event)(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param);
+ void (*pfault)(struct mlx5_core_dev *dev,
+ void *context,
+ struct mlx5_pagefault *pfault);
void * (*get_dev)(void *context);
int protocol;
struct list_head list;
@@ -987,6 +1093,8 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
+void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
struct mlx5_profile {
u64 mask;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a852e9db6f0d..afcd4736d8df 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -328,7 +328,7 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
u8 receive[0x1];
u8 write[0x1];
u8 read[0x1];
- u8 reserved_at_4[0x1];
+ u8 atomic[0x1];
u8 srq_receive[0x1];
u8 reserved_at_6[0x1a];
};
@@ -365,8 +365,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 ip_protocol[0x8];
u8 ip_dscp[0x6];
u8 ip_ecn[0x2];
- u8 vlan_tag[0x1];
- u8 reserved_at_91[0x1];
+ u8 cvlan_tag[0x1];
+ u8 svlan_tag[0x1];
u8 frag[0x1];
u8 reserved_at_93[0x4];
u8 tcp_flags[0x9];
@@ -398,9 +398,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 inner_second_cfi[0x1];
u8 inner_second_vid[0xc];
- u8 outer_second_vlan_tag[0x1];
- u8 inner_second_vlan_tag[0x1];
- u8 reserved_at_62[0xe];
+ u8 outer_second_cvlan_tag[0x1];
+ u8 inner_second_cvlan_tag[0x1];
+ u8 outer_second_svlan_tag[0x1];
+ u8 inner_second_svlan_tag[0x1];
+ u8 reserved_at_64[0xc];
u8 gre_protocol[0x10];
u8 gre_key_h[0x18];
@@ -545,7 +547,9 @@ struct mlx5_ifc_e_switch_cap_bits {
struct mlx5_ifc_qos_cap_bits {
u8 packet_pacing[0x1];
u8 esw_scheduling[0x1];
- u8 reserved_at_2[0x1e];
+ u8 esw_bw_share[0x1];
+ u8 esw_rate_limit[0x1];
+ u8 reserved_at_4[0x1c];
u8 reserved_at_20[0x20];
@@ -573,7 +577,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 lro_cap[0x1];
u8 lro_psh_flag[0x1];
u8 lro_time_stamp[0x1];
- u8 reserved_at_5[0x3];
+ u8 reserved_at_5[0x2];
+ u8 wqe_vlan_insert[0x1];
u8 self_lb_en_modifiable[0x1];
u8 reserved_at_9[0x2];
u8 max_lso_cap[0x5];
@@ -782,11 +787,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_eq[0x4];
u8 max_indirection[0x8];
- u8 reserved_at_108[0x1];
+ u8 fixed_buffer_size[0x1];
u8 log_max_mrw_sz[0x7];
u8 reserved_at_110[0x2];
u8 log_max_bsf_list_size[0x6];
- u8 reserved_at_118[0x2];
+ u8 umr_extended_translation_offset[0x1];
+ u8 null_mkey[0x1];
u8 log_max_klm_list_size[0x6];
u8 reserved_at_120[0xa];
@@ -799,10 +805,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_150[0xa];
u8 log_max_ra_res_qp[0x6];
- u8 pad_cap[0x1];
+ u8 end_pad[0x1];
u8 cc_query_allowed[0x1];
u8 cc_modify_allowed[0x1];
- u8 reserved_at_163[0xd];
+ u8 start_pad[0x1];
+ u8 cache_line_128byte[0x1];
+ u8 reserved_at_163[0xb];
u8 gid_table_size[0x10];
u8 out_of_seq_cnt[0x1];
@@ -823,18 +831,21 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
u8 early_vf_enable[0x1];
- u8 reserved_at_1a9[0x2];
+ u8 mcam_reg[0x1];
+ u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
u8 port_module_event[0x1];
- u8 reserved_at_1b0[0x1];
+ u8 reserved_at_1b1[0x1];
u8 ports_check[0x1];
- u8 reserved_at_1b2[0x1];
+ u8 reserved_at_1b3[0x1];
u8 disable_link_up[0x1];
u8 beacon_led[0x1];
u8 port_type[0x2];
u8 num_ports[0x8];
- u8 reserved_at_1c0[0x3];
+ u8 reserved_at_1c0[0x1];
+ u8 pps[0x1];
+ u8 pps_modify[0x1];
u8 log_max_msg[0x5];
u8 reserved_at_1c8[0x4];
u8 max_tc[0x4];
@@ -858,7 +869,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 compact_address_vector[0x1];
u8 striding_rq[0x1];
- u8 reserved_at_201[0x2];
+ u8 reserved_at_202[0x2];
u8 ipoib_basic_offloads[0x1];
u8 reserved_at_205[0xa];
u8 drain_sigerr[0x1];
@@ -904,7 +915,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uc[0x1];
u8 rc[0x1];
- u8 reserved_at_240[0xa];
+ u8 uar_4k[0x1];
+ u8 reserved_at_241[0x9];
u8 uar_sz[0x6];
u8 reserved_at_250[0x8];
u8 log_pg_sz[0x8];
@@ -996,7 +1008,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 device_frequency_mhz[0x20];
u8 device_frequency_khz[0x20];
- u8 reserved_at_500[0x80];
+ u8 reserved_at_500[0x20];
+ u8 num_of_uars_per_page[0x20];
+ u8 reserved_at_540[0x40];
u8 reserved_at_580[0x3f];
u8 cqe_compression[0x1];
@@ -1009,10 +1023,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 rndv_offload_rc[0x1];
u8 rndv_offload_dc[0x1];
u8 log_tag_matching_list_sz[0x5];
- u8 reserved_at_5e8[0x3];
+ u8 reserved_at_5f8[0x3];
u8 log_max_xrq[0x5];
- u8 reserved_at_5f0[0x200];
+ u8 reserved_at_600[0x200];
};
enum mlx5_flow_destination_type {
@@ -1375,6 +1389,42 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
u8 reserved_at_640[0x180];
};
+struct mlx5_ifc_phys_layer_statistical_cntrs_bits {
+ u8 time_since_last_clear_high[0x20];
+
+ u8 time_since_last_clear_low[0x20];
+
+ u8 phy_received_bits_high[0x20];
+
+ u8 phy_received_bits_low[0x20];
+
+ u8 phy_symbol_errors_high[0x20];
+
+ u8 phy_symbol_errors_low[0x20];
+
+ u8 phy_corrected_bits_high[0x20];
+
+ u8 phy_corrected_bits_low[0x20];
+
+ u8 phy_corrected_bits_lane0_high[0x20];
+
+ u8 phy_corrected_bits_lane0_low[0x20];
+
+ u8 phy_corrected_bits_lane1_high[0x20];
+
+ u8 phy_corrected_bits_lane1_low[0x20];
+
+ u8 phy_corrected_bits_lane2_high[0x20];
+
+ u8 phy_corrected_bits_lane2_low[0x20];
+
+ u8 phy_corrected_bits_lane3_high[0x20];
+
+ u8 phy_corrected_bits_lane3_low[0x20];
+
+ u8 reserved_at_200[0x5c0];
+};
+
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
u8 symbol_error_counter[0x10];
@@ -1757,6 +1807,30 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
u8 reserved_at_4c0[0x300];
};
+struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
+ u8 life_time_counter_high[0x20];
+
+ u8 life_time_counter_low[0x20];
+
+ u8 rx_errors[0x20];
+
+ u8 tx_errors[0x20];
+
+ u8 l0_to_recovery_eieos[0x20];
+
+ u8 l0_to_recovery_ts[0x20];
+
+ u8 l0_to_recovery_framing[0x20];
+
+ u8 l0_to_recovery_retrain[0x20];
+
+ u8 crc_error_dllp[0x20];
+
+ u8 crc_error_tlp[0x20];
+
+ u8 reserved_at_140[0x680];
+};
+
struct mlx5_ifc_cmd_inter_comp_event_bits {
u8 command_completion_vector[0x20];
@@ -2495,6 +2569,7 @@ enum {
MLX5_MKC_ACCESS_MODE_PA = 0x0,
MLX5_MKC_ACCESS_MODE_MTT = 0x1,
MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
+ MLX5_MKC_ACCESS_MODE_KSM = 0x3,
};
struct mlx5_ifc_mkc_bits {
@@ -2918,6 +2993,12 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
+ u8 reserved_at_0[0x7c0];
+};
+
+union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
+ struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
u8 reserved_at_0[0x7c0];
};
@@ -3597,6 +3678,10 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 dump_fill_mkey[0x20];
u8 resd_lkey[0x20];
+
+ u8 null_mkey[0x20];
+
+ u8 reserved_at_a0[0x60];
};
struct mlx5_ifc_query_special_contexts_in_bits {
@@ -4689,12 +4774,11 @@ struct mlx5_ifc_page_fault_resume_in_bits {
u8 error[0x1];
u8 reserved_at_41[0x4];
- u8 rdma[0x1];
- u8 read_write[0x1];
- u8 req_res[0x1];
- u8 qpn[0x18];
+ u8 page_fault_type[0x3];
+ u8 wq_number[0x18];
- u8 reserved_at_60[0x20];
+ u8 reserved_at_60[0x8];
+ u8 token[0x18];
};
struct mlx5_ifc_nop_out_bits {
@@ -7240,6 +7324,18 @@ struct mlx5_ifc_ppcnt_reg_bits {
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
+struct mlx5_ifc_mpcnt_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 pcie_index[0x8];
+ u8 reserved_at_10[0xa];
+ u8 grp[0x6];
+
+ u8 clr[0x1];
+ u8 reserved_at_21[0x1f];
+
+ union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
struct mlx5_ifc_ppad_reg_bits {
u8 reserved_at_0[0x3];
u8 single_mac[0x1];
@@ -7469,6 +7565,63 @@ struct mlx5_ifc_peir_reg_bits {
u8 error_type[0x8];
};
+struct mlx5_ifc_pcam_enhanced_features_bits {
+ u8 reserved_at_0[0x7e];
+
+ u8 ppcnt_discard_group[0x1];
+ u8 ppcnt_statistical_group[0x1];
+};
+
+struct mlx5_ifc_pcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+
+ u8 reserved_at_20[0x20];
+
+ union {
+ u8 reserved_at_0[0x80];
+ } port_access_reg_cap_mask;
+
+ u8 reserved_at_c0[0x80];
+
+ union {
+ struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features;
+ u8 reserved_at_0[0x80];
+ } feature_cap_mask;
+
+ u8 reserved_at_1c0[0xc0];
+};
+
+struct mlx5_ifc_mcam_enhanced_features_bits {
+ u8 reserved_at_0[0x7f];
+
+ u8 pcie_performance_group[0x1];
+};
+
+struct mlx5_ifc_mcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+
+ u8 reserved_at_20[0x20];
+
+ union {
+ u8 reserved_at_0[0x80];
+ } mng_access_reg_cap_mask;
+
+ u8 reserved_at_c0[0x80];
+
+ union {
+ struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features;
+ u8 reserved_at_0[0x80];
+ } mng_feature_cap_mask;
+
+ u8 reserved_at_1c0[0x80];
+};
+
struct mlx5_ifc_pcap_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
@@ -7813,6 +7966,60 @@ struct mlx5_ifc_initial_seg_bits {
u8 reserved_at_80a0[0x17fc0];
};
+struct mlx5_ifc_mtpps_reg_bits {
+ u8 reserved_at_0[0xc];
+ u8 cap_number_of_pps_pins[0x4];
+ u8 reserved_at_10[0x4];
+ u8 cap_max_num_of_pps_in_pins[0x4];
+ u8 reserved_at_18[0x4];
+ u8 cap_max_num_of_pps_out_pins[0x4];
+
+ u8 reserved_at_20[0x24];
+ u8 cap_pin_3_mode[0x4];
+ u8 reserved_at_48[0x4];
+ u8 cap_pin_2_mode[0x4];
+ u8 reserved_at_50[0x4];
+ u8 cap_pin_1_mode[0x4];
+ u8 reserved_at_58[0x4];
+ u8 cap_pin_0_mode[0x4];
+
+ u8 reserved_at_60[0x4];
+ u8 cap_pin_7_mode[0x4];
+ u8 reserved_at_68[0x4];
+ u8 cap_pin_6_mode[0x4];
+ u8 reserved_at_70[0x4];
+ u8 cap_pin_5_mode[0x4];
+ u8 reserved_at_78[0x4];
+ u8 cap_pin_4_mode[0x4];
+
+ u8 reserved_at_80[0x80];
+
+ u8 enable[0x1];
+ u8 reserved_at_101[0xb];
+ u8 pattern[0x4];
+ u8 reserved_at_110[0x4];
+ u8 pin_mode[0x4];
+ u8 pin[0x8];
+
+ u8 reserved_at_120[0x20];
+
+ u8 time_stamp[0x40];
+
+ u8 out_pulse_duration[0x10];
+ u8 out_periodic_adjustment[0x10];
+
+ u8 reserved_at_1a0[0x60];
+};
+
+struct mlx5_ifc_mtppse_reg_bits {
+ u8 reserved_at_0[0x18];
+ u8 pin[0x8];
+ u8 event_arm[0x1];
+ u8 reserved_at_21[0x1b];
+ u8 event_generation_mode[0x4];
+ u8 reserved_at_40[0x40];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -7845,6 +8052,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
struct mlx5_ifc_ppad_reg_bits ppad_reg;
struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+ struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
struct mlx5_ifc_pplm_reg_bits pplm_reg;
struct mlx5_ifc_pplr_reg_bits pplr_reg;
struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
@@ -7857,6 +8065,8 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
struct mlx5_ifc_slrg_reg_bits slrg_reg;
struct mlx5_ifc_sltp_reg_bits sltp_reg;
+ struct mlx5_ifc_mtpps_reg_bits mtpps_reg;
+ struct mlx5_ifc_mtppse_reg_bits mtppse_reg;
u8 reserved_at_0[0x60e0];
};
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 0aacb2a7480d..3096370fe831 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -50,9 +50,6 @@
#define MLX5_BSF_APPTAG_ESCAPE 0x1
#define MLX5_BSF_APPREF_ESCAPE 0x2
-#define MLX5_QPN_BITS 24
-#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1)
-
enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
MLX5_QP_OPTPAR_RRE = 1 << 1,
@@ -215,6 +212,7 @@ struct mlx5_wqe_ctrl_seg {
#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+#define MLX5_WQE_AV_EXT 0x80000000
enum {
MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
@@ -223,14 +221,26 @@ enum {
MLX5_ETH_WQE_L4_CSUM = 1 << 7,
};
+enum {
+ MLX5_ETH_WQE_INSERT_VLAN = 1 << 15,
+};
+
struct mlx5_wqe_eth_seg {
u8 rsvd0[4];
u8 cs_flags;
u8 rsvd1;
__be16 mss;
__be32 rsvd2;
- __be16 inline_hdr_sz;
- u8 inline_hdr_start[2];
+ union {
+ struct {
+ __be16 sz;
+ u8 start[2];
+ } inline_hdr;
+ struct {
+ __be16 type;
+ __be16 vlan_tci;
+ } insert;
+ };
};
struct mlx5_wqe_xrc_seg {
@@ -245,6 +255,23 @@ struct mlx5_wqe_masked_atomic_seg {
__be64 compare_mask;
};
+struct mlx5_base_av {
+ union {
+ struct {
+ __be32 qkey;
+ __be32 reserved;
+ } qkey;
+ __be64 dc_key;
+ } key;
+ __be32 dqp_dct;
+ u8 stat_rate_sl;
+ u8 fl_mlid;
+ union {
+ __be16 rlid;
+ __be16 udp_sport;
+ };
+};
+
struct mlx5_av {
union {
struct {
@@ -292,10 +319,14 @@ struct mlx5_wqe_data_seg {
struct mlx5_wqe_umr_ctrl_seg {
u8 flags;
u8 rsvd0[3];
- __be16 klm_octowords;
- __be16 bsf_octowords;
+ __be16 xlt_octowords;
+ union {
+ __be16 xlt_offset;
+ __be16 bsf_octowords;
+ };
__be64 mkey_mask;
- u8 rsvd1[32];
+ __be32 xlt_offset_47_16;
+ u8 rsvd1[28];
};
struct mlx5_seg_set_psv {
@@ -389,6 +420,10 @@ struct mlx5_bsf {
struct mlx5_bsf_inl m_inl;
};
+struct mlx5_mtt {
+ __be64 ptag;
+};
+
struct mlx5_klm {
__be32 bcount;
__be32 key;
@@ -410,46 +445,9 @@ struct mlx5_stride_block_ctrl_seg {
__be16 num_entries;
};
-enum mlx5_pagefault_flags {
- MLX5_PFAULT_REQUESTOR = 1 << 0,
- MLX5_PFAULT_WRITE = 1 << 1,
- MLX5_PFAULT_RDMA = 1 << 2,
-};
-
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
- u32 bytes_committed;
- u8 event_subtype;
- enum mlx5_pagefault_flags flags;
- union {
- /* Initiator or send message responder pagefault details. */
- struct {
- /* Received packet size, only valid for responders. */
- u32 packet_size;
- /*
- * WQE index. Refers to either the send queue or
- * receive queue, according to event_subtype.
- */
- u16 wqe_index;
- } wqe;
- /* RDMA responder pagefault details */
- struct {
- u32 r_key;
- /*
- * Received packet size, minimal size page fault
- * resolution required for forward progress.
- */
- u32 packet_size;
- u32 rdma_op_len;
- u64 rdma_va;
- } rdma;
- };
-};
-
struct mlx5_core_qp {
struct mlx5_core_rsc_common common; /* must be first */
void (*event) (struct mlx5_core_qp *, int);
- void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
int qpn;
struct mlx5_rsc_debug *dbg;
int pid;
@@ -549,10 +547,6 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
- u8 context, int error);
-#endif
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *rq);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index ec35157ea725..656c70b65dd2 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -51,6 +51,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 *min_inline);
+void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 46794a7a531c..b733eb404ffc 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -36,6 +36,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
+#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 8a57f0b1242d..8850fcaf50db 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -501,6 +501,7 @@ struct platform_device_id {
kernel_ulong_t driver_data;
};
+#define MDIO_NAME_SIZE 32
#define MDIO_MODULE_PREFIX "mdio:"
#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index e5fb81376e92..d7f63339ef0b 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -3,6 +3,7 @@
#include <linux/in.h>
#include <linux/pim.h>
+#include <linux/rhashtable.h>
#include <net/sock.h>
#include <uapi/linux/mroute.h>
@@ -60,7 +61,6 @@ struct vif_device {
#define VIFF_STATIC 0x8000
#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
-#define MFC_LINES 64
struct mr_table {
struct list_head list;
@@ -69,8 +69,9 @@ struct mr_table {
struct sock __rcu *mroute_sk;
struct timer_list ipmr_expire_timer;
struct list_head mfc_unres_queue;
- struct list_head mfc_cache_array[MFC_LINES];
struct vif_device vif_table[MAXVIFS];
+ struct rhltable mfc_hash;
+ struct list_head mfc_cache_list;
int maxvif;
atomic_t cache_resolve_queue_len;
bool mroute_do_assert;
@@ -85,17 +86,48 @@ enum {
MFC_STATIC = BIT(0),
};
+struct mfc_cache_cmp_arg {
+ __be32 mfc_mcastgrp;
+ __be32 mfc_origin;
+};
+
+/**
+ * struct mfc_cache - multicast routing entries
+ * @mnode: rhashtable list
+ * @mfc_mcastgrp: destination multicast group address
+ * @mfc_origin: source address
+ * @cmparg: used for rhashtable comparisons
+ * @mfc_parent: source interface (iif)
+ * @mfc_flags: entry flags
+ * @expires: unresolved entry expire time
+ * @unresolved: unresolved cached skbs
+ * @last_assert: time of last assert
+ * @minvif: minimum VIF id
+ * @maxvif: maximum VIF id
+ * @bytes: bytes that have passed for this entry
+ * @pkt: packets that have passed for this entry
+ * @wrong_if: number of wrong source interface hits
+ * @lastuse: time of last use of the group (traffic or update)
+ * @ttls: OIF TTL threshold array
+ * @list: global entry list
+ * @rcu: used for entry destruction
+ */
struct mfc_cache {
- struct list_head list;
- __be32 mfc_mcastgrp; /* Group the entry belongs to */
- __be32 mfc_origin; /* Source of packet */
- vifi_t mfc_parent; /* Source interface */
- int mfc_flags; /* Flags on line */
+ struct rhlist_head mnode;
+ union {
+ struct {
+ __be32 mfc_mcastgrp;
+ __be32 mfc_origin;
+ };
+ struct mfc_cache_cmp_arg cmparg;
+ };
+ vifi_t mfc_parent;
+ int mfc_flags;
union {
struct {
unsigned long expires;
- struct sk_buff_head unresolved; /* Unresolved buffers */
+ struct sk_buff_head unresolved;
} unres;
struct {
unsigned long last_assert;
@@ -105,20 +137,15 @@ struct mfc_cache {
unsigned long pkt;
unsigned long wrong_if;
unsigned long lastuse;
- unsigned char ttls[MAXVIFS]; /* TTL thresholds */
+ unsigned char ttls[MAXVIFS];
} res;
} mfc_un;
+ struct list_head list;
struct rcu_head rcu;
};
-#ifdef __BIG_ENDIAN
-#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1))
-#else
-#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1))
-#endif
-
struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
- struct rtmsg *rtm, int nowait, u32 portid);
+ struct rtmsg *rtm, u32 portid);
#endif
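
The mroute.h hunks above drop the fixed MFC_LINES bucket array and its MFC_HASH macro in favour of an rhltable keyed on struct mfc_cache_cmp_arg. As a hedged illustration only (the params object and helper below are invented for this sketch, not lifted from the in-tree ipmr code), a lookup against such a table could be wired up like this:

#include <linux/rhashtable.h>
#include <linux/mroute.h>

/* Illustrative table parameters: key the rhltable on (group, origin). */
static const struct rhashtable_params mfc_rht_params_sketch = {
	.head_offset		= offsetof(struct mfc_cache, mnode),
	.key_offset		= offsetof(struct mfc_cache, cmparg),
	.key_len		= sizeof(struct mfc_cache_cmp_arg),
	.automatic_shrinking	= true,
};

/* Illustrative lookup; caller is expected to hold rcu_read_lock(). */
static struct mfc_cache *mfc_find_sketch(struct mr_table *mrt,
					 __be32 origin, __be32 mcastgrp)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = origin,
	};
	struct rhlist_head *list;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, mfc_rht_params_sketch);
	return list ? container_of(list, struct mfc_cache, mnode) : NULL;
}

The union in struct mfc_cache exists precisely so the (mfc_mcastgrp, mfc_origin) pair can double as the rhashtable key via cmparg.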
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 19a1c0c2993b..ce44e3e96d27 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -116,7 +116,7 @@ struct mfc6_cache {
struct rtmsg;
extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
- struct rtmsg *rtm, int nowait, u32 portid);
+ struct rtmsg *rtm, u32 portid);
#ifdef CONFIG_IPV6_MROUTE
extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 9c6c8ef2e9e7..9a0419594e84 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -71,7 +71,6 @@ enum {
NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */
NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
- NETIF_F_BUSY_POLL_BIT, /* Busy poll */
NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
@@ -134,7 +133,6 @@ enum {
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
-#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
#define NETIF_F_HW_TC __NETIF_F(HW_TC)
#define for_each_netdev_feature(mask_addr, bit) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 27914672602d..f40f0ab3847a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -352,6 +352,7 @@ enum gro_result {
GRO_HELD,
GRO_NORMAL,
GRO_DROP,
+ GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;
@@ -463,7 +464,6 @@ static inline bool napi_reschedule(struct napi_struct *napi)
return false;
}
-bool __napi_complete(struct napi_struct *n);
bool napi_complete_done(struct napi_struct *n, int work_done);
/**
* napi_complete - NAPI processing complete
@@ -917,8 +917,8 @@ struct netdev_xdp {
* Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
- * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
- * struct rtnl_link_stats64 *storage);
+ * void (*ndo_get_stats64)(struct net_device *dev,
+ * struct rtnl_link_stats64 *storage);
* struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
* Called when a user wants to get the network device usage
* statistics. Drivers must do one of the following:
@@ -968,11 +968,12 @@ struct netdev_xdp {
* with PF and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
- * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
- * Called to setup 'tc' number of traffic classes in the net device. This
- * is always called from the stack with the rtnl lock held and netif tx
- * queues stopped. This allows the netdevice to perform queue management
- * safely.
+ * int (*ndo_setup_tc)(struct net_device *dev, u32 handle,
+ * __be16 protocol, struct tc_to_netdev *tc);
+ * Called to setup any 'tc' scheduler, classifier or action on @dev.
+ * This is always called from the stack with the rtnl lock held and netif
+ * tx queues stopped. This allows the netdevice to perform queue
+ * management safely.
*
* Fiber Channel over Ethernet (FCoE) offload functions.
* int (*ndo_fcoe_enable)(struct net_device *dev);
@@ -1166,8 +1167,8 @@ struct net_device_ops {
struct neigh_parms *);
void (*ndo_tx_timeout) (struct net_device *dev);
- struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
- struct rtnl_link_stats64 *storage);
+ void (*ndo_get_stats64)(struct net_device *dev,
+ struct rtnl_link_stats64 *storage);
bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
int (*ndo_get_offload_stats)(int attr_id,
const struct net_device *dev,
@@ -1184,9 +1185,6 @@ struct net_device_ops {
struct netpoll_info *info);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- int (*ndo_busy_poll)(struct napi_struct *dev);
-#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
int (*ndo_set_vf_vlan)(struct net_device *dev,
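
The kernel-doc and ops-table hunks above change ndo_get_stats64() from returning a struct rtnl_link_stats64 pointer to returning void and filling the caller-supplied storage. A minimal driver-side sketch of the new shape (the private struct and its fields are invented):

#include <linux/netdevice.h>

/* Invented per-device state for the sketch. */
struct sketch_priv {
	u64 rx_packets, rx_bytes;
	u64 tx_packets, tx_bytes;
};

/* New prototype: fill @storage, return nothing. */
static void sketch_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *storage)
{
	struct sketch_priv *priv = netdev_priv(dev);

	storage->rx_packets = priv->rx_packets;
	storage->rx_bytes   = priv->rx_bytes;
	storage->tx_packets = priv->tx_packets;
	storage->tx_bytes   = priv->tx_bytes;
}

static const struct net_device_ops sketch_netdev_ops = {
	.ndo_get_stats64	= sketch_get_stats64,
};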
@@ -1553,7 +1551,6 @@ enum netdev_priv_flags {
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
*
- * @last_rx: Time of last Rx
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
*
@@ -1780,8 +1777,6 @@ struct net_device {
/*
* Cache lines mostly used on receive path (including eth_type_trans())
*/
- unsigned long last_rx;
-
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr;
@@ -1871,8 +1866,12 @@ struct net_device {
struct pcpu_vstats __percpu *vstats;
};
+#if IS_ENABLED(CONFIG_GARP)
struct garp_port __rcu *garp_port;
+#endif
+#if IS_ENABLED(CONFIG_MRP)
struct mrp_port __rcu *mrp_port;
+#endif
struct device dev;
const struct attribute_group *sysfs_groups[4];
@@ -2669,6 +2668,19 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
+#ifdef CONFIG_XFRM_OFFLOAD
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS)
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+#else
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+#endif
+
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
@@ -3111,7 +3123,19 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
-void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
+/**
+ * netif_wake_subqueue - allow sending packets on subqueue
+ * @dev: network device
+ * @queue_index: sub queue index
+ *
+ * Resume individual transmit queue of a device with multiple transmit queues.
+ */
+static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+
+ netif_tx_wake_queue(txq);
+}
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
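
netif_wake_subqueue() above becomes a plain static inline around netif_tx_wake_queue() rather than an exported function. For illustration only, a multiqueue driver's TX completion path would typically pair it with the earlier stop as below (the free-descriptor accounting is invented):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: resume a subqueue once enough descriptors have been reclaimed. */
static void sketch_tx_complete(struct net_device *dev, u16 queue_index,
			       unsigned int free_descs)
{
	if (__netif_subqueue_stopped(dev, queue_index) &&
	    free_descs > MAX_SKB_FRAGS + 1)
		netif_wake_subqueue(dev, queue_index);
}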
@@ -3805,6 +3829,10 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
+extern int dev_weight_rx_bias;
+extern int dev_weight_tx_bias;
+extern int dev_rx_weight;
+extern int dev_tx_weight;
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
@@ -3882,10 +3910,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info);
-int netdev_default_l2upper_neigh_construct(struct net_device *dev,
- struct neighbour *n);
-void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
- struct neighbour *n);
/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
@@ -4338,6 +4362,15 @@ do { \
})
#endif
+/* if @cond then downgrade to debug, else print at @level */
+#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
+ do { \
+ if (cond) \
+ netif_dbg(priv, type, netdev, fmt, ##args); \
+ else \
+ netif_ ## level(priv, type, netdev, fmt, ##args); \
+ } while (0)
+
#if defined(VERBOSE_DEBUG)
#define netif_vdbg netif_dbg
#else
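
The new netif_cond_dbg() macro above demotes a message to debug level when @cond is true and otherwise prints at the requested level. A hedged usage sketch (the private struct and the error-reporting helper are invented; only the macro call itself comes from the header):

#include <linux/netdevice.h>

struct sketch_priv {
	struct net_device *netdev;
	u32 msg_enable;		/* NETIF_MSG_* bits, needed by netif_dbg() */
};

/* Retryable firmware failures only merit debug output; fatal ones warn. */
static void sketch_report_fw_error(struct sketch_priv *priv, int err,
				   bool retryable)
{
	netif_cond_dbg(priv, drv, priv->netdev, retryable, warn,
		       "firmware command failed: %d\n", err);
}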
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 1d82dd5e9a08..1b49209dd5c7 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -28,6 +28,7 @@ struct nfnetlink_subsystem {
const struct nfnl_callback *cb; /* callback for individual types */
int (*commit)(struct net *net, struct sk_buff *skb);
int (*abort)(struct net *net, struct sk_buff *skb);
+ bool (*valid_genid)(struct net *net, u32 genid);
};
int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 5117e4d2ddfa..be378cf47fcc 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -167,6 +167,7 @@ struct xt_match {
const char *table;
unsigned int matchsize;
+ unsigned int usersize;
#ifdef CONFIG_COMPAT
unsigned int compatsize;
#endif
@@ -207,6 +208,7 @@ struct xt_target {
const char *table;
unsigned int targetsize;
+ unsigned int usersize;
#ifdef CONFIG_COMPAT
unsigned int compatsize;
#endif
@@ -287,6 +289,13 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
+int xt_match_to_user(const struct xt_entry_match *m,
+ struct xt_entry_match __user *u);
+int xt_target_to_user(const struct xt_entry_target *t,
+ struct xt_entry_target __user *u);
+int xt_data_to_user(void __user *dst, const void *src,
+ int usersize, int size);
+
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
struct xt_counters_info *info, bool compat);
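
x_tables above grows a per-match/target usersize alongside matchsize/targetsize, plus xt_match_to_user(), xt_target_to_user() and xt_data_to_user() helpers; usersize bounds how many bytes of match/target data are copied back to userspace, so kernel-only state appended to the info structure stays hidden on rule dumps. A hedged sketch only (the match, its info layout and its values are invented; callbacks are omitted):

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>

/* Invented match info: the trailing pointer is kernel-internal state that
 * userspace never supplied and should not see when rules are dumped.
 */
struct sketch_mtinfo {
	__u32 flags;
	__u32 threshold;
	struct sketch_state *state;	/* kernel private */
};

static struct xt_match sketch_mt_reg = {
	.name		= "sketch",
	.revision	= 1,
	.family		= NFPROTO_UNSPEC,
	.matchsize	= sizeof(struct sketch_mtinfo),
	/* Only the userspace-visible prefix is copied back on dump. */
	.usersize	= offsetof(struct sketch_mtinfo, state),
	/* .match / .checkentry callbacks omitted from the sketch */
	.me		= THIS_MODULE,
};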
diff --git a/include/linux/parman.h b/include/linux/parman.h
new file mode 100644
index 000000000000..3c8cccc7d4da
--- /dev/null
+++ b/include/linux/parman.h
@@ -0,0 +1,76 @@
+/*
+ * include/linux/parman.h - Manager for linear priority array areas
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARMAN_H
+#define _PARMAN_H
+
+#include <linux/list.h>
+
+enum parman_algo_type {
+ PARMAN_ALGO_TYPE_LSORT,
+};
+
+struct parman_item {
+ struct list_head list;
+ unsigned long index;
+};
+
+struct parman_prio {
+ struct list_head list;
+ struct list_head item_list;
+ unsigned long priority;
+};
+
+struct parman_ops {
+ unsigned long base_count;
+ unsigned long resize_step;
+ int (*resize)(void *priv, unsigned long new_count);
+ void (*move)(void *priv, unsigned long from_index,
+ unsigned long to_index, unsigned long count);
+ enum parman_algo_type algo;
+};
+
+struct parman;
+
+struct parman *parman_create(const struct parman_ops *ops, void *priv);
+void parman_destroy(struct parman *parman);
+void parman_prio_init(struct parman *parman, struct parman_prio *prio,
+ unsigned long priority);
+void parman_prio_fini(struct parman_prio *prio);
+int parman_item_add(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item);
+void parman_item_remove(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item);
+
+#endif
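
parman.h above is a new, self-contained API: the consumer owns a linear array, hands parman a resize and a move callback through parman_ops, groups items under parman_prio objects, and parman keeps the array ordered by priority while handing back item->index as the slot to use. A hedged usage sketch under those assumptions (the table, entry type and callback bodies are invented):

#include <linux/parman.h>
#include <linux/slab.h>
#include <linux/string.h>

struct sketch_entry {
	u32 key;			/* invented payload */
};

struct sketch_table {
	struct parman *parman;
	struct sketch_entry *entries;	/* invented backing array */
	unsigned long count;
};

/* Grow (or shrink) the backing array to new_count slots. */
static int sketch_parman_resize(void *priv, unsigned long new_count)
{
	struct sketch_table *tbl = priv;
	struct sketch_entry *e;

	e = krealloc(tbl->entries, new_count * sizeof(*e), GFP_KERNEL);
	if (!e && new_count)
		return -ENOMEM;
	tbl->entries = e;
	tbl->count = new_count;
	return 0;
}

/* Move entries so parman can keep each priority band contiguous. */
static void sketch_parman_move(void *priv, unsigned long from_index,
			       unsigned long to_index, unsigned long count)
{
	struct sketch_table *tbl = priv;

	memmove(&tbl->entries[to_index], &tbl->entries[from_index],
		count * sizeof(tbl->entries[0]));
}

static const struct parman_ops sketch_parman_ops = {
	.base_count	= 16,
	.resize_step	= 16,
	.resize		= sketch_parman_resize,
	.move		= sketch_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};

A caller would then do parman_create(&sketch_parman_ops, tbl), parman_prio_init() once per priority band, and parman_item_add(); after a successful add, item->index names the slot in tbl->entries to program.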
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e2d1a124216a..adbc859fe7c4 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -885,7 +885,6 @@ void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
-#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
/* Generic PCI functions exported to card drivers */
@@ -1630,7 +1629,6 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
-#define dev_num_vf(d) (0)
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 7fc1105605bf..772476028a65 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -81,6 +81,9 @@ typedef enum {
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
PHY_INTERFACE_MODE_TRGMII,
+ PHY_INTERFACE_MODE_1000BASEX,
+ PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -141,6 +144,12 @@ static inline const char *phy_modes(phy_interface_t interface)
return "qsgmii";
case PHY_INTERFACE_MODE_TRGMII:
return "trgmii";
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return "1000base-x";
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return "2500base-x";
+ case PHY_INTERFACE_MODE_RXAUI:
+ return "rxaui";
default:
return "unknown";
}
@@ -157,11 +166,7 @@ static inline const char *phy_modes(phy_interface_t interface)
/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
#define PHY_ID_FMT "%s:%02x"
-/*
- * Need to be a little smaller than phydev->dev.bus_id to leave room
- * for the ":%02x"
- */
-#define MII_BUS_ID_SIZE (20 - 3)
+#define MII_BUS_ID_SIZE 61
/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
@@ -631,7 +636,7 @@ struct phy_driver {
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
- char bus_id[20];
+ char bus_id[MII_BUS_ID_SIZE + 3];
u32 phy_uid;
u32 phy_uid_mask;
int (*run)(struct phy_device *phydev);
@@ -802,6 +807,9 @@ int phy_stop_interrupts(struct phy_device *phydev);
static inline int phy_read_status(struct phy_device *phydev)
{
+ if (!phydev->drv)
+ return -EIO;
+
return phydev->drv->read_status(phydev);
}
@@ -881,6 +889,25 @@ void mdio_bus_exit(void);
extern struct bus_type mdio_bus_type;
+struct mdio_board_info {
+ const char *bus_id;
+ char modalias[MDIO_NAME_SIZE];
+ int mdio_addr;
+ const void *platform_data;
+};
+
+#if IS_ENABLED(CONFIG_PHYLIB)
+int mdiobus_register_board_info(const struct mdio_board_info *info,
+ unsigned int n);
+#else
+static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
+ unsigned int n)
+{
+ return 0;
+}
+#endif
+
+
/**
* module_phy_driver() - Helper macro for registering PHY drivers
* @__phy_drivers: array of PHY drivers to register
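
The mdio_board_info / mdiobus_register_board_info() addition above gives board code a way to describe MDIO devices that cannot be discovered from firmware, and the stub keeps it a no-op when PHYLIB is disabled. A hedged sketch of registering one entry (bus name, address and platform data are invented):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/phy.h>

struct sketch_switch_pdata {
	int cpu_port;			/* invented platform data */
};

static struct sketch_switch_pdata sketch_pdata = {
	.cpu_port = 5,
};

static const struct mdio_board_info sketch_mdio_info[] = {
	{
		.bus_id		= "fixed-0",		/* invented bus name */
		.modalias	= "sketch-switch",
		.mdio_addr	= 30,
		.platform_data	= &sketch_pdata,
	},
};

static int __init sketch_board_init(void)
{
	return mdiobus_register_board_info(sketch_mdio_info,
					   ARRAY_SIZE(sketch_mdio_info));
}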
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 2052011bf9fb..6c70444da3b9 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -111,6 +111,11 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
return 0;
}
+/*
+ * Note: resize (below) nests producer lock within consumer lock, so if you
+ * consume in interrupt or BH context, you must disable interrupts/BH when
+ * calling this.
+ */
static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
int ret;
@@ -242,6 +247,11 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
return ptr;
}
+/*
+ * Note: resize (below) nests producer lock within consumer lock, so if you
+ * call this in interrupt or BH context, you must disable interrupts/BH when
+ * producing.
+ */
static inline void *ptr_ring_consume(struct ptr_ring *r)
{
void *ptr;
@@ -357,7 +367,7 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
void **old;
void *ptr;
- while ((ptr = ptr_ring_consume(r)))
+ while ((ptr = __ptr_ring_consume(r)))
if (producer < size)
queue[producer++] = ptr;
else if (destroy)
@@ -372,6 +382,12 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
return old;
}
+/*
+ * Note: producer lock is nested within consumer lock, so if you
+ * resize you must make sure all uses nest correctly.
+ * In particular if you consume ring in interrupt or BH context, you must
+ * disable interrupts/BH when doing so.
+ */
static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
void (*destroy)(void *))
{
@@ -382,17 +398,25 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
if (!queue)
return -ENOMEM;
- spin_lock_irqsave(&(r)->producer_lock, flags);
+ spin_lock_irqsave(&(r)->consumer_lock, flags);
+ spin_lock(&(r)->producer_lock);
old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);
- spin_unlock_irqrestore(&(r)->producer_lock, flags);
+ spin_unlock(&(r)->producer_lock);
+ spin_unlock_irqrestore(&(r)->consumer_lock, flags);
kfree(old);
return 0;
}
+/*
+ * Note: producer lock is nested within consumer lock, so if you
+ * resize you must make sure all uses nest correctly.
+ * In particular if you consume ring in interrupt or BH context, you must
+ * disable interrupts/BH when doing so.
+ */
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
int size,
gfp_t gfp, void (*destroy)(void *))
@@ -412,10 +436,12 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
}
for (i = 0; i < nrings; ++i) {
- spin_lock_irqsave(&(rings[i])->producer_lock, flags);
+ spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
+ spin_lock(&(rings[i])->producer_lock);
queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
size, gfp, destroy);
- spin_unlock_irqrestore(&(rings[i])->producer_lock, flags);
+ spin_unlock(&(rings[i])->producer_lock);
+ spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
}
for (i = 0; i < nrings; ++i)
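
The lock-ordering comments added above state that resize nests the producer lock inside the consumer lock, which is also why ptr_ring_resize() and ptr_ring_resize_multiple() now take consumer_lock (irqsave) around producer_lock instead of producer_lock alone. The practical consequence: if the ring is consumed from interrupt or BH context, producing must be done with interrupts/BH disabled (resize already disables them via the consumer lock). A hedged sketch for a ring consumed from NAPI (BH) context, with invented names:

#include <linux/ptr_ring.h>
#include <linux/skbuff.h>

/* Producer runs in process context while the consumer runs in BH (NAPI)
 * context and the ring may be resized concurrently, so the producer uses
 * the _bh variant; the consumer is already in BH context.
 */
static int sketch_enqueue(struct ptr_ring *ring, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(ring, skb);	/* -ENOSPC when full */
}

static struct sk_buff *sketch_dequeue_napi(struct ptr_ring *ring)
{
	return ptr_ring_consume(ring);		/* NULL when empty */
}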
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 734deb094618..52966b9bfde3 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -1,10 +1,35 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2016 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
+
#ifndef _COMMON_HSI_H
#define _COMMON_HSI_H
#include <linux/types.h>
@@ -37,6 +62,7 @@
#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
#define ISCSI_CDU_TASK_SEG_TYPE 0
+#define FCOE_CDU_TASK_SEG_TYPE 0
#define RDMA_CDU_TASK_SEG_TYPE 1
#define FW_ASSERT_GENERAL_ATTN_IDX 32
@@ -180,6 +206,9 @@
#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
@@ -236,6 +265,7 @@
#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
@@ -266,6 +296,9 @@
#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
/* TCM agg counter flag selection (FW) */
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
@@ -703,7 +736,7 @@ enum mf_mode {
/* Per-protocol connection types */
enum protocol_type {
PROTOCOLID_ISCSI,
- PROTOCOLID_RESERVED2,
+ PROTOCOLID_FCOE,
PROTOCOLID_ROCE,
PROTOCOLID_CORE,
PROTOCOLID_ETH,
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 1aa0727c4136..4b402fb0eaad 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __ETH_COMMON__
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
new file mode 100644
index 000000000000..2e417a45c5f7
--- /dev/null
+++ b/include/linux/qed/fcoe_common.h
@@ -0,0 +1,715 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __FCOE_COMMON__
+#define __FCOE_COMMON__
+/*********************/
+/* FCOE FW CONSTANTS */
+/*********************/
+
+#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
+#define FCOE_MAX_SIZE_FCP_DATA_SUPER (8600)
+
+struct fcoe_abts_pkt {
+ __le32 abts_rsp_fc_payload_lo;
+ __le16 abts_rsp_rx_id;
+ u8 abts_rsp_rctl;
+ u8 reserved2;
+};
+
+/* FCoE additional WQE (Sq/XferQ) information */
+union fcoe_additional_info_union {
+ __le32 previous_tid;
+ __le32 parent_tid;
+ __le32 burst_length;
+ __le32 seq_rec_updated_offset;
+};
+
+struct fcoe_exp_ro {
+ __le32 data_offset;
+ __le32 reserved;
+};
+
+union fcoe_cleanup_addr_exp_ro_union {
+ struct regpair abts_rsp_fc_payload_hi;
+ struct fcoe_exp_ro exp_ro;
+};
+
+/* FCoE Ramrod Command IDs */
+enum fcoe_completion_status {
+ FCOE_COMPLETION_STATUS_SUCCESS,
+ FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
+ FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
+ MAX_FCOE_COMPLETION_STATUS
+};
+
+struct fc_addr_nw {
+ u8 addr_lo;
+ u8 addr_mid;
+ u8 addr_hi;
+};
+
+/* FCoE connection offload */
+struct fcoe_conn_offload_ramrod_data {
+ struct regpair sq_pbl_addr;
+ struct regpair sq_curr_page_addr;
+ struct regpair sq_next_page_addr;
+ struct regpair xferq_pbl_addr;
+ struct regpair xferq_curr_page_addr;
+ struct regpair xferq_next_page_addr;
+ struct regpair respq_pbl_addr;
+ struct regpair respq_curr_page_addr;
+ struct regpair respq_next_page_addr;
+ __le16 dst_mac_addr_lo;
+ __le16 dst_mac_addr_mid;
+ __le16 dst_mac_addr_hi;
+ __le16 src_mac_addr_lo;
+ __le16 src_mac_addr_mid;
+ __le16 src_mac_addr_hi;
+ __le16 tx_max_fc_pay_len;
+ __le16 e_d_tov_timer_val;
+ __le16 rx_max_fc_pay_len;
+ __le16 vlan_tag;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
+ __le16 physical_q0;
+ __le16 rec_rr_tov_timer_val;
+ struct fc_addr_nw s_id;
+ u8 max_conc_seqs_c3;
+ struct fc_addr_nw d_id;
+ u8 flags;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
+ __le16 conn_id;
+ u8 def_q_idx;
+ u8 reserved[5];
+};
+
+/* FCoE terminate connection request */
+struct fcoe_conn_terminate_ramrod_data {
+ struct regpair terminate_params_addr;
+};
+
+struct fcoe_fast_sgl_ctx {
+ struct regpair sgl_start_addr;
+ __le32 sgl_byte_offset;
+ __le16 task_reuse_cnt;
+ __le16 init_offset_in_first_sge;
+};
+
+struct fcoe_slow_sgl_ctx {
+ struct regpair base_sgl_addr;
+ __le16 curr_sge_off;
+ __le16 remainder_num_sges;
+ __le16 curr_sgl_index;
+ __le16 reserved;
+};
+
+struct fcoe_sge {
+ struct regpair sge_addr;
+ __le16 size;
+ __le16 reserved0;
+ u8 reserved1[3];
+ u8 is_valid_sge;
+};
+
+union fcoe_data_desc_ctx {
+ struct fcoe_fast_sgl_ctx fast;
+ struct fcoe_slow_sgl_ctx slow;
+ struct fcoe_sge single_sge;
+};
+
+union fcoe_dix_desc_ctx {
+ struct fcoe_slow_sgl_ctx dix_sgl;
+ struct fcoe_sge cached_dix_sge;
+};
+
+struct fcoe_fcp_cmd_payload {
+ __le32 opaque[8];
+};
+
+struct fcoe_fcp_rsp_payload {
+ __le32 opaque[6];
+};
+
+struct fcoe_fcp_xfer_payload {
+ __le32 opaque[3];
+};
+
+/* FCoE firmware function init */
+struct fcoe_init_func_ramrod_data {
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+ __le16 mtu;
+ __le16 sq_num_pages_in_pbl;
+ __le32 reserved;
+};
+
+/* FCoE: Mode of the connection: Target or Initiator or both */
+enum fcoe_mode_type {
+ FCOE_INITIATOR_MODE = 0x0,
+ FCOE_TARGET_MODE = 0x1,
+ FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
+ MAX_FCOE_MODE_TYPE
+};
+
+struct fcoe_mstorm_fcoe_task_st_ctx_fp {
+ __le16 flags;
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15
+ __le16 difDataResidue;
+ __le16 parent_id;
+ __le16 single_sge_saved_offset;
+ __le32 data_2_trns_rem;
+ __le32 offset_in_io;
+ union fcoe_dix_desc_ctx dix_desc;
+ union fcoe_data_desc_ctx data_desc;
+};
+
+struct fcoe_mstorm_fcoe_task_st_ctx_non_fp {
+ __le16 flags;
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15
+ u8 tx_rx_sgl_mode;
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3
+#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6
+ u8 rsrv2;
+ __le32 num_prm_zero_read;
+ struct regpair rsp_buf_addr;
+};
+
+struct fcoe_rx_stat {
+ struct regpair fcoe_rx_byte_cnt;
+ struct regpair fcoe_rx_data_pkt_cnt;
+ struct regpair fcoe_rx_xfer_pkt_cnt;
+ struct regpair fcoe_rx_other_pkt_cnt;
+ __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
+ __le32 fcoe_silent_drop_pkt_rq_full_cnt;
+ __le32 fcoe_silent_drop_pkt_crc_error_cnt;
+ __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
+ __le32 fcoe_silent_drop_total_pkt_cnt;
+ __le32 rsrv;
+};
+
+enum fcoe_sgl_mode {
+ FCOE_SLOW_SGL,
+ FCOE_SINGLE_FAST_SGE,
+ FCOE_2_FAST_SGE,
+ FCOE_3_FAST_SGE,
+ FCOE_4_FAST_SGE,
+ FCOE_MUL_FAST_SGES,
+ MAX_FCOE_SGL_MODE
+};
+
+struct fcoe_stat_ramrod_data {
+ struct regpair stat_params_addr;
+};
+
+struct protection_info_ctx {
+ __le16 flags;
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
+#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
+#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
+ u8 dix_block_size;
+ u8 dst_size;
+};
+
+union protection_info_union_ctx {
+ struct protection_info_ctx info;
+ __le32 value;
+};
+
+struct fcp_rsp_payload_padded {
+ struct fcoe_fcp_rsp_payload rsp_payload;
+ __le32 reserved[2];
+};
+
+struct fcp_xfer_payload_padded {
+ struct fcoe_fcp_xfer_payload xfer_payload;
+ __le32 reserved[5];
+};
+
+struct fcoe_tx_data_params {
+ __le32 data_offset;
+ __le32 offset_in_io;
+ u8 flags;
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2
+#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F
+#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3
+ u8 dif_residual;
+ __le16 seq_cnt;
+ __le16 single_sge_saved_offset;
+ __le16 next_dif_offset;
+ __le16 seq_id;
+ __le16 reserved3;
+};
+
+struct fcoe_tx_mid_path_params {
+ __le32 parameter;
+ u8 r_ctl;
+ u8 type;
+ u8 cs_ctl;
+ u8 df_ctl;
+ __le16 rx_id;
+ __le16 ox_id;
+};
+
+struct fcoe_tx_params {
+ struct fcoe_tx_data_params data;
+ struct fcoe_tx_mid_path_params mid_path;
+};
+
+union fcoe_tx_info_union_ctx {
+ struct fcoe_fcp_cmd_payload fcp_cmd_payload;
+ struct fcp_rsp_payload_padded fcp_rsp_payload;
+ struct fcp_xfer_payload_padded fcp_xfer_payload;
+ struct fcoe_tx_params tx_params;
+};
+
+struct ystorm_fcoe_task_st_ctx {
+ u8 task_type;
+ u8 sgl_mode;
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3
+ u8 cached_dix_sge;
+ u8 expect_first_xfer;
+ __le32 num_pbf_zero_write;
+ union protection_info_union_ctx protection_info_union;
+ __le32 data_2_trns_rem;
+ union fcoe_tx_info_union_ctx tx_info_union;
+ union fcoe_dix_desc_ctx dix_desc;
+ union fcoe_data_desc_ctx data_desc;
+ __le16 ox_id;
+ __le16 rx_id;
+ __le32 task_rety_identifier;
+ __le32 reserved1[2];
+};
+
+struct ystorm_fcoe_task_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 rx_id;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le32 reg1;
+ __le32 reg2;
+};
+
+struct tstorm_fcoe_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
+ u8 flags1;
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
+ u8 flags2;
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
+ u8 flags3;
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
+ u8 cleanup_state;
+ __le16 last_sent_tid;
+ __le32 rec_rr_tov_exp_timeout;
+ u8 byte3;
+ u8 byte4;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 data_offset_end_of_seq;
+ __le32 data_offset_next;
+};
+
+struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
+ union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
+ __le16 flags;
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10
+ __le16 seq_cnt;
+ u8 seq_id;
+ u8 ooo_rx_seq_id;
+ __le16 rx_id;
+ struct fcoe_abts_pkt abts_data;
+ __le32 e_d_tov_exp_timeout_val;
+ __le16 ooo_rx_seq_cnt;
+ __le16 reserved1;
+};
+
+struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
+ u8 task_type;
+ u8 dev_type;
+ u8 conf_supported;
+ u8 glbl_q_num;
+ __le32 cid;
+ __le32 fcp_cmd_trns_size;
+ __le32 rsrv;
+};
+
+struct tstorm_fcoe_task_st_ctx {
+ struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
+ struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
+};
+
+struct mstorm_fcoe_task_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 cleanup_state;
+ __le32 received_bytes;
+ u8 byte3;
+ u8 glbl_q_num;
+ __le16 word1;
+ __le16 tid_to_xfer;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le32 expected_bytes;
+ __le32 reg2;
+};
+
+struct mstorm_fcoe_task_st_ctx {
+ struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp;
+ struct fcoe_mstorm_fcoe_task_st_ctx_fp fp;
+};
+
+struct ustorm_fcoe_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
+ u8 flags3;
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 global_cq_num;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+};
+
+struct fcoe_task_context {
+ struct ystorm_fcoe_task_st_ctx ystorm_st_context;
+ struct tdif_task_context tdif_context;
+ struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+ struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+ struct timers_context timer_context;
+ struct tstorm_fcoe_task_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_task_st_ctx mstorm_st_context;
+ struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+ struct rdif_task_context rdif_context;
+};
+
+struct fcoe_tx_stat {
+ struct regpair fcoe_tx_byte_cnt;
+ struct regpair fcoe_tx_data_pkt_cnt;
+ struct regpair fcoe_tx_xfer_pkt_cnt;
+ struct regpair fcoe_tx_other_pkt_cnt;
+};
+
+struct fcoe_wqe {
+ __le16 task_id;
+ __le16 flags;
+#define FCOE_WQE_REQ_TYPE_MASK 0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT 0
+#define FCOE_WQE_SGL_MODE_MASK 0x7
+#define FCOE_WQE_SGL_MODE_SHIFT 4
+#define FCOE_WQE_CONTINUATION_MASK 0x1
+#define FCOE_WQE_CONTINUATION_SHIFT 7
+#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1
+#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8
+#define FCOE_WQE_SUPER_IO_MASK 0x1
+#define FCOE_WQE_SUPER_IO_SHIFT 9
+#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10
+#define FCOE_WQE_RESERVED0_MASK 0x1F
+#define FCOE_WQE_RESERVED0_SHIFT 11
+ union fcoe_additional_info_union additional_info_union;
+};
+
+struct xfrqe_prot_flags {
+ u8 flags;
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
+#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1
+#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
+};
+
+struct fcoe_db_data {
+ u8 params;
+#define FCOE_DB_DATA_DEST_MASK 0x3
+#define FCOE_DB_DATA_DEST_SHIFT 0
+#define FCOE_DB_DATA_AGG_CMD_MASK 0x3
+#define FCOE_DB_DATA_AGG_CMD_SHIFT 2
+#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1
+#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4
+#define FCOE_DB_DATA_RESERVED_MASK 0x1
+#define FCOE_DB_DATA_RESERVED_SHIFT 5
+#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 sq_prod;
+};
+#endif /* __FCOE_COMMON__ */
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 8f64b1223c2f..4c5747babcf6 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __ISCSI_COMMON__
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 37dfba101c6c..5cd7a4608c9b 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_CHAIN_H
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 7a52f7c58c37..4cd1f0ccfa36 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_ETH_IF_H
@@ -53,7 +77,7 @@ struct qed_dev_eth_info {
};
struct qed_update_vport_rss_params {
- u16 rss_ind_table[128];
+ void *rss_ind_table[128];
u32 rss_key[10];
u8 rss_caps;
};
@@ -72,6 +96,7 @@ struct qed_update_vport_params {
struct qed_start_vport_params {
bool remove_inner_vlan;
+ bool handle_ptp_pkts;
bool gro_enable;
bool drop_ttl0;
u8 vport_id;
@@ -135,6 +160,15 @@ struct qed_eth_cb_ops {
void (*force_mac) (void *dev, u8 *mac, bool forced);
};
+#define QED_MAX_PHC_DRIFT_PPB 291666666
+
+enum qed_ptp_filter_type {
+ QED_PTP_FILTER_L2,
+ QED_PTP_FILTER_IPV4,
+ QED_PTP_FILTER_IPV4_IPV6,
+ QED_PTP_FILTER_L2_IPV4_IPV6
+};
+
#ifdef CONFIG_DCB
/* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration
* of dcbnl_rtnl_ops structure.
@@ -194,6 +228,17 @@ struct qed_eth_dcbnl_ops {
};
#endif
+struct qed_eth_ptp_ops {
+ int (*hwtstamp_tx_on)(struct qed_dev *);
+ int (*cfg_rx_filters)(struct qed_dev *, enum qed_ptp_filter_type);
+ int (*read_rx_ts)(struct qed_dev *, u64 *);
+ int (*read_tx_ts)(struct qed_dev *, u64 *);
+ int (*read_cc)(struct qed_dev *, u64 *);
+ int (*disable)(struct qed_dev *);
+ int (*adjfreq)(struct qed_dev *, s32);
+ int (*enable)(struct qed_dev *);
+};
+
struct qed_eth_ops {
const struct qed_common_ops *common;
#ifdef CONFIG_QED_SRIOV
@@ -202,6 +247,7 @@ struct qed_eth_ops {
#ifdef CONFIG_DCB
const struct qed_eth_dcbnl_ops *dcb;
#endif
+ const struct qed_eth_ptp_ops *ptp;
int (*fill_dev_info)(struct qed_dev *cdev,
struct qed_dev_eth_info *info);
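A minimal consumer sketch may help place the new hooks: the qed_eth_ptp_ops callbacks are meant to be driven from an upper Ethernet driver's PTP hardware-clock handlers. Only qed_eth_ptp_ops, QED_MAX_PHC_DRIFT_PPB and the qed_ptp_filter_type enum come from this patch; the demo_* names below are illustrative assumptions.

/* Hedged consumer sketch; demo_* names are assumptions, not from this patch. */
#include <linux/ptp_clock_kernel.h>
#include <linux/qed/qed_eth_if.h>

struct demo_edev {
	struct qed_dev *cdev;
	const struct qed_eth_ops *ops;	/* ops->ptp is the new qed_eth_ptp_ops */
	struct ptp_clock_info ptp_info;
};

static int demo_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
{
	struct demo_edev *edev = container_of(info, struct demo_edev, ptp_info);

	/* Clamp to the advertised hardware drift limit before programming. */
	if (ppb > QED_MAX_PHC_DRIFT_PPB)
		ppb = QED_MAX_PHC_DRIFT_PPB;
	else if (ppb < -QED_MAX_PHC_DRIFT_PPB)
		ppb = -QED_MAX_PHC_DRIFT_PPB;

	return edev->ops->ptp->adjfreq(edev->cdev, ppb);
}

static int demo_ptp_enable_rx_filters(struct demo_edev *edev)
{
	/* Timestamp PTP frames over L2, IPv4 and IPv6 transports. */
	return edev->ops->ptp->cfg_rx_filters(edev->cdev,
					      QED_PTP_FILTER_L2_IPV4_IPV6);
}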
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
new file mode 100644
index 000000000000..bd6bcb809415
--- /dev/null
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -0,0 +1,145 @@
+#ifndef _QED_FCOE_IF_H
+#define _QED_FCOE_IF_H
+#include <linux/types.h>
+#include <linux/qed/qed_if.h>
+struct qed_fcoe_stats {
+ u64 fcoe_rx_byte_cnt;
+ u64 fcoe_rx_data_pkt_cnt;
+ u64 fcoe_rx_xfer_pkt_cnt;
+ u64 fcoe_rx_other_pkt_cnt;
+ u32 fcoe_silent_drop_pkt_cmdq_full_cnt;
+ u32 fcoe_silent_drop_pkt_rq_full_cnt;
+ u32 fcoe_silent_drop_pkt_crc_error_cnt;
+ u32 fcoe_silent_drop_pkt_task_invalid_cnt;
+ u32 fcoe_silent_drop_total_pkt_cnt;
+
+ u64 fcoe_tx_byte_cnt;
+ u64 fcoe_tx_data_pkt_cnt;
+ u64 fcoe_tx_xfer_pkt_cnt;
+ u64 fcoe_tx_other_pkt_cnt;
+};
+
+struct qed_dev_fcoe_info {
+ struct qed_dev_info common;
+
+ void __iomem *primary_bdq_rq_addr;
+ void __iomem *secondary_bdq_rq_addr;
+};
+
+struct qed_fcoe_params_offload {
+ dma_addr_t sq_pbl_addr;
+ dma_addr_t sq_curr_page_addr;
+ dma_addr_t sq_next_page_addr;
+
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+
+ u16 tx_max_fc_pay_len;
+ u16 e_d_tov_timer_val;
+ u16 rec_tov_timer_val;
+ u16 rx_max_fc_pay_len;
+ u16 vlan_tag;
+
+ struct fc_addr_nw s_id;
+ u8 max_conc_seqs_c3;
+ struct fc_addr_nw d_id;
+ u8 flags;
+ u8 def_q_idx;
+};
+
+#define MAX_TID_BLOCKS_FCOE (512)
+struct qed_fcoe_tid {
+ u32 size; /* In bytes per task */
+ u32 num_tids_per_block;
+ u8 *blocks[MAX_TID_BLOCKS_FCOE];
+};
+
+struct qed_fcoe_cb_ops {
+ struct qed_common_cb_ops common;
+ u32 (*get_login_failures)(void *cookie);
+};
+
+void qed_fcoe_set_pf_params(struct qed_dev *cdev,
+ struct qed_fcoe_pf_params *params);
+
+/**
+ * struct qed_fcoe_ops - qed FCoE operations.
+ * @common: common operations pointer
+ * @fill_dev_info: fills FCoE specific information
+ * @param cdev
+ * @param info
+ * @return 0 on success, otherwise error value.
+ * @register_ops: register FCoE operations
+ * @param cdev
+ * @param ops - specified using qed_fcoe_cb_ops
+ * @param cookie - driver private
+ * @ll2: light L2 operations pointer
+ * @start: starts fcoe in FW
+ * @param cdev
+ * @param tasks - qed will fill information about tasks
+ * return 0 on success, otherwise error value.
+ * @stop: stops fcoe in FW
+ * @param cdev
+ * return 0 on success, otherwise error value.
+ * @acquire_conn: acquire a new fcoe connection
+ * @param cdev
+ * @param handle - qed will fill handle that should be
+ * used henceforth as identifier of the
+ * connection.
+ * @param p_doorbell - qed will fill the address of the
+ * doorbell.
+ * return 0 on success, otherwise error value.
+ * @release_conn: release a previously acquired fcoe connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * return 0 on success, otherwise error value.
+ * @offload_conn: configures an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * return 0 on success, otherwise error value.
+ * @destroy_conn: stops an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param terminate_params
+ * return 0 on success, otherwise error value.
+ * @get_stats: gets FCoE related statistics
+ * @param cdev
+ * @param stats - pointer to struct that would be filled
+ * with stats
+ * return 0 on success, error otherwise.
+ */
+struct qed_fcoe_ops {
+ const struct qed_common_ops *common;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_fcoe_info *info);
+
+ void (*register_ops)(struct qed_dev *cdev,
+ struct qed_fcoe_cb_ops *ops, void *cookie);
+
+ const struct qed_ll2_ops *ll2;
+
+ int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks);
+
+ int (*stop)(struct qed_dev *cdev);
+
+ int (*acquire_conn)(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell);
+
+ int (*release_conn)(struct qed_dev *cdev, u32 handle);
+
+ int (*offload_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_fcoe_params_offload *conn_info);
+ int (*destroy_conn)(struct qed_dev *cdev,
+ u32 handle, dma_addr_t terminate_params);
+
+ int (*get_stats)(struct qed_dev *cdev, struct qed_fcoe_stats *stats);
+};
+
+const struct qed_fcoe_ops *qed_get_fcoe_ops(void);
+void qed_put_fcoe_ops(void);
+#endif
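As a hedged sketch of the intended call flow (only the qed_fcoe_* symbols come from this header; the demo_* names and error handling are assumptions), an FCoE offload driver would acquire the ops, register its callbacks and start the FW FCoE function:

#include <linux/errno.h>
#include <linux/qed/qed_fcoe_if.h>

static int demo_fcoe_start(struct qed_dev *cdev,
			   struct qed_fcoe_cb_ops *cb_ops, void *cookie)
{
	const struct qed_fcoe_ops *ops = qed_get_fcoe_ops();
	struct qed_dev_fcoe_info info;
	struct qed_fcoe_tid tasks;
	int rc;

	if (!ops)
		return -ENODEV;		/* qed module not available */

	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		goto put;

	ops->register_ops(cdev, cb_ops, cookie);

	rc = ops->start(cdev, &tasks);	/* qed fills the per-task TID layout */
	if (rc)
		goto put;

	return 0;

put:
	qed_put_fcoe_ops();
	return rc;
}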
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 4b454f4f5b25..fde56c436f71 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -1,10 +1,33 @@
/* QLogic qed NIC Driver
- *
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_IF_H
@@ -36,7 +59,6 @@ enum dcbx_protocol_type {
#define QED_ROCE_PROTOCOL_INDEX (3)
-#ifdef CONFIG_DCB
#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
#define QED_DCBX_MAX_APP_PROTOCOL 32
@@ -132,7 +154,6 @@ struct qed_dcbx_get {
struct qed_dcbx_remote_params remote;
struct qed_dcbx_admin_params local;
};
-#endif
enum qed_led_mode {
QED_LED_MODE_OFF,
@@ -159,6 +180,38 @@ struct qed_eth_pf_params {
u16 num_cons;
};
+struct qed_fcoe_pf_params {
+ /* The following parameters are used during protocol-init */
+ u64 glbl_q_params_addr;
+ u64 bdq_pbl_base_addr[2];
+
+ /* The following parameters are used during HW-init
+ * and these parameters need to be passed as arguments
+ * to update_pf_params routine invoked before slowpath start
+ */
+ u16 num_cons;
+ u16 num_tasks;
+
+ /* The following parameters are used during protocol-init */
+ u16 sq_num_pbl_pages;
+
+ u16 cq_num_entries;
+ u16 cmdq_num_entries;
+ u16 rq_buffer_log_size;
+ u16 mtu;
+ u16 dummy_icid;
+ u16 bdq_xoff_threshold[2];
+ u16 bdq_xon_threshold[2];
+ u16 rq_buffer_size;
+ u8 num_cqs; /* num of global CQs */
+ u8 log_page_size;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 is_target;
+ u8 bdq_pbl_num_entries[2];
+};
+
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
u64 glbl_q_params_addr;
@@ -222,6 +275,7 @@ struct qed_rdma_pf_params {
struct qed_pf_params {
struct qed_eth_pf_params eth_pf_params;
+ struct qed_fcoe_pf_params fcoe_pf_params;
struct qed_iscsi_pf_params iscsi_pf_params;
struct qed_rdma_pf_params rdma_pf_params;
};
@@ -282,6 +336,7 @@ enum qed_sb_type {
enum qed_protocol {
QED_PROTOCOL_ETH,
QED_PROTOCOL_ISCSI,
+ QED_PROTOCOL_FCOE,
};
enum qed_link_mode_bits {
@@ -368,6 +423,7 @@ struct qed_int_info {
struct qed_common_cb_ops {
void (*link_update)(void *dev,
struct qed_link_output *link);
+ void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
};
struct qed_selftest_ops {
@@ -471,6 +527,10 @@ struct qed_common_ops {
void (*simd_handler_clean)(struct qed_dev *cdev,
int index);
+ int (*dbg_grc)(struct qed_dev *cdev,
+ void *buffer, u32 *num_dumped_bytes);
+
+ int (*dbg_grc_size)(struct qed_dev *cdev);
int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h
index 5a4f8d0899e9..ac2e6a3199a3 100644
--- a/include/linux/qed/qed_iov_if.h
+++ b/include/linux/qed/qed_iov_if.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_IOV_IF_H
@@ -29,6 +53,8 @@ struct qed_iov_hv_ops {
int (*set_rate) (struct qed_dev *cdev, int vfid,
u32 min_rate, u32 max_rate);
+
+ int (*set_trust) (struct qed_dev *cdev, int vfid, bool trust);
};
#endif
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index d27912480cb3..f70bb81b8b6a 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_ISCSI_IF_H
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index fd75c265dba3..4fb4666ea879 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -1,10 +1,33 @@
/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * Copyright (c) 2015 QLogic Corporation
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_LL2_IF_H
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
index 53047d3fa678..f742d4312c9d 100644
--- a/include/linux/qed/qed_roce_if.h
+++ b/include/linux/qed/qed_roce_if.h
@@ -1,5 +1,5 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015-2016 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
index f48d64b0e2fb..3b8dd551a98c 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -1,5 +1,5 @@
/* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 7663725faa94..f773aa5e746f 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __RDMA_COMMON__
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index 2eeaf3dc6646..bad02df213df 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __ROCE_COMMON__
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 3b8e1efd9bc2..03f3e37ab059 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __STORAGE_COMMON__
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index dc3889d1bbe6..46fe7856f1b2 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __TCP_COMMON__
diff --git a/include/linux/rfkill-regulator.h b/include/linux/rfkill-regulator.h
deleted file mode 100644
index aca36bc83315..000000000000
--- a/include/linux/rfkill-regulator.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * rfkill-regulator.c - Regulator consumer driver for rfkill
- *
- * Copyright (C) 2009 Guiming Zhuo <gmzhuo@gmail.com>
- * Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __LINUX_RFKILL_REGULATOR_H
-#define __LINUX_RFKILL_REGULATOR_H
-
-/*
- * Use "vrfkill" as supply id when declaring the regulator consumer:
- *
- * static struct regulator_consumer_supply pcap_regulator_V6_consumers [] = {
- * { .dev_name = "rfkill-regulator.0", .supply = "vrfkill" },
- * };
- *
- * If you have several regulator driven rfkill, you can append a numerical id to
- * .dev_name as done above, and use the same id when declaring the platform
- * device:
- *
- * static struct rfkill_regulator_platform_data ezx_rfkill_bt_data = {
- * .name = "ezx-bluetooth",
- * .type = RFKILL_TYPE_BLUETOOTH,
- * };
- *
- * static struct platform_device a910_rfkill = {
- * .name = "rfkill-regulator",
- * .id = 0,
- * .dev = {
- * .platform_data = &ezx_rfkill_bt_data,
- * },
- * };
- */
-
-#include <linux/rfkill.h>
-
-struct rfkill_regulator_platform_data {
- char *name; /* the name for the rfkill switch */
- enum rfkill_type type; /* the type as specified in rfkill.h */
-};
-
-#endif /* __LINUX_RFKILL_REGULATOR_H */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 5c132d3188be..f2e12a845910 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -61,6 +61,7 @@ struct rhlist_head {
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
+ * @nest: Number of bits of first-level nested table.
* @rehash: Current bucket being rehashed
* @hash_rnd: Random seed to fold into hash
* @locks_mask: Mask to apply before accessing locks[]
@@ -68,10 +69,12 @@ struct rhlist_head {
* @walkers: List of active walkers
* @rcu: RCU structure for freeing the table
* @future_tbl: Table under construction during rehashing
+ * @ntbl: Nested table used when out of memory.
* @buckets: size * hash buckets
*/
struct bucket_table {
unsigned int size;
+ unsigned int nest;
unsigned int rehash;
u32 hash_rnd;
unsigned int locks_mask;
@@ -81,7 +84,7 @@ struct bucket_table {
struct bucket_table __rcu *future_tbl;
- struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+ struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/**
@@ -374,6 +377,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void *arg);
void rhashtable_destroy(struct rhashtable *ht);
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+ unsigned int hash);
+struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ unsigned int hash);
+
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -389,6 +398,27 @@ void rhashtable_destroy(struct rhashtable *ht);
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
+static inline struct rhash_head __rcu *const *rht_bucket(
+ const struct bucket_table *tbl, unsigned int hash)
+{
+ return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+ &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_var(
+ struct bucket_table *tbl, unsigned int hash)
+{
+ return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+ &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_insert(
+ struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
+{
+ return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
+ &tbl->buckets[hash];
+}
+
/**
* rht_for_each_continue - continue iterating over hash chain
* @pos: the &struct rhash_head to use as a loop cursor.
@@ -408,7 +438,7 @@ void rhashtable_destroy(struct rhashtable *ht);
* @hash: the hash value / bucket index
*/
#define rht_for_each(pos, tbl, hash) \
- rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+ rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
/**
* rht_for_each_entry_continue - continue iterating over hash chain
@@ -433,7 +463,7 @@ void rhashtable_destroy(struct rhashtable *ht);
* @member: name of the &struct rhash_head within the hashable struct.
*/
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
+ rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
tbl, hash, member)
/**
@@ -448,13 +478,13 @@ void rhashtable_destroy(struct rhashtable *ht);
* This hash chain list-traversal primitive allows for the looped code to
* remove the loop cursor from the list.
*/
-#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
- for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
- next = !rht_is_a_nulls(pos) ? \
- rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
- (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
- pos = next, \
- next = !rht_is_a_nulls(pos) ? \
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
+ for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+ next = !rht_is_a_nulls(pos) ? \
+ rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = next, \
+ next = !rht_is_a_nulls(pos) ? \
rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
@@ -485,7 +515,7 @@ void rhashtable_destroy(struct rhashtable *ht);
* traversal is guarded by rcu_read_lock().
*/
#define rht_for_each_rcu(pos, tbl, hash) \
- rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+ rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
/**
* rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
@@ -518,8 +548,8 @@ void rhashtable_destroy(struct rhashtable *ht);
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
+ rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
tbl, hash, member)
/**
@@ -565,7 +595,7 @@ static inline struct rhash_head *__rhashtable_lookup(
.ht = ht,
.key = key,
};
- const struct bucket_table *tbl;
+ struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
@@ -697,8 +727,12 @@ slow_path:
}
elasticity = ht->elasticity;
- pprev = &tbl->buckets[hash];
- rht_for_each(head, tbl, hash) {
+ pprev = rht_bucket_insert(ht, tbl, hash);
+ data = ERR_PTR(-ENOMEM);
+ if (!pprev)
+ goto out;
+
+ rht_for_each_continue(head, *pprev, tbl, hash) {
struct rhlist_head *plist;
struct rhlist_head *list;
@@ -736,7 +770,7 @@ slow_path:
if (unlikely(rht_grow_above_100(ht, tbl)))
goto slow_path;
- head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+ head = rht_dereference_bucket(*pprev, tbl, hash);
RCU_INIT_POINTER(obj->next, head);
if (rhlist) {
@@ -746,7 +780,7 @@ slow_path:
RCU_INIT_POINTER(list->next, NULL);
}
- rcu_assign_pointer(tbl->buckets[hash], obj);
+ rcu_assign_pointer(*pprev, obj);
atomic_inc(&ht->nelems);
if (rht_grow_above_75(ht, tbl))
@@ -955,8 +989,8 @@ static inline int __rhashtable_remove_fast_one(
spin_lock_bh(lock);
- pprev = &tbl->buckets[hash];
- rht_for_each(he, tbl, hash) {
+ pprev = rht_bucket_var(tbl, hash);
+ rht_for_each_continue(he, *pprev, tbl, hash) {
struct rhlist_head *list;
list = container_of(he, struct rhlist_head, rhead);
@@ -1107,8 +1141,8 @@ static inline int __rhashtable_replace_fast(
spin_lock_bh(lock);
- pprev = &tbl->buckets[hash];
- rht_for_each(he, tbl, hash) {
+ pprev = rht_bucket_var(tbl, hash);
+ rht_for_each_continue(he, *pprev, tbl, hash) {
if (he != obj_old) {
pprev = &he->next;
continue;
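The pattern behind these hunks: bucket heads are no longer reached by indexing tbl->buckets[] directly but through rht_bucket()/rht_bucket_var()/rht_bucket_insert(), which fall back to the nested layout whenever tbl->nest is non-zero (i.e. when the flat bucket array could not be allocated). A tiny illustrative helper, assuming an rcu_read_lock()-protected caller (demo_bucket_is_empty is not part of the patch):

#include <linux/rhashtable.h>

static bool demo_bucket_is_empty(const struct bucket_table *tbl,
				 unsigned int hash)
{
	struct rhash_head *pos;

	/* rht_for_each_rcu() now expands via *rht_bucket(tbl, hash), so the
	 * same loop works for both flat and nested tables.
	 */
	rht_for_each_rcu(pos, tbl, hash)
		return false;
	return true;
}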
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index fcb4c3646173..7a4804c4a593 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -62,7 +62,7 @@ typedef struct sctphdr {
__be16 dest;
__be32 vtag;
__le32 checksum;
-} __packed sctp_sctphdr_t;
+} sctp_sctphdr_t;
static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb)
{
@@ -74,7 +74,7 @@ typedef struct sctp_chunkhdr {
__u8 type;
__u8 flags;
__be16 length;
-} __packed sctp_chunkhdr_t;
+} sctp_chunkhdr_t;
/* Section 3.2. Chunk Type Values.
@@ -108,6 +108,7 @@ typedef enum {
/* Use hex, as defined in ADDIP sec. 3.1 */
SCTP_CID_ASCONF = 0xC1,
SCTP_CID_ASCONF_ACK = 0x80,
+ SCTP_CID_RECONF = 0x82,
} sctp_cid_t; /* enum */
@@ -164,7 +165,7 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 };
typedef struct sctp_paramhdr {
__be16 type;
__be16 length;
-} __packed sctp_paramhdr_t;
+} sctp_paramhdr_t;
typedef enum {
@@ -199,6 +200,13 @@ typedef enum {
SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005),
SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006),
+ /* RE-CONFIG. Section 4 */
+ SCTP_PARAM_RESET_OUT_REQUEST = cpu_to_be16(0x000d),
+ SCTP_PARAM_RESET_IN_REQUEST = cpu_to_be16(0x000e),
+ SCTP_PARAM_RESET_TSN_REQUEST = cpu_to_be16(0x000f),
+ SCTP_PARAM_RESET_RESPONSE = cpu_to_be16(0x0010),
+ SCTP_PARAM_RESET_ADD_OUT_STREAMS = cpu_to_be16(0x0011),
+ SCTP_PARAM_RESET_ADD_IN_STREAMS = cpu_to_be16(0x0012),
} sctp_param_t; /* enum */
@@ -225,12 +233,12 @@ typedef struct sctp_datahdr {
__be16 ssn;
__be32 ppid;
__u8 payload[0];
-} __packed sctp_datahdr_t;
+} sctp_datahdr_t;
typedef struct sctp_data_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_datahdr_t data_hdr;
-} __packed sctp_data_chunk_t;
+} sctp_data_chunk_t;
/* DATA Chuck Specific Flags */
enum {
@@ -256,78 +264,78 @@ typedef struct sctp_inithdr {
__be16 num_inbound_streams;
__be32 initial_tsn;
__u8 params[0];
-} __packed sctp_inithdr_t;
+} sctp_inithdr_t;
typedef struct sctp_init_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_inithdr_t init_hdr;
-} __packed sctp_init_chunk_t;
+} sctp_init_chunk_t;
/* Section 3.3.2.1. IPv4 Address Parameter (5) */
typedef struct sctp_ipv4addr_param {
sctp_paramhdr_t param_hdr;
struct in_addr addr;
-} __packed sctp_ipv4addr_param_t;
+} sctp_ipv4addr_param_t;
/* Section 3.3.2.1. IPv6 Address Parameter (6) */
typedef struct sctp_ipv6addr_param {
sctp_paramhdr_t param_hdr;
struct in6_addr addr;
-} __packed sctp_ipv6addr_param_t;
+} sctp_ipv6addr_param_t;
/* Section 3.3.2.1 Cookie Preservative (9) */
typedef struct sctp_cookie_preserve_param {
sctp_paramhdr_t param_hdr;
__be32 lifespan_increment;
-} __packed sctp_cookie_preserve_param_t;
+} sctp_cookie_preserve_param_t;
/* Section 3.3.2.1 Host Name Address (11) */
typedef struct sctp_hostname_param {
sctp_paramhdr_t param_hdr;
uint8_t hostname[0];
-} __packed sctp_hostname_param_t;
+} sctp_hostname_param_t;
/* Section 3.3.2.1 Supported Address Types (12) */
typedef struct sctp_supported_addrs_param {
sctp_paramhdr_t param_hdr;
__be16 types[0];
-} __packed sctp_supported_addrs_param_t;
+} sctp_supported_addrs_param_t;
/* Appendix A. ECN Capable (32768) */
typedef struct sctp_ecn_capable_param {
sctp_paramhdr_t param_hdr;
-} __packed sctp_ecn_capable_param_t;
+} sctp_ecn_capable_param_t;
/* ADDIP Section 3.2.6 Adaptation Layer Indication */
typedef struct sctp_adaptation_ind_param {
struct sctp_paramhdr param_hdr;
__be32 adaptation_ind;
-} __packed sctp_adaptation_ind_param_t;
+} sctp_adaptation_ind_param_t;
/* ADDIP Section 4.2.7 Supported Extensions Parameter */
typedef struct sctp_supported_ext_param {
struct sctp_paramhdr param_hdr;
__u8 chunks[0];
-} __packed sctp_supported_ext_param_t;
+} sctp_supported_ext_param_t;
/* AUTH Section 3.1 Random */
typedef struct sctp_random_param {
sctp_paramhdr_t param_hdr;
__u8 random_val[0];
-} __packed sctp_random_param_t;
+} sctp_random_param_t;
/* AUTH Section 3.2 Chunk List */
typedef struct sctp_chunks_param {
sctp_paramhdr_t param_hdr;
__u8 chunks[0];
-} __packed sctp_chunks_param_t;
+} sctp_chunks_param_t;
/* AUTH Section 3.3 HMAC Algorithm */
typedef struct sctp_hmac_algo_param {
sctp_paramhdr_t param_hdr;
__be16 hmac_ids[0];
-} __packed sctp_hmac_algo_param_t;
+} sctp_hmac_algo_param_t;
/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
* The INIT ACK chunk is used to acknowledge the initiation of an SCTP
@@ -339,13 +347,13 @@ typedef sctp_init_chunk_t sctp_initack_chunk_t;
typedef struct sctp_cookie_param {
sctp_paramhdr_t p;
__u8 body[0];
-} __packed sctp_cookie_param_t;
+} sctp_cookie_param_t;
/* Section 3.3.3.1 Unrecognized Parameters (8) */
typedef struct sctp_unrecognized_param {
sctp_paramhdr_t param_hdr;
sctp_paramhdr_t unrecognized;
-} __packed sctp_unrecognized_param_t;
+} sctp_unrecognized_param_t;
@@ -360,7 +368,7 @@ typedef struct sctp_unrecognized_param {
typedef struct sctp_gap_ack_block {
__be16 start;
__be16 end;
-} __packed sctp_gap_ack_block_t;
+} sctp_gap_ack_block_t;
typedef __be32 sctp_dup_tsn_t;
@@ -375,12 +383,12 @@ typedef struct sctp_sackhdr {
__be16 num_gap_ack_blocks;
__be16 num_dup_tsns;
sctp_sack_variable_t variable[0];
-} __packed sctp_sackhdr_t;
+} sctp_sackhdr_t;
typedef struct sctp_sack_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_sackhdr_t sack_hdr;
-} __packed sctp_sack_chunk_t;
+} sctp_sack_chunk_t;
/* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4):
@@ -392,12 +400,12 @@ typedef struct sctp_sack_chunk {
typedef struct sctp_heartbeathdr {
sctp_paramhdr_t info;
-} __packed sctp_heartbeathdr_t;
+} sctp_heartbeathdr_t;
typedef struct sctp_heartbeat_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_heartbeathdr_t hb_hdr;
-} __packed sctp_heartbeat_chunk_t;
+} sctp_heartbeat_chunk_t;
/* For the abort and shutdown ACK we must carry the init tag in the
@@ -406,7 +414,7 @@ typedef struct sctp_heartbeat_chunk {
*/
typedef struct sctp_abort_chunk {
sctp_chunkhdr_t uh;
-} __packed sctp_abort_chunk_t;
+} sctp_abort_chunk_t;
/* For the graceful shutdown we must carry the tag (in common header)
@@ -414,12 +422,12 @@ typedef struct sctp_abort_chunk {
*/
typedef struct sctp_shutdownhdr {
__be32 cum_tsn_ack;
-} __packed sctp_shutdownhdr_t;
+} sctp_shutdownhdr_t;
struct sctp_shutdown_chunk_t {
sctp_chunkhdr_t chunk_hdr;
sctp_shutdownhdr_t shutdown_hdr;
-} __packed;
+};
/* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */
@@ -427,12 +435,12 @@ typedef struct sctp_errhdr {
__be16 cause;
__be16 length;
__u8 variable[0];
-} __packed sctp_errhdr_t;
+} sctp_errhdr_t;
typedef struct sctp_operr_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_errhdr_t err_hdr;
-} __packed sctp_operr_chunk_t;
+} sctp_operr_chunk_t;
/* RFC 2960 3.3.10 - Operation Error
*
@@ -522,7 +530,7 @@ typedef struct sctp_ecnehdr {
typedef struct sctp_ecne_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_ecnehdr_t ence_hdr;
-} __packed sctp_ecne_chunk_t;
+} sctp_ecne_chunk_t;
/* RFC 2960. Appendix A. Explicit Congestion Notification.
* Congestion Window Reduced (CWR) (13)
@@ -534,7 +542,7 @@ typedef struct sctp_cwrhdr {
typedef struct sctp_cwr_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_cwrhdr_t cwr_hdr;
-} __packed sctp_cwr_chunk_t;
+} sctp_cwr_chunk_t;
/* PR-SCTP
* 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
@@ -585,17 +593,17 @@ typedef struct sctp_cwr_chunk {
struct sctp_fwdtsn_skip {
__be16 stream;
__be16 ssn;
-} __packed;
+};
struct sctp_fwdtsn_hdr {
__be32 new_cum_tsn;
struct sctp_fwdtsn_skip skip[0];
-} __packed;
+};
struct sctp_fwdtsn_chunk {
struct sctp_chunkhdr chunk_hdr;
struct sctp_fwdtsn_hdr fwdtsn_hdr;
-} __packed;
+};
/* ADDIP
@@ -633,17 +641,17 @@ struct sctp_fwdtsn_chunk {
typedef struct sctp_addip_param {
sctp_paramhdr_t param_hdr;
__be32 crr_id;
-} __packed sctp_addip_param_t;
+} sctp_addip_param_t;
typedef struct sctp_addiphdr {
__be32 serial;
__u8 params[0];
-} __packed sctp_addiphdr_t;
+} sctp_addiphdr_t;
typedef struct sctp_addip_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_addiphdr_t addip_hdr;
-} __packed sctp_addip_chunk_t;
+} sctp_addip_chunk_t;
/* AUTH
* Section 4.1 Authentication Chunk (AUTH)
@@ -698,16 +706,71 @@ typedef struct sctp_authhdr {
__be16 shkey_id;
__be16 hmac_id;
__u8 hmac[0];
-} __packed sctp_authhdr_t;
+} sctp_authhdr_t;
typedef struct sctp_auth_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_authhdr_t auth_hdr;
-} __packed sctp_auth_chunk_t;
+} sctp_auth_chunk_t;
struct sctp_infox {
struct sctp_info *sctpinfo;
struct sctp_association *asoc;
};
+struct sctp_reconf_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ __u8 params[0];
+};
+
+struct sctp_strreset_outreq {
+ sctp_paramhdr_t param_hdr;
+ __u32 request_seq;
+ __u32 response_seq;
+ __u32 send_reset_at_tsn;
+ __u16 list_of_streams[0];
+};
+
+struct sctp_strreset_inreq {
+ sctp_paramhdr_t param_hdr;
+ __u32 request_seq;
+ __u16 list_of_streams[0];
+};
+
+struct sctp_strreset_tsnreq {
+ sctp_paramhdr_t param_hdr;
+ __u32 request_seq;
+};
+
+struct sctp_strreset_addstrm {
+ sctp_paramhdr_t param_hdr;
+ __u32 request_seq;
+ __u16 number_of_streams;
+ __u16 reserved;
+};
+
+enum {
+ SCTP_STRRESET_NOTHING_TO_DO = 0x00,
+ SCTP_STRRESET_PERFORMED = 0x01,
+ SCTP_STRRESET_DENIED = 0x02,
+ SCTP_STRRESET_ERR_WRONG_SSN = 0x03,
+ SCTP_STRRESET_ERR_IN_PROGRESS = 0x04,
+ SCTP_STRRESET_ERR_BAD_SEQNO = 0x05,
+ SCTP_STRRESET_IN_PROGRESS = 0x06,
+};
+
+struct sctp_strreset_resp {
+ sctp_paramhdr_t param_hdr;
+ __u32 response_seq;
+ __u32 result;
+};
+
+struct sctp_strreset_resptsn {
+ sctp_paramhdr_t param_hdr;
+ __u32 response_seq;
+ __u32 result;
+ __u32 senders_next_tsn;
+ __u32 receivers_next_tsn;
+};
+
#endif /* __LINUX_SCTP_H__ */
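[Editorial note: the new stream reconfiguration structures above follow the RFC 6525 wire format, and the SCTP_STRRESET_* values are the Result codes carried back in sctp_strreset_resp. As a rough sketch of how an Outgoing SSN Reset Request parameter could be laid out — parameter type 13 per RFC 6525; the helper name is hypothetical and not the kernel's own constructor:]

/* Illustrative only: fill an Outgoing SSN Reset Request parameter for
 * @nums streams.  The type value 13 comes from RFC 6525; the __u32
 * sequence fields are stored in network byte order, as on the wire.
 */
static void example_fill_strreset_outreq(struct sctp_strreset_outreq *req,
					 __u32 request_seq, __u32 response_seq,
					 __u32 reset_at_tsn,
					 const __u16 *streams, __u16 nums)
{
	__u16 i;
	__u16 len = sizeof(*req) + nums * sizeof(__u16);

	req->param_hdr.type = htons(13);	/* Outgoing SSN Reset Request */
	req->param_hdr.length = htons(len);
	req->request_seq = htonl(request_seq);
	req->response_seq = htonl(response_seq);
	req->send_reset_at_tsn = htonl(reset_at_tsn);
	for (i = 0; i < nums; i++)
		req->list_of_streams[i] = htons(streams[i]);
}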
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
new file mode 100644
index 000000000000..fa7a6b9cedbf
--- /dev/null
+++ b/include/linux/siphash.h
@@ -0,0 +1,140 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#ifndef _LINUX_SIPHASH_H
+#define _LINUX_SIPHASH_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define SIPHASH_ALIGNMENT __alignof__(u64)
+typedef struct {
+ u64 key[2];
+} siphash_key_t;
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
+#endif
+
+u64 siphash_1u64(const u64 a, const siphash_key_t *key);
+u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
+u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
+ const siphash_key_t *key);
+u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
+ const siphash_key_t *key);
+u64 siphash_1u32(const u32 a, const siphash_key_t *key);
+u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
+ const siphash_key_t *key);
+
+static inline u64 siphash_2u32(const u32 a, const u32 b,
+ const siphash_key_t *key)
+{
+ return siphash_1u64((u64)b << 32 | a, key);
+}
+static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
+ const u32 d, const siphash_key_t *key)
+{
+ return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
+}
+
+
+static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
+ const siphash_key_t *key)
+{
+ if (__builtin_constant_p(len) && len == 4)
+ return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
+ if (__builtin_constant_p(len) && len == 8)
+ return siphash_1u64(le64_to_cpu(data[0]), key);
+ if (__builtin_constant_p(len) && len == 16)
+ return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+ key);
+ if (__builtin_constant_p(len) && len == 24)
+ return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+ le64_to_cpu(data[2]), key);
+ if (__builtin_constant_p(len) && len == 32)
+ return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+ le64_to_cpu(data[2]), le64_to_cpu(data[3]),
+ key);
+ return __siphash_aligned(data, len, key);
+}
+
+/**
+ * siphash - compute 64-bit siphash PRF value
+ * @data: buffer to hash
+ * @len: length of @data
+ * @key: the siphash key
+ */
+static inline u64 siphash(const void *data, size_t len,
+ const siphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+ return __siphash_unaligned(data, len, key);
+#endif
+ return ___siphash_aligned(data, len, key);
+}
+
+#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
+typedef struct {
+ unsigned long key[2];
+} hsiphash_key_t;
+
+u32 __hsiphash_aligned(const void *data, size_t len,
+ const hsiphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+ const hsiphash_key_t *key);
+#endif
+
+u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
+u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
+u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
+ const hsiphash_key_t *key);
+u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
+ const hsiphash_key_t *key);
+
+static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
+ const hsiphash_key_t *key)
+{
+ if (__builtin_constant_p(len) && len == 4)
+ return hsiphash_1u32(le32_to_cpu(data[0]), key);
+ if (__builtin_constant_p(len) && len == 8)
+ return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+ key);
+ if (__builtin_constant_p(len) && len == 12)
+ return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+ le32_to_cpu(data[2]), key);
+ if (__builtin_constant_p(len) && len == 16)
+ return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+ le32_to_cpu(data[2]), le32_to_cpu(data[3]),
+ key);
+ return __hsiphash_aligned(data, len, key);
+}
+
+/**
+ * hsiphash - compute 32-bit hsiphash PRF value
+ * @data: buffer to hash
+ * @len: length of @data
+ * @key: the hsiphash key
+ */
+static inline u32 hsiphash(const void *data, size_t len,
+ const hsiphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+ return __hsiphash_unaligned(data, len, key);
+#endif
+ return ___hsiphash_aligned(data, len, key);
+}
+
+#endif /* _LINUX_SIPHASH_H */
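[Editorial note: to make the intent of the new interface concrete, here is a minimal sketch of how a caller might use it — seed a key once with random bytes, then prefer the fixed-width helpers (or the length-dispatching siphash()/hsiphash() wrappers) over the generic path. The function names below are illustrative, not existing callers:]

#include <linux/random.h>
#include <linux/siphash.h>

static siphash_key_t example_key __read_mostly;

static void example_init(void)
{
	/* Real users would seed exactly once (e.g. via net_get_random_once());
	 * done unconditionally here for brevity.
	 */
	get_random_bytes(&example_key, sizeof(example_key));
}

static u64 example_hash_pair(u64 a, u64 b)
{
	/* Two u64 words hit the constant-length fast path directly. */
	return siphash_2u64(a, b, &example_key);
}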
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a410715bbef8..69ccd2636911 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -585,20 +585,22 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
* @cloned: Head may be cloned (check refcnt to be sure)
* @ip_summed: Driver fed us an IP checksum
* @nohdr: Payload reference only, must not modify header
- * @nfctinfo: Relationship of this skb to the connection
* @pkt_type: Packet class
* @fclone: skbuff clone status
* @ipvs_property: skbuff is owned by ipvs
+ * @tc_skip_classify: do not classify packet. set by IFB device
+ * @tc_at_ingress: used within tc_classify to distinguish in/egress
+ * @tc_redirected: packet was redirected by a tc action
+ * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
* @peeked: this packet has been seen already, so stats have been
* done for it, don't do them again
* @nf_trace: netfilter packet trace flag
* @protocol: Packet protocol from driver
* @destructor: Destruct function
- * @nfct: Associated connection, if any
+ * @_nfct: Associated connection, if any (with nfctinfo bits)
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @skb_iif: ifindex of device we arrived on
* @tc_index: Traffic control index
- * @tc_verd: traffic control verdict
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
* @xmit_more: More SKBs are pending for this queue
@@ -610,6 +612,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
* @wifi_acked_valid: wifi_acked was set
* @wifi_acked: whether frame was acked on wifi or not
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
+ * @dst_pending_confirm: need to confirm neighbour
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
* @mark: Generic packet mark
@@ -668,7 +671,7 @@ struct sk_buff {
struct sec_path *sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
- struct nf_conntrack *nfct;
+ unsigned long _nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info *nf_bridge;
@@ -721,7 +724,6 @@ struct sk_buff {
__u8 pkt_type:3;
__u8 pfmemalloc:1;
__u8 ignore_df:1;
- __u8 nfctinfo:3;
__u8 nf_trace:1;
__u8 ip_summed:2;
@@ -740,6 +742,7 @@ struct sk_buff {
__u8 csum_level:2;
__u8 csum_bad:1;
+ __u8 dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
@@ -749,13 +752,15 @@ struct sk_buff {
#ifdef CONFIG_NET_SWITCHDEV
__u8 offload_fwd_mark:1;
#endif
- /* 2, 4 or 5 bit hole */
+#ifdef CONFIG_NET_CLS_ACT
+ __u8 tc_skip_classify:1;
+ __u8 tc_at_ingress:1;
+ __u8 tc_redirected:1;
+ __u8 tc_from_ingress:1;
+#endif
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
-#ifdef CONFIG_NET_CLS_ACT
- __u16 tc_verd; /* traffic control verdict */
-#endif
#endif
union {
@@ -836,6 +841,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb)
#define SKB_DST_NOREF 1UL
#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
+#define SKB_NFCT_PTRMASK ~(7UL)
/**
* skb_dst - returns skb dst_entry
* @skb: buffer
@@ -2178,6 +2184,11 @@ static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
return skb->head + skb->mac_header;
}
+static inline int skb_mac_offset(const struct sk_buff *skb)
+{
+ return skb_mac_header(skb) - skb->data;
+}
+
static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
return skb->mac_header != (typeof(skb->mac_header))~0U;
@@ -3553,6 +3564,15 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
skb->csum = csum_add(skb->csum, delta);
}
+static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
+#else
+ return NULL;
+#endif
+}
+
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
@@ -3581,8 +3601,8 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
- nf_conntrack_put(skb->nfct);
- skb->nfct = NULL;
+ nf_conntrack_put(skb_nfct(skb));
+ skb->_nfct = 0;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nf_bridge_put(skb->nf_bridge);
@@ -3602,10 +3622,8 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
- dst->nfct = src->nfct;
- nf_conntrack_get(src->nfct);
- if (copy)
- dst->nfctinfo = src->nfctinfo;
+ dst->_nfct = src->_nfct;
+ nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
dst->nf_bridge = src->nf_bridge;
@@ -3620,7 +3638,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
- nf_conntrack_put(dst->nfct);
+ nf_conntrack_put(skb_nfct(dst));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nf_bridge_put(dst->nf_bridge);
@@ -3652,9 +3670,7 @@ static inline bool skb_irq_freeable(const struct sk_buff *skb)
#if IS_ENABLED(CONFIG_XFRM)
!skb->sp &&
#endif
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- !skb->nfct &&
-#endif
+ !skb_nfct(skb) &&
!skb->_skb_refdst &&
!skb_has_frag_list(skb);
}
@@ -3689,6 +3705,16 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
return skb->queue_mapping != 0;
}
+static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
+{
+ skb->dst_pending_confirm = val;
+}
+
+static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
+{
+ return skb->dst_pending_confirm != 0;
+}
+
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
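[Editorial note: with this change the conntrack pointer and the former nfctinfo bits share the single unsigned long _nfct — the pointer lives in the upper bits (SKB_NFCT_PTRMASK strips the low three) and the info value sits in the low bits. A sketch of how both halves can be read back, mirroring skb_nfct() above; the helper name is mine, not the conntrack code's accessor:]

/* Illustrative unpacking of skb->_nfct; only meaningful when
 * CONFIG_NF_CONNTRACK is enabled.
 */
static inline struct nf_conntrack *
example_nfct_and_info(const struct sk_buff *skb, unsigned long *ctinfo)
{
	*ctinfo = skb->_nfct & 7UL;		/* low bits: former nfctinfo */
	return (struct nf_conntrack *)(skb->_nfct & SKB_NFCT_PTRMASK);
}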
diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h
index 7b88697929e9..b8478ee7a71f 100644
--- a/include/linux/soc/qcom/smem_state.h
+++ b/include/linux/soc/qcom/smem_state.h
@@ -1,7 +1,7 @@
#ifndef __QCOM_SMEM_STATE__
#define __QCOM_SMEM_STATE__
-#include <linux/errno.h>
+#include <linux/err.h>
struct device_node;
struct qcom_smem_state;
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 35cb9264e0d5..2b7882666ef6 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -41,6 +41,8 @@
#define KNAV_DMA_DESC_RETQ_SHIFT 0
#define KNAV_DMA_DESC_RETQ_MASK MASK(14)
#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22)
+#define KNAV_DMA_DESC_EFLAGS_MASK MASK(4)
+#define KNAV_DMA_DESC_EFLAGS_SHIFT 20
#define KNAV_DMA_NUM_EPIB_WORDS 4
#define KNAV_DMA_NUM_PS_WORDS 16
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b5cc5a6d7011..082027457825 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -92,9 +92,9 @@ struct cmsghdr {
#define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) )
-#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + CMSG_ALIGN(sizeof(struct cmsghdr))))
-#define CMSG_SPACE(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + CMSG_ALIGN(len))
-#define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + (len))
+#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + sizeof(struct cmsghdr)))
+#define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len))
+#define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len))
#define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \
(struct cmsghdr *)(ctl) : \
@@ -202,8 +202,12 @@ struct ucred {
#define AF_VSOCK 40 /* vSockets */
#define AF_KCM 41 /* Kernel Connection Multiplexor*/
#define AF_QIPCRTR 42 /* Qualcomm IPC Router */
+#define AF_SMC 43 /* smc sockets: reserve number for
+ * PF_SMC protocol family that
+ * reuses AF_INET address family
+ */
-#define AF_MAX 43 /* For now.. */
+#define AF_MAX 44 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -251,6 +255,7 @@ struct ucred {
#define PF_VSOCK AF_VSOCK
#define PF_KCM AF_KCM
#define PF_QIPCRTR AF_QIPCRTR
+#define PF_SMC AF_SMC
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 266dab9ad782..fc273e9d5f67 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -103,7 +103,6 @@ struct stmmac_axi {
u32 axi_wr_osr_lmt;
u32 axi_rd_osr_lmt;
bool axi_kbbe;
- bool axi_axi_all;
u32 axi_blen[AXI_BLEN];
bool axi_fb;
bool axi_mb;
@@ -135,13 +134,18 @@ struct plat_stmmacenet_data {
int tx_fifo_size;
int rx_fifo_size;
void (*fix_mac_speed)(void *priv, unsigned int speed);
- void (*bus_setup)(void __iomem *ioaddr);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
void *bsp_priv;
+ struct clk *stmmac_clk;
+ struct clk *pclk;
+ struct clk *clk_ptp_ref;
+ unsigned int clk_ptp_rate;
+ struct reset_control *stmmac_rst;
struct stmmac_axi *axi;
int has_gmac4;
bool tso_en;
int mac_port_sel_speed;
+ bool en_tx_lpi_clockgating;
};
#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index c93f4b3a59cb..cfc2d9506ce8 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -212,6 +212,8 @@ struct tcp_sock {
/* Information of the most recently (s)acked skb */
struct tcp_rack {
struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+ u32 rtt_us; /* Associated RTT */
+ u32 end_seq; /* Ending TCP sequence of the skb */
u8 advanced; /* mstamp advanced since last lost marking */
u8 reord; /* reordering detected */
} rack;
@@ -220,15 +222,15 @@ struct tcp_sock {
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u8 chrono_type:2, /* current chronograph type */
rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
- unused:5;
+ fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
+ unused:4;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
- thin_dupack : 1,/* Fast retransmit on first dupack */
+ unused1 : 1,
repair : 1,
frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
u8 repair_queue;
- u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
- syn_data:1, /* SYN includes data */
+ u8 syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
@@ -310,7 +312,6 @@ struct tcp_sock {
*/
int lost_cnt_hint;
- u32 retransmit_high; /* L-bits may be on up to this seqno */
u32 prior_ssthresh; /* ssthresh saved at recovery start */
u32 high_seq; /* snd_nxt at onset of congestion */
@@ -444,4 +445,13 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
+static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
+{
+ /* We use READ_ONCE() here because socket might not be locked.
+ * This happens for listeners.
+ */
+ u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
+
+ return (user_mss && user_mss < mss) ? user_mss : mss;
+}
#endif /* _LINUX_TCP_H */
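[Editorial note: tcp_mss_clamp() centralizes the "honour a user-set MSS, otherwise keep the caller's value" pattern. A hedged sketch of the expected call-site shape; the surrounding function is illustrative, the real conversions live in the TCP patches that adopt this helper:]

#include <net/tcp.h>
#include <net/dst.h>

static void example_clamp_advmss(struct sock *sk, const struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Never advertise more than the route allows, and respect a
	 * TCP_MAXSEG-style user limit if one was set.
	 */
	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, dst_metric_advmss(dst));
}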
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index be007610ceb0..0f165507495c 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -33,7 +33,8 @@ const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
unsigned int bitmask_size);
const char *trace_print_hex_seq(struct trace_seq *p,
- const unsigned char *buf, int len);
+ const unsigned char *buf, int len,
+ bool concatenate);
const char *trace_print_array_seq(struct trace_seq *p,
const void *buf, int count,
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 2d095fc60204..4dff73a89758 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -19,6 +19,30 @@
#include <uapi/linux/uuid.h>
/*
+ * V1 (time-based) UUID definition [RFC 4122].
+ * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns
+ * increments since midnight 15th October 1582
+ * - add UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID
+ * time
+ * - the clock sequence is a 14-bit counter to avoid duplicate times
+ */
+struct uuid_v1 {
+ __be32 time_low; /* low part of timestamp */
+ __be16 time_mid; /* mid part of timestamp */
+ __be16 time_hi_and_version; /* high part of timestamp and version */
+#define UUID_TO_UNIX_TIME 0x01b21dd213814000ULL
+#define UUID_TIMEHI_MASK 0x0fff
+#define UUID_VERSION_TIME 0x1000 /* time-based UUID */
+#define UUID_VERSION_NAME 0x3000 /* name-based UUID */
+#define UUID_VERSION_RANDOM 0x4000 /* (pseudo-)random generated UUID */
+ u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */
+#define UUID_CLOCKHI_MASK 0x3f
+#define UUID_VARIANT_STD 0x80
+ u8 clock_seq_low; /* clock seq low */
+ u8 node[6]; /* spatially unique node ID (MAC addr) */
+};
+
+/*
* The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
* not including trailing NUL.
*/
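[Editorial note: struct uuid_v1 plus the UUID_* constants above are enough to pull the 60-bit timestamp back out of a time-based UUID. A small sketch — the helper name is mine, and it assumes the caller wants 100ns ticks relative to the Unix epoch:]

static inline u64 example_uuid_v1_to_unix_100ns(const struct uuid_v1 *u)
{
	u64 t;

	t  = (u64)(be16_to_cpu(u->time_hi_and_version) & UUID_TIMEHI_MASK) << 48;
	t |= (u64)be16_to_cpu(u->time_mid) << 32;
	t |= be32_to_cpu(u->time_low);

	/* UUID time counts from 1582-10-15; shift to the Unix epoch. */
	return t - UUID_TO_UNIX_TIME;
}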
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d5eb5479a425..04b0d3f95043 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -132,12 +132,16 @@ static inline struct virtio_device *dev_to_virtio(struct device *_dev)
return container_of(_dev, struct virtio_device, dev);
}
+void virtio_add_status(struct virtio_device *dev, unsigned int status);
int register_virtio_device(struct virtio_device *dev);
void unregister_virtio_device(struct virtio_device *dev);
void virtio_break_device(struct virtio_device *dev);
void virtio_config_changed(struct virtio_device *dev);
+void virtio_config_disable(struct virtio_device *dev);
+void virtio_config_enable(struct virtio_device *dev);
+int virtio_finalize_features(struct virtio_device *dev);
#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);