Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bcma/bcma_soc.h                 2
-rw-r--r--  include/linux/clocksource.h                 102
-rw-r--r--  include/linux/etherdevice.h                   4
-rw-r--r--  include/linux/fec.h                           1
-rw-r--r--  include/linux/ieee80211.h                    27
-rw-r--r--  include/linux/if_bridge.h                    18
-rw-r--r--  include/linux/if_vlan.h                      14
-rw-r--r--  include/linux/ipv6.h                         13
-rw-r--r--  include/linux/list_nulls.h                    6
-rw-r--r--  include/linux/mlx4/cmd.h                     16
-rw-r--r--  include/linux/mlx4/device.h                  59
-rw-r--r--  include/linux/mlx4/driver.h                  19
-rw-r--r--  include/linux/mlx4/qp.h                       1
-rw-r--r--  include/linux/mmc/sdio_ids.h                  6
-rw-r--r--  include/linux/netdev_features.h               6
-rw-r--r--  include/linux/netdevice.h                    53
-rw-r--r--  include/linux/phy.h                          12
-rw-r--r--  include/linux/platform_data/irda-sa11x0.h    20
-rw-r--r--  include/linux/platform_data/st21nfca.h        2
-rw-r--r--  include/linux/platform_data/st21nfcb.h        4
-rw-r--r--  include/linux/rhashtable.h                  308
-rw-r--r--  include/linux/skbuff.h                       42
-rw-r--r--  include/linux/socket.h                        7
-rw-r--r--  include/linux/spinlock.h                      8
-rw-r--r--  include/linux/spinlock_api_smp.h              2
-rw-r--r--  include/linux/spinlock_api_up.h               1
-rw-r--r--  include/linux/timecounter.h                 139
-rw-r--r--  include/linux/types.h                         3
-rw-r--r--  include/linux/udp.h                          16
-rw-r--r--  include/linux/uio.h                           6
-rw-r--r--  include/linux/vmw_vmci_api.h                  2
31 files changed, 650 insertions(+), 269 deletions(-)
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index f24d245f8394..1b5fc0c3b1b5 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -5,8 +5,6 @@
struct bcma_soc {
struct bcma_bus bus;
- struct bcma_device core_cc;
- struct bcma_device core_mips;
};
int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index abcafaa20b86..9c78d15d33e4 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -18,8 +18,6 @@
#include <asm/div64.h>
#include <asm/io.h>
-/* clocksource cycle base type */
-typedef u64 cycle_t;
struct clocksource;
struct module;
@@ -28,106 +26,6 @@ struct module;
#endif
/**
- * struct cyclecounter - hardware abstraction for a free running counter
- * Provides completely state-free accessors to the underlying hardware.
- * Depending on which hardware it reads, the cycle counter may wrap
- * around quickly. Locking rules (if necessary) have to be defined
- * by the implementor and user of specific instances of this API.
- *
- * @read: returns the current cycle value
- * @mask: bitmask for two's complement
- * subtraction of non 64 bit counters,
- * see CLOCKSOURCE_MASK() helper macro
- * @mult: cycle to nanosecond multiplier
- * @shift: cycle to nanosecond divisor (power of two)
- */
-struct cyclecounter {
- cycle_t (*read)(const struct cyclecounter *cc);
- cycle_t mask;
- u32 mult;
- u32 shift;
-};
-
-/**
- * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
- * Contains the state needed by timecounter_read() to detect
- * cycle counter wrap around. Initialize with
- * timecounter_init(). Also used to convert cycle counts into the
- * corresponding nanosecond counts with timecounter_cyc2time(). Users
- * of this code are responsible for initializing the underlying
- * cycle counter hardware, locking issues and reading the time
- * more often than the cycle counter wraps around. The nanosecond
- * counter will only wrap around after ~585 years.
- *
- * @cc: the cycle counter used by this instance
- * @cycle_last: most recent cycle counter value seen by
- * timecounter_read()
- * @nsec: continuously increasing count
- */
-struct timecounter {
- const struct cyclecounter *cc;
- cycle_t cycle_last;
- u64 nsec;
-};
-
-/**
- * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @cc: Pointer to cycle counter.
- * @cycles: Cycles
- *
- * XXX - This could use some mult_lxl_ll() asm optimization. Same code
- * as in cyc2ns, but with unsigned result.
- */
-static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
- cycle_t cycles)
-{
- u64 ret = (u64)cycles;
- ret = (ret * cc->mult) >> cc->shift;
- return ret;
-}
-
-/**
- * timecounter_init - initialize a time counter
- * @tc: Pointer to time counter which is to be initialized/reset
- * @cc: A cycle counter, ready to be used.
- * @start_tstamp: Arbitrary initial time stamp.
- *
- * After this call the current cycle register (roughly) corresponds to
- * the initial time stamp. Every call to timecounter_read() increments
- * the time stamp counter by the number of elapsed nanoseconds.
- */
-extern void timecounter_init(struct timecounter *tc,
- const struct cyclecounter *cc,
- u64 start_tstamp);
-
-/**
- * timecounter_read - return nanoseconds elapsed since timecounter_init()
- * plus the initial time stamp
- * @tc: Pointer to time counter.
- *
- * In other words, keeps track of time since the same epoch as
- * the function which generated the initial time stamp.
- */
-extern u64 timecounter_read(struct timecounter *tc);
-
-/**
- * timecounter_cyc2time - convert a cycle counter to same
- * time base as values returned by
- * timecounter_read()
- * @tc: Pointer to time counter.
- * @cycle_tstamp: a value returned by tc->cc->read()
- *
- * Cycle counts that are converted correctly as long as they
- * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
- * with "max cycle count" == cs->mask+1.
- *
- * This allows conversion of cycle counter values which were generated
- * in the past.
- */
-extern u64 timecounter_cyc2time(struct timecounter *tc,
- cycle_t cycle_tstamp);
-
-/**
* struct clocksource - hardware abstraction for a free running counter
* Provides mostly state-free accessors to the underlying hardware.
* This is the structure used for system time.
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 41c891d05f04..1d869d185a0d 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -52,6 +52,10 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
+struct sk_buff **eth_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb);
+int eth_gro_complete(struct sk_buff *skb, int nhoff);
+
/* Reserved Ethernet Addresses per IEEE 802.1Q */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
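The newly exported eth_gro_receive()/eth_gro_complete() pair lets other GRO paths reuse the generic Ethernet offload callbacks for encapsulations carrying inner Ethernet frames. A minimal sketch of such a consumer; the ETH_P_TEB packet_offload registration shown here is an illustrative assumption, not part of this hunk:

/* Sketch: delegate inner-Ethernet GRO to the generic helpers. */
static struct packet_offload teb_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_TEB),	/* trans-Ether-bridging payload */
	.callbacks = {
		.gro_receive  = eth_gro_receive,  /* parse inner MAC header */
		.gro_complete = eth_gro_complete, /* fix up merged skb */
	},
};

static int __init teb_offload_init(void)
{
	dev_add_offload(&teb_offload);	/* hook into the GRO dispatch table */
	return 0;
}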
diff --git a/include/linux/fec.h b/include/linux/fec.h
index bcff455d1d53..1454a503622d 100644
--- a/include/linux/fec.h
+++ b/include/linux/fec.h
@@ -19,6 +19,7 @@
struct fec_platform_data {
phy_interface_t phy;
unsigned char mac[ETH_ALEN];
+ void (*sleep_mode_enable)(int enabled);
};
#endif
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 4f4eea8a6288..b9c7897dc566 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1017,6 +1017,15 @@ struct ieee80211_mmie {
u8 mic[8];
} __packed;
+/* Management MIC information element (IEEE 802.11w) for GMAC and CMAC-256 */
+struct ieee80211_mmie_16 {
+ u8 element_id;
+ u8 length;
+ __le16 key_id;
+ u8 sequence_number[6];
+ u8 mic[16];
+} __packed;
+
struct ieee80211_vendor_ie {
u8 element_id;
u8 len;
@@ -1994,9 +2003,15 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_WEP40 = 5,
WLAN_KEY_LEN_WEP104 = 13,
WLAN_KEY_LEN_CCMP = 16,
+ WLAN_KEY_LEN_CCMP_256 = 32,
WLAN_KEY_LEN_TKIP = 32,
WLAN_KEY_LEN_AES_CMAC = 16,
WLAN_KEY_LEN_SMS4 = 32,
+ WLAN_KEY_LEN_GCMP = 16,
+ WLAN_KEY_LEN_GCMP_256 = 32,
+ WLAN_KEY_LEN_BIP_CMAC_256 = 32,
+ WLAN_KEY_LEN_BIP_GMAC_128 = 16,
+ WLAN_KEY_LEN_BIP_GMAC_256 = 32,
};
#define IEEE80211_WEP_IV_LEN 4
@@ -2004,9 +2019,16 @@ enum ieee80211_key_len {
#define IEEE80211_CCMP_HDR_LEN 8
#define IEEE80211_CCMP_MIC_LEN 8
#define IEEE80211_CCMP_PN_LEN 6
+#define IEEE80211_CCMP_256_HDR_LEN 8
+#define IEEE80211_CCMP_256_MIC_LEN 16
+#define IEEE80211_CCMP_256_PN_LEN 6
#define IEEE80211_TKIP_IV_LEN 8
#define IEEE80211_TKIP_ICV_LEN 4
#define IEEE80211_CMAC_PN_LEN 6
+#define IEEE80211_GMAC_PN_LEN 6
+#define IEEE80211_GCMP_HDR_LEN 8
+#define IEEE80211_GCMP_MIC_LEN 16
+#define IEEE80211_GCMP_PN_LEN 6
/* Public action codes */
enum ieee80211_pub_actioncode {
@@ -2230,6 +2252,11 @@ enum ieee80211_sa_query_action {
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08
+#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09
+#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
#define WLAN_CIPHER_SUITE_SMS4 0x00147201
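Given the new suite selectors and key lengths, a hypothetical helper (not part of this patch) mapping one to the other:

/* Hypothetical helper: expected key length for the new cipher suites. */
static int wlan_cipher_key_len(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_GCMP:
		return WLAN_KEY_LEN_GCMP;		/* 16 octets */
	case WLAN_CIPHER_SUITE_GCMP_256:
		return WLAN_KEY_LEN_GCMP_256;		/* 32 octets */
	case WLAN_CIPHER_SUITE_CCMP_256:
		return WLAN_KEY_LEN_CCMP_256;		/* 32 octets */
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		return WLAN_KEY_LEN_BIP_GMAC_128;	/* 16 octets */
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		return WLAN_KEY_LEN_BIP_GMAC_256;	/* 32 octets */
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		return WLAN_KEY_LEN_BIP_CMAC_256;	/* 32 octets */
	default:
		return -EINVAL;
	}
}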
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 0a8ce762a47f..a57bca2ea97e 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -50,24 +50,6 @@ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __use
typedef int br_should_route_hook_t(struct sk_buff *skb);
extern br_should_route_hook_t __rcu *br_should_route_hook;
-#if IS_ENABLED(CONFIG_BRIDGE)
-int br_fdb_external_learn_add(struct net_device *dev,
- const unsigned char *addr, u16 vid);
-int br_fdb_external_learn_del(struct net_device *dev,
- const unsigned char *addr, u16 vid);
-#else
-static inline int br_fdb_external_learn_add(struct net_device *dev,
- const unsigned char *addr, u16 vid)
-{
- return 0;
-}
-static inline int br_fdb_external_learn_del(struct net_device *dev,
- const unsigned char *addr, u16 vid)
-{
- return 0;
-}
-#endif
-
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 960e666c51e4..b11b28a30b9e 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -78,9 +78,9 @@ static inline bool is_vlan_dev(struct net_device *dev)
return dev->priv_flags & IFF_802_1Q_VLAN;
}
-#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
-#define vlan_tx_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
+#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -376,7 +376,7 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
+ skb_vlan_tag_get(skb));
if (likely(skb))
skb->vlan_tci = 0;
return skb;
@@ -393,7 +393,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
*/
static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
{
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
skb = __vlan_hwaccel_push_inside(skb);
return skb;
}
@@ -442,8 +442,8 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
{
- if (vlan_tx_tag_present(skb)) {
- *vlan_tci = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ *vlan_tci = skb_vlan_tag_get(skb);
return 0;
} else {
*vlan_tci = 0;
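The vlan_tx_tag_*() helpers are renamed to skb_vlan_tag_*() with unchanged semantics; the new names reflect that they query the skb, not a transmit path. A sketch of a driver TX routine after the rename (foo_start_xmit() is illustrative):

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 vlan_tci = 0;

	if (skb_vlan_tag_present(skb))		/* was vlan_tx_tag_present() */
		vlan_tci = skb_vlan_tag_get(skb); /* was vlan_tx_tag_get() */

	/* ... program vlan_tci into the TX descriptor ... */
	return NETDEV_TX_OK;
}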
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index c694e7baa621..4d5169f5d7d1 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -52,6 +52,7 @@ struct ipv6_devconf {
__s32 force_tllao;
__s32 ndisc_notify;
__s32 suppress_frag_ndisc;
+ __s32 accept_ra_mtu;
void *sysctl;
};
@@ -124,6 +125,12 @@ struct ipv6_mc_socklist;
struct ipv6_ac_socklist;
struct ipv6_fl_socklist;
+struct inet6_cork {
+ struct ipv6_txoptions *opt;
+ u8 hop_limit;
+ u8 tclass;
+};
+
/**
* struct ipv6_pinfo - ipv6 private area
*
@@ -216,11 +223,7 @@ struct ipv6_pinfo {
struct ipv6_txoptions *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
- struct {
- struct ipv6_txoptions *opt;
- u8 hop_limit;
- u8 tclass;
- } cork;
+ struct inet6_cork cork;
};
/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index 5d10ae364b5e..f266661d2666 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -1,6 +1,9 @@
#ifndef _LINUX_LIST_NULLS_H
#define _LINUX_LIST_NULLS_H
+#include <linux/poison.h>
+#include <linux/const.h>
+
/*
* Special version of lists, where end of list is not a NULL pointer,
* but a 'nulls' marker, which can have many different values.
@@ -21,8 +24,9 @@ struct hlist_nulls_head {
struct hlist_nulls_node {
struct hlist_nulls_node *next, **pprev;
};
+#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
- ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1)))
+ ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
/**
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 64d25941b329..7b6d4e9ff603 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -71,6 +71,7 @@ enum {
/*master notify fw on finish for slave's flr*/
MLX4_CMD_INFORM_FLR_DONE = 0x5b,
+ MLX4_CMD_VIRT_PORT_MAP = 0x5c,
MLX4_CMD_GET_OP_REQ = 0x59,
/* TPT commands */
@@ -165,9 +166,15 @@ enum {
};
enum {
- MLX4_CMD_TIME_CLASS_A = 10000,
- MLX4_CMD_TIME_CLASS_B = 10000,
- MLX4_CMD_TIME_CLASS_C = 10000,
+ MLX4_CMD_TIME_CLASS_A = 60000,
+ MLX4_CMD_TIME_CLASS_B = 60000,
+ MLX4_CMD_TIME_CLASS_C = 60000,
+};
+
+enum {
+ /* virtual to physical port mapping opcode modifiers */
+ MLX4_GET_PORT_VIRT2PHY = 0x0,
+ MLX4_SET_PORT_VIRT2PHY = 0x1,
};
enum {
@@ -279,6 +286,8 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
struct mlx4_config_dev_params *params);
+void mlx4_cmd_wake_completions(struct mlx4_dev *dev);
+void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev);
/*
* mlx4_get_slave_default_vlan -
* return true if VST ( default vlan)
@@ -288,5 +297,6 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
u16 *vlan, u8 *qos);
#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
+#define COMM_CHAN_EVENT_INTERNAL_ERR (1 << 17)
#endif /* MLX4_CMD_H */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 5f3a9aa7225d..c116cb02475c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -42,7 +42,7 @@
#include <linux/atomic.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 64
@@ -70,6 +70,7 @@ enum {
MLX4_FLAG_SLAVE = 1 << 3,
MLX4_FLAG_SRIOV = 1 << 4,
MLX4_FLAG_OLD_REG_MAC = 1 << 6,
+ MLX4_FLAG_BONDED = 1 << 7
};
enum {
@@ -200,7 +201,9 @@ enum {
MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16,
MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17,
MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
- MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19
+ MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19,
+ MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
+ MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21
};
enum {
@@ -208,6 +211,10 @@ enum {
MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1
};
+enum {
+ MLX4_VF_CAP_FLAG_RESET = 1 << 0
+};
+
/* bit enums for an 8-bit flags field indicating special use
* QPs which require special handling in qp_reserve_range.
* Currently, this only includes QPs used by the ETH interface,
@@ -248,9 +255,14 @@ enum {
MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
+ MLX4_BMME_FLAG_PORT_REMAP = 1 << 24,
MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28,
};
+enum {
+ MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP
+};
+
enum mlx4_event {
MLX4_EVENT_TYPE_COMP = 0x00,
MLX4_EVENT_TYPE_PATH_MIG = 0x01,
@@ -276,6 +288,7 @@ enum mlx4_event {
MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
+ MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e,
MLX4_EVENT_TYPE_NONE = 0xff,
};
@@ -285,6 +298,11 @@ enum {
};
enum {
+ MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1,
+ MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2,
+};
+
+enum {
MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
};
@@ -411,6 +429,16 @@ enum {
MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4,
};
+enum {
+ MLX4_DEVICE_STATE_UP = 1 << 0,
+ MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1,
+};
+
+enum {
+ MLX4_INTERFACE_STATE_UP = 1 << 0,
+ MLX4_INTERFACE_STATE_DELETION = 1 << 1,
+};
+
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
@@ -535,6 +563,7 @@ struct mlx4_caps {
u8 alloc_res_qp_mask;
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
+ u32 vf_caps;
};
struct mlx4_buf_list {
@@ -744,8 +773,23 @@ struct mlx4_vf_dev {
u8 n_ports;
};
-struct mlx4_dev {
+struct mlx4_dev_persistent {
struct pci_dev *pdev;
+ struct mlx4_dev *dev;
+ int nvfs[MLX4_MAX_PORTS + 1];
+ int num_vfs;
+ enum mlx4_port_type curr_port_type[MLX4_MAX_PORTS + 1];
+ enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1];
+ struct work_struct catas_work;
+ struct workqueue_struct *catas_wq;
+ struct mutex device_state_mutex; /* protect HW state */
+ u8 state;
+ struct mutex interface_state_mutex; /* protect SW state */
+ u8 interface_state;
+};
+
+struct mlx4_dev {
+ struct mlx4_dev_persistent *persist;
unsigned long flags;
unsigned long num_slaves;
struct mlx4_caps caps;
@@ -754,13 +798,11 @@ struct mlx4_dev {
struct radix_tree_root qp_table_tree;
u8 rev_id;
char board_id[MLX4_BOARD_ID_LEN];
- int num_vfs;
int numa_node;
int oper_log_mgm_entry_size;
u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
struct mlx4_vf_dev *dev_vfs;
- int nvfs[MLX4_MAX_PORTS + 1];
};
struct mlx4_eqe {
@@ -832,6 +874,11 @@ struct mlx4_eqe {
} __packed tbl_change_info;
} params;
} __packed port_mgmt_change;
+ struct {
+ u8 reserved[3];
+ u8 port;
+ u32 reserved1[5];
+ } __packed bad_cable;
} event;
u8 slave_id;
u8 reserved3[2];
@@ -1338,6 +1385,8 @@ int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
+int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
+int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 022055c8fb26..9553a73d2049 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -49,6 +49,10 @@ enum mlx4_dev_event {
MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
};
+enum {
+ MLX4_INTFF_BONDING = 1 << 0
+};
+
struct mlx4_interface {
void * (*add) (struct mlx4_dev *dev);
void (*remove)(struct mlx4_dev *dev, void *context);
@@ -57,11 +61,26 @@ struct mlx4_interface {
void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
struct list_head list;
enum mlx4_protocol protocol;
+ int flags;
};
int mlx4_register_interface(struct mlx4_interface *intf);
void mlx4_unregister_interface(struct mlx4_interface *intf);
+int mlx4_bond(struct mlx4_dev *dev);
+int mlx4_unbond(struct mlx4_dev *dev);
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+ return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+struct mlx4_port_map {
+ u8 port1;
+ u8 port2;
+};
+
+int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
static inline u64 mlx4_mac_to_u64(u8 *addr)
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 467ccdf94c98..2bbc62aa818a 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -96,6 +96,7 @@ enum {
MLX4_QP_BIT_RRE = 1 << 15,
MLX4_QP_BIT_RWE = 1 << 14,
MLX4_QP_BIT_RAE = 1 << 13,
+ MLX4_QP_BIT_FPP = 1 << 3,
MLX4_QP_BIT_RIC = 1 << 4,
};
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 0f01fe065424..996807963716 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -24,13 +24,15 @@
* Vendors and devices. Sort key: vendor first, device next.
*/
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
-#define SDIO_DEVICE_ID_BROADCOM_43143 43143
+#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887
#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
+#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
-#define SDIO_DEVICE_ID_BROADCOM_43362 43362
+#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_VENDOR_ID_INTEL 0x0089
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 8e30685affeb..7d59dc6ab789 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -66,6 +66,7 @@ enum {
NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
NETIF_F_BUSY_POLL_BIT, /* Busy poll */
+ NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */
/*
* Add your fresh new feature above and remember to update
@@ -124,6 +125,7 @@ enum {
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
+#define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD)
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
@@ -159,7 +161,9 @@ enum {
*/
#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
NETIF_F_SG | NETIF_F_HIGHDMA | \
- NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
+ NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \
+ NETIF_F_HW_SWITCH_OFFLOAD)
+
/*
* If one device doesn't support one of these features, then disable it
* for all in netdev_increment_features.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 52fd8e8694cf..ce784d5018e0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -51,6 +51,7 @@
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
+#include <uapi/linux/if_bonding.h>
struct netpoll_info;
struct device;
@@ -1154,13 +1155,15 @@ struct net_device_ops {
int idx);
int (*ndo_bridge_setlink)(struct net_device *dev,
- struct nlmsghdr *nlh);
+ struct nlmsghdr *nlh,
+ u16 flags);
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask);
int (*ndo_bridge_dellink)(struct net_device *dev,
- struct nlmsghdr *nlh);
+ struct nlmsghdr *nlh,
+ u16 flags);
int (*ndo_change_carrier)(struct net_device *dev,
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
@@ -1514,6 +1517,8 @@ struct net_device {
struct list_head napi_list;
struct list_head unreg_list;
struct list_head close_list;
+ struct list_head ptype_all;
+ struct list_head ptype_specific;
struct {
struct list_head upper;
@@ -1969,7 +1974,7 @@ struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
- struct sk_buff *skb);
+ struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff);
};
@@ -1979,10 +1984,21 @@ struct packet_offload {
struct list_head list;
};
+struct udp_offload;
+
+struct udp_offload_callbacks {
+ struct sk_buff **(*gro_receive)(struct sk_buff **head,
+ struct sk_buff *skb,
+ struct udp_offload *uoff);
+ int (*gro_complete)(struct sk_buff *skb,
+ int nhoff,
+ struct udp_offload *uoff);
+};
+
struct udp_offload {
__be16 port;
u8 ipproto;
- struct offload_callbacks callbacks;
+ struct udp_offload_callbacks callbacks;
};
/* often modified stats are per cpu, other are shared (netdev->stats) */
@@ -2041,6 +2057,7 @@ struct pcpu_sw_netstats {
#define NETDEV_RESEND_IGMP 0x0016
#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
#define NETDEV_CHANGEINFODATA 0x0018
+#define NETDEV_BONDING_INFO 0x0019
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2303,6 +2320,21 @@ do { \
compute_pseudo(skb, proto)); \
} while (0)
+static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ int start, int offset)
+{
+ __wsum delta;
+
+ BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
+
+ delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ skb->csum = csum_add(skb->csum, delta);
+ NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+}
+
+
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
@@ -3464,6 +3496,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
netdev_features_t features);
+struct netdev_bonding_info {
+ ifslave slave;
+ ifbond master;
+};
+
+struct netdev_notifier_bonding_info {
+ struct netdev_notifier_info info; /* must be first */
+ struct netdev_bonding_info bonding_info;
+};
+
+void netdev_bonding_info_change(struct net_device *dev,
+ struct netdev_bonding_info *bonding_info);
+
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
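The UDP GRO callbacks now take a third struct udp_offload argument, so one handler can be registered for several ports and recover per-port state. A minimal sketch under the new signature; the handler bodies, port number, and the udp_add_offload() registration are illustrative assumptions, not shown by this hunk:

static struct sk_buff **foo_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	/* uoff identifies which registered port matched; a trivial
	 * handler that refuses to aggregate anything:
	 */
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

static int foo_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	return 0;	/* nothing to fix up in this sketch */
}

static struct udp_offload foo_udp_offload = {
	.port    = cpu_to_be16(4789),	/* illustrative port */
	.ipproto = IPPROTO_UDP,
	.callbacks = {
		.gro_receive  = foo_gro_receive,
		.gro_complete = foo_gro_complete,
	},
};

/* registration, assumed entry point: udp_add_offload(&foo_udp_offload); */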
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 22af8f8f5802..685809835b5c 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -327,6 +327,8 @@ struct phy_c45_device_ids {
* c45_ids: 802.3-c45 Device Identifers if is_c45.
* is_c45: Set to true if this phy uses clause 45 addressing.
* is_internal: Set to true if this phy is internal to a MAC.
+ * has_fixups: Set to true if this phy has fixups/quirks.
+ * suspended: Set to true if this phy has been suspended successfully.
* state: state of the PHY for management purposes
* dev_flags: Device-specific flags used by the PHY driver.
* addr: Bus address of PHY
@@ -364,6 +366,7 @@ struct phy_device {
bool is_c45;
bool is_internal;
bool has_fixups;
+ bool suspended;
enum phy_state state;
@@ -565,6 +568,15 @@ struct phy_driver {
void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
int devnum, int regnum, u32 val);
+ /* Get the size and type of the eeprom contained within a plug-in
+ * module */
+ int (*module_info)(struct phy_device *dev,
+ struct ethtool_modinfo *modinfo);
+
+ /* Get the eeprom information from the plug-in module */
+ int (*module_eeprom)(struct phy_device *dev,
+ struct ethtool_eeprom *ee, u8 *data);
+
struct device_driver driver;
};
#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
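The two new phy_driver hooks route ethtool's module EEPROM requests to a PHY that manages its own plug-in module. A sketch of a driver wiring them up; foo_read_module_eeprom() is an assumed device accessor:

static int foo_module_info(struct phy_device *dev,
			   struct ethtool_modinfo *modinfo)
{
	modinfo->type = ETH_MODULE_SFF_8079;		/* SFP ID EEPROM */
	modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;	/* 256 bytes */
	return 0;
}

static int foo_module_eeprom(struct phy_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	return foo_read_module_eeprom(dev, ee->offset, ee->len, data);
}

static struct phy_driver foo_phy_driver = {
	/* ... the usual probe/config callbacks ... */
	.module_info   = foo_module_info,
	.module_eeprom = foo_module_eeprom,
};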
diff --git a/include/linux/platform_data/irda-sa11x0.h b/include/linux/platform_data/irda-sa11x0.h
new file mode 100644
index 000000000000..38f77b5e56cf
--- /dev/null
+++ b/include/linux/platform_data/irda-sa11x0.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/include/asm/mach/irda.h
+ *
+ * Copyright (C) 2004 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_MACH_IRDA_H
+#define __ASM_ARM_MACH_IRDA_H
+
+struct irda_platform_data {
+ int (*startup)(struct device *);
+ void (*shutdown)(struct device *);
+ int (*set_power)(struct device *, unsigned int state);
+ void (*set_speed)(struct device *, unsigned int speed);
+};
+
+#endif
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
index 5087fff96d86..cc2bdafb0c69 100644
--- a/include/linux/platform_data/st21nfca.h
+++ b/include/linux/platform_data/st21nfca.h
@@ -26,6 +26,8 @@
struct st21nfca_nfc_platform_data {
unsigned int gpio_ena;
unsigned int irq_polarity;
+ bool is_ese_present;
+ bool is_uicc_present;
};
#endif /* _ST21NFCA_HCI_H_ */
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h
index c3b432f5b63e..b023373d9874 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st21nfcb.h
@@ -19,8 +19,6 @@
#ifndef _ST21NFCB_NCI_H_
#define _ST21NFCB_NCI_H_
-#include <linux/i2c.h>
-
#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
struct st21nfcb_nfc_platform_data {
@@ -28,4 +26,4 @@ struct st21nfcb_nfc_platform_data {
unsigned int irq_polarity;
};
-#endif /* _ST21NFCA_HCI_H_ */
+#endif /* _ST21NFCB_NCI_H_ */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index b93fd89b2e5e..58851275fed9 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -18,16 +18,45 @@
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H
-#include <linux/rculist.h>
+#include <linux/compiler.h>
+#include <linux/list_nulls.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+/*
+ * The end of the chain is marked with a special nulls marker which has
+ * the following format:
+ *
+ * +-------+-----------------------------------------------------+-+
+ * | Base | Hash |1|
+ * +-------+-----------------------------------------------------+-+
+ *
+ * Base (4 bits) : Reserved to distinguish between multiple tables.
+ * Specified via &struct rhashtable_params.nulls_base.
+ * Hash (27 bits): Full hash (unmasked) of first element added to bucket
+ * 1 (1 bit) : Nulls marker (always set)
+ *
+ * The remaining bits of the next pointer remain unused for now.
+ */
+#define RHT_BASE_BITS 4
+#define RHT_HASH_BITS 27
+#define RHT_BASE_SHIFT RHT_HASH_BITS
struct rhash_head {
struct rhash_head __rcu *next;
};
-#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL)
-
+/**
+ * struct bucket_table - Table of hash buckets
+ * @size: Number of hash buckets
+ * @locks_mask: Mask to apply before accessing locks[]
+ * @locks: Array of spinlocks protecting individual buckets
+ * @buckets: size * hash buckets
+ */
struct bucket_table {
size_t size;
+ unsigned int locks_mask;
+ spinlock_t *locks;
struct rhash_head __rcu *buckets[];
};
@@ -45,11 +74,16 @@ struct rhashtable;
* @hash_rnd: Seed to use while hashing
* @max_shift: Maximum number of shifts while expanding
* @min_shift: Minimum number of shifts while shrinking
+ * @nulls_base: Base value to generate nulls marker
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
* @hashfn: Function to hash key
* @obj_hashfn: Function to hash object
* @grow_decision: If defined, may return true if table should expand
* @shrink_decision: If defined, may return true if table should shrink
- * @mutex_is_held: Must return true if protecting mutex is held
+ *
+ * Note: when implementing the grow and shrink decision functions, min/max
+ * shift must be enforced; otherwise the resizing watermarks they set may
+ * be useless.
*/
struct rhashtable_params {
size_t nelem_hint;
@@ -59,36 +93,95 @@ struct rhashtable_params {
u32 hash_rnd;
size_t max_shift;
size_t min_shift;
+ u32 nulls_base;
+ size_t locks_mul;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
bool (*grow_decision)(const struct rhashtable *ht,
size_t new_size);
bool (*shrink_decision)(const struct rhashtable *ht,
size_t new_size);
-#ifdef CONFIG_PROVE_LOCKING
- int (*mutex_is_held)(void *parent);
- void *parent;
-#endif
};
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
+ * @future_tbl: Table under construction during expansion/shrinking
* @nelems: Number of elements in table
* @shift: Current size (1 << shift)
* @p: Configuration parameters
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @walkers: List of active walkers
+ * @being_destroyed: True if table is set up for destruction
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- size_t nelems;
- size_t shift;
+ struct bucket_table __rcu *future_tbl;
+ atomic_t nelems;
+ atomic_t shift;
struct rhashtable_params p;
+ struct work_struct run_work;
+ struct mutex mutex;
+ struct list_head walkers;
+ bool being_destroyed;
+};
+
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @resize: Resize event occured
+ */
+struct rhashtable_walker {
+ struct list_head list;
+ bool resize;
};
+/**
+ * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+ struct rhashtable *ht;
+ struct rhash_head *p;
+ struct rhashtable_walker *walker;
+ unsigned int slot;
+ unsigned int skip;
+};
+
+static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
+{
+ return NULLS_MARKER(ht->p.nulls_base + hash);
+}
+
+#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
+ ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+
+static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
+{
+ return ((unsigned long) ptr & 1);
+}
+
+static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
+{
+ return ((unsigned long) ptr) >> 1;
+}
+
#ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(const struct rhashtable *ht);
+int lockdep_rht_mutex_is_held(struct rhashtable *ht);
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
-static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
+{
+ return 1;
+}
+
+static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
+ u32 hash)
{
return 1;
}
@@ -96,13 +189,8 @@ static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
-u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
-u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);
-
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
-void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
- struct rhash_head __rcu **pprev);
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
@@ -110,11 +198,23 @@ bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
int rhashtable_expand(struct rhashtable *ht);
int rhashtable_shrink(struct rhashtable *ht);
-void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+void *rhashtable_lookup(struct rhashtable *ht, const void *key);
+void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
bool (*compare)(void *, void *), void *arg);
-void rhashtable_destroy(const struct rhashtable *ht);
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
+bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
+ struct rhash_head *obj,
+ bool (*compare)(void *, void *),
+ void *arg);
+
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
+void rhashtable_walk_exit(struct rhashtable_iter *iter);
+int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+
+void rhashtable_destroy(struct rhashtable *ht);
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -122,92 +222,146 @@ void rhashtable_destroy(const struct rhashtable *ht);
#define rht_dereference_rcu(p, ht) \
rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
-#define rht_entry(ptr, type, member) container_of(ptr, type, member)
-#define rht_entry_safe(ptr, type, member) \
-({ \
- typeof(ptr) __ptr = (ptr); \
- __ptr ? rht_entry(__ptr, type, member) : NULL; \
-})
+#define rht_dereference_bucket(p, tbl, hash) \
+ rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
+
+#define rht_dereference_bucket_rcu(p, tbl, hash) \
+ rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
-#define rht_next_entry_safe(pos, ht, member) \
-({ \
- pos ? rht_entry_safe(rht_dereference((pos)->member.next, ht), \
- typeof(*(pos)), member) : NULL; \
-})
+#define rht_entry(tpos, pos, member) \
+ ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
+
+/**
+ * rht_for_each_continue - continue iterating over hash chain
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ */
+#define rht_for_each_continue(pos, head, tbl, hash) \
+ for (pos = rht_dereference_bucket(head, tbl, hash); \
+ !rht_is_a_nulls(pos); \
+ pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
* rht_for_each - iterate over hash chain
- * @pos: &struct rhash_head to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
*/
-#define rht_for_each(pos, head, ht) \
- for (pos = rht_dereference(head, ht); \
- pos; \
- pos = rht_dereference((pos)->next, ht))
+#define rht_for_each(pos, tbl, hash) \
+ rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_continue - continue iterating over hash chain
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
+ */
+#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
+ for (pos = rht_dereference_bucket(head, tbl, hash); \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
* rht_for_each_entry - iterate over hash chain of given type
- * @pos: type * to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
- * @member: name of the rhash_head within the hashable struct.
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
*/
-#define rht_for_each_entry(pos, head, ht, member) \
- for (pos = rht_entry_safe(rht_dereference(head, ht), \
- typeof(*(pos)), member); \
- pos; \
- pos = rht_next_entry_safe(pos, ht, member))
+#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
+ rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
+ tbl, hash, member)
/**
* rht_for_each_entry_safe - safely iterate over hash chain of given type
- * @pos: type * to use as a loop cursor.
- * @n: type * to use for temporary next object storage
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
- * @member: name of the rhash_head within the hashable struct.
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @next: the &struct rhash_head to use as next in loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
*
* This hash chain list-traversal primitive allows for the looped code to
* remove the loop cursor from the list.
*/
-#define rht_for_each_entry_safe(pos, n, head, ht, member) \
- for (pos = rht_entry_safe(rht_dereference(head, ht), \
- typeof(*(pos)), member), \
- n = rht_next_entry_safe(pos, ht, member); \
- pos; \
- pos = n, \
- n = rht_next_entry_safe(pos, ht, member))
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
+ for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
+ next = !rht_is_a_nulls(pos) ? \
+ rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = next, \
+ next = !rht_is_a_nulls(pos) ? \
+ rht_dereference_bucket(pos->next, tbl, hash) : NULL)
+
+/**
+ * rht_for_each_rcu_continue - continue iterating over rcu hash chain
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
+ for (({barrier(); }), \
+ pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ !rht_is_a_nulls(pos); \
+ pos = rcu_dereference_raw(pos->next))
/**
* rht_for_each_rcu - iterate over rcu hash chain
- * @pos: &struct rhash_head to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu(pos, tbl, hash) \
+ rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
*
* This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu fkht mutation primitives such as rht_insert() as long as the
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_rcu(pos, head, ht) \
- for (pos = rht_dereference_rcu(head, ht); \
- pos; \
- pos = rht_dereference_rcu((pos)->next, ht))
+#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
+ for (({barrier(); }), \
+ pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
/**
* rht_for_each_entry_rcu - iterate over rcu hash chain of given type
- * @pos: type * to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @member: name of the rhash_head within the hashable struct.
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
*
* This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu fkht mutation primitives such as rht_insert() as long as the
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_entry_rcu(pos, head, member) \
- for (pos = rht_entry_safe(rcu_dereference_raw(head), \
- typeof(*(pos)), member); \
- pos; \
- pos = rht_entry_safe(rcu_dereference_raw((pos)->member.next), \
- typeof(*(pos)), member))
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
+ rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
+ tbl, hash, member)
#endif /* _LINUX_RHASHTABLE_H */
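The walker API added above supports iteration that tolerates concurrent resizes. A minimal dump loop over an illustrative object type; the -EAGAIN conventions follow the implementation (a resize forces a retry of the current walk step):

struct foo {
	struct rhash_head node;
	u32 key;
};

static void foo_dump(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct foo *obj;
	int err;

	if (rhashtable_walk_init(ht, &iter))
		return;

	err = rhashtable_walk_start(&iter);	/* takes rcu_read_lock() */
	if (err && err != -EAGAIN)
		goto out;

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized under us */
			break;
		}
		pr_info("key %u\n", obj->key);
	}

	rhashtable_walk_stop(&iter);		/* drops rcu_read_lock() */
out:
	rhashtable_walk_exit(&iter);
}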
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 85ab7d72b54c..111e665455c3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -626,8 +626,11 @@ struct sk_buff {
__u32 hash;
__be16 vlan_proto;
__u16 vlan_tci;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int napi_id;
+#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
+ union {
+ unsigned int napi_id;
+ unsigned int sender_cpu;
+ };
#endif
#ifdef CONFIG_NETWORK_SECMARK
__u32 secmark;
@@ -2484,19 +2487,18 @@ static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
}
static inline int skb_add_data(struct sk_buff *skb,
- char __user *from, int copy)
+ struct iov_iter *from, int copy)
{
const int off = skb->len;
if (skb->ip_summed == CHECKSUM_NONE) {
- int err = 0;
- __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
- copy, 0, &err);
- if (!err) {
+ __wsum csum = 0;
+ if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
+ &csum, from) == copy) {
skb->csum = csum_block_add(skb->csum, csum, off);
return 0;
}
- } else if (!copy_from_user(skb_put(skb, copy), from, copy))
+ } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
return 0;
__skb_trim(skb, off);
@@ -2693,8 +2695,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
- /* XXX: stripping const */
- return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len);
+ return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}
static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
@@ -3096,6 +3097,27 @@ do { \
compute_pseudo(skb, proto)); \
} while (0)
+/* Update skbuff and packet to reflect the remote checksum offload operation.
+ * When called, ptr indicates the starting point for skb->csum when
+ * ip_summed is CHECKSUM_COMPLETE. If we need to create the checksum
+ * complete value here, skb_postpull_rcsum() is done first so that
+ * skb->csum starts at ptr.
+ */
+static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
+ int start, int offset)
+{
+ __wsum delta;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
+ __skb_checksum_complete(skb);
+ skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
+ }
+
+ delta = remcsum_adjust(ptr, skb->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ skb->csum = csum_add(skb->csum, delta);
+}
+
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
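skb_remcsum_process() above centralizes the remote-checksum-offload fixup: it forces CHECKSUM_COMPLETE when needed, patches the packet, and folds the resulting delta into skb->csum. A sketch of a tunnel receive path invoking it; the option layout and names are illustrative assumptions:

/* opt points at the remote-checksum-offload option just pulled from
 * the encap header; start/offset locate the checksum data relative
 * to opt, per the helper's contract.
 */
static void foo_rx_remcsum(struct sk_buff *skb, void *opt,
			   int start, int offset)
{
	skb_remcsum_process(skb, opt, start, offset);
}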
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 6e49a14365dc..5c19cba34dce 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -318,13 +318,6 @@ struct ucred {
/* IPX options */
#define IPX_TYPE 1
-extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
- struct iovec *iov,
- int offset,
- unsigned int len, __wsum *csump);
-extern unsigned long iov_pages(const struct iovec *iov, int offset,
- unsigned long nr_segs);
-
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 262ba4ef9a8e..3e18379dfa6f 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -190,6 +190,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock_nested(lock, subclass)
+# define raw_spin_lock_bh_nested(lock, subclass) \
+ _raw_spin_lock_bh_nested(lock, subclass)
# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
@@ -205,6 +207,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
+# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -324,6 +327,11 @@ do { \
raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)
+#define spin_lock_bh_nested(lock, subclass) \
+do { \
+ raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
+} while (0)
+
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 42dfab89e740..5344268e6e62 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
__acquires(lock);
+void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
__acquires(lock);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d0d188861ad6..d3afef9d8dbe 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -57,6 +57,7 @@
#define _raw_spin_lock(lock) __LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
#define _raw_read_lock(lock) __LOCK(lock)
#define _raw_write_lock(lock) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
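spin_lock_bh_nested() completes the nested-lock family for BH-disabling locks, e.g. for holding two bucket locks of the same lockdep class while relinking entries during a table resize. A minimal sketch:

static void move_bucket(spinlock_t *old_lock, spinlock_t *new_lock)
{
	spin_lock_bh(old_lock);
	/* same lock class: tell lockdep this nesting is intentional */
	spin_lock_bh_nested(new_lock, SINGLE_DEPTH_NESTING);

	/* ... relink entries from the old bucket into the new one ... */

	spin_unlock_bh(new_lock);
	spin_unlock_bh(old_lock);
}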
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
new file mode 100644
index 000000000000..4382035a75bb
--- /dev/null
+++ b/include/linux/timecounter.h
@@ -0,0 +1,139 @@
+/*
+ * linux/include/linux/timecounter.h
+ *
+ * based on code that migrated away from
+ * linux/include/linux/clocksource.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _LINUX_TIMECOUNTER_H
+#define _LINUX_TIMECOUNTER_H
+
+#include <linux/types.h>
+
+/* simplify initialization of mask field */
+#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+
+/**
+ * struct cyclecounter - hardware abstraction for a free running counter
+ * Provides completely state-free accessors to the underlying hardware.
+ * Depending on which hardware it reads, the cycle counter may wrap
+ * around quickly. Locking rules (if necessary) have to be defined
+ * by the implementor and user of specific instances of this API.
+ *
+ * @read: returns the current cycle value
+ * @mask: bitmask for two's complement
+ * subtraction of non 64 bit counters,
+ * see CYCLECOUNTER_MASK() helper macro
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ */
+struct cyclecounter {
+ cycle_t (*read)(const struct cyclecounter *cc);
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
+};
+
+/**
+ * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
+ * Contains the state needed by timecounter_read() to detect
+ * cycle counter wrap around. Initialize with
+ * timecounter_init(). Also used to convert cycle counts into the
+ * corresponding nanosecond counts with timecounter_cyc2time(). Users
+ * of this code are responsible for initializing the underlying
+ * cycle counter hardware, locking issues and reading the time
+ * more often than the cycle counter wraps around. The nanosecond
+ * counter will only wrap around after ~585 years.
+ *
+ * @cc: the cycle counter used by this instance
+ * @cycle_last: most recent cycle counter value seen by
+ * timecounter_read()
+ * @nsec: continuously increasing count
+ * @mask: bit mask for maintaining the 'frac' field
+ * @frac: accumulated fractional nanoseconds
+ */
+struct timecounter {
+ const struct cyclecounter *cc;
+ cycle_t cycle_last;
+ u64 nsec;
+ u64 mask;
+ u64 frac;
+};
+
+/**
+ * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
+ * @cc: Pointer to cycle counter.
+ * @cycles: Cycles
+ * @mask: bit mask for maintaining the 'frac' field
+ * @frac: pointer to storage for the fractional nanoseconds.
+ */
+static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
+ cycle_t cycles, u64 mask, u64 *frac)
+{
+ u64 ns = (u64) cycles;
+
+ ns = (ns * cc->mult) + *frac;
+ *frac = ns & mask;
+ return ns >> cc->shift;
+}
+
+/**
+ * timecounter_adjtime - Shifts the time of the clock.
+ * @tc: The &struct timecounter to adjust
+ * @delta: Desired change in nanoseconds.
+ */
+static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+ tc->nsec += delta;
+}
+
+/**
+ * timecounter_init - initialize a time counter
+ * @tc: Pointer to time counter which is to be initialized/reset
+ * @cc: A cycle counter, ready to be used.
+ * @start_tstamp: Arbitrary initial time stamp.
+ *
+ * After this call the current cycle register (roughly) corresponds to
+ * the initial time stamp. Every call to timecounter_read() increments
+ * the time stamp counter by the number of elapsed nanoseconds.
+ */
+extern void timecounter_init(struct timecounter *tc,
+ const struct cyclecounter *cc,
+ u64 start_tstamp);
+
+/**
+ * timecounter_read - return nanoseconds elapsed since timecounter_init()
+ * plus the initial time stamp
+ * @tc: Pointer to time counter.
+ *
+ * In other words, keeps track of time since the same epoch as
+ * the function which generated the initial time stamp.
+ */
+extern u64 timecounter_read(struct timecounter *tc);
+
+/**
+ * timecounter_cyc2time - convert a cycle counter to same
+ * time base as values returned by
+ * timecounter_read()
+ * @tc: Pointer to time counter.
+ * @cycle_tstamp: a value returned by tc->cc->read()
+ *
+ * Cycle counts are converted correctly as long as they
+ * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
+ * with "max cycle count" == tc->cc->mask + 1.
+ *
+ * This allows conversion of cycle counter values which were generated
+ * in the past.
+ */
+extern u64 timecounter_cyc2time(struct timecounter *tc,
+ cycle_t cycle_tstamp);
+
+#endif
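A sketch of a driver wrapping a free-running hardware counter with the relocated API; foo_read_hw_counter(), the 48-bit width, and the 1 GHz rate are illustrative assumptions:

extern u64 foo_read_hw_counter(void);	/* assumed HW accessor */

static cycle_t foo_cc_read(const struct cyclecounter *cc)
{
	return foo_read_hw_counter();
}

static const struct cyclecounter foo_cc = {
	.read  = foo_cc_read,
	.mask  = CYCLECOUNTER_MASK(48),	/* counter width in bits */
	.mult  = 1 << 10,		/* with shift 10: 1 ns/cycle, */
	.shift = 10,			/* i.e. a 1 GHz counter */
};

static struct timecounter foo_tc;

static void foo_init_clock(u64 now_ns)
{
	/* epoch: current system time in nanoseconds */
	timecounter_init(&foo_tc, &foo_cc, now_ns);
}

static u64 foo_hw_timestamp_to_ns(cycle_t hw_tstamp)
{
	/* valid while hw_tstamp lies within +-1/2 counter period of
	 * the last timecounter_read()
	 */
	return timecounter_cyc2time(&foo_tc, hw_tstamp);
}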
diff --git a/include/linux/types.h b/include/linux/types.h
index a0bb7048687f..62323825cff9 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -213,5 +213,8 @@ struct callback_head {
};
#define rcu_head callback_head
+/* clocksource cycle base type */
+typedef u64 cycle_t;
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/udp.h b/include/linux/udp.h
index ee3277593222..247cfdcc4b08 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -49,11 +49,7 @@ struct udp_sock {
unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
- convert_csum:1;/* On receive, convert checksum
- * unnecessary to checksum complete
- * if possible.
- */
+ no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -102,16 +98,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
return udp_sk(sk)->no_check6_rx;
}
-static inline void udp_set_convert_csum(struct sock *sk, bool val)
-{
- udp_sk(sk)->convert_csum = val;
-}
-
-static inline bool udp_get_convert_csum(struct sock *sk)
-{
- return udp_sk(sk)->convert_csum;
-}
-
#define udp_portaddr_for_each_entry(__sk, node, list) \
hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 1c5e453f7ea9..3e0cb4ea3905 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -135,10 +135,4 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
- int offset, int len);
-int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
- int offset, int len);
-
#endif
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index 5691f752ce8f..63df3a2a8ce5 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -74,7 +74,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
int mode);
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
- void *iov, size_t iov_size, int mode);
+ struct msghdr *msg, size_t iov_size, int mode);
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
struct msghdr *msg, size_t iov_size, int mode);
ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,