author     David S. Miller <davem@davemloft.net>  2018-12-20 21:53:28 +0300
committer  David S. Miller <davem@davemloft.net>  2018-12-20 22:53:36 +0300
commit     2be09de7d6a06f58e768de1255a687c9aaa66606 (patch)
tree       298f9e04caf105873d987e807eccba27710a49cc /drivers/net/ethernet/intel
parent     44a7b3b6e3a458f9549c2cc28e74ecdc470e42f1 (diff)
parent     1d51b4b1d3f2db0d6d144175e31a84e472fbd99a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Lots of conflicts, but happily all cases of overlapping changes, parallel adds, things of that nature. Thanks to Stephen Rothwell, Saeed Mahameed, and others for their guidance in these resolutions.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c          14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c          43
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx_common.h    5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c            12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c        11
5 files changed, 32 insertions, 53 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index fbb21ac06c98..0b3bcb73d4bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1546,17 +1546,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
/* Copy the address first, so that we avoid a possible race with
- * .set_rx_mode(). If we copy after changing the address in the filter
- * list, we might open ourselves to a narrow race window where
- * .set_rx_mode could delete our dev_addr filter and prevent traffic
- * from passing.
+ * .set_rx_mode().
+ * - Remove old address from MAC filter
+ * - Copy new address
+ * - Add new address to MAC filter
*/
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
- i40e_add_mac_filter(vsi, addr->sa_data);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ i40e_add_mac_filter(vsi, netdev->dev_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
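For reference, the resulting ordering in i40e_set_mac(), condensed from the hunk above (a sketch, not part of the patch): the new address is copied only while the filter hash lock is held, between deleting the old filter and adding the new one.

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);       /* drop the filter for the old address */
	ether_addr_copy(netdev->dev_addr, addr->sa_data); /* publish the new address */
	i40e_add_mac_filter(vsi, netdev->dev_addr);       /* add a filter for the new address */
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

Since .set_rx_mode() takes the same lock, it can no longer observe netdev->dev_addr without a matching MAC filter in the hash.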
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a0b1575468fc..a7e14e98889f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1559,24 +1559,6 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
-{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
- if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
* i40e_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
@@ -1793,8 +1775,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* other fields within the skb.
**/
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb)
{
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
@@ -1802,6 +1783,8 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
+ u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
if (unlikely(tsynvalid))
i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
@@ -1812,6 +1795,13 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
skb_record_rx_queue(skb, rx_ring->queue_index);
+ if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+ u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(vlan_tag));
+ }
+
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
@@ -2350,8 +2340,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
unsigned int size;
- u16 vlan_tag;
- u8 rx_ptype;
u64 qword;
/* return some buffers to hardware, one at a time is too slow */
@@ -2444,18 +2432,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
-
/* populate checksum, VLAN, and protocol */
- i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
- le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- i40e_receive_skb(rx_ring, skb, vlan_tag);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
skb = NULL;
/* update budget accounting */
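Taken together, the i40e_txrx.c hunks move both the ptype lookup and the VLAN tag handling into i40e_process_skb_fields(), so the per-packet tail of the rx clean loop shrinks to the two calls sketched below (condensed from the hunk above; the zero-copy path in i40e_xsk.c further down ends up with the same shape):

	/* populate checksum, VLAN, and protocol, then hand the skb to GRO */
	i40e_process_skb_fields(rx_ring, rx_desc, skb);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);

Both callers previously duplicated the qword/rx_ptype extraction and the vlan_tag computation; centralizing them in i40e_process_skb_fields() keeps the descriptor parsing in one place and makes i40e_receive_skb() unnecessary.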
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 09809dffe399..8af0e99c6c0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -12,10 +12,7 @@ struct i40e_rx_buffer *i40e_clean_programming_status(
union i40e_rx_desc *rx_desc,
u64 qw);
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype);
-void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag);
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb);
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
void i40e_update_rx_stats(struct i40e_ring *rx_ring,
unsigned int total_rx_bytes,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 433c8e688c78..870cf654e436 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -634,8 +634,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
struct i40e_rx_buffer *bi;
union i40e_rx_desc *rx_desc;
unsigned int size;
- u16 vlan_tag;
- u8 rx_ptype;
u64 qword;
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
@@ -713,14 +711,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
total_rx_bytes += skb->len;
total_rx_packets++;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
- le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
- i40e_receive_skb(rx_ring, skb, vlan_tag);
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 5dacfc870259..345701af7749 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -700,7 +700,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
u8 num_tcs = adapter->hw_tcs;
u32 reg_val;
u32 queue;
- u32 word;
/* remove VLAN filters beloning to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
@@ -758,6 +757,14 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
}
}
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 word;
+
/* Clear VF's mailbox memory */
for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
@@ -831,6 +838,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* reset the filters for the device */
ixgbe_vf_reset_event(adapter, vf);
+ ixgbe_vf_clear_mbx(adapter, vf);
+
/* set vf mac address */
if (!is_zero_ether_addr(vf_mac))
ixgbe_set_vf_mac(adapter, vf, vf_mac);
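On the ixgbe side, clearing the VF mailbox memory is split out of ixgbe_vf_reset_event() into the new ixgbe_vf_clear_mbx(), which ixgbe_vf_reset_msg() now invokes as a separate step. A condensed view of the resulting order in ixgbe_vf_reset_msg(), taken from the hunk above (surrounding code elided):

	/* reset the filters for the device */
	ixgbe_vf_reset_event(adapter, vf);

	/* clear the VF's mailbox memory, now its own step */
	ixgbe_vf_clear_mbx(adapter, vf);

	/* set vf mac address */
	if (!is_zero_ether_addr(vf_mac))
		ixgbe_set_vf_mac(adapter, vf, vf_mac);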