Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c          |   2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c         |   6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c          |   2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h              |  10
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_alloc.h        |   3
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_common.c       |  45
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c         |  91
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_osdep.h        |   9
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_prototype.h    |   5
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_register.h     |   2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c         |  43
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.h         |   4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c     |   5
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile             |   1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h                |  51
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h     |   2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.c           |   5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c           |  50
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c         |  12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h         |   2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c       |  12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h       |   1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c        |   5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.h            |   9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c        |  10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c        |  84
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.h        |  14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c        | 311
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.h        | 105
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c           |  23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_gnss.c           |  72
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_gnss.h           |  10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc.c            |  54
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_irq.c            | 378
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_irq.h            |  25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c            |  12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.h            |  54
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c            | 337
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h            |   5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c           | 377
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h  | 197
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c            |  64
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h            |  16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c           |  17
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.h           |   5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c          |  11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c          |  60
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c         | 251
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h         |  13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c         |  34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.h         |   1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c           |  10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h           |   9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c         |  54
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h         |   8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c       |   9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vlan_mode.c      |   2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c            |   5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c          |   4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c        |   3
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c           |  15
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h                |  43
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c           | 163
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c            | 142
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c       |   2
65 files changed, 1633 insertions(+), 1753 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index bd7ef59b1f2e..771a3c909c45 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4198,7 +4198,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
/**
* e1000e_trigger_lsc - trigger an LSC interrupt
- * @adapter:
+ * @adapter: board private structure
*
* Fire a link status change interrupt to start the watchdog.
**/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b847bd105b16..29ad1797adce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1788,12 +1788,6 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
- netdev_info(netdev, "already using mac address %pM\n",
- addr->sa_data);
- return 0;
- }
-
if (test_bit(__I40E_DOWN, pf->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
return -EADDRNOTAVAIL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index cd7b52fb6b46..05ec1181471e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -582,7 +582,7 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
* @vsi: Current VSI
* @tx_ring: XDP Tx ring
*
- * Returns true if cleanup/tranmission is done.
+ * Returns true if cleanup/transmission is done.
**/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 9abaff1f2aff..f80f2735e688 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -523,9 +523,6 @@ void iavf_schedule_request_stats(struct iavf_adapter *adapter);
void iavf_reset(struct iavf_adapter *adapter);
void iavf_set_ethtool_ops(struct net_device *netdev);
void iavf_update_stats(struct iavf_adapter *adapter);
-void iavf_reset_interrupt_capability(struct iavf_adapter *adapter);
-int iavf_init_interrupt_scheme(struct iavf_adapter *adapter);
-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask);
void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
@@ -579,17 +576,10 @@ void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
-int iavf_replace_primary_mac(struct iavf_adapter *adapter,
- const u8 *new_mac);
-void
-iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
- netdev_features_t prev_features,
- netdev_features_t features);
void iavf_add_fdir_filter(struct iavf_adapter *adapter);
void iavf_del_fdir_filter(struct iavf_adapter *adapter);
void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
const u8 *macaddr);
-int iavf_lock_timeout(struct mutex *lock, unsigned int msecs);
#endif /* _IAVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_alloc.h b/drivers/net/ethernet/intel/iavf/iavf_alloc.h
index 2711573c14ec..162ea70685a6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_alloc.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_alloc.h
@@ -28,7 +28,6 @@ enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw,
struct iavf_dma_mem *mem);
enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
struct iavf_virt_mem *mem, u32 size);
-enum iavf_status iavf_free_virt_mem(struct iavf_hw *hw,
- struct iavf_virt_mem *mem);
+void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem);
#endif /* _IAVF_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index dd11dbbd5551..1afd761d8052 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -35,7 +35,6 @@ enum iavf_status iavf_set_mac_type(struct iavf_hw *hw)
status = IAVF_ERR_DEVICE_NOT_SUPPORTED;
}
- hw_dbg(hw, "found mac: %d, returns: %d\n", hw->mac.type, status);
return status;
}
@@ -398,23 +397,6 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
}
/**
- * iavf_aq_get_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- *
- * get the RSS lookup table, PF or VSI type
- **/
-enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
-{
- return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
- false);
-}
-
-/**
* iavf_aq_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: vsi fw index
@@ -473,19 +455,6 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
}
/**
- * iavf_aq_get_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- *
- **/
-enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,
- struct iavf_aqc_get_set_rss_key_data *key)
-{
- return iavf_aq_get_set_rss_key(hw, vsi_id, key, false);
-}
-
-/**
* iavf_aq_set_rss_key
* @hw: pointer to the hw struct
* @vsi_id: vsi fw index
@@ -828,17 +797,3 @@ void iavf_vf_parse_hw_config(struct iavf_hw *hw,
vsi_res++;
}
}
-
-/**
- * iavf_vf_reset
- * @hw: pointer to the hardware structure
- *
- * Send a VF_RESET message to the PF. Does not wait for response from PF
- * as none will be forthcoming. Immediately after calling this function,
- * the admin queue should be shut down and (optionally) reinitialized.
- **/
-enum iavf_status iavf_vf_reset(struct iavf_hw *hw)
-{
- return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
- 0, NULL, 0, NULL);
-}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 2de4baff4c20..a483eb185c99 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -192,12 +192,11 @@ enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
}
/**
- * iavf_free_dma_mem_d - OS specific memory free for shared code
+ * iavf_free_dma_mem - wrapper for DMA memory freeing
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
- struct iavf_dma_mem *mem)
+enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{
struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
@@ -209,13 +208,13 @@ enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
}
/**
- * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * iavf_allocate_virt_mem - virt memory alloc wrapper
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to fill out
* @size: size of memory requested
**/
-enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
- struct iavf_virt_mem *mem, u32 size)
+enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem, u32 size)
{
if (!mem)
return IAVF_ERR_PARAM;
@@ -230,20 +229,13 @@ enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
}
/**
- * iavf_free_virt_mem_d - OS specific memory free for shared code
+ * iavf_free_virt_mem - virt memory free wrapper
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
- struct iavf_virt_mem *mem)
+void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
{
- if (!mem)
- return IAVF_ERR_PARAM;
-
- /* it's ok to kfree a NULL pointer */
kfree(mem->va);
-
- return 0;
}
/**
@@ -253,7 +245,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
*
* Returns 0 on success, negative on failure
**/
-int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
+static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
unsigned int wait, delay = 10;
@@ -359,21 +351,18 @@ static void iavf_irq_disable(struct iavf_adapter *adapter)
}
/**
- * iavf_irq_enable_queues - Enable interrupt for specified queues
+ * iavf_irq_enable_queues - Enable interrupt for all queues
* @adapter: board private structure
- * @mask: bitmap of queues to enable
**/
-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
+static void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
int i;
for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & BIT(i - 1)) {
- wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
- IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
- IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
- }
+ wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
+ IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
}
}
@@ -387,7 +376,7 @@ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
struct iavf_hw *hw = &adapter->hw;
iavf_misc_irq_enable(adapter);
- iavf_irq_enable_queues(adapter, ~0);
+ iavf_irq_enable_queues(adapter);
if (flush)
iavf_flush(hw);
@@ -1006,44 +995,40 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
*
* Do not call this with mac_vlan_list_lock!
**/
-int iavf_replace_primary_mac(struct iavf_adapter *adapter,
- const u8 *new_mac)
+static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+ const u8 *new_mac)
{
struct iavf_hw *hw = &adapter->hw;
- struct iavf_mac_filter *f;
+ struct iavf_mac_filter *new_f;
+ struct iavf_mac_filter *old_f;
spin_lock_bh(&adapter->mac_vlan_list_lock);
- list_for_each_entry(f, &adapter->mac_filter_list, list) {
- f->is_primary = false;
+ new_f = iavf_add_filter(adapter, new_mac);
+ if (!new_f) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return -ENOMEM;
}
- f = iavf_find_filter(adapter, hw->mac.addr);
- if (f) {
- f->remove = true;
+ old_f = iavf_find_filter(adapter, hw->mac.addr);
+ if (old_f) {
+ old_f->is_primary = false;
+ old_f->remove = true;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
}
-
- f = iavf_add_filter(adapter, new_mac);
-
- if (f) {
- /* Always send the request to add if changing primary MAC
- * even if filter is already present on the list
- */
- f->is_primary = true;
- f->add = true;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
- ether_addr_copy(hw->mac.addr, new_mac);
- }
+ /* Always send the request to add if changing primary MAC,
+ * even if filter is already present on the list
+ */
+ new_f->is_primary = true;
+ new_f->add = true;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
+ ether_addr_copy(hw->mac.addr, new_mac);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* schedule the watchdog task to immediately process the request */
- if (f) {
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
- return 0;
- }
- return -ENOMEM;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ return 0;
}
/**
@@ -1866,7 +1851,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
* @adapter: board private structure
*
**/
-void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
+static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
if (!adapter->msix_entries)
return;
@@ -1881,7 +1866,7 @@ void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
* @adapter: board private structure to initialize
*
**/
-int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
+static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
int err;
@@ -2179,7 +2164,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
* the watchdog if any changes are requested to expedite the request via
* virtchnl.
**/
-void
+static void
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
netdev_features_t prev_features,
netdev_features_t features)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_osdep.h b/drivers/net/ethernet/intel/iavf/iavf_osdep.h
index a452ce90679a..77d33deaabb5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_osdep.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_osdep.h
@@ -13,12 +13,6 @@
/* get readq/writeq support for 32 bit kernels, use the low-first version */
#include <linux/io-64-nonatomic-lo-hi.h>
-/* File to be the magic between shared code and
- * actual OS primitives
- */
-
-#define hw_dbg(hw, S, A...) do {} while (0)
-
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define rd32(a, reg) readl((a)->hw_addr + (reg))
@@ -35,14 +29,11 @@ struct iavf_dma_mem {
#define iavf_allocate_dma_mem(h, m, unused, s, a) \
iavf_allocate_dma_mem_d(h, m, s, a)
-#define iavf_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m)
struct iavf_virt_mem {
void *va;
u32 size;
};
-#define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s)
-#define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m)
#define iavf_debug(h, m, s, ...) \
do { \
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
index edebfbbcffdc..940cb4203fbe 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -40,12 +40,8 @@ enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
-enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
-enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid,
- struct iavf_aqc_get_set_rss_key_data *key);
enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
struct iavf_aqc_get_set_rss_key_data *key);
@@ -60,7 +56,6 @@ static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
void iavf_vf_parse_hw_config(struct iavf_hw *hw,
struct virtchnl_vf_resource *msg);
-enum iavf_status iavf_vf_reset(struct iavf_hw *hw);
enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
enum virtchnl_ops v_opcode,
enum iavf_status v_retval,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h
index bf793332fc9d..a19e88898a0b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_register.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_register.h
@@ -40,7 +40,7 @@
#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
-#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */
#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index e989feda133c..8c5f6096b002 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -54,7 +54,7 @@ static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
* iavf_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
**/
-void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
+static void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
{
unsigned long bi_size;
u16 i;
@@ -110,7 +110,7 @@ void iavf_free_tx_resources(struct iavf_ring *tx_ring)
* Since there is no access to the ring head register
* in XL710, we need to use our local copies
**/
-u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
+static u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
u32 head, tail;
@@ -128,6 +128,24 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
}
/**
+ * iavf_force_wb - Issue SW Interrupt so HW does a wb
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ **/
+static void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
+{
+ u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+ IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+ IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
+ /* allow 00 to be written to the index */;
+
+ wr32(&vsi->back->hw,
+ IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
+ val);
+}
+
+/**
* iavf_detect_recover_hung - Function to detect and recover hung_queues
* @vsi: pointer to vsi struct with tx queues
*
@@ -352,25 +370,6 @@ static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
q_vector->arm_wb_state = true;
}
-/**
- * iavf_force_wb - Issue SW Interrupt so HW does a wb
- * @vsi: the VSI we care about
- * @q_vector: the vector on which to force writeback
- *
- **/
-void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
-{
- u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
- IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
- IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
- /* allow 00 to be written to the index */;
-
- wr32(&vsi->back->hw,
- IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
- val);
-}
-
static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
struct iavf_ring_container *rc)
{
@@ -687,7 +686,7 @@ err:
* iavf_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
**/
-void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
+static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
unsigned long bi_size;
u16 i;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 2624bf6d009e..7e6ee32d19b6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -442,15 +442,11 @@ static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-void iavf_clean_tx_ring(struct iavf_ring *tx_ring);
-void iavf_clean_rx_ring(struct iavf_ring *rx_ring);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
-void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector);
-u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 9afbbdac3590..7c0578b5457b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -2238,11 +2238,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_process_config(adapter);
adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
- /* Request VLAN offload settings */
- if (VLAN_V2_ALLOWED(adapter))
- iavf_set_vlan_offload_features(adapter, 0,
- netdev->features);
-
iavf_set_queue_vlan_tag_loc(adapter);
was_mac_changed = !ether_addr_equal(netdev->dev_addr,
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 5d89392f969b..817977e3039d 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -18,6 +18,7 @@ ice-y := ice_main.o \
ice_txrx_lib.o \
ice_txrx.o \
ice_fltr.o \
+ ice_irq.o \
ice_pf_vsi_vlan_ops.o \
ice_vsi_vlan_ops.o \
ice_vsi_vlan_lib.o \
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index aa32111afd6e..4ba3d99439a0 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -32,6 +32,7 @@
#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
+#include <linux/linkmode.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/auxiliary_bus.h>
@@ -74,6 +75,7 @@
#include "ice_lag.h"
#include "ice_vsi_vlan_ops.h"
#include "ice_gnss.h"
+#include "ice_irq.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -103,11 +105,6 @@
#define ICE_Q_WAIT_RETRY_LIMIT 10
#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS 256
-#define ICE_RES_VALID_BIT 0x8000
-#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
-#define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
-/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
-#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
@@ -245,12 +242,6 @@ struct ice_tc_cfg {
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
-struct ice_res_tracker {
- u16 num_entries;
- u16 end;
- u16 list[];
-};
-
struct ice_qs_cfg {
struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
unsigned long *pf_map;
@@ -348,7 +339,9 @@ struct ice_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;
u16 num_q_vectors;
- u16 base_vector; /* IRQ base for OS reserved vectors */
+ /* tell if only dynamic irq allocation is allowed */
+ bool irq_dyn_alloc;
+
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -479,6 +472,7 @@ struct ice_q_vector {
char name[ICE_INT_NAME_STR_LEN];
u16 total_events; /* net_dim(): number of interrupts processed */
+ struct msi_map irq;
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
@@ -514,6 +508,12 @@ enum ice_pf_flags {
ICE_PF_FLAGS_NBITS /* must be last */
};
+enum ice_misc_thread_tasks {
+ ICE_MISC_THREAD_EXTTS_EVENT,
+ ICE_MISC_THREAD_TX_TSTAMP,
+ ICE_MISC_THREAD_NBITS /* must be last */
+};
+
struct ice_switchdev_info {
struct ice_vsi *control_vsi;
struct ice_vsi *uplink_vsi;
@@ -539,7 +539,7 @@ struct ice_pf {
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
- struct ice_res_tracker *irq_tracker;
+ struct ice_irq_tracker irq_tracker;
/* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
* number of MSIX vectors needed for all SR-IOV VFs from the number of
* MSIX vectors allowed on this PF.
@@ -556,6 +556,7 @@ struct ice_pf {
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
+ DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */
unsigned long serv_tmr_period;
@@ -583,8 +584,7 @@ struct ice_pf {
u32 hw_csum_rx_error;
u32 oicr_err_reg;
- u16 oicr_idx; /* Other interrupt cause MSIX vector index */
- u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
+ struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
u16 num_lan_msix; /* Total MSIX vectors for base driver */
@@ -670,7 +670,7 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
struct ice_q_vector *q_vector)
{
u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
- ((struct ice_pf *)hw->back)->oicr_idx;
+ ((struct ice_pf *)hw->back)->oicr_irq.index;
int itr = ICE_ITR_NONE;
u32 val;
@@ -821,25 +821,6 @@ static inline bool ice_is_switchdev_running(struct ice_pf *pf)
return pf->switchdev.is_running;
}
-/**
- * ice_set_sriov_cap - enable SRIOV in PF flags
- * @pf: PF struct
- */
-static inline void ice_set_sriov_cap(struct ice_pf *pf)
-{
- if (pf->hw.func_caps.common_cap.sr_iov_1_1)
- set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
-}
-
-/**
- * ice_clear_sriov_cap - disable SRIOV in PF flags
- * @pf: PF struct
- */
-static inline void ice_clear_sriov_cap(struct ice_pf *pf)
-{
- clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
-}
-
#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
#define ICE_FD_STAT_PF_IDX(base_idx) \
((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 838d9b274d68..63d3e1dcbba5 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1087,7 +1087,7 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index fba178e07600..cca0e753f38f 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -596,7 +596,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
struct ice_pf *pf;
- int base_idx, i;
+ int i;
if (!vsi || vsi->type != ICE_VSI_PF)
return 0;
@@ -613,10 +613,9 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
if (unlikely(!netdev->rx_cpu_rmap))
return -EINVAL;
- base_idx = vsi->base_vector;
ice_for_each_q_vector(vsi, i)
if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
- pf->msix_entries[base_idx + i].vector)) {
+ vsi->q_vectors[i]->irq.virq)) {
ice_free_cpu_rx_rmap(vsi);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1911d644dfa8..4a12316f7b46 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -103,10 +103,10 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector;
+ int err;
/* allocate q_vector */
- q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
- GFP_KERNEL);
+ q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
@@ -118,9 +118,34 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->rx.itr_mode = ITR_DYNAMIC;
q_vector->tx.type = ICE_TX_CONTAINER;
q_vector->rx.type = ICE_RX_CONTAINER;
+ q_vector->irq.index = -ENOENT;
- if (vsi->type == ICE_VSI_VF)
+ if (vsi->type == ICE_VSI_VF) {
+ q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector);
goto out;
+ } else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);
+
+ if (ctrl_vsi) {
+ if (unlikely(!ctrl_vsi->q_vectors)) {
+ err = -ENOENT;
+ goto err_free_q_vector;
+ }
+
+ q_vector->irq = ctrl_vsi->q_vectors[0]->irq;
+ goto skip_alloc;
+ }
+ }
+
+ q_vector->irq = ice_alloc_irq(pf, vsi->irq_dyn_alloc);
+ if (q_vector->irq.index < 0) {
+ err = -ENOMEM;
+ goto err_free_q_vector;
+ }
+
+skip_alloc:
+ q_vector->reg_idx = q_vector->irq.index;
+
/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
@@ -137,6 +162,11 @@ out:
vsi->q_vectors[v_idx] = q_vector;
return 0;
+
+err_free_q_vector:
+ kfree(q_vector);
+
+ return err;
}
/**
@@ -168,7 +198,19 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
if (vsi->netdev)
netif_napi_del(&q_vector->napi);
- devm_kfree(dev, q_vector);
+ /* release MSIX interrupt if q_vector had interrupt allocated */
+ if (q_vector->irq.index < 0)
+ goto free_q_vector;
+
+ /* only free last VF ctrl vsi interrupt */
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf &&
+ ice_get_vf_ctrl_vsi(pf, vsi))
+ goto free_q_vector;
+
+ ice_free_irq(pf, q_vector->irq);
+
+free_q_vector:
+ kfree(q_vector);
vsi->q_vectors[v_idx] = NULL;
}
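
For context on the plumbing above: each q_vector now carries a struct msi_map (from <linux/msi_api.h>), which pairs the MSI-X table index with the Linux interrupt number, so callers no longer compute vectors from vsi->base_vector and pf->msix_entries. A minimal consumer-side sketch under those assumptions -- ice_alloc_irq()/ice_free_irq() are the tracker helpers added in ice_irq.c by this series, and the request_irq() pairing mirrors what ice_vsi_req_irq_msix() does elsewhere in the driver:

	struct msi_map map = ice_alloc_irq(pf, vsi->irq_dyn_alloc);

	if (map.index < 0)
		return -ENOMEM;		/* no MSI-X slot available */

	q_vector->irq = map;
	q_vector->reg_idx = map.index;	/* index programs HW int registers */

	/* map.virq is the OS vector: use it for request_irq()/rmap/affinity */
	err = request_irq(map.virq, ice_msix_clean_rings, 0,
			  q_vector->name, q_vector);
	...
	free_irq(map.virq, q_vector);
	ice_free_irq(pf, q_vector->irq);	/* return slot to the tracker */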
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 0157f6e98d3e..e16d4c83ed5f 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -814,8 +814,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), lst_itr);
}
}
- if (recps[i].root_buf)
- devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
+ devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
}
ice_rm_all_sw_replay_rule_info(hw);
devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
@@ -834,7 +833,7 @@ static int ice_get_fw_log_cfg(struct ice_hw *hw)
u16 size;
size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
- config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
+ config = kzalloc(size, GFP_KERNEL);
if (!config)
return -ENOMEM;
@@ -857,7 +856,7 @@ static int ice_get_fw_log_cfg(struct ice_hw *hw)
}
}
- devm_kfree(ice_hw_to_dev(hw), config);
+ kfree(config);
return status;
}
@@ -1011,8 +1010,7 @@ static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
}
out:
- if (data)
- devm_kfree(ice_hw_to_dev(hw), data);
+ devm_kfree(ice_hw_to_dev(hw), data);
return status;
}
@@ -5160,7 +5158,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
*/
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
- u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc = { 0 };
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 8ba5f935a092..81961a7d6598 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -229,7 +229,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
struct ice_sq_cd *cd);
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
- u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index d2faf1baad2f..e7d2474c431c 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -339,8 +339,7 @@ do { \
} \
} \
/* free the buffer info list */ \
- if ((qi)->ring.cmd_buf) \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
/* free DMA head */ \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
} while (0)
@@ -1056,14 +1055,19 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (cq->sq.next_to_use == cq->sq.count)
cq->sq.next_to_use = 0;
wr32(hw, cq->sq.tail, cq->sq.next_to_use);
+ ice_flush(hw);
+
+ /* Wait a short time before initial ice_sq_done() check, to allow
+ * hardware time for completion.
+ */
+ udelay(5);
timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
do {
if (ice_sq_done(hw, cq))
break;
- usleep_range(ICE_CTL_Q_SQ_CMD_USEC,
- ICE_CTL_Q_SQ_CMD_USEC * 3 / 2);
+ usleep_range(100, 150);
} while (time_before(jiffies, timeout));
/* if ready, copy the desc back to temp */
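
Put together, the send path after this change rings the doorbell, flushes the posted write, gives firmware a short head start, and only then begins a coarser sleepable poll. A condensed view of the resulting loop (the same calls as above, collapsed out of diff form):

	wr32(hw, cq->sq.tail, cq->sq.next_to_use);	/* ring doorbell */
	ice_flush(hw);			/* force the posted write out */
	udelay(5);			/* let HW start on the descriptor */

	timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
	do {
		if (ice_sq_done(hw, cq))	/* FW consumed the descriptor */
			break;
		usleep_range(100, 150);		/* coarse, sleepable poll */
	} while (time_before(jiffies, timeout));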
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 950b7f4a7a05..8f2fd1613a95 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -35,7 +35,6 @@ enum ice_ctl_q {
/* Control Queue timeout settings - max delay 1s */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT HZ /* Wait max 1s */
-#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index c6d4926f0fcf..850db8e0e6b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -932,10 +932,9 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
- first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
+ first->vid &= ~VLAN_PRIO_MASK;
/* Mask the lower 3 bits to set the 802.1p priority */
- first->tx_flags |= (skb->priority & 0x7) <<
- ICE_TX_FLAGS_VLAN_PR_S;
+ first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index 37eadb3d27a8..41acfe26df1c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -185,7 +185,7 @@ struct ice_buf_hdr {
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) \
((ICE_PKG_BUF_SIZE - \
- struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) / \
+ struct_size_t(struct ice_buf_hdr, section_entry, 1) - (hd_sz)) / \
(ent_sz))
/* ice package section IDs */
@@ -297,7 +297,7 @@ struct ice_label_section {
};
#define ICE_MAX_LABELS_IN_BUF \
- ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_label_section *)0, \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_label_section, \
label, 1) - \
sizeof(struct ice_label), \
sizeof(struct ice_label))
@@ -352,7 +352,7 @@ struct ice_boost_tcam_section {
};
#define ICE_MAX_BST_TCAMS_IN_BUF \
- ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_boost_tcam_section *)0, \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_boost_tcam_section, \
tcam, 1) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
@@ -372,8 +372,7 @@ struct ice_marker_ptype_tcam_section {
};
#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
- ICE_MAX_ENTRIES_IN_BUF( \
- struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_marker_ptype_tcam_section, tcam, \
1) - \
sizeof(struct ice_marker_ptype_tcam_entry), \
sizeof(struct ice_marker_ptype_tcam_entry))
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index bc44cc220818..80dc5445b50d 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -1256,8 +1256,6 @@ static const struct devlink_ops ice_devlink_ops = {
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
.reload_down = ice_devlink_reload_down,
.reload_up = ice_devlink_reload_up,
- .port_split = ice_devlink_port_split,
- .port_unsplit = ice_devlink_port_unsplit,
.eswitch_mode_get = ice_eswitch_mode_get,
.eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
@@ -1512,6 +1510,11 @@ ice_devlink_set_port_split_options(struct ice_pf *pf,
ice_active_port_option = active_idx;
}
+static const struct devlink_port_ops ice_devlink_port_ops = {
+ .port_split = ice_devlink_port_split,
+ .port_unsplit = ice_devlink_port_unsplit,
+};
+
/**
* ice_devlink_create_pf_port - Create a devlink port for this PF
* @pf: the PF to create a devlink port for
@@ -1551,7 +1554,8 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
- err = devlink_port_register(devlink, devlink_port, vsi->idx);
+ err = devlink_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_ops);
if (err) {
dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
pf->hw.pf_id, err);
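
The registration pairing shown above is the general shape for moving split callbacks off struct devlink_ops: define the per-port ops once and hand them to the devlink core at port registration. A sketch with placeholder handler names (only the devlink API calls are taken from the diff):

	static const struct devlink_port_ops my_port_ops = {
		.port_split   = my_port_split,		/* placeholder */
		.port_unsplit = my_port_unsplit,	/* placeholder */
	};

	err = devlink_port_register_with_ops(devlink, devlink_port,
					     vsi->idx, &my_port_ops);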
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index f6dd3f8fd936..ad0a007b7398 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -10,16 +10,15 @@
#include "ice_tc_lib.h"
/**
- * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC
+ * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index
* @pf: pointer to PF struct
* @vf: pointer to VF struct
- * @mac: VF's MAC address
*
* This function adds advanced rule that forwards packets with
- * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue.
+ * VF's VSI index to the corresponding switchdev ctrl VSI queue.
*/
-int
-ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
+static int
+ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
{
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
struct ice_adv_rule_info rule_info = { 0 };
@@ -32,76 +31,41 @@ ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
if (!list)
return -ENOMEM;
- list[0].type = ICE_MAC_OFOS;
- ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
- eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
+ ice_rule_add_src_vsi_metadata(list);
- rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.flag = ICE_FLTR_TX;
rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
- rule_info.rx = false;
rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
ctrl_vsi->rxq_map[vf->vf_id];
rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
rule_info.flags_info.act_valid = true;
rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
+ rule_info.src_vsi = vf->lan_vsi_idx;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
- vf->repr->mac_rule);
+ &vf->repr->sp_rule);
if (err)
- dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d",
+ dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d",
vf->vf_id);
- else
- vf->repr->rule_added = true;
kfree(list);
return err;
}
/**
- * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC
- * @vf: pointer to vF struct
- *
- * This function replays VF's MAC rule after reset.
- */
-void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
-{
- int err;
-
- if (!ice_is_switchdev_running(vf->pf))
- return;
-
- if (is_valid_ether_addr(vf->hw_lan_addr)) {
- err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
- vf->hw_lan_addr);
- if (err) {
- dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
- vf->hw_lan_addr, vf->vf_id, err);
- return;
- }
- vf->num_mac++;
-
- ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
- }
-}
-
-/**
- * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC
+ * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index
* @vf: pointer to the VF struct
*
- * Delete the advanced rule that was used to forward packets with the VF's MAC
- * address (src MAC) to the corresponding switchdev ctrl VSI queue.
+ * Delete the advanced rule that was used to forward packets with the VF's VSI
+ * index to the corresponding switchdev ctrl VSI queue.
*/
-void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
+static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
{
- if (!ice_is_switchdev_running(vf->pf))
- return;
-
- if (!vf->repr->rule_added)
+ if (!vf->repr)
return;
- ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
- vf->repr->rule_added = false;
+ ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule);
}
/**
@@ -237,6 +201,7 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
+ ice_eswitch_del_vf_sp_rule(vf);
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
@@ -264,25 +229,30 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
GFP_KERNEL);
if (!vf->repr->dst) {
- ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr,
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
+ ICE_FWD_TO_VSI);
+ goto err;
+ }
+
+ if (ice_eswitch_add_vf_sp_rule(pf, vf)) {
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
goto err;
}
if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
- ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr,
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
+ ice_eswitch_del_vf_sp_rule(vf);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
goto err;
}
if (ice_vsi_add_vlan_zero(vsi)) {
- ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr,
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
+ ice_eswitch_del_vf_sp_rule(vf);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 6a413331572b..b18bf83a2f5b 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -20,11 +20,6 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
void ice_eswitch_update_repr(struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
-int
-ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
- const u8 *mac);
-void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf);
-void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf);
void ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off);
@@ -34,15 +29,6 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
static inline void ice_eswitch_release(struct ice_pf *pf) { }
static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
-static inline void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) { }
-static inline void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) { }
-
-static inline int
-ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
- const u8 *mac)
-{
- return -EOPNOTSUPP;
-}
static inline void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index f86e814354a3..8d5cbbd0b3d5 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4,6 +4,7 @@
/* ethtool support for ice */
#include "ice.h"
+#include "ice_ethtool.h"
#include "ice_flow.h"
#include "ice_fltr.h"
#include "ice_lib.h"
@@ -956,7 +957,7 @@ static u64 ice_intr_test(struct net_device *netdev)
netdev_info(netdev, "interrupt test\n");
- wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_idx),
+ wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_irq.index),
GLINT_DYN_CTL_SW_ITR_INDX_M |
GLINT_DYN_CTL_INTENA_MSK_M |
GLINT_DYN_CTL_SWINT_TRIG_M);
@@ -1658,15 +1659,26 @@ ice_mask_min_supported_speeds(struct ice_hw *hw,
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
}
-#define ice_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \
- do { \
- if (req_speeds & (aq_link_speed) || \
- (!req_speeds && \
- (advert_phy_type_lo & phy_type_mask_lo || \
- advert_phy_type_hi & phy_type_mask_hi))) \
- ethtool_link_ksettings_add_link_mode(ks, advertising,\
- ethtool_link_mode); \
- } while (0)
+/**
+ * ice_linkmode_set_bit - set link mode bit
+ * @phy_to_ethtool: PHY type to ethtool link mode struct to set
+ * @ks: ethtool link ksettings struct to fill out
+ * @req_speeds: speed requested by user
+ * @advert_phy_type: advertised PHY type
+ * @phy_type: PHY type
+ */
+static void
+ice_linkmode_set_bit(const struct ice_phy_type_to_ethtool *phy_to_ethtool,
+ struct ethtool_link_ksettings *ks, u32 req_speeds,
+ u64 advert_phy_type, u32 phy_type)
+{
+ linkmode_set_bit(phy_to_ethtool->link_mode, ks->link_modes.supported);
+
+ if (req_speeds & phy_to_ethtool->aq_link_speed ||
+ (!req_speeds && advert_phy_type & BIT(phy_type)))
+ linkmode_set_bit(phy_to_ethtool->link_mode,
+ ks->link_modes.advertising);
+}
/**
* ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
@@ -1682,11 +1694,10 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
struct ice_pf *pf = vsi->back;
u64 advert_phy_type_lo = 0;
u64 advert_phy_type_hi = 0;
- u64 phy_type_mask_lo = 0;
- u64 phy_type_mask_hi = 0;
u64 phy_types_high = 0;
u64 phy_types_low = 0;
- u16 req_speeds;
+ u32 req_speeds;
+ u32 i;
req_speeds = vsi->port_info->phy.link_info.req_speeds;
@@ -1743,272 +1754,22 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
advert_phy_type_hi = vsi->port_info->phy.phy_type_high;
}
- ethtool_link_ksettings_zero_link_mode(ks, supported);
- ethtool_link_ksettings_zero_link_mode(ks, advertising);
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_100BASE_TX |
- ICE_PHY_TYPE_LOW_100M_SGMII;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100baseT_Full);
-
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100MB,
- 100baseT_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_T |
- ICE_PHY_TYPE_LOW_1G_SGMII;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseT_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
- 1000baseT_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_KX;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseKX_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
- 1000baseKX_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_SX |
- ICE_PHY_TYPE_LOW_1000BASE_LX;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseX_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
- 1000baseX_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_T;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 2500baseT_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB,
- 2500baseT_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_X |
- ICE_PHY_TYPE_LOW_2500BASE_KX;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 2500baseX_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB,
- 2500baseX_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_5GBASE_T |
- ICE_PHY_TYPE_LOW_5GBASE_KR;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 5000baseT_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_5GB,
- 5000baseT_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_T |
- ICE_PHY_TYPE_LOW_10G_SFI_DA |
- ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC |
- ICE_PHY_TYPE_LOW_10G_SFI_C2C;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseT_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
- 10000baseT_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_KR_CR1;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseKR_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
- 10000baseKR_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_SR;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseSR_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
- 10000baseSR_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_LR;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseLR_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
- 10000baseLR_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_T |
- ICE_PHY_TYPE_LOW_25GBASE_CR |
- ICE_PHY_TYPE_LOW_25GBASE_CR_S |
- ICE_PHY_TYPE_LOW_25GBASE_CR1 |
- ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC |
- ICE_PHY_TYPE_LOW_25G_AUI_C2C;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseCR_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
- 25000baseCR_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_SR |
- ICE_PHY_TYPE_LOW_25GBASE_LR;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseSR_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
- 25000baseSR_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_KR |
- ICE_PHY_TYPE_LOW_25GBASE_KR_S |
- ICE_PHY_TYPE_LOW_25GBASE_KR1;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseKR_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
- 25000baseKR_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_KR4;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseKR4_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
- 40000baseKR4_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_CR4 |
- ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC |
- ICE_PHY_TYPE_LOW_40G_XLAUI;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseCR4_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
- 40000baseCR4_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_SR4;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseSR4_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
- 40000baseSR4_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_LR4;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseLR4_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
- 40000baseLR4_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_CR2 |
- ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC |
- ICE_PHY_TYPE_LOW_50G_LAUI2 |
- ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC |
- ICE_PHY_TYPE_LOW_50G_AUI2 |
- ICE_PHY_TYPE_LOW_50GBASE_CP |
- ICE_PHY_TYPE_LOW_50GBASE_SR |
- ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC |
- ICE_PHY_TYPE_LOW_50G_AUI1;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseCR2_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
- 50000baseCR2_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_KR2 |
- ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseKR2_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
- 50000baseKR2_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_SR2 |
- ICE_PHY_TYPE_LOW_50GBASE_LR2 |
- ICE_PHY_TYPE_LOW_50GBASE_FR |
- ICE_PHY_TYPE_LOW_50GBASE_LR;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseSR2_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
- 50000baseSR2_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_CR4 |
- ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC |
- ICE_PHY_TYPE_LOW_100G_CAUI4 |
- ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC |
- ICE_PHY_TYPE_LOW_100G_AUI4 |
- ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4;
- phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |
- ICE_PHY_TYPE_HIGH_100G_CAUI2 |
- ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC |
- ICE_PHY_TYPE_HIGH_100G_AUI2;
- if (phy_types_low & phy_type_mask_lo ||
- phy_types_high & phy_type_mask_hi) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
- 100000baseCR4_Full);
- }
-
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR2_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
- 100000baseCR2_Full);
- }
-
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseSR4_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
- 100000baseSR4_Full);
- }
-
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseSR2_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
- 100000baseSR2_Full);
- }
-
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 |
- ICE_PHY_TYPE_LOW_100GBASE_DR;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseLR4_ER4_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
- 100000baseLR4_ER4_Full);
- }
+ linkmode_zero(ks->link_modes.supported);
+ linkmode_zero(ks->link_modes.advertising);
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 |
- ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4;
- if (phy_types_low & phy_type_mask_lo) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseKR4_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
- 100000baseKR4_Full);
+ for (i = 0; i < BITS_PER_TYPE(u64); i++) {
+ if (phy_types_low & BIT_ULL(i))
+ ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
+ req_speeds, advert_phy_type_lo,
+ i);
}
- if (phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseKR2_Full);
- ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
- 100000baseKR2_Full);
+ for (i = 0; i < BITS_PER_TYPE(u64); i++) {
+ if (phy_types_high & BIT_ULL(i))
+ ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
+ req_speeds, advert_phy_type_hi,
+ i);
}
-
}
#define TEST_SET_BITS_TIMEOUT 50
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
new file mode 100644
index 000000000000..b403ee79cd5e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _ICE_ETHTOOL_H_
+#define _ICE_ETHTOOL_H_
+
+struct ice_phy_type_to_ethtool {
+ u64 aq_link_speed;
+ u8 link_mode;
+};
+
+/* Macro to build a PHY type to Ethtool link mode table entry.
+ * The index is the PHY type.
+ */
+#define ICE_PHY_TYPE(LINK_SPEED, ETHTOOL_LINK_MODE) {\
+ .aq_link_speed = ICE_AQ_LINK_SPEED_##LINK_SPEED, \
+ .link_mode = ETHTOOL_LINK_MODE_##ETHTOOL_LINK_MODE##_BIT, \
+}
+
+/* Lookup table mapping PHY type low to link speed and Ethtool link modes.
+ * Array index corresponds to HW PHY type bit, see
+ * ice_adminq_cmd.h:ICE_PHY_TYPE_LOW_*.
+ */
+static const struct ice_phy_type_to_ethtool
+phy_type_low_lkup[] = {
+ [0] = ICE_PHY_TYPE(100MB, 100baseT_Full),
+ [1] = ICE_PHY_TYPE(100MB, 100baseT_Full),
+ [2] = ICE_PHY_TYPE(1000MB, 1000baseT_Full),
+ [3] = ICE_PHY_TYPE(1000MB, 1000baseX_Full),
+ [4] = ICE_PHY_TYPE(1000MB, 1000baseX_Full),
+ [5] = ICE_PHY_TYPE(1000MB, 1000baseKX_Full),
+ [6] = ICE_PHY_TYPE(1000MB, 1000baseT_Full),
+ [7] = ICE_PHY_TYPE(2500MB, 2500baseT_Full),
+ [8] = ICE_PHY_TYPE(2500MB, 2500baseX_Full),
+ [9] = ICE_PHY_TYPE(2500MB, 2500baseX_Full),
+ [10] = ICE_PHY_TYPE(5GB, 5000baseT_Full),
+ [11] = ICE_PHY_TYPE(5GB, 5000baseT_Full),
+ [12] = ICE_PHY_TYPE(10GB, 10000baseT_Full),
+ [13] = ICE_PHY_TYPE(10GB, 10000baseCR_Full),
+ [14] = ICE_PHY_TYPE(10GB, 10000baseSR_Full),
+ [15] = ICE_PHY_TYPE(10GB, 10000baseLR_Full),
+ [16] = ICE_PHY_TYPE(10GB, 10000baseKR_Full),
+ [17] = ICE_PHY_TYPE(10GB, 10000baseCR_Full),
+ [18] = ICE_PHY_TYPE(10GB, 10000baseKR_Full),
+ [19] = ICE_PHY_TYPE(25GB, 25000baseCR_Full),
+ [20] = ICE_PHY_TYPE(25GB, 25000baseCR_Full),
+ [21] = ICE_PHY_TYPE(25GB, 25000baseCR_Full),
+ [22] = ICE_PHY_TYPE(25GB, 25000baseCR_Full),
+ [23] = ICE_PHY_TYPE(25GB, 25000baseSR_Full),
+ [24] = ICE_PHY_TYPE(25GB, 25000baseSR_Full),
+ [25] = ICE_PHY_TYPE(25GB, 25000baseKR_Full),
+ [26] = ICE_PHY_TYPE(25GB, 25000baseKR_Full),
+ [27] = ICE_PHY_TYPE(25GB, 25000baseKR_Full),
+ [28] = ICE_PHY_TYPE(25GB, 25000baseSR_Full),
+ [29] = ICE_PHY_TYPE(25GB, 25000baseCR_Full),
+ [30] = ICE_PHY_TYPE(40GB, 40000baseCR4_Full),
+ [31] = ICE_PHY_TYPE(40GB, 40000baseSR4_Full),
+ [32] = ICE_PHY_TYPE(40GB, 40000baseLR4_Full),
+ [33] = ICE_PHY_TYPE(40GB, 40000baseKR4_Full),
+ [34] = ICE_PHY_TYPE(40GB, 40000baseSR4_Full),
+ [35] = ICE_PHY_TYPE(40GB, 40000baseCR4_Full),
+ [36] = ICE_PHY_TYPE(50GB, 50000baseCR2_Full),
+ [37] = ICE_PHY_TYPE(50GB, 50000baseSR2_Full),
+ [38] = ICE_PHY_TYPE(50GB, 50000baseSR2_Full),
+ [39] = ICE_PHY_TYPE(50GB, 50000baseKR2_Full),
+ [40] = ICE_PHY_TYPE(50GB, 50000baseSR2_Full),
+ [41] = ICE_PHY_TYPE(50GB, 50000baseCR2_Full),
+ [42] = ICE_PHY_TYPE(50GB, 50000baseSR2_Full),
+ [43] = ICE_PHY_TYPE(50GB, 50000baseCR2_Full),
+ [44] = ICE_PHY_TYPE(50GB, 50000baseCR_Full),
+ [45] = ICE_PHY_TYPE(50GB, 50000baseSR_Full),
+ [46] = ICE_PHY_TYPE(50GB, 50000baseLR_ER_FR_Full),
+ [47] = ICE_PHY_TYPE(50GB, 50000baseLR_ER_FR_Full),
+ [48] = ICE_PHY_TYPE(50GB, 50000baseKR_Full),
+ [49] = ICE_PHY_TYPE(50GB, 50000baseSR_Full),
+ [50] = ICE_PHY_TYPE(50GB, 50000baseCR_Full),
+ [51] = ICE_PHY_TYPE(100GB, 100000baseCR4_Full),
+ [52] = ICE_PHY_TYPE(100GB, 100000baseSR4_Full),
+ [53] = ICE_PHY_TYPE(100GB, 100000baseLR4_ER4_Full),
+ [54] = ICE_PHY_TYPE(100GB, 100000baseKR4_Full),
+ [55] = ICE_PHY_TYPE(100GB, 100000baseCR4_Full),
+ [56] = ICE_PHY_TYPE(100GB, 100000baseCR4_Full),
+ [57] = ICE_PHY_TYPE(100GB, 100000baseSR4_Full),
+ [58] = ICE_PHY_TYPE(100GB, 100000baseCR4_Full),
+ [59] = ICE_PHY_TYPE(100GB, 100000baseCR4_Full),
+ [60] = ICE_PHY_TYPE(100GB, 100000baseKR4_Full),
+ [61] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full),
+ [62] = ICE_PHY_TYPE(100GB, 100000baseSR2_Full),
+ [63] = ICE_PHY_TYPE(100GB, 100000baseLR4_ER4_Full),
+};
+
+/* Lookup table mapping PHY type high to link speed and Ethtool link modes.
+ * Array index corresponds to HW PHY type bit, see
+ * ice_adminq_cmd.h:ICE_PHY_TYPE_HIGH_*
+ */
+static const struct ice_phy_type_to_ethtool
+phy_type_high_lkup[] = {
+ [0] = ICE_PHY_TYPE(100GB, 100000baseKR2_Full),
+ [1] = ICE_PHY_TYPE(100GB, 100000baseSR2_Full),
+ [2] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full),
+ [3] = ICE_PHY_TYPE(100GB, 100000baseSR2_Full),
+ [4] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full),
+};
+
+#endif /* !_ICE_ETHTOOL_H_ */
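The loops in the ice_ethtool.c hunk above consume these tables through ice_linkmode_set_bit(), which is outside this excerpt. A minimal sketch consistent with the call sites, assuming req_speeds is a bitmap of ICE_AQ_LINK_SPEED_* values and advert_phy_type_lo/hi mirror the firmware's advertised PHY type bitmaps:

	static void
	ice_linkmode_set_bit(const struct ice_phy_type_to_ethtool *phy_to_ethtool,
			     struct ethtool_link_ksettings *ks, u32 req_speeds,
			     u64 advert_phy_type, u32 i)
	{
		/* every PHY type reported by firmware is a supported link mode */
		linkmode_set_bit(phy_to_ethtool->link_mode, ks->link_modes.supported);

		/* advertise it if its speed was requested, or if nothing was
		 * requested and firmware advertises this PHY type
		 */
		if (req_speeds & phy_to_ethtool->aq_link_speed ||
		    (!req_speeds && advert_phy_type & BIT_ULL(i)))
			linkmode_set_bit(phy_to_ethtool->link_mode,
					 ks->link_modes.advertising);
	}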
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index ef103e47a8dc..85cca572c22a 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1304,23 +1304,6 @@ ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
}
/**
- * ice_dealloc_flow_entry - Deallocate flow entry memory
- * @hw: pointer to the HW struct
- * @entry: flow entry to be removed
- */
-static void
-ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
-{
- if (!entry)
- return;
-
- if (entry->entry)
- devm_kfree(ice_hw_to_dev(hw), entry->entry);
-
- devm_kfree(ice_hw_to_dev(hw), entry);
-}
-
-/**
* ice_flow_rem_entry_sync - Remove a flow entry
* @hw: pointer to the HW struct
* @blk: classification stage
@@ -1335,7 +1318,8 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
list_del(&entry->l_entry);
- ice_dealloc_flow_entry(hw, entry);
+ devm_kfree(ice_hw_to_dev(hw), entry->entry);
+ devm_kfree(ice_hw_to_dev(hw), entry);
return 0;
}
@@ -1662,8 +1646,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
out:
if (status && e) {
- if (e->entry)
- devm_kfree(ice_hw_to_dev(hw), e->entry);
+ devm_kfree(ice_hw_to_dev(hw), e->entry);
devm_kfree(ice_hw_to_dev(hw), e);
}
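The ice_flow.c cleanup above relies on devm_kfree() being, like kfree(), a no-op for a NULL pointer, which makes both the ice_dealloc_flow_entry() wrapper and the open-coded NULL checks redundant. The whole teardown collapses to two unconditional frees, member first, then the containing entry:

	/* entry->entry may be NULL; devm_kfree() tolerates that */
	devm_kfree(ice_hw_to_dev(hw), entry->entry);
	devm_kfree(ice_hw_to_dev(hw), entry);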
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 2ea8a2b11bcd..75c9de675f20 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -16,8 +16,8 @@
* * number of bytes written - success
* * negative - error code
*/
-static unsigned int
-ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
+static int
+ice_gnss_do_write(struct ice_pf *pf, const unsigned char *buf, unsigned int size)
{
struct ice_aqc_link_topo_addr link_topo;
struct ice_hw *hw = &pf->hw;
@@ -72,39 +72,7 @@ err_out:
dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n",
offset, size, err);
- return offset;
-}
-
-/**
- * ice_gnss_write_pending - Write all pending data to internal GNSS
- * @work: GNSS write work structure
- */
-static void ice_gnss_write_pending(struct kthread_work *work)
-{
- struct gnss_serial *gnss = container_of(work, struct gnss_serial,
- write_work);
- struct ice_pf *pf = gnss->back;
-
- if (!pf)
- return;
-
- if (!test_bit(ICE_FLAG_GNSS, pf->flags))
- return;
-
- if (!list_empty(&gnss->queue)) {
- struct gnss_write_buf *write_buf = NULL;
- unsigned int bytes;
-
- write_buf = list_first_entry(&gnss->queue,
- struct gnss_write_buf, queue);
-
- bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size);
- dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes);
-
- list_del(&write_buf->queue);
- kfree(write_buf->buf);
- kfree(write_buf);
- }
+ return err;
}
/**
@@ -128,12 +96,7 @@ static void ice_gnss_read(struct kthread_work *work)
int err = 0;
pf = gnss->back;
- if (!pf) {
- err = -EFAULT;
- goto exit;
- }
-
- if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ if (!pf || !test_bit(ICE_FLAG_GNSS, pf->flags))
return;
hw = &pf->hw;
@@ -191,7 +154,6 @@ free_buf:
free_page((unsigned long)buf);
requeue:
kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, delay);
-exit:
if (err)
dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err);
}
@@ -220,8 +182,6 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
- INIT_LIST_HEAD(&gnss->queue);
- kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
@@ -281,7 +241,6 @@ static void ice_gnss_close(struct gnss_device *gdev)
if (!gnss)
return;
- kthread_cancel_work_sync(&gnss->write_work);
kthread_cancel_delayed_work_sync(&gnss->read_work);
}
@@ -300,10 +259,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
size_t count)
{
struct ice_pf *pf = gnss_get_drvdata(gdev);
- struct gnss_write_buf *write_buf;
struct gnss_serial *gnss;
- unsigned char *cmd_buf;
- int err = count;
/* We cannot write a single byte using our I2C implementation. */
if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
@@ -319,24 +275,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
if (!gnss)
return -ENODEV;
- cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
- if (!cmd_buf)
- return -ENOMEM;
-
- memcpy(cmd_buf, buf, count);
- write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
- if (!write_buf) {
- kfree(cmd_buf);
- return -ENOMEM;
- }
-
- write_buf->buf = cmd_buf;
- write_buf->size = count;
- INIT_LIST_HEAD(&write_buf->queue);
- list_add_tail(&write_buf->queue, &gnss->queue);
- kthread_queue_work(gnss->kworker, &gnss->write_work);
-
- return err;
+ return ice_gnss_do_write(pf, buf, count);
}
static const struct gnss_operations ice_gnss_ops = {
@@ -432,7 +371,6 @@ void ice_gnss_exit(struct ice_pf *pf)
if (pf->gnss_serial) {
struct gnss_serial *gnss = pf->gnss_serial;
- kthread_cancel_work_sync(&gnss->write_work);
kthread_cancel_delayed_work_sync(&gnss->read_work);
kthread_destroy_worker(gnss->kworker);
gnss->kworker = NULL;
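With the write kthread and its queue removed, the GNSS write path is synchronous: the error path of ice_gnss_do_write() now returns err instead of the partial offset, and the gnss core sees the real outcome rather than the unconditional count the old queued path returned. A hypothetical in-file caller showing the new contract (example_send_ubx is illustrative only):

	static int example_send_ubx(struct ice_pf *pf, const unsigned char *cmd,
				    unsigned int len)
	{
		int ret = ice_gnss_do_write(pf, cmd, len);

		if (ret < 0)
			return ret;	/* AQ/I2C error, already logged */

		return ret == len ? 0 : -EIO;	/* sketch-only short-write policy */
	}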
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index b8bb8b63d081..75e567ad7059 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -22,26 +22,16 @@
*/
#define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1)
-struct gnss_write_buf {
- struct list_head queue;
- unsigned int size;
- unsigned char *buf;
-};
-
/**
* struct gnss_serial - data used to initialize GNSS TTY port
* @back: back pointer to PF
* @kworker: kwork thread for handling periodic work
* @read_work: read_work function for handling GNSS reads
- * @write_work: write_work function for handling GNSS writes
- * @queue: write buffers queue
*/
struct gnss_serial {
struct ice_pf *back;
struct kthread_worker *kworker;
struct kthread_delayed_work read_work;
- struct kthread_work write_work;
- struct list_head queue;
};
#if IS_ENABLED(CONFIG_GNSS)
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index e6bc2285071e..145b27f2a4ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -229,20 +229,34 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
EXPORT_SYMBOL_GPL(ice_get_qos_params);
/**
- * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver
+ * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
* @pf: board private structure to initialize
*/
-static int ice_reserve_rdma_qvector(struct ice_pf *pf)
+static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
{
if (ice_is_rdma_ena(pf)) {
- int index;
-
- index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
- ICE_RES_RDMA_VEC_ID);
- if (index < 0)
- return index;
- pf->num_avail_sw_msix -= pf->num_rdma_msix;
- pf->rdma_base_vector = (u16)index;
+ int i;
+
+ pf->msix_entries = kcalloc(pf->num_rdma_msix,
+ sizeof(*pf->msix_entries),
+ GFP_KERNEL);
+ if (!pf->msix_entries)
+ return -ENOMEM;
+
+ /* RDMA is the only user of pf->msix_entries array */
+ pf->rdma_base_vector = 0;
+
+ for (i = 0; i < pf->num_rdma_msix; i++) {
+ struct msix_entry *entry = &pf->msix_entries[i];
+ struct msi_map map;
+
+ map = ice_alloc_irq(pf, false);
+ if (map.index < 0)
+ break;
+
+ entry->entry = map.index;
+ entry->vector = map.virq;
+ }
}
return 0;
}
@@ -253,9 +267,21 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf)
*/
static void ice_free_rdma_qvector(struct ice_pf *pf)
{
- pf->num_avail_sw_msix -= pf->num_rdma_msix;
- ice_free_res(pf->irq_tracker, pf->rdma_base_vector,
- ICE_RES_RDMA_VEC_ID);
+ int i;
+
+ if (!pf->msix_entries)
+ return;
+
+ for (i = 0; i < pf->num_rdma_msix; i++) {
+ struct msi_map map;
+
+ map.index = pf->msix_entries[i].entry;
+ map.virq = pf->msix_entries[i].vector;
+ ice_free_irq(pf, map);
+ }
+
+ kfree(pf->msix_entries);
+ pf->msix_entries = NULL;
}
/**
@@ -357,7 +383,7 @@ int ice_init_rdma(struct ice_pf *pf)
}
/* Reserve vector resources */
- ret = ice_reserve_rdma_qvector(pf);
+ ret = ice_alloc_rdma_qvectors(pf);
if (ret < 0) {
dev_err(dev, "failed to reserve vectors for RDMA\n");
goto err_reserve_rdma_qvector;
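ice_alloc_rdma_qvectors() above is the first consumer of the per-interrupt API added below in ice_irq.c: every vector round-trips through a struct msi_map, and a negative .index signals failure. Reduced to a single vector, the pairing looks like this (the example_ name is illustrative):

	static int example_alloc_one_vector(struct ice_pf *pf,
					    struct msix_entry *entry)
	{
		struct msi_map map = ice_alloc_irq(pf, false);

		if (map.index < 0)
			return map.index;	/* no tracker slot or no vector */

		entry->entry = map.index;	/* MSI-X table index */
		entry->vector = map.virq;	/* Linux IRQ number */
		return 0;
	}

ice_free_rdma_qvector() then rebuilds the same map from the saved pair and hands it back to ice_free_irq().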
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
new file mode 100644
index 000000000000..ad82ff7d1995
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_irq.h"
+
+/**
+ * ice_init_irq_tracker - initialize interrupt tracker
+ * @pf: board private structure
+ * @max_vectors: maximum number of vectors that tracker can hold
+ * @num_static: number of preallocated interrupts
+ */
+static void
+ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,
+ unsigned int num_static)
+{
+ pf->irq_tracker.num_entries = max_vectors;
+ pf->irq_tracker.num_static = num_static;
+ xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
+}
+
+/**
+ * ice_deinit_irq_tracker - free xarray tracker
+ * @pf: board private structure
+ */
+static void ice_deinit_irq_tracker(struct ice_pf *pf)
+{
+ xa_destroy(&pf->irq_tracker.entries);
+}
+
+/**
+ * ice_free_irq_res - free a block of resources
+ * @pf: board private structure
+ * @index: starting index previously returned by ice_get_res
+ */
+static void ice_free_irq_res(struct ice_pf *pf, u16 index)
+{
+ struct ice_irq_entry *entry;
+
+ entry = xa_erase(&pf->irq_tracker.entries, index);
+ kfree(entry);
+}
+
+/**
+ * ice_get_irq_res - get an interrupt resource
+ * @pf: board private structure
+ * @dyn_only: force entry to be dynamically allocated
+ *
+ * Allocate a new irq entry in a free slot of the tracker. Since an
+ * xarray is used, the new entry is always allocated at the lowest free
+ * index. The allocation limit caps indices at the maximum number of
+ * tracker entries.
+ *
+ * Returns the allocated irq entry or NULL on failure.
+ */
+static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
+{
+ struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
+ .min = 0 };
+ unsigned int num_static = pf->irq_tracker.num_static;
+ struct ice_irq_entry *entry;
+ unsigned int index;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ /* skip preallocated entries if the caller says so */
+ if (dyn_only)
+ limit.min = num_static;
+
+ ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
+ GFP_KERNEL);
+
+ if (ret) {
+ kfree(entry);
+ entry = NULL;
+ } else {
+ entry->index = index;
+ entry->dynamic = index >= num_static;
+ }
+
+ return entry;
+}
+
+/**
+ * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
+ * @pf: board private structure
+ * @v_remain: number of remaining MSI-X vectors to be distributed
+ *
+ * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
+ * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
+ * remaining vectors.
+ */
+static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
+{
+ int v_rdma;
+
+ if (!ice_is_rdma_ena(pf)) {
+ pf->num_lan_msix = v_remain;
+ return;
+ }
+
+ /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
+ v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
+
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
+ dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+
+ pf->num_rdma_msix = 0;
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
+ } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
+ (v_remain - v_rdma < v_rdma)) {
+ /* Support minimum RDMA and give remaining vectors to LAN MSIX */
+ pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
+ pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
+ } else {
+ /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
+ */
+ pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
+ ICE_RDMA_NUM_AEQ_MSIX;
+ pf->num_lan_msix = v_remain - pf->num_rdma_msix;
+ }
+}
+
+/**
+ * ice_ena_msix_range - Request a range of MSIX vectors from the OS
+ * @pf: board private structure
+ *
+ * Compute the number of MSIX vectors wanted and request that many from
+ * the OS. Adjust device usage if there are not enough vectors. Return the
+ * number of vectors reserved or negative on failure.
+ */
+static int ice_ena_msix_range(struct ice_pf *pf)
+{
+ int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
+ num_cpus = num_online_cpus();
+
+ /* LAN miscellaneous handler */
+ v_other = ICE_MIN_LAN_OICR_MSIX;
+
+ /* Flow Director */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ v_other += ICE_FDIR_MSIX;
+
+ /* switchdev */
+ v_other += ICE_ESWITCH_MSIX;
+
+ v_wanted = v_other;
+
+ /* LAN traffic */
+ pf->num_lan_msix = num_cpus;
+ v_wanted += pf->num_lan_msix;
+
+ /* RDMA auxiliary driver */
+ if (ice_is_rdma_ena(pf)) {
+ pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
+ v_wanted += pf->num_rdma_msix;
+ }
+
+ if (v_wanted > hw_num_msix) {
+ int v_remain;
+
+ dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
+ v_wanted, hw_num_msix);
+
+ if (hw_num_msix < ICE_MIN_MSIX) {
+ err = -ERANGE;
+ goto exit_err;
+ }
+
+ v_remain = hw_num_msix - v_other;
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
+ v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
+ }
+
+ ice_reduce_msix_usage(pf, v_remain);
+ v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
+
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
+ pf->num_lan_msix);
+ if (ice_is_rdma_ena(pf))
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
+ pf->num_rdma_msix);
+ }
+
+ /* actually reserve the vectors */
+ v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
+ PCI_IRQ_MSIX);
+ if (v_actual < 0) {
+ dev_err(dev, "unable to reserve MSI-X vectors\n");
+ err = v_actual;
+ goto exit_err;
+ }
+
+ if (v_actual < v_wanted) {
+ dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
+ v_wanted, v_actual);
+
+ if (v_actual < ICE_MIN_MSIX) {
+ /* error if we can't get minimum vectors */
+ pci_free_irq_vectors(pf->pdev);
+ err = -ERANGE;
+ goto exit_err;
+ } else {
+ int v_remain = v_actual - v_other;
+
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
+
+ ice_reduce_msix_usage(pf, v_remain);
+
+ dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
+ pf->num_lan_msix);
+
+ if (ice_is_rdma_ena(pf))
+ dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
+ pf->num_rdma_msix);
+ }
+ }
+
+ return v_actual;
+
+exit_err:
+ pf->num_rdma_msix = 0;
+ pf->num_lan_msix = 0;
+ return err;
+}
+
+/**
+ * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
+ * @pf: board private structure
+ */
+void ice_clear_interrupt_scheme(struct ice_pf *pf)
+{
+ pci_free_irq_vectors(pf->pdev);
+ ice_deinit_irq_tracker(pf);
+}
+
+/**
+ * ice_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
+ */
+int ice_init_interrupt_scheme(struct ice_pf *pf)
+{
+ int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+ int vectors, max_vectors;
+
+ vectors = ice_ena_msix_range(pf);
+
+ if (vectors < 0)
+ return -ENOMEM;
+
+ if (pci_msix_can_alloc_dyn(pf->pdev))
+ max_vectors = total_vectors;
+ else
+ max_vectors = vectors;
+
+ ice_init_irq_tracker(pf, max_vectors, vectors);
+
+ return 0;
+}
+
+/**
+ * ice_alloc_irq - Allocate new interrupt vector
+ * @pf: board private structure
+ * @dyn_only: force dynamic allocation of the interrupt
+ *
+ * Allocate a new interrupt vector for a given owner ID. Return a
+ * struct msi_map with the interrupt details and track the allocated
+ * interrupt appropriately.
+ *
+ * This function reserves a new irq entry from the irq_tracker. If,
+ * according to the tracker information, all interrupts that were
+ * allocated with pci_alloc_irq_vectors() are already in use and
+ * dynamically allocated interrupts are supported, the new interrupt
+ * is allocated with pci_msix_alloc_irq_at().
+ *
+ * Some callers may only support dynamically allocated interrupts.
+ * This is indicated with the dyn_only flag.
+ *
+ * On failure, return a map with a negative .index. The caller is
+ * expected to check the returned map index.
+ */
+struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
+{
+ int sriov_base_vector = pf->sriov_base_vector;
+ struct msi_map map = { .index = -ENOENT };
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_irq_entry *entry;
+
+ entry = ice_get_irq_res(pf, dyn_only);
+ if (!entry)
+ return map;
+
+ /* fail if we're about to violate SRIOV vectors space */
+ if (sriov_base_vector && entry->index >= sriov_base_vector)
+ goto exit_free_res;
+
+ if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) {
+ map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
+ if (map.index < 0)
+ goto exit_free_res;
+ dev_dbg(dev, "allocated new irq at index %d\n", map.index);
+ } else {
+ map.index = entry->index;
+ map.virq = pci_irq_vector(pf->pdev, map.index);
+ }
+
+ return map;
+
+exit_free_res:
+ dev_err(dev, "Could not allocate irq at idx %d\n", entry->index);
+ ice_free_irq_res(pf, entry->index);
+ return map;
+}
+
+/**
+ * ice_free_irq - Free interrupt vector
+ * @pf: board private structure
+ * @map: map with interrupt details
+ *
+ * Remove allocated interrupt from the interrupt tracker. If interrupt was
+ * allocated dynamically, free respective interrupt vector.
+ */
+void ice_free_irq(struct ice_pf *pf, struct msi_map map)
+{
+ struct ice_irq_entry *entry;
+
+ entry = xa_load(&pf->irq_tracker.entries, map.index);
+
+ if (!entry) {
+ dev_err(ice_pf_to_dev(pf), "Failed to get MSIX interrupt entry at index %d",
+ map.index);
+ return;
+ }
+
+ dev_dbg(ice_pf_to_dev(pf), "Free irq at index %d\n", map.index);
+
+ if (entry->dynamic)
+ pci_msix_free_irq(pf->pdev, map);
+
+ ice_free_irq_res(pf, map.index);
+}
+
+/**
+ * ice_get_max_used_msix_vector - Get the max used interrupt vector
+ * @pf: board private structure
+ *
+ * Return the index of the highest used interrupt vector with respect to
+ * the beginning of the MSI-X table. Take into account that some
+ * interrupts may have been dynamically allocated after MSI-X was
+ * initially enabled.
+ */
+int ice_get_max_used_msix_vector(struct ice_pf *pf)
+{
+ unsigned long start, index, max_idx;
+ void *entry;
+
+ /* Treat all preallocated interrupts as used */
+ start = pf->irq_tracker.num_static;
+ max_idx = start - 1;
+
+ xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) {
+ if (index > max_idx)
+ max_idx = index;
+ }
+
+ return max_idx;
+}
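A worked example of the ice_reduce_msix_usage() arithmetic, assuming the driver's usual constants from ice.h (not shown in this excerpt): ICE_MIN_LAN_TXRX_MSIX = 1, ICE_MIN_RDMA_MSIX = 2, ICE_RDMA_NUM_AEQ_MSIX = 4, so v_rdma = 5 when RDMA is enabled:

	v_remain = 20: 20 >= 1 + 5 and 20 - 5 >= 5, so the split branch runs:
	               num_rdma_msix = (20 - 4) / 2 + 4 = 12, num_lan_msix = 8
	v_remain = 4:  4 < 1 + 5, so RDMA gets its minimum:
	               num_rdma_msix = 2, num_lan_msix = 2
	v_remain = 2:  2 < 1 + 2, so RDMA is disabled:
	               num_rdma_msix = 0, num_lan_msix = 1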
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h
new file mode 100644
index 000000000000..f35efc08575e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_irq.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023, Intel Corporation. */
+
+#ifndef _ICE_IRQ_H_
+#define _ICE_IRQ_H_
+
+struct ice_irq_entry {
+ unsigned int index;
+ bool dynamic; /* allocation type flag */
+};
+
+struct ice_irq_tracker {
+ struct xarray entries;
+ u16 num_entries; /* total vectors available */
+ u16 num_static; /* preallocated entries */
+};
+
+int ice_init_interrupt_scheme(struct ice_pf *pf);
+void ice_clear_interrupt_scheme(struct ice_pf *pf);
+
+struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only);
+void ice_free_irq(struct ice_pf *pf, struct msi_map map);
+int ice_get_max_used_msix_vector(struct ice_pf *pf);
+
+#endif
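Together these calls replace the old ice_get_res()/ice_free_res() index arithmetic. A minimal consumer, mirroring how the misc vector is requested in the ice_main.c hunks below (example_ identifiers are illustrative):

	static int example_request_vector(struct ice_pf *pf, irq_handler_t handler,
					  void *data)
	{
		struct msi_map map = ice_alloc_irq(pf, false);
		int err;

		if (map.index < 0)
			return map.index;

		/* map.virq is a Linux IRQ number, map.index the MSI-X slot */
		err = request_irq(map.virq, handler, 0, "example", data);
		if (err)
			ice_free_irq(pf, map);	/* also releases dynamic vectors */

		return err;
	}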
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index ee5b36941ba3..5a7753bda324 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -7,15 +7,6 @@
#include "ice_lag.h"
/**
- * ice_lag_nop_handler - no-op Rx handler to disable LAG
- * @pskb: pointer to skb pointer
- */
-rx_handler_result_t ice_lag_nop_handler(struct sk_buff __always_unused **pskb)
-{
- return RX_HANDLER_PASS;
-}
-
-/**
* ice_lag_set_primary - set PF LAG state as Primary
* @lag: LAG info struct
*/
@@ -158,7 +149,6 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
lag->upper_netdev = upper;
}
- ice_clear_sriov_cap(pf);
ice_clear_rdma_cap(pf);
lag->bonded = true;
@@ -205,7 +195,6 @@ ice_lag_unlink(struct ice_lag *lag,
}
lag->peer_netdev = NULL;
- ice_set_sriov_cap(pf);
ice_set_rdma_cap(pf);
lag->bonded = false;
lag->role = ICE_LAG_NONE;
@@ -229,7 +218,6 @@ static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
if (lag->upper_netdev) {
dev_put(lag->upper_netdev);
lag->upper_netdev = NULL;
- ice_set_sriov_cap(pf);
ice_set_rdma_cap(pf);
}
/* perform some cleanup in case we come back */
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 51b5cf467ce2..2c373676c42f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -25,63 +25,9 @@ struct ice_lag {
struct notifier_block notif_block;
u8 bonded:1; /* currently bonded */
u8 primary:1; /* this is primary */
- u8 handler:1; /* did we register a rx_netdev_handler */
- /* each thing blocking bonding will increment this value by one.
- * If this value is zero, then bonding is allowed.
- */
- u16 dis_lag;
u8 role;
};
int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
-rx_handler_result_t ice_lag_nop_handler(struct sk_buff **pskb);
-
-/**
- * ice_disable_lag - increment LAG disable count
- * @lag: LAG struct
- */
-static inline void ice_disable_lag(struct ice_lag *lag)
-{
- /* If LAG this PF is not already disabled, disable it */
- rtnl_lock();
- if (!netdev_is_rx_handler_busy(lag->netdev)) {
- if (!netdev_rx_handler_register(lag->netdev,
- ice_lag_nop_handler,
- NULL))
- lag->handler = true;
- }
- rtnl_unlock();
- lag->dis_lag++;
-}
-
-/**
- * ice_enable_lag - decrement disable count for a PF
- * @lag: LAG struct
- *
- * Decrement the disable counter for a port, and if that count reaches
- * zero, then remove the no-op Rx handler from that netdev
- */
-static inline void ice_enable_lag(struct ice_lag *lag)
-{
- if (lag->dis_lag)
- lag->dis_lag--;
- if (!lag->dis_lag && lag->handler) {
- rtnl_lock();
- netdev_rx_handler_unregister(lag->netdev);
- rtnl_unlock();
- lag->handler = false;
- }
-}
-
-/**
- * ice_is_lag_dis - is LAG disabled
- * @lag: LAG struct
- *
- * Return true if bonding is disabled
- */
-static inline bool ice_is_lag_dis(struct ice_lag *lag)
-{
- return !!(lag->dis_lag);
-}
#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 450317dfcca7..00e3afd507a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -321,31 +321,19 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
- if (vsi->af_xdp_zc_qps) {
- bitmap_free(vsi->af_xdp_zc_qps);
- vsi->af_xdp_zc_qps = NULL;
- }
+ bitmap_free(vsi->af_xdp_zc_qps);
+ vsi->af_xdp_zc_qps = NULL;
/* free the ring and vector containers */
- if (vsi->q_vectors) {
- devm_kfree(dev, vsi->q_vectors);
- vsi->q_vectors = NULL;
- }
- if (vsi->tx_rings) {
- devm_kfree(dev, vsi->tx_rings);
- vsi->tx_rings = NULL;
- }
- if (vsi->rx_rings) {
- devm_kfree(dev, vsi->rx_rings);
- vsi->rx_rings = NULL;
- }
- if (vsi->txq_map) {
- devm_kfree(dev, vsi->txq_map);
- vsi->txq_map = NULL;
- }
- if (vsi->rxq_map) {
- devm_kfree(dev, vsi->rxq_map);
- vsi->rxq_map = NULL;
- }
+ devm_kfree(dev, vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ devm_kfree(dev, vsi->tx_rings);
+ vsi->tx_rings = NULL;
+ devm_kfree(dev, vsi->rx_rings);
+ vsi->rx_rings = NULL;
+ devm_kfree(dev, vsi->txq_map);
+ vsi->txq_map = NULL;
+ devm_kfree(dev, vsi->rxq_map);
+ vsi->rxq_map = NULL;
}
/**
@@ -902,10 +890,8 @@ static void ice_rss_clean(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
- if (vsi->rss_hkey_user)
- devm_kfree(dev, vsi->rss_hkey_user);
- if (vsi->rss_lut_user)
- devm_kfree(dev, vsi->rss_lut_user);
+ devm_kfree(dev, vsi->rss_hkey_user);
+ devm_kfree(dev, vsi->rss_lut_user);
ice_vsi_clean_rss_flow_fld(vsi);
/* remove RSS replay list */
@@ -1371,190 +1357,6 @@ out:
}
/**
- * ice_free_res - free a block of resources
- * @res: pointer to the resource
- * @index: starting index previously returned by ice_get_res
- * @id: identifier to track owner
- *
- * Returns number of resources freed
- */
-int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
-{
- int count = 0;
- int i;
-
- if (!res || index >= res->end)
- return -EINVAL;
-
- id |= ICE_RES_VALID_BIT;
- for (i = index; i < res->end && res->list[i] == id; i++) {
- res->list[i] = 0;
- count++;
- }
-
- return count;
-}
-
-/**
- * ice_search_res - Search the tracker for a block of resources
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or -ENOMEM for error
- */
-static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
-{
- u16 start = 0, end = 0;
-
- if (needed > res->end)
- return -ENOMEM;
-
- id |= ICE_RES_VALID_BIT;
-
- do {
- /* skip already allocated entries */
- if (res->list[end++] & ICE_RES_VALID_BIT) {
- start = end;
- if ((start + needed) > res->end)
- break;
- }
-
- if (end == (start + needed)) {
- int i = start;
-
- /* there was enough, so assign it to the requestor */
- while (i != end)
- res->list[i++] = id;
-
- return start;
- }
- } while (end < res->end);
-
- return -ENOMEM;
-}
-
-/**
- * ice_get_free_res_count - Get free count from a resource tracker
- * @res: Resource tracker instance
- */
-static u16 ice_get_free_res_count(struct ice_res_tracker *res)
-{
- u16 i, count = 0;
-
- for (i = 0; i < res->end; i++)
- if (!(res->list[i] & ICE_RES_VALID_BIT))
- count++;
-
- return count;
-}
-
-/**
- * ice_get_res - get a block of resources
- * @pf: board private structure
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or negative for error
- */
-int
-ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
-{
- if (!res || !pf)
- return -EINVAL;
-
- if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
- dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
- needed, res->num_entries, id);
- return -EINVAL;
- }
-
- return ice_search_res(res, needed, id);
-}
-
-/**
- * ice_get_vf_ctrl_res - Get VF control VSI resource
- * @pf: pointer to the PF structure
- * @vsi: the VSI to allocate a resource for
- *
- * Look up whether another VF has already allocated the control VSI resource.
- * If so, re-use this resource so that we share it among all VFs.
- *
- * Otherwise, allocate the resource and return it.
- */
-static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
-{
- struct ice_vf *vf;
- unsigned int bkt;
- int base;
-
- rcu_read_lock();
- ice_for_each_vf_rcu(pf, bkt, vf) {
- if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
- base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
- rcu_read_unlock();
- return base;
- }
- }
- rcu_read_unlock();
-
- return ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors,
- ICE_RES_VF_CTRL_VEC_ID);
-}
-
-/**
- * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
- * @vsi: ptr to the VSI
- *
- * This should only be called after ice_vsi_alloc_def() which allocates the
- * corresponding SW VSI structure and initializes num_queue_pairs for the
- * newly allocated VSI.
- *
- * Returns 0 on success or negative on failure
- */
-static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- u16 num_q_vectors;
- int base;
-
- dev = ice_pf_to_dev(pf);
- /* SRIOV doesn't grab irq_tracker entries for each VSI */
- if (vsi->type == ICE_VSI_VF)
- return 0;
- if (vsi->type == ICE_VSI_CHNL)
- return 0;
-
- if (vsi->base_vector) {
- dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
- vsi->vsi_num, vsi->base_vector);
- return -EEXIST;
- }
-
- num_q_vectors = vsi->num_q_vectors;
- /* reserve slots from OS requested IRQs */
- if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
- base = ice_get_vf_ctrl_res(pf, vsi);
- } else {
- base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
- vsi->idx);
- }
-
- if (base < 0) {
- dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
- ice_get_free_res_count(pf->irq_tracker),
- ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
- return -ENOENT;
- }
- vsi->base_vector = (u16)base;
- pf->num_avail_sw_msix -= num_q_vectors;
-
- return 0;
-}
-
-/**
* ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
* @vsi: the VSI having rings deallocated
*/
@@ -2410,50 +2212,6 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
}
/**
- * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
- * @vsi: VSI to set the q_vectors register index on
- */
-static int
-ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
-{
- u16 i;
-
- if (!vsi || !vsi->q_vectors)
- return -EINVAL;
-
- ice_for_each_q_vector(vsi, i) {
- struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
- if (!q_vector) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n",
- i, vsi->vsi_num);
- goto clear_reg_idx;
- }
-
- if (vsi->type == ICE_VSI_VF) {
- struct ice_vf *vf = vsi->vf;
-
- q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
- } else {
- q_vector->reg_idx =
- q_vector->v_idx + vsi->base_vector;
- }
- }
-
- return 0;
-
-clear_reg_idx:
- ice_for_each_q_vector(vsi, i) {
- struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
- if (q_vector)
- q_vector->reg_idx = 0;
- }
-
- return -EINVAL;
-}
-
-/**
* ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
* @tx: bool to determine Tx or Rx rule
@@ -2611,37 +2369,6 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
vsi->agg_node->num_vsis);
}
-/**
- * ice_free_vf_ctrl_res - Free the VF control VSI resource
- * @pf: pointer to PF structure
- * @vsi: the VSI to free resources for
- *
- * Check if the VF control VSI resource is still in use. If no VF is using it
- * any more, release the VSI resource. Otherwise, leave it to be cleaned up
- * once no other VF uses it.
- */
-static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
-{
- struct ice_vf *vf;
- unsigned int bkt;
-
- rcu_read_lock();
- ice_for_each_vf_rcu(pf, bkt, vf) {
- if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
- rcu_read_unlock();
- return;
- }
- }
- rcu_read_unlock();
-
- /* No other VFs left that have control VSI. It is now safe to reclaim
- * SW interrupts back to the common pool.
- */
- ice_free_res(pf->irq_tracker, vsi->base_vector,
- ICE_RES_VF_CTRL_VEC_ID);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
-}
-
static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
@@ -2728,14 +2455,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
if (ret)
goto unroll_vsi_init;
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto unroll_alloc_q_vector;
-
- ret = ice_vsi_set_q_vectors_reg_idx(vsi);
- if (ret)
- goto unroll_vector_base;
-
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto unroll_vector_base;
@@ -2745,6 +2464,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+ vsi->stat_offsets_loaded = false;
+
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
@@ -2786,13 +2507,12 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
if (ret)
goto unroll_alloc_q_vector;
- ret = ice_vsi_set_q_vectors_reg_idx(vsi);
- if (ret)
- goto unroll_vector_base;
-
ret = ice_vsi_alloc_ring_stats(vsi);
if (ret)
goto unroll_vector_base;
+
+ vsi->stat_offsets_loaded = false;
+
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
@@ -2822,8 +2542,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
unroll_vector_base:
/* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
@@ -2915,14 +2633,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
* many interrupts each VF needs. SR-IOV MSIX resources are also
* cleared in the same manner.
*/
- if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
- ice_free_vf_ctrl_res(pf, vsi);
- } else if (vsi->type != ICE_VSI_VF) {
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- vsi->base_vector = 0;
- }
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
@@ -2988,8 +2698,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
return vsi;
err_vsi_cfg:
- if (params->type == ICE_VSI_VF)
- ice_enable_lag(pf->lag);
ice_vsi_free(vsi);
return NULL;
@@ -3039,7 +2747,6 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
void ice_vsi_free_irq(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- int base = vsi->base_vector;
int i;
if (!vsi->q_vectors || !vsi->irqs_ready)
@@ -3053,10 +2760,9 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
ice_free_cpu_rx_rmap(vsi);
ice_for_each_q_vector(vsi, i) {
- u16 vector = i + base;
int irq_num;
- irq_num = pf->msix_entries[vector].vector;
+ irq_num = vsi->q_vectors[i]->irq.virq;
/* free only the irqs that were actually requested */
if (!vsi->q_vectors[i] ||
@@ -3188,7 +2894,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
*/
void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
- int base = vsi->base_vector;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 val;
@@ -3235,7 +2940,7 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
return;
ice_for_each_q_vector(vsi, i)
- synchronize_irq(pf->msix_entries[i + base].vector);
+ synchronize_irq(vsi->q_vectors[i]->irq.virq);
}
/**
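These ice_lib.c hunks depend on each q_vector carrying its own struct msi_map (the irq member is added elsewhere in this series), so vsi->base_vector and the pf->msix_entries table are no longer needed to find a queue vector's Linux IRQ. The allocation side presumably pairs a q_vector with its map roughly like this (a sketch, not the exact ice_base.c code):

	q_vector->irq = ice_alloc_irq(pf, false);
	if (q_vector->irq.index < 0)
		return -ENOMEM;

	/* the HW register index comes straight from the tracker index */
	q_vector->reg_idx = q_vector->irq.index;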
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 75221478f2dc..e985766e6bb5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -104,11 +104,6 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked);
void ice_vsi_decfg(struct ice_vsi *vsi);
void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
-int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
-
-int
-ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
-
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index a1f7c8edc22f..93979ab18bc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2490,7 +2490,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
int q_vectors = vsi->num_q_vectors;
struct ice_pf *pf = vsi->back;
- int base = vsi->base_vector;
struct device *dev;
int rx_int_idx = 0;
int tx_int_idx = 0;
@@ -2501,7 +2500,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
for (vector = 0; vector < q_vectors; vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[vector];
- irq_num = pf->msix_entries[base + vector].vector;
+ irq_num = q_vector->irq.virq;
if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@@ -2555,9 +2554,8 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
return 0;
free_q_irqs:
- while (vector) {
- vector--;
- irq_num = pf->msix_entries[base + vector].vector;
+ while (vector--) {
+ irq_num = vsi->q_vectors[vector]->irq.virq;
if (!IS_ENABLED(CONFIG_RFS_ACCEL))
irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
@@ -2635,11 +2633,11 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
int i;
old_prog = xchg(&vsi->xdp_prog, prog);
- if (old_prog)
- bpf_prog_put(old_prog);
-
ice_for_each_rxq(vsi, i)
WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
}
/**
@@ -2924,6 +2922,12 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
}
+ /* hot swap progs and avoid toggling link */
+ if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
+ ice_vsi_assign_bpf_prog(vsi, prog);
+ return 0;
+ }
+
/* need to stop netdev while setting up the program for Rx rings */
if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
ret = ice_down(vsi);
@@ -2956,13 +2960,6 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
xdp_ring_err = ice_realloc_zc_buf(vsi, false);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
- } else {
- /* safe to call even when prog == vsi->xdp_prog as
- * dev_xdp_install in net/core/dev.c incremented prog's
- * refcount so corresponding bpf_prog_put won't cause
- * underflow
- */
- ice_vsi_assign_bpf_prog(vsi, prog);
}
if (if_running)
@@ -3047,7 +3044,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
wr32(hw, PFINT_OICR_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
@@ -3060,7 +3057,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
struct ice_pf *pf = (struct ice_pf *)data;
struct ice_hw *hw = &pf->hw;
- irqreturn_t ret = IRQ_NONE;
struct device *dev;
u32 oicr, ena_mask;
@@ -3142,19 +3138,24 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
if (!hw->reset_ongoing)
- ret = IRQ_WAKE_THREAD;
+ set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
- /* Save EVENTs from GTSYN register */
- pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
- GLTSYN_STAT_EVENT1_M |
- GLTSYN_STAT_EVENT2_M);
ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
- kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
+
+ if (hw->func_caps.ts_func_info.src_tmr_owned) {
+ /* Save EVENTs from GLTSYN register */
+ pf->ptp.ext_ts_irq |= gltsyn_stat &
+ (GLTSYN_STAT_EVENT0_M |
+ GLTSYN_STAT_EVENT1_M |
+ GLTSYN_STAT_EVENT2_M);
+
+ set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
+ }
}
#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
@@ -3174,16 +3175,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_ECC_ERR_M)) {
set_bit(ICE_PFR_REQ, pf->state);
- ice_service_task_schedule(pf);
}
}
- if (!ret)
- ret = IRQ_HANDLED;
-
- ice_service_task_schedule(pf);
- ice_irq_dynamic_ena(hw, NULL, NULL);
- return ret;
+ return IRQ_WAKE_THREAD;
}
/**
@@ -3194,12 +3189,29 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
{
struct ice_pf *pf = data;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
if (ice_is_reset_in_progress(pf->state))
return IRQ_HANDLED;
- while (!ice_ptp_process_ts(pf))
- usleep_range(50, 100);
+ ice_service_task_schedule(pf);
+
+ if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
+ ice_ptp_extts_event(pf);
+
+ if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
+ /* Process outstanding Tx timestamps. If there is more work,
+ * re-arm the interrupt to trigger again.
+ */
+ if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+ }
+
+ ice_irq_dynamic_ena(hw, NULL, NULL);
return IRQ_HANDLED;
}
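The hard handler above now only acknowledges causes, latches work bits into pf->misc_thread, and returns IRQ_WAKE_THREAD; anything that may sleep or take long (PTP timestamp processing, scheduling the service task) runs in the thread, which re-enables the interrupt when done. The generic shape of that split, with the example_ types and helpers being hypothetical:

	static irqreturn_t example_hard_handler(int irq, void *data)
	{
		struct example_dev *ed = data;

		/* read and ack causes, record them for the thread; no sleeping */
		ed->pending = example_read_and_ack_causes(ed);	/* hypothetical */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t example_thread_fn(int irq, void *data)
	{
		struct example_dev *ed = data;

		example_handle_causes(ed);	/* hypothetical, may sleep */
		example_reenable_intr(ed);	/* cf. ice_irq_dynamic_ena() */
		return IRQ_HANDLED;
	}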
@@ -3234,6 +3246,7 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
*/
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
+ int misc_irq_num = pf->oicr_irq.virq;
struct ice_hw *hw = &pf->hw;
ice_dis_ctrlq_interrupts(hw);
@@ -3242,14 +3255,10 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
wr32(hw, PFINT_OICR_ENA, 0);
ice_flush(hw);
- if (pf->msix_entries) {
- synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
- devm_free_irq(ice_pf_to_dev(pf),
- pf->msix_entries[pf->oicr_idx].vector, pf);
- }
+ synchronize_irq(misc_irq_num);
+ devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
- pf->num_avail_sw_msix += 1;
- ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
+ ice_free_irq(pf, pf->oicr_irq);
}
/**
@@ -3295,7 +3304,8 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- int oicr_idx, err = 0;
+ struct msi_map oicr_irq;
+ int err = 0;
if (!pf->int_name[0])
snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
@@ -3309,30 +3319,26 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
goto skip_req_irq;
/* reserve one vector in irq_tracker for misc interrupts */
- oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
- if (oicr_idx < 0)
- return oicr_idx;
-
- pf->num_avail_sw_msix -= 1;
- pf->oicr_idx = (u16)oicr_idx;
-
- err = devm_request_threaded_irq(dev,
- pf->msix_entries[pf->oicr_idx].vector,
- ice_misc_intr, ice_misc_intr_thread_fn,
- 0, pf->int_name, pf);
+ oicr_irq = ice_alloc_irq(pf, false);
+ if (oicr_irq.index < 0)
+ return oicr_irq.index;
+
+ pf->oicr_irq = oicr_irq;
+ err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
+ ice_misc_intr_thread_fn, 0,
+ pf->int_name, pf);
if (err) {
dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
pf->int_name, err);
- ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
- pf->num_avail_sw_msix += 1;
+ ice_free_irq(pf, pf->oicr_irq);
return err;
}
skip_req_irq:
ice_ena_misc_vector(pf);
- ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
- wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
+ ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
+ wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
ice_flush(hw);
@@ -3901,224 +3907,6 @@ static int ice_init_pf(struct ice_pf *pf)
}
/**
- * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
- * @pf: board private structure
- * @v_remain: number of remaining MSI-X vectors to be distributed
- *
- * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
- * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
- * remaining vectors.
- */
-static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
-{
- int v_rdma;
-
- if (!ice_is_rdma_ena(pf)) {
- pf->num_lan_msix = v_remain;
- return;
- }
-
- /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
- dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining vectors to LAN MSIX */
- pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
- pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
- } else {
- /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
-}
-
-/**
- * ice_ena_msix_range - Request a range of MSIX vectors from the OS
- * @pf: board private structure
- *
- * Compute the number of MSIX vectors wanted and request from the OS. Adjust
- * device usage if there are not enough vectors. Return the number of vectors
- * reserved or negative on failure.
- */
-static int ice_ena_msix_range(struct ice_pf *pf)
-{
- int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
- struct device *dev = ice_pf_to_dev(pf);
- int err, i;
-
- hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
- num_cpus = num_online_cpus();
-
- /* LAN miscellaneous handler */
- v_other = ICE_MIN_LAN_OICR_MSIX;
-
- /* Flow Director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
- v_other += ICE_FDIR_MSIX;
-
- /* switchdev */
- v_other += ICE_ESWITCH_MSIX;
-
- v_wanted = v_other;
-
- /* LAN traffic */
- pf->num_lan_msix = num_cpus;
- v_wanted += pf->num_lan_msix;
-
- /* RDMA auxiliary driver */
- if (ice_is_rdma_ena(pf)) {
- pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- v_wanted += pf->num_rdma_msix;
- }
-
- if (v_wanted > hw_num_msix) {
- int v_remain;
-
- dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
- v_wanted, hw_num_msix);
-
- if (hw_num_msix < ICE_MIN_MSIX) {
- err = -ERANGE;
- goto exit_err;
- }
-
- v_remain = hw_num_msix - v_other;
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
- v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
- }
-
- ice_reduce_msix_usage(pf, v_remain);
- v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
-
- dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
-
- pf->msix_entries = devm_kcalloc(dev, v_wanted,
- sizeof(*pf->msix_entries), GFP_KERNEL);
- if (!pf->msix_entries) {
- err = -ENOMEM;
- goto exit_err;
- }
-
- for (i = 0; i < v_wanted; i++)
- pf->msix_entries[i].entry = i;
-
- /* actually reserve the vectors */
- v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
- ICE_MIN_MSIX, v_wanted);
- if (v_actual < 0) {
- dev_err(dev, "unable to reserve MSI-X vectors\n");
- err = v_actual;
- goto msix_err;
- }
-
- if (v_actual < v_wanted) {
- dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_wanted, v_actual);
-
- if (v_actual < ICE_MIN_MSIX) {
- /* error if we can't get minimum vectors */
- pci_disable_msix(pf->pdev);
- err = -ERANGE;
- goto msix_err;
- } else {
- int v_remain = v_actual - v_other;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
-
- ice_reduce_msix_usage(pf, v_remain);
-
- dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
-
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
- }
-
- return v_actual;
-
-msix_err:
- devm_kfree(dev, pf->msix_entries);
-
-exit_err:
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = 0;
- return err;
-}
-
-/**
- * ice_dis_msix - Disable MSI-X interrupt setup in OS
- * @pf: board private structure
- */
-static void ice_dis_msix(struct ice_pf *pf)
-{
- pci_disable_msix(pf->pdev);
- devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
- pf->msix_entries = NULL;
-}
-
-/**
- * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
- * @pf: board private structure
- */
-static void ice_clear_interrupt_scheme(struct ice_pf *pf)
-{
- ice_dis_msix(pf);
-
- if (pf->irq_tracker) {
- devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
- pf->irq_tracker = NULL;
- }
-}
-
-/**
- * ice_init_interrupt_scheme - Determine proper interrupt scheme
- * @pf: board private structure to initialize
- */
-static int ice_init_interrupt_scheme(struct ice_pf *pf)
-{
- int vectors;
-
- vectors = ice_ena_msix_range(pf);
-
- if (vectors < 0)
- return vectors;
-
- /* set up vector assignment tracking */
- pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
- struct_size(pf->irq_tracker, list, vectors),
- GFP_KERNEL);
- if (!pf->irq_tracker) {
- ice_dis_msix(pf);
- return -ENOMEM;
- }
-
- /* populate SW interrupts pool with number of OS granted IRQs. */
- pf->num_avail_sw_msix = (u16)vectors;
- pf->irq_tracker->num_entries = (u16)vectors;
- pf->irq_tracker->end = pf->irq_tracker->num_entries;
-
- return 0;
-}
-
-/**
* ice_is_wol_supported - check if WoL is supported
* @hw: pointer to hardware info
*
@@ -4802,9 +4590,13 @@ err_init_pf:
static void ice_deinit_dev(struct ice_pf *pf)
{
ice_free_irq_msix_misc(pf);
- ice_clear_interrupt_scheme(pf);
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
+
+ /* Service task is already stopped, so call reset directly. */
+ ice_reset(&pf->hw, ICE_RESET_PFR);
+ pci_wait_for_pending_transaction(pf->pdev);
+ ice_clear_interrupt_scheme(pf);
}
static void ice_init_features(struct ice_pf *pf)
@@ -5094,10 +4886,6 @@ int ice_load(struct ice_pf *pf)
struct ice_vsi *vsi;
int err;
- err = ice_reset(&pf->hw, ICE_RESET_PFR);
- if (err)
- return err;
-
err = ice_init_dev(pf);
if (err)
return err;
@@ -5354,12 +5142,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_setup_mc_magic_wake(pf);
ice_set_wake(pf);
- /* Issue a PFR as part of the prescribed driver unload flow. Do not
- * do it via ice_schedule_reset() since there is no need to rebuild
- * and the service task is already stopped.
- */
- ice_reset(&pf->hw, ICE_RESET_PFR);
- pci_wait_for_pending_transaction(pdev);
pci_disable_device(pdev);
}
@@ -5841,11 +5623,6 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
if (!is_valid_ether_addr(mac))
return -EADDRNOTAVAIL;
- if (ether_addr_equal(netdev->dev_addr, mac)) {
- netdev_dbg(netdev, "already using mac %pM\n", mac);
- return 0;
- }
-
if (test_bit(ICE_DOWN, pf->state) ||
ice_is_reset_in_progress(pf->state)) {
netdev_err(netdev, "can't set mac %pM. device not ready\n",
@@ -7056,6 +6833,10 @@ int ice_down(struct ice_vsi *vsi)
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_for_each_xdp_txq(vsi, i)
+ ice_clean_tx_ring(vsi->xdp_rings[i]);
+
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
@@ -7631,21 +7412,9 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
netdev->mtu = (unsigned int)new_mtu;
-
- /* if VSI is up, bring it down and then back up */
- if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
- err = ice_down(vsi);
- if (err) {
- netdev_err(netdev, "change MTU if_down err %d\n", err);
- return err;
- }
-
- err = ice_up(vsi);
- if (err) {
- netdev_err(netdev, "change MTU if_up err %d\n", err);
- return err;
- }
- }
+ err = ice_down_up(vsi);
+ if (err)
+ return err;
netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
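ice_down_up() is a helper introduced elsewhere in this series; judging by the code it replaces here, it presumably bounces the VSI only when it is actually up, roughly:

	static int example_down_up(struct ice_vsi *vsi)
	{
		int err;

		/* nothing to do if the VSI is already down */
		if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
			return 0;

		err = ice_down(vsi);
		if (err)
			return err;

		return ice_up(vsi);
	}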
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 02a4e1cf624e..6a9364761165 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -47,6 +47,7 @@ enum ice_protocol_type {
ICE_L2TPV3,
ICE_VLAN_EX,
ICE_VLAN_IN,
+ ICE_HW_METADATA,
ICE_VXLAN_GPE,
ICE_SCTP_IL,
ICE_PROTOCOL_LAST
@@ -115,17 +116,7 @@ enum ice_prot_id {
#define ICE_L2TPV3_HW 104
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
-#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
-#define ICE_MDID_SIZE 2
-
-#define ICE_TUN_FLAG_MDID 21
-#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
-#define ICE_TUN_FLAG_MASK 0xFF
-
-#define ICE_VLAN_FLAG_MDID 20
-#define ICE_VLAN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_VLAN_FLAG_MDID)
-#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
#define ICE_TUN_FLAG_FV_IND 2
@@ -230,6 +221,191 @@ struct ice_nvgre_hdr {
__be32 tni_flow;
};
+/* Metadata information
+ *
+ * Not all MDIDs can be used by the switch block; it depends on the package version.
+ *
+ * MDID 16 (Rx offset)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | A | B | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * A = Source port where the transaction came from (3b).
+ *
+ * B = Destination TC of the packet. The TC is relative to a port (5b).
+ *
+ * MDID 17
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | PTYPE | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * PTYPE = Encodes the packet type (10b).
+ *
+ * MDID 18
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Packet length | R |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Packet length = Length of the packet in bytes
+ * (packet always carries CRC) (14b).
+ * R = Reserved (2b).
+ *
+ * MDID 19
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Source VSI | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Source VSI = Source VSI of a packet looped back in the switch (for egress) (10b).
+ */
+#define ICE_MDID_SOURCE_VSI_MASK GENMASK(9, 0)
+
+/*
+ * MDID 20
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |A|B|C|D|E|F|R|R|G|H|I|J|K|L|M|N|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * A = DSI - set for DSI RX pkts.
+ * B = ipsec_decrypted - invalid on NIC.
+ * C = marker - this is a marker packet.
+ * D = from_network - for Tx it is set to 0
+ * for Rx:
+ * * 1 - packet is from an external link
+ * * 0 - packet source is internal
+ * E = source_interface_is_rx - reflects the physical interface from where the
+ * packet was received:
+ * * 1 - Rx
+ * * 0 - Tx
+ * F = from_mng - The bit signals that the packet originated from management.
+ * G = ucast - Outer L2 MAC address is unicast.
+ * H = mcast - Outer L2 MAC address is multicast.
+ * I = bcast - Outer L2 MAC address is broadcast.
+ * J = second_outer_mac_present - 2 outer MAC headers are present in the packet.
+ * K = STAG or BVLAN - Outer L2 header has STAG (ethernet type 0x88a8) or
+ * BVLAN (ethernet type 0x88a8).
+ * L = ITAG - Outer L2 header has ITAG (ethernet type 0x88e7)
+ * M = EVLAN (0x8100) - Outer L2 header has EVLAN (ethernet type 0x8100)
+ * N = EVLAN (0x9100) - Outer L2 header has EVLAN (ethernet type 0x9100)
+ */
+#define ICE_PKT_VLAN_STAG BIT(12)
+#define ICE_PKT_VLAN_ITAG BIT(13)
+#define ICE_PKT_VLAN_EVLAN (BIT(14) | BIT(15))
+#define ICE_PKT_VLAN_MASK (ICE_PKT_VLAN_STAG | ICE_PKT_VLAN_ITAG | \
+ ICE_PKT_VLAN_EVLAN)
+/* MDID 21
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |A|B|C|D|E|F|G|H|I|J|R|R|K|L|M|N|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * A = VLAN (0x8100) - Outer L2 header has VLAN (ethernet type 0x8100)
+ * B = NSHoE - Outer L2 header has NSH (ethernet type 0x894f)
+ * C = MPLS (0x8847) - There is at least 1 MPLS tag in the outer header
+ * (ethernet type 0x8847)
+ * D = MPLS (0x8848) - There is at least 1 MPLS tag in the outer header
+ * (ethernet type 0x8848)
+ * E = multi MPLS - There is more than a single MPLS tag in the outer header
+ * F = inner MPLS - There is an inner MPLS tag in the packet
+ * G = tunneled MAC - Set if the packet includes a tunneled MAC
+ * H = tunneled VLAN - Same as VLAN, but for a tunneled header
+ * I = pkt_is_frag - Packet is fragmented (ipv4 or ipv6)
+ * J = ipv6_ext - The packet has routing or destination ipv6 extension in inner
+ * or outer ipv6 headers
+ * K = RoCE - UDP packet detected as RoCEv2
+ * L = UDP_XSUM_0 - Set to 1 if L4 checksum is 0 in a UDP packet
+ * M = ESP - This is an ESP packet
+ * N = NAT_ESP - This is an ESP packet encapsulated in UDP NAT
+ */
+#define ICE_PKT_TUNNEL_MAC BIT(6)
+#define ICE_PKT_TUNNEL_VLAN BIT(7)
+#define ICE_PKT_TUNNEL_MASK (ICE_PKT_TUNNEL_MAC | ICE_PKT_TUNNEL_VLAN)
+
+/* MDID 22
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |A|B|C|D|E|F| G |H|I|J| K |L|M|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * A = fin - fin flag in tcp header
+ * B = syn - syn flag in tcp header
+ * C = rst - rst flag in tcp header
+ * D = psh - psh flag in tcp header
+ * E = ack - ack flag in tcp header
+ * F = urg - urg flag in tcp header
+ * G = tunnel type (3b) - Flags used to decode tunnel type:
+ * * b000 - not a VXLAN/Geneve/GRE tunnel
+ * * b001 - VXLAN-GPE
+ * * b010 - VXLAN (non-GPE)
+ * * b011 - Geneve
+ * * b100 - GRE (no key, no xsum)
+ * * b101 - GREK (key, no xsum)
+ * * b110 - GREC (no key, xsum)
+ * * b111 - GREKC (key, xsum)
+ * H = UDP_GRE - Packet is a UDP (VXLAN or VXLAN-GPE or Geneve or MPLSoUDP
+ * or GRE) tunnel
+ * I = OAM - VXLAN/Geneve/tunneled NSH packet with the OAM bit set
+ * J = tunneled NSH - Packet has NSHoGRE or NSHoUDP
+ * K = switch (2b) - Direction on switch
+ * * b00 - normal
+ * * b01 - TX force only LAN
+ * * b10 - TX disable LAN
+ * * b11 - direct to VSI
+ * L = swpe - Represents SWPE bit in TX command
+ * M = sw_cmd - Switch command
+ *
+ * MDID 23
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |A|B|C|D| R |E|F|R|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * A = MAC error - Produced by MAC according to L2 error conditions
+ * B = PPRS no offload - FIFO overflow in PPRS or any problematic condition in
+ * PPRS ANA
+ * C = abort - Set when malicious packet is detected
+ * D = partial analysis - ANA's analysis was cut off in the middle
+ * (header > 504B etc.)
+ * E = FLM - Flow director hit indication
+ * F = FDLONG - Flow director long bucket indication
+ */
+#define ICE_MDID_SIZE 2
+#define ICE_META_DATA_ID_HW 255
+
+enum ice_hw_metadata_id {
+ ICE_SOURCE_PORT_MDID = 16,
+ ICE_PTYPE_MDID = 17,
+ ICE_PACKET_LENGTH_MDID = 18,
+ ICE_SOURCE_VSI_MDID = 19,
+ ICE_PKT_VLAN_MDID = 20,
+ ICE_PKT_TUNNEL_MDID = 21,
+ ICE_PKT_TCP_MDID = 22,
+ ICE_PKT_ERROR_MDID = 23,
+};
+
+enum ice_hw_metadata_offset {
+ ICE_SOURCE_PORT_MDID_OFFSET = ICE_MDID_SIZE * ICE_SOURCE_PORT_MDID,
+ ICE_PTYPE_MDID_OFFSET = ICE_MDID_SIZE * ICE_PTYPE_MDID,
+ ICE_PACKET_LENGTH_MDID_OFFSET = ICE_MDID_SIZE * ICE_PACKET_LENGTH_MDID,
+ ICE_SOURCE_VSI_MDID_OFFSET = ICE_MDID_SIZE * ICE_SOURCE_VSI_MDID,
+ ICE_PKT_VLAN_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_VLAN_MDID,
+ ICE_PKT_TUNNEL_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_TUNNEL_MDID,
+ ICE_PKT_TCP_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_TCP_MDID,
+ ICE_PKT_ERROR_MDID_OFFSET = ICE_MDID_SIZE * ICE_PKT_ERROR_MDID,
+};
+
+enum ice_pkt_flags {
+ ICE_PKT_FLAGS_VLAN = 0,
+ ICE_PKT_FLAGS_TUNNEL = 1,
+ ICE_PKT_FLAGS_TCP = 2,
+ ICE_PKT_FLAGS_ERROR = 3,
+};
+
+struct ice_hw_metadata {
+ __be16 source_port;
+ __be16 ptype;
+ __be16 packet_length;
+ __be16 source_vsi;
+ __be16 flags[4];
+};
+
union ice_prot_hdr {
struct ice_ether_hdr eth_hdr;
struct ice_ethtype_hdr ethertype;
@@ -243,6 +419,7 @@ union ice_prot_hdr {
struct ice_udp_gtp_hdr gtp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
+ struct ice_hw_metadata metadata;
};
/* This is mapping table entry that maps every word within a given protocol
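A minimal sketch of how a rule can match on the new metadata words, based on
the ice_rule_add_*_metadata() helpers added later in this patch; the struct
and masks are the ones defined above, everything else is illustrative:

	/* Match any tunneled packet via the MDID 21 flags word. flags[] is
	 * indexed by enum ice_pkt_flags; metadata words are stored
	 * big-endian in the lookup, hence cpu_to_be16().
	 */
	struct ice_adv_lkup_elem lkup = {};

	lkup.type = ICE_HW_METADATA;
	lkup.m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] =
		cpu_to_be16(ICE_PKT_TUNNEL_MASK);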
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index ac6f06f9a2ed..81d96a40d5a7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -617,7 +617,7 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
}
/**
- * ice_ptp_tx_tstamp - Process Tx timestamps for a port
+ * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
* @tx: the PTP Tx timestamp tracker
*
* Process timestamps captured by the PHY associated with this port. To do
@@ -633,15 +633,6 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
* 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
* 7) send this 64 bit timestamp to the stack
*
- * Returns true if all timestamps were handled, and false if any slots remain
- * without a timestamp.
- *
- * After looping, if we still have waiting SKBs, return false. This may cause
- * us effectively poll even when not strictly necessary. We do this because
- * it's possible a new timestamp was requested around the same time as the
- * interrupt. In some cases hardware might not interrupt us again when the
- * timestamp is captured.
- *
* Note that we do not hold the tracking lock while reading the Tx timestamp.
* This is because reading the timestamp requires taking a mutex that might
* sleep.
@@ -673,10 +664,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
* the packet will never be sent by hardware and discard it without reading
* the timestamp register.
*/
-static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
+static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
struct ice_ptp_port *ptp_port;
- bool more_timestamps;
struct ice_pf *pf;
struct ice_hw *hw;
u64 tstamp_ready;
@@ -685,7 +675,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
u8 idx;
if (!tx->init)
- return true;
+ return;
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
@@ -694,7 +684,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
/* Read the Tx ready status first */
err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
if (err)
- return false;
+ return;
/* Drop packets if the link went down */
link_up = ptp_port->link_up;
@@ -782,15 +772,34 @@ skip_ts_read:
skb_tstamp_tx(skb, &shhwtstamps);
dev_kfree_skb_any(skb);
}
+}
- /* Check if we still have work to do. If so, re-queue this task to
- * poll for remaining timestamps.
- */
+/**
+ * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
+ * @tx: the PTP Tx timestamp tracker
+ *
+ * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
+ * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
+ */
+static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
+{
+ bool more_timestamps;
+
+ if (!tx->init)
+ return ICE_TX_TSTAMP_WORK_DONE;
+
+ /* Process the Tx timestamp tracker */
+ ice_ptp_process_tx_tstamp(tx);
+
+ /* Check if there are outstanding Tx timestamps */
spin_lock(&tx->lock);
more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
spin_unlock(&tx->lock);
- return !more_timestamps;
+ if (more_timestamps)
+ return ICE_TX_TSTAMP_WORK_PENDING;
+
+ return ICE_TX_TSTAMP_WORK_DONE;
}
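A sketch of how a caller is expected to consume the new return value; the
re-arm register write is an assumption about the miscellaneous interrupt
thread and is not part of the hunks shown here:

	/* Illustrative only: poll Tx timestamps and, if any slots are still
	 * outstanding, retrigger the interrupt rather than busy-polling.
	 */
	if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
		/* hypothetical re-arm; the real register/mask may differ */
		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
		ice_flush(hw);
	}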
/**
@@ -911,7 +920,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
spin_unlock(&tx->lock);
/* wait for potentially outstanding interrupt to complete */
- synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
+ synchronize_irq(pf->oicr_irq.virq);
ice_ptp_flush_tx_tracker(pf, tx);
@@ -1458,15 +1467,11 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
}
/**
- * ice_ptp_extts_work - Workqueue task function
- * @work: external timestamp work structure
- *
- * Service for PTP external clock event
+ * ice_ptp_extts_event - Process PTP external clock event
+ * @pf: Board private structure
*/
-static void ice_ptp_extts_work(struct kthread_work *work)
+void ice_ptp_extts_event(struct ice_pf *pf)
{
- struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
- struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
struct ptp_clock_event event;
struct ice_hw *hw = &pf->hw;
u8 chan, tmr_idx;
@@ -2430,9 +2435,10 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
* ice_ptp_process_ts - Process the PTP Tx timestamps
* @pf: Board private structure
*
- * Returns true if timestamps are processed.
+ * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
+ * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
*/
-bool ice_ptp_process_ts(struct ice_pf *pf)
+enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
{
return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
}
@@ -2558,7 +2564,6 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf)
ice_ptp_cfg_timestamp(pf, false);
kthread_cancel_delayed_work_sync(&ptp->work);
- kthread_cancel_work_sync(&ptp->extts_work);
if (test_bit(ICE_PFR_REQ, pf->state))
return;
@@ -2656,7 +2661,6 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
/* Initialize work functions */
kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
- kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);
/* Allocate a kworker for handling work required for the ports
* connected to the PTP hardware clock.
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 9cda2f43e0e5..995a57019ba7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -109,6 +109,16 @@ struct ice_tx_tstamp {
};
/**
+ * enum ice_tx_tstamp_work - Status of Tx timestamp work function
+ * @ICE_TX_TSTAMP_WORK_DONE: Tx timestamp processing is complete
+ * @ICE_TX_TSTAMP_WORK_PENDING: More Tx timestamps are pending
+ */
+enum ice_tx_tstamp_work {
+ ICE_TX_TSTAMP_WORK_DONE = 0,
+ ICE_TX_TSTAMP_WORK_PENDING,
+};
+
+/**
* struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port
* @lock: lock to prevent concurrent access to fields of this struct
* @tstamps: array of len to store outstanding requests
@@ -169,7 +179,6 @@ struct ice_ptp_port {
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
* @port: data for the PHY port initialization procedure
* @work: delayed work function for periodic tasks
- * @extts_work: work function for handling external Tx timestamps
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
* @cached_phc_jiffies: jiffies when cached_phc_time was last updated
* @ext_ts_chan: the external timestamp channel in use
@@ -190,7 +199,6 @@ struct ice_ptp_port {
struct ice_ptp {
struct ice_ptp_port port;
struct kthread_delayed_work work;
- struct kthread_work extts_work;
u64 cached_phc_time;
unsigned long cached_phc_jiffies;
u8 ext_ts_chan;
@@ -256,8 +264,9 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
int ice_get_ptp_clock_index(struct ice_pf *pf);
+void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
-bool ice_ptp_process_ts(struct ice_pf *pf);
+enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
@@ -284,6 +293,7 @@ static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
return -1;
}
+static inline void ice_ptp_extts_event(struct ice_pf *pf) { }
static inline s8
ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index fd1f8b0ad0ab..e30e12321abd 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -298,14 +298,6 @@ static int ice_repr_add(struct ice_vf *vf)
if (!repr)
return -ENOMEM;
-#ifdef CONFIG_ICE_SWITCHDEV
- repr->mac_rule = kzalloc(sizeof(*repr->mac_rule), GFP_KERNEL);
- if (!repr->mac_rule) {
- err = -ENOMEM;
- goto err_alloc_rule;
- }
-#endif
-
repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
if (!repr->netdev) {
err = -ENOMEM;
@@ -351,11 +343,6 @@ err_alloc_q_vector:
free_netdev(repr->netdev);
repr->netdev = NULL;
err_alloc:
-#ifdef CONFIG_ICE_SWITCHDEV
- kfree(repr->mac_rule);
- repr->mac_rule = NULL;
-err_alloc_rule:
-#endif
kfree(repr);
vf->repr = NULL;
return err;
@@ -376,10 +363,6 @@ static void ice_repr_rem(struct ice_vf *vf)
ice_devlink_destroy_vf_port(vf);
free_netdev(vf->repr->netdev);
vf->repr->netdev = NULL;
-#ifdef CONFIG_ICE_SWITCHDEV
- kfree(vf->repr->mac_rule);
- vf->repr->mac_rule = NULL;
-#endif
kfree(vf->repr);
vf->repr = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 378a45bfa256..9c2a6f496b3b 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -13,9 +13,8 @@ struct ice_repr {
struct net_device *netdev;
struct metadata_dst *dst;
#ifdef CONFIG_ICE_SWITCHDEV
- /* info about slow path MAC rule */
- struct ice_rule_query_data *mac_rule;
- u8 rule_added;
+ /* info about slow path rule */
+ struct ice_rule_query_data sp_rule;
#endif
};
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index b7682de0ae05..b664d60fd037 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -358,10 +358,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
node->sibling;
}
- /* leaf nodes have no children */
- if (node->children)
- devm_kfree(ice_hw_to_dev(hw), node->children);
-
+ devm_kfree(ice_hw_to_dev(hw), node->children);
kfree(node->name);
xa_erase(&pi->sched_node_ids, node->id);
devm_kfree(ice_hw_to_dev(hw), node);
@@ -859,10 +856,8 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
if (!hw)
return;
- if (hw->layer_info) {
- devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
- hw->layer_info = NULL;
- }
+ devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
+ hw->layer_info = NULL;
ice_sched_clear_port(hw->port_info);
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index f1dca59bd844..1f66914c7a20 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -135,18 +135,9 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
*/
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
- struct ice_res_tracker *res;
-
if (!pf)
return -EINVAL;
- res = pf->irq_tracker;
- if (!res)
- return -EINVAL;
-
- /* give back irq_tracker resources used */
- WARN_ON(pf->sriov_base_vector < res->num_entries);
-
pf->sriov_base_vector = 0;
return 0;
@@ -410,29 +401,6 @@ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
}
/**
- * ice_get_max_valid_res_idx - Get the max valid resource index
- * @res: pointer to the resource to find the max valid index for
- *
- * Start from the end of the ice_res_tracker and return right when we find the
- * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
- * valid for SR-IOV because it is the only consumer that manipulates the
- * res->end and this is always called when res->end is set to res->num_entries.
- */
-static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
-{
- int i;
-
- if (!res)
- return -EINVAL;
-
- for (i = res->num_entries - 1; i >= 0; i--)
- if (res->list[i] & ICE_RES_VALID_BIT)
- return i;
-
- return 0;
-}
-
-/**
* ice_sriov_set_msix_res - Set any used MSIX resources
* @pf: pointer to PF structure
* @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
@@ -450,7 +418,7 @@ static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = pf->irq_tracker->num_entries;
+ int vectors_used = ice_get_max_used_msix_vector(pf);
int sriov_base_vector;
sriov_base_vector = total_vectors - num_msix_needed;
@@ -490,7 +458,7 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
*/
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
- int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+ int vectors_used = ice_get_max_used_msix_vector(pf);
u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
int msix_avail_per_vf, msix_avail_for_sriov;
struct device *dev = ice_pf_to_dev(pf);
@@ -501,12 +469,9 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
if (!num_vfs)
return -EINVAL;
- if (max_valid_res_idx < 0)
- return -ENOSPC;
-
/* determine MSI-X resources per VF */
msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- pf->irq_tracker->num_entries;
+ vectors_used;
msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
@@ -871,7 +836,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
int ret;
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
set_bit(ICE_OICR_INTR_DIS, pf->state);
ice_flush(hw);
@@ -940,14 +905,13 @@ err_unroll_intr:
*/
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
- int pre_existing_vfs = pci_num_vf(pf->pdev);
struct device *dev = ice_pf_to_dev(pf);
int err;
- if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ if (!num_vfs) {
ice_free_vfs(pf);
- else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
return 0;
+ }
if (num_vfs > pf->vfs.num_supported) {
dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
@@ -1014,8 +978,6 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!num_vfs) {
if (!pci_vfs_assigned(pdev)) {
ice_free_vfs(pf);
- if (pf->lag)
- ice_enable_lag(pf->lag);
return 0;
}
@@ -1027,8 +989,6 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (err)
return err;
- if (pf->lag)
- ice_disable_lag(pf->lag);
return num_vfs;
}
@@ -1171,7 +1131,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
if (!vf)
return -EINVAL;
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@@ -1286,7 +1246,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto out_put_vf;
}
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@@ -1340,7 +1300,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
return -EOPNOTSUPP;
}
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@@ -1653,7 +1613,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
if (!vf)
return -EINVAL;
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 46b36851af46..6db4ca7978cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1636,21 +1636,16 @@ ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
*/
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
{
- struct ice_vsi_ctx *vsi;
+ struct ice_vsi_ctx *vsi = ice_get_vsi_ctx(hw, vsi_handle);
u8 i;
- vsi = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi)
return;
ice_for_each_traffic_class(i) {
- if (vsi->lan_q_ctx[i]) {
- devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
- vsi->lan_q_ctx[i] = NULL;
- }
- if (vsi->rdma_q_ctx[i]) {
- devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
- vsi->rdma_q_ctx[i] = NULL;
- }
+ devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
+ vsi->lan_q_ctx[i] = NULL;
+ devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
+ vsi->rdma_q_ctx[i] = NULL;
}
}
@@ -4540,6 +4535,11 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
return status;
}
+#define ICE_PROTOCOL_ENTRY(id, ...) { \
+ .prot_type = id, \
+ .offs = {__VA_ARGS__}, \
+}
+
/* This is mapping table entry that maps every word within a given protocol
* structure to the real byte offset as per the specification of that
* protocol header.
@@ -4550,29 +4550,38 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
* structure is added to that union.
*/
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
- { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
- { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
- { ICE_ETYPE_OL, { 0 } },
- { ICE_ETYPE_IL, { 0 } },
- { ICE_VLAN_OFOS, { 2, 0 } },
- { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
- { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
- { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
- 26, 28, 30, 32, 34, 36, 38 } },
- { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
- 26, 28, 30, 32, 34, 36, 38 } },
- { ICE_TCP_IL, { 0, 2 } },
- { ICE_UDP_OF, { 0, 2 } },
- { ICE_UDP_ILOS, { 0, 2 } },
- { ICE_VXLAN, { 8, 10, 12, 14 } },
- { ICE_GENEVE, { 8, 10, 12, 14 } },
- { ICE_NVGRE, { 0, 2, 4, 6 } },
- { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
- { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
- { ICE_PPPOE, { 0, 2, 4, 6 } },
- { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
- { ICE_VLAN_EX, { 2, 0 } },
- { ICE_VLAN_IN, { 2, 0 } },
+ ICE_PROTOCOL_ENTRY(ICE_MAC_OFOS, 0, 2, 4, 6, 8, 10, 12),
+ ICE_PROTOCOL_ENTRY(ICE_MAC_IL, 0, 2, 4, 6, 8, 10, 12),
+ ICE_PROTOCOL_ENTRY(ICE_ETYPE_OL, 0),
+ ICE_PROTOCOL_ENTRY(ICE_ETYPE_IL, 0),
+ ICE_PROTOCOL_ENTRY(ICE_VLAN_OFOS, 2, 0),
+ ICE_PROTOCOL_ENTRY(ICE_IPV4_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
+ ICE_PROTOCOL_ENTRY(ICE_IPV4_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
+ ICE_PROTOCOL_ENTRY(ICE_IPV6_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
+ 20, 22, 24, 26, 28, 30, 32, 34, 36, 38),
+ ICE_PROTOCOL_ENTRY(ICE_IPV6_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 22, 24, 26, 28, 30, 32, 34, 36, 38),
+ ICE_PROTOCOL_ENTRY(ICE_TCP_IL, 0, 2),
+ ICE_PROTOCOL_ENTRY(ICE_UDP_OF, 0, 2),
+ ICE_PROTOCOL_ENTRY(ICE_UDP_ILOS, 0, 2),
+ ICE_PROTOCOL_ENTRY(ICE_VXLAN, 8, 10, 12, 14),
+ ICE_PROTOCOL_ENTRY(ICE_GENEVE, 8, 10, 12, 14),
+ ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
+ ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
+ ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
+ ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
+ ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
+ ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
+ ICE_PROTOCOL_ENTRY(ICE_VLAN_IN, 2, 0),
+ ICE_PROTOCOL_ENTRY(ICE_HW_METADATA,
+ ICE_SOURCE_PORT_MDID_OFFSET,
+ ICE_PTYPE_MDID_OFFSET,
+ ICE_PACKET_LENGTH_MDID_OFFSET,
+ ICE_SOURCE_VSI_MDID_OFFSET,
+ ICE_PKT_VLAN_MDID_OFFSET,
+ ICE_PKT_TUNNEL_MDID_OFFSET,
+ ICE_PKT_TCP_MDID_OFFSET,
+ ICE_PKT_ERROR_MDID_OFFSET),
};
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
@@ -4597,6 +4606,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_L2TPV3, ICE_L2TPV3_HW },
{ ICE_VLAN_EX, ICE_VLAN_OF_HW },
{ ICE_VLAN_IN, ICE_VLAN_OL_HW },
+ { ICE_HW_METADATA, ICE_META_DATA_ID_HW },
};
/**
@@ -5255,71 +5265,6 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
return status;
}
-/**
- * ice_tun_type_match_word - determine if tun type needs a match mask
- * @tun_type: tunnel type
- * @mask: mask to be used for the tunnel
- */
-static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
-{
- switch (tun_type) {
- case ICE_SW_TUN_GENEVE:
- case ICE_SW_TUN_VXLAN:
- case ICE_SW_TUN_NVGRE:
- case ICE_SW_TUN_GTPU:
- case ICE_SW_TUN_GTPC:
- *mask = ICE_TUN_FLAG_MASK;
- return true;
-
- default:
- *mask = 0;
- return false;
- }
-}
-
-/**
- * ice_add_special_words - Add words that are not protocols, such as metadata
- * @rinfo: other information regarding the rule e.g. priority and action info
- * @lkup_exts: lookup word structure
- * @dvm_ena: is double VLAN mode enabled
- */
-static int
-ice_add_special_words(struct ice_adv_rule_info *rinfo,
- struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
-{
- u16 mask;
-
- /* If this is a tunneled packet, then add recipe index to match the
- * tunnel bit in the packet metadata flags.
- */
- if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
- if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
- u8 word = lkup_exts->n_val_words++;
-
- lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
- lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
- lkup_exts->field_mask[word] = mask;
- } else {
- return -ENOSPC;
- }
- }
-
- if (rinfo->vlan_type != 0 && dvm_ena) {
- if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
- u8 word = lkup_exts->n_val_words++;
-
- lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
- lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
- lkup_exts->field_mask[word] =
- ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
- } else {
- return -ENOSPC;
- }
- }
-
- return 0;
-}
-
/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
* @hw: pointer to hardware structure
* @rinfo: other information regarding the rule e.g. priority and action info
@@ -5433,13 +5378,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
goto err_unroll;
- /* Create any special protocol/offset pairs, such as looking at tunnel
- * bits by extracting metadata
- */
- status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
- if (status)
- goto err_unroll;
-
/* Group match words into recipes using preferred recipe grouping
* criteria.
*/
@@ -5525,9 +5463,7 @@ err_unroll:
devm_kfree(ice_hw_to_dev(hw), fvit);
}
- if (rm->root_buf)
- devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
-
+ devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
kfree(rm);
err_free_lkup_exts:
@@ -5725,6 +5661,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* was already checked when search for the dummy packet
*/
type = lkups[i].type;
+ /* metadata isn't present in the packet */
+ if (type == ICE_HW_METADATA)
+ continue;
+
for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
if (type == offsets[j].type) {
offset = offsets[j].offset;
@@ -5860,16 +5800,21 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
/**
* ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
+ * @hw: pointer to hw structure
* @vlan_type: VLAN tag type
* @pkt: dummy packet to fill in
* @offsets: offset info for the dummy packet
*/
static int
-ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
+ice_fill_adv_packet_vlan(struct ice_hw *hw, u16 vlan_type, u8 *pkt,
const struct ice_dummy_pkt_offsets *offsets)
{
u16 i;
+ /* Check if there is something to do */
+ if (!vlan_type || !ice_is_dvm_ena(hw))
+ return 0;
+
/* Find VLAN header and insert VLAN TPID */
for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
if (offsets[i].type == ICE_VLAN_OFOS ||
@@ -5888,6 +5833,15 @@ ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
return -EIO;
}
+static bool ice_rules_equal(const struct ice_adv_rule_info *first,
+ const struct ice_adv_rule_info *second)
+{
+ return first->sw_act.flag == second->sw_act.flag &&
+ first->tun_type == second->tun_type &&
+ first->vlan_type == second->vlan_type &&
+ first->src_vsi == second->src_vsi;
+}
+
/**
* ice_find_adv_rule_entry - Search a rule entry
* @hw: pointer to the hardware structure
@@ -5921,9 +5875,7 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
lkups_matched = false;
break;
}
- if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
- rinfo->tun_type == list_itr->rule_info.tun_type &&
- rinfo->vlan_type == list_itr->rule_info.vlan_type &&
+ if (ice_rules_equal(rinfo, &list_itr->rule_info) &&
lkups_matched)
return list_itr;
}
@@ -6039,6 +5991,26 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
return status;
}
+void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
+{
+ lkup->type = ICE_HW_METADATA;
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] =
+ cpu_to_be16(ICE_PKT_TUNNEL_MASK);
+}
+
+void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
+{
+ lkup->type = ICE_HW_METADATA;
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_VLAN] =
+ cpu_to_be16(ICE_PKT_VLAN_MASK);
+}
+
+void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup)
+{
+ lkup->type = ICE_HW_METADATA;
+ lkup->m_u.metadata.source_vsi = cpu_to_be16(ICE_MDID_SOURCE_VSI_MASK);
+}
+
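Usage sketch, mirroring the ice_tc_lib.c hunks later in this patch: the
caller reserves one extra lookup slot, fills it with a metadata helper, and
hands the array to ice_add_adv_rule(); list, i and rule_info are assumed to
come from the caller's context:

	/* illustrative only */
	ice_rule_add_tunnel_metadata(&list[i]);
	i++;

	err = ice_add_adv_rule(hw, list, i, &rule_info, &rule_added);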
/**
* ice_add_adv_rule - helper function to create an advanced switch rule
* @hw: pointer to the hardware structure
@@ -6120,7 +6092,10 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
rinfo->sw_act.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
- if (rinfo->sw_act.flag & ICE_FLTR_TX)
+
+ if (rinfo->src_vsi)
+ rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
+ else
rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
@@ -6189,19 +6164,20 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
goto err_ice_add_adv_rule;
}
- /* set the rule LOOKUP type based on caller specified 'Rx'
- * instead of hardcoding it to be either LOOKUP_TX/RX
+ /* If there is no matching criteria for direction there
+ * is only one difference between Rx and Tx:
+ * - get switch id based on VSI number from the source field (Tx)
+ * - get switch id based on port number (Rx)
*
- * for 'Rx' set the source to be the port number
- * for 'Tx' set the source to be the source HW VSI number (determined
- * by caller)
+ * If matching on direction metadata is chosen, the rule direction is
+ * extracted from the type value set here.
*/
- if (rinfo->rx) {
- s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
- s_rule->src = cpu_to_le16(hw->port_info->lport);
- } else {
+ if (rinfo->sw_act.flag & ICE_FLTR_TX) {
s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
s_rule->src = cpu_to_le16(rinfo->sw_act.src);
+ } else {
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->src = cpu_to_le16(hw->port_info->lport);
}
s_rule->recipe_id = cpu_to_le16(rid);
@@ -6211,22 +6187,16 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
goto err_ice_add_adv_rule;
- if (rinfo->tun_type != ICE_NON_TUN &&
- rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
- status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
- s_rule->hdr_data,
- profile->offsets);
- if (status)
- goto err_ice_add_adv_rule;
- }
+ status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->hdr_data,
+ profile->offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
- if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) {
- status = ice_fill_adv_packet_vlan(rinfo->vlan_type,
- s_rule->hdr_data,
- profile->offsets);
- if (status)
- goto err_ice_add_adv_rule;
- }
+ status = ice_fill_adv_packet_vlan(hw, rinfo->vlan_type,
+ s_rule->hdr_data,
+ profile->offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
@@ -6469,13 +6439,6 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
return -EIO;
}
- /* Create any special protocol/offset pairs, such as looking at tunnel
- * bits by extracting metadata
- */
- status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
- if (status)
- return status;
-
rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
/* If did not find a recipe that match the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 68d8e8a6a189..c84b56fe84a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -10,7 +10,6 @@
#define ICE_DFLT_VSI_INVAL 0xff
#define ICE_FLTR_RX BIT(0)
#define ICE_FLTR_TX BIT(1)
-#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
@@ -187,12 +186,13 @@ struct ice_adv_rule_flags_info {
};
struct ice_adv_rule_info {
+ /* Store metadata values in rule info */
enum ice_sw_tunnel_type tun_type;
- struct ice_sw_act_ctrl sw_act;
- u32 priority;
- u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
- u16 fltr_rule_id;
u16 vlan_type;
+ u16 fltr_rule_id;
+ u32 priority;
+ u16 src_vsi;
+ struct ice_sw_act_ctrl sw_act;
struct ice_adv_rule_flags_info flags_info;
};
@@ -342,6 +342,9 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
/* Switch/bridge related commands */
+void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup);
+void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup);
+void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup);
int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index d1a31f236d26..b54052ef6050 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -54,6 +54,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
lkups_cnt++;
+ /* is VLAN TPID specified? */
+ if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID)
+ lkups_cnt++;
+
/* is CVLAN specified? */
if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
lkups_cnt++;
@@ -80,6 +84,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_SRC_L4_PORT))
lkups_cnt++;
+ /* matching for tunneled packets in metadata */
+ if (fltr->tunnel_type != TNL_LAST)
+ lkups_cnt++;
+
return lkups_cnt;
}
@@ -320,6 +328,10 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
+ /* always fill matching on tunneled packets in metadata */
+ ice_rule_add_tunnel_metadata(&list[i]);
+ i++;
+
return i;
}
@@ -390,10 +402,6 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* copy VLAN info */
if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
- vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
- rule_info->vlan_type =
- ice_check_supported_vlan_tpid(vlan_tpid);
-
if (flags & ICE_TC_FLWR_FIELD_CVLAN)
list[i].type = ICE_VLAN_EX;
else
@@ -418,6 +426,15 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
i++;
}
+ if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID) {
+ vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
+ rule_info->vlan_type =
+ ice_check_supported_vlan_tpid(vlan_tpid);
+
+ ice_rule_add_vlan_metadata(&list[i]);
+ i++;
+ }
+
if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
list[i].type = ICE_VLAN_IN;
@@ -698,12 +715,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
rule_info.sw_act.flag |= ICE_FLTR_RX;
rule_info.sw_act.src = hw->pf_id;
- rule_info.rx = true;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} else {
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
- rule_info.rx = false;
rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
}
@@ -910,7 +925,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
rule_info.sw_act.src = hw->pf_id;
- rule_info.rx = true;
dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
tc_fltr->action.fwd.tc.tc_class,
rule_info.sw_act.vsi_handle, lkups_cnt);
@@ -921,7 +935,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
rule_info.sw_act.src = hw->pf_id;
- rule_info.rx = true;
dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n",
tc_fltr->action.fwd.q.queue,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
@@ -929,7 +942,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
case ICE_DROP_PACKET:
rule_info.sw_act.flag |= ICE_FLTR_RX;
rule_info.sw_act.src = hw->pf_id;
- rule_info.rx = true;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
break;
default:
@@ -1460,8 +1472,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
VLAN_PRIO_MASK);
}
- if (match.mask->vlan_tpid)
+ if (match.mask->vlan_tpid) {
headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
+ fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_TPID;
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 8d5e22ac7023..8bbc1a62bdb1 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -33,6 +33,7 @@
#define ICE_TC_FLWR_FIELD_L2TPV3_SESSID BIT(26)
#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
+#define ICE_TC_FLWR_FIELD_VLAN_TPID BIT(29)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 4fcf2d07eb85..52d0a126eb61 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1152,11 +1152,11 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
struct xdp_buff *xdp = &rx_ring->xdp;
+ u32 cached_ntc = rx_ring->first_desc;
struct ice_tx_ring *xdp_ring = NULL;
struct bpf_prog *xdp_prog = NULL;
u32 ntc = rx_ring->next_to_clean;
u32 cnt = rx_ring->count;
- u32 cached_ntc = ntc;
u32 xdp_xmit = 0;
u32 cached_ntu;
bool failure;
@@ -1664,8 +1664,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
- td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
- ICE_TX_FLAGS_VLAN_S;
+ td_tag = first->vid;
}
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -1998,7 +1997,7 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
* VLAN offloads exclusively so we only care about the VLAN ID here
*/
if (skb_vlan_tag_present(skb)) {
- first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
+ first->vid = skb_vlan_tag_get(skb);
if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
else
@@ -2388,8 +2387,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
(ICE_TX_CTX_DESC_IL2TAG2 <<
ICE_TXD_CTX_QW1_CMD_S));
- offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
- ICE_TX_FLAGS_VLAN_S;
+ offload.cd_l2tag2 = first->vid;
}
/* set up TSO offload */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index fff0efe28373..166413fc33f4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -127,10 +127,6 @@ static inline int ice_skb_pad(void)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8)
-#define ICE_TX_FLAGS_VLAN_M 0xffff0000
-#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
-#define ICE_TX_FLAGS_VLAN_PR_S 29
-#define ICE_TX_FLAGS_VLAN_S 16
#define ICE_XDP_PASS 0
#define ICE_XDP_CONSUMED BIT(0)
@@ -182,8 +178,9 @@ struct ice_tx_buf {
unsigned int gso_segs;
unsigned int nr_frags; /* used for mbuf XDP */
};
- u32 type:16; /* &ice_tx_buf_type */
- u32 tx_flags:16;
+ u32 tx_flags:12;
+ u32 type:4; /* &ice_tx_buf_type */
+ u32 vid:16;
DEFINE_DMA_UNMAP_LEN(len);
DEFINE_DMA_UNMAP_ADDR(dma);
};
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 89fd6982df09..b26ce4425f45 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -186,6 +186,25 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
}
/**
+ * ice_check_vf_ready_for_reset - check if VF is ready to be reset
+ * @vf: VF to check if it's ready to be reset
+ *
+ * The purpose of this function is to ensure that the VF is not in reset or
+ * disabled, and is both initialized and active, so that another reset can
+ * safely be initiated.
+ */
+int ice_check_vf_ready_for_reset(struct ice_vf *vf)
+{
+ int ret;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+/**
* ice_trigger_vf_reset - Reset a VF on HW
* @vf: pointer to the VF structure
* @is_vflr: true if VFLR was issued, false if not
@@ -670,8 +689,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
*/
ice_vf_clear_all_promisc_modes(vf, vsi);
- ice_eswitch_del_vf_mac_rule(vf);
-
ice_vf_fdir_exit(vf);
ice_vf_fdir_init(vf);
/* clean VF control VSI when resetting VF since it should be setup
@@ -697,7 +714,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
}
ice_eswitch_update_repr(vsi);
- ice_eswitch_replay_vf_mac_rule(vf);
/* if the VF has been reset allow it to come up again */
ice_mbx_clear_malvf(&vf->mbx_info);
@@ -1310,3 +1326,35 @@ void ice_vf_set_initialized(struct ice_vf *vf)
set_bit(ICE_VF_STATE_INIT, vf->vf_states);
memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}
+
+/**
+ * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
+ * @pf: the PF private structure
+ * @vsi: pointer to the VSI
+ *
+ * Return the first VF control VSI found, other than the VSI passed as a
+ * parameter. This function is used to determine whether new resources have
+ * to be allocated for a control VSI or whether they can be shared with an
+ * existing one.
+ *
+ * Return the found VF control VSI pointer, or NULL otherwise.
+ */
+struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vsi *ctrl_vsi = NULL;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ return ctrl_vsi;
+}
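A hypothetical caller sketch for ice_get_vf_ctrl_vsi(); the resource-sharing
decision is the stated purpose of the helper, but the surrounding calls are
assumptions, not code from this patch:

	/* Reuse an existing VF control VSI's resources when one exists,
	 * otherwise allocate new ones.
	 */
	struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);

	if (ctrl_vsi)
		share_resources_with(ctrl_vsi);	/* hypothetical helper */
	else
		allocate_new_resources(vsi);	/* hypothetical helper */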
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index e3cda6fb71ab..67172fdd9bc2 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -215,6 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+int ice_check_vf_ready_for_reset(struct ice_vf *vf);
void ice_set_vf_state_dis(struct ice_vf *vf);
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
void
@@ -226,6 +227,7 @@ int
ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int ice_reset_vf(struct ice_vf *vf, u32 flags);
void ice_reset_all_vfs(struct ice_pf *pf);
+struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi);
#else /* CONFIG_PCI_IOV */
static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
@@ -290,6 +292,12 @@ static inline int ice_reset_vf(struct ice_vf *vf, u32 flags)
static inline void ice_reset_all_vfs(struct ice_pf *pf)
{
}
+
+static inline struct ice_vsi *
+ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ return NULL;
+}
#endif /* !CONFIG_PCI_IOV */
#endif /* _ICE_VF_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 97243c616d5d..efbc2968a7bf 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -3730,7 +3730,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
for (i = 0; i < al->num_elements; i++) {
u8 *mac_addr = al->list[i].addr;
- int result;
if (!is_unicast_ether_addr(mac_addr) ||
ether_addr_equal(mac_addr, vf->hw_lan_addr))
@@ -3742,13 +3741,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
goto handle_mac_exit;
}
- result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
- if (result) {
- dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
- mac_addr, vf->vf_id, result);
- goto handle_mac_exit;
- }
-
ice_vfhw_mac_add(vf, &al->list[i]);
vf->num_mac++;
break;
@@ -3955,6 +3947,7 @@ error_handler:
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
ops->reset_vf(vf);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
index bcda2e004807..1279c1ffe31c 100644
--- a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -219,7 +219,7 @@ static struct ice_update_recipe_lkup_idx_params ice_dvm_dflt_recipes[] = {
.rid = ICE_SW_LKUP_VLAN,
.fv_idx = ICE_PKT_FLAGS_0_TO_15_FV_IDX,
.ignore_valid = false,
- .mask = ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK,
+ .mask = ICE_PKT_VLAN_MASK,
.mask_valid = true,
.lkup_idx = ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX,
},
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index d1e489da7363..a7fe2b4ce655 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -90,7 +90,6 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- int base = vsi->base_vector;
u16 reg;
u32 val;
@@ -103,11 +102,9 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
wr32(hw, QINT_RQCTL(reg), val);
if (q_vector) {
- u16 v_idx = q_vector->v_idx;
-
wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
ice_flush(hw);
- synchronize_irq(pf->msix_entries[v_idx + base].vector);
+ synchronize_irq(q_vector->irq.virq);
}
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 205d577bdbba..caf91c6f52b4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -426,7 +426,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
- u8 bit_shift = 0;
+ u8 bit_shift = 1;
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
@@ -434,7 +434,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
- while (hash_mask >> bit_shift != 0xFF)
+ while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
bit_shift++;
/* The portion of the address that is used for the hash table
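A worked example of why the bounded shift matters, assuming a typical
register count for this family:

	/* illustrative: mta_reg_count = 128 on many parts */
	hash_mask = (128 * 32) - 1;	/* 0xFFF */
	/* 0xFFF >> 4 == 0xFF, so the loop stops at bit_shift = 4 */

Starting bit_shift at 1 and capping the loop also guarantees that the later
"mc_addr[4] >> (8 - bit_shift)" expression never shifts a u8 by a full
8 bits, which was undefined behavior when bit_shift could remain 0.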
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7d60da1b7bf4..319ed601eaa1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -822,6 +822,8 @@ static int igb_set_eeprom(struct net_device *netdev,
*/
ret_val = hw->nvm.ops.read(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
+ if (ret_val)
+ goto out;
}
/* Device's eeprom is always little-endian, word addressable */
@@ -841,6 +843,7 @@ static int igb_set_eeprom(struct net_device *netdev,
hw->nvm.ops.update(hw);
igb_set_fw_version(adapter);
+out:
kfree(eeprom_buff);
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 58872a4c2540..9a2561409b06 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -183,11 +183,13 @@ static int igb_resume(struct device *);
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
+#ifdef CONFIG_PM
static const struct dev_pm_ops igb_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
igb_runtime_idle)
};
+#endif
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
@@ -6947,6 +6949,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
struct e1000_hw *hw = &adapter->hw;
struct ptp_clock_event event;
struct timespec64 ts;
+ unsigned long flags;
if (pin < 0 || pin >= IGB_N_SDP)
return;
@@ -6954,9 +6957,12 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
if (hw->mac.type == e1000_82580 ||
hw->mac.type == e1000_i354 ||
hw->mac.type == e1000_i350) {
- s64 ns = rd32(auxstmpl);
+ u64 ns = rd32(auxstmpl);
- ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32;
+ ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32;
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_cyc2time(&adapter->tc, ns);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
ts = ns_to_timespec64(ns);
} else {
ts.tv_nsec = rd32(auxstmpl);
@@ -9581,6 +9587,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
+ if (state == pci_channel_io_normal) {
+ dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n");
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ }
+
netif_device_detach(netdev);
if (state == pci_channel_io_perm_failure)
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 34aebf00a512..00a5ee487812 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -13,6 +13,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
+#include <linux/bitfield.h>
#include "igc_hw.h"
@@ -228,7 +229,10 @@ struct igc_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
- struct work_struct ptp_tx_work;
+ /* Access to ptp_tx_skb and ptp_tx_start is protected by the
+ * ptp_tx_lock.
+ */
+ spinlock_t ptp_tx_lock;
struct sk_buff *ptp_tx_skb;
struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
@@ -311,6 +315,33 @@ extern char igc_driver_name[];
#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+/* RX-desc Write-Back format RSS Type's */
+enum igc_rss_type_num {
+ IGC_RSS_TYPE_NO_HASH = 0,
+ IGC_RSS_TYPE_HASH_TCP_IPV4 = 1,
+ IGC_RSS_TYPE_HASH_IPV4 = 2,
+ IGC_RSS_TYPE_HASH_TCP_IPV6 = 3,
+ IGC_RSS_TYPE_HASH_IPV6_EX = 4,
+ IGC_RSS_TYPE_HASH_IPV6 = 5,
+ IGC_RSS_TYPE_HASH_TCP_IPV6_EX = 6,
+ IGC_RSS_TYPE_HASH_UDP_IPV4 = 7,
+ IGC_RSS_TYPE_HASH_UDP_IPV6 = 8,
+ IGC_RSS_TYPE_HASH_UDP_IPV6_EX = 9,
+ IGC_RSS_TYPE_MAX = 10,
+};
+#define IGC_RSS_TYPE_MAX_TABLE 16
+#define IGC_RSS_TYPE_MASK GENMASK(3, 0) /* 4 bits (3:0) = mask 0x0F */
+
+/* igc_rss_type - Rx descriptor RSS type field */
+static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
+{
+ /* RSS Type 4-bits (3:0) number: 0-9 (above 9 is reserved)
+ * Accessing the same bits via u16 (wb.lower.lo_dword.hs_rss.pkt_info)
+ * is slightly slower than via u32 (wb.lower.lo_dword.data)
+ */
+ return le32_get_bits(rx_desc->wb.lower.lo_dword.data, IGC_RSS_TYPE_MASK);
+}
+
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_4K_ITR 980
@@ -401,7 +432,6 @@ enum igc_state_t {
__IGC_TESTING,
__IGC_RESETTING,
__IGC_DOWN,
- __IGC_PTP_TX_IN_PROGRESS,
};
enum igc_tx_flags {
@@ -471,6 +501,13 @@ struct igc_rx_buffer {
};
};
+/* context wrapper around xdp_buff to provide access to descriptor metadata */
+struct igc_xdp_buff {
+ struct xdp_buff xdp;
+ union igc_adv_rx_desc *rx_desc;
+ ktime_t rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */
+};
+
struct igc_q_vector {
struct igc_adapter *adapter; /* backlink */
void __iomem *itr_register;
@@ -578,6 +615,7 @@ enum igc_ring_flags_t {
IGC_RING_FLAG_TX_CTX_IDX,
IGC_RING_FLAG_TX_DETECT_HANG,
IGC_RING_FLAG_AF_XDP_ZC,
+ IGC_RING_FLAG_TX_HWTSTAMP,
};
#define ring_uses_large_buffer(ring) \
@@ -634,6 +672,7 @@ int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
+void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 1c4676882082..019ce91c45aa 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -254,6 +254,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
+ /* Zero out the buffer ring */
+ memset(tx_ring->tx_buffer_info, 0,
+ sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -267,7 +274,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
*/
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
- igc_clean_tx_ring(tx_ring);
+ igc_disable_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
@@ -1578,14 +1585,16 @@ done:
}
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
/* FIXME: add support for retrieving timestamps from
* the other timer registers before skipping the
* timestamping request.
*/
- if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
- !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
- &adapter->state)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
+ if (!adapter->ptp_tx_skb) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGC_TX_FLAGS_TSTAMP;
@@ -1594,6 +1603,8 @@ done:
} else {
adapter->tx_hwtstamp_skipped++;
}
+
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
}
if (skb_vlan_tag_present(skb)) {
@@ -1690,14 +1701,36 @@ static void igc_rx_checksum(struct igc_ring *ring,
le32_to_cpu(rx_desc->wb.upper.status_error));
}
+/* Mapping HW RSS Type to enum pkt_hash_types */
+static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
+ [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2,
+ [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4,
+ [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3,
+ [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4,
+ [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3,
+ [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3,
+ [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
+ [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4,
+ [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4,
+ [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
+ [10] = PKT_HASH_TYPE_NONE, /* RSS types above 9 are reserved by HW */
+ [11] = PKT_HASH_TYPE_NONE, /* keep the table sized for the SW bit-mask */
+ [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions */
+ [13] = PKT_HASH_TYPE_NONE,
+ [14] = PKT_HASH_TYPE_NONE,
+ [15] = PKT_HASH_TYPE_NONE,
+};
+
static inline void igc_rx_hash(struct igc_ring *ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- if (ring->netdev->features & NETIF_F_RXHASH)
- skb_set_hash(skb,
- le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
- PKT_HASH_TYPE_L3);
+ if (ring->netdev->features & NETIF_F_RXHASH) {
+ u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ u32 rss_type = igc_rss_type(rx_desc);
+
+ skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
+ }
}
static void igc_rx_vlan(struct igc_ring *rx_ring,
@@ -2214,6 +2247,8 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
if (!count)
return ok;
+ XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff);
+
desc = IGC_RX_DESC(ring, i);
bi = &ring->rx_buffer_info[i];
i -= ring->count;
@@ -2387,6 +2422,8 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ txq_trans_cond_update(nq);
res = igc_xdp_init_tx_descriptor(ring, xdpf);
__netif_tx_unlock(nq);
return res;
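The txq_trans_cond_update() calls added here (and in the two XDP paths below) matter because XDP transmits bypass the normal ndo_start_xmit() path that refreshes the queue's trans_start. As a rough sketch of what the helper does (paraphrasing the include/linux/netdevice.h inline, not quoting it verbatim):

	static inline void txq_trans_cond_update(struct netdev_queue *txq)
	{
		unsigned long now = jiffies;

		/* Refresh trans_start so the TX watchdog does not flag the
		 * shared queue as hung while XDP is driving it.
		 */
		if (READ_ONCE(txq->trans_start) != now)
			WRITE_ONCE(txq->trans_start, now);
	}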
@@ -2498,8 +2535,8 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
union igc_adv_rx_desc *rx_desc;
struct igc_rx_buffer *rx_buffer;
unsigned int size, truesize;
+ struct igc_xdp_buff ctx;
ktime_t timestamp = 0;
- struct xdp_buff xdp;
int pkt_offset = 0;
void *pktbuf;
@@ -2528,18 +2565,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
pktbuf);
+ ctx.rx_ts = timestamp;
pkt_offset = IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
}
if (!skb) {
- xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
- xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
+ xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
igc_rx_offset(rx_ring) + pkt_offset,
size, true);
- xdp_buff_clear_frags_flag(&xdp);
+ xdp_buff_clear_frags_flag(&ctx.xdp);
+ ctx.rx_desc = rx_desc;
- skb = igc_xdp_run_prog(adapter, &xdp);
+ skb = igc_xdp_run_prog(adapter, &ctx.xdp);
}
if (IS_ERR(skb)) {
@@ -2561,9 +2600,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
} else if (skb)
igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igc_build_skb(rx_ring, rx_buffer, &xdp);
+ skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
else
- skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
+ skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp,
timestamp);
/* exit if we failed to retrieve a buffer */
@@ -2664,6 +2703,15 @@ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
napi_gro_receive(&q_vector->napi, skb);
}
+static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp)
+{
+ /* The xdp_buff used by the ZC code path is allocated as part of a
+ * struct xdp_buff_xsk. struct igc_xdp_buff shares its layout with
+ * xdp_buff_xsk, so the private igc_xdp_buff fields fall into
+ * xdp_buff_xsk->cb.
+ */
+ return (struct igc_xdp_buff *)xdp;
+}
+
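The cast in xsk_buff_to_igc_ctx() is only sound because of the layout relationship sketched below (simplified; the authoritative definition lives in include/net/xsk_buff_pool.h, and the XSK_CHECK_PRIV_TYPE() call in igc_alloc_rx_buffers_zc() turns any size violation into a build error):

	/* Simplified layout sketch, not the full in-tree definition */
	struct xdp_buff_xsk {
		struct xdp_buff xdp;	/* overlays igc_xdp_buff.xdp  */
		u8 cb[24];		/* room for rx_desc and rx_ts */
		/* ... pool bookkeeping fields follow ... */
	};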
static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
{
struct igc_adapter *adapter = q_vector->adapter;
@@ -2682,6 +2730,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
while (likely(total_packets < budget)) {
union igc_adv_rx_desc *desc;
struct igc_rx_buffer *bi;
+ struct igc_xdp_buff *ctx;
ktime_t timestamp = 0;
unsigned int size;
int res;
@@ -2699,9 +2748,13 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
bi = &ring->rx_buffer_info[ntc];
+ ctx = xsk_buff_to_igc_ctx(bi->xdp);
+ ctx->rx_desc = desc;
+
if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
bi->xdp->data);
+ ctx->rx_ts = timestamp;
bi->xdp->data += IGC_TS_HDR_LEN;
@@ -2789,6 +2842,9 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
__netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ txq_trans_cond_update(nq);
+
budget = igc_desc_unused(ring);
while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
@@ -5212,7 +5268,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
if (tsicr & IGC_TSICR_TXTS) {
/* retrieve hardware timestamp */
- schedule_work(&adapter->ptp_tx_work);
+ igc_ptp_tx_tstamp_event(adapter);
ack |= IGC_TSICR_TXTS;
}
@@ -6068,9 +6124,18 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
size_t n;
int i;
- adapter->qbv_enable = qopt->enable;
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ adapter->qbv_enable = true;
+ break;
+ case TAPRIO_CMD_DESTROY:
+ adapter->qbv_enable = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- if (!qopt->enable)
+ if (!adapter->qbv_enable)
return igc_tsn_clear_schedule(adapter);
if (qopt->base_time < 0)
@@ -6314,6 +6379,9 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
__netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ txq_trans_cond_update(nq);
+
drops = 0;
for (i = 0; i < num_frames; i++) {
int err;
@@ -6454,6 +6522,58 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
return value;
}
+/* Mapping HW RSS Type to enum xdp_rss_hash_type */
+static const enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
+ [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2,
+ [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP,
+ [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4,
+ [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP,
+ [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
+ [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6,
+ [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
+ [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP,
+ [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP,
+ [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX,
+ [10] = XDP_RSS_TYPE_NONE, /* RSS types above 9 are reserved by HW */
+ [11] = XDP_RSS_TYPE_NONE, /* keep the table sized for the SW bit-mask */
+ [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisions */
+ [13] = XDP_RSS_TYPE_NONE,
+ [14] = XDP_RSS_TYPE_NONE,
+ [15] = XDP_RSS_TYPE_NONE,
+};
+
+static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct igc_xdp_buff *ctx = (void *)_ctx;
+
+ if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
+ return -ENODATA;
+
+ *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss);
+ *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)];
+
+ return 0;
+}
+
+static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
+ const struct igc_xdp_buff *ctx = (void *)_ctx;
+
+ if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
+ *timestamp = ctx->rx_ts;
+
+ return 0;
+ }
+
+ return -ENODATA;
+}
+
+static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
+ .xmo_rx_hash = igc_xdp_rx_hash,
+ .xmo_rx_timestamp = igc_xdp_rx_timestamp,
+};
+
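These two callbacks back the generic bpf_xdp_metadata_rx_hash() and bpf_xdp_metadata_rx_timestamp() kfuncs. Below is a minimal sketch of a BPF-side consumer, modeled on the kernel's xdp_metadata selftests (the program name is hypothetical, and it assumes vmlinux.h plus libbpf's bpf_helpers.h are available):

	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>

	extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
					    enum xdp_rss_hash_type *rss_type) __ksym;
	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
						 __u64 *timestamp) __ksym;

	SEC("xdp")
	int rx_meta_prog(struct xdp_md *ctx)
	{
		enum xdp_rss_hash_type rss_type;
		__u32 hash;
		__u64 ts;

		/* Both kfuncs return 0 on success and -ENODATA when the
		 * driver has nothing to report (e.g. RXHASH disabled, or
		 * no hardware RX timestamp on this packet).
		 */
		if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
			bpf_printk("rss hash 0x%x type %u", hash, rss_type);
		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
			bpf_printk("rx hw ts %llu ns", ts);

		return XDP_PASS;
	}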
/**
* igc_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -6527,6 +6647,7 @@ static int igc_probe(struct pci_dev *pdev,
hw->hw_addr = adapter->io_addr;
netdev->netdev_ops = &igc_netdev_ops;
+ netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
igc_ethtool_set_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
@@ -6554,6 +6675,7 @@ static int igc_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
netdev->features |= NETIF_F_TSO_ECN;
+ netdev->features |= NETIF_F_RXHASH;
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SCTP_CRC;
@@ -6723,6 +6845,9 @@ static void igc_remove(struct pci_dev *pdev)
igc_ptp_stop(adapter);
+ pci_disable_ptm(pdev);
+ pci_clear_master(pdev);
+
set_bit(__IGC_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 4e10ced736db..32ef112f8291 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -536,9 +536,34 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
wr32(IGC_TSYNCRXCTL, val);
}
+static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
+
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
+}
+
static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ int i;
+
+ /* Clear the flags first to prevent new packets from being enqueued
+ * for TX timestamping.
+ */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ clear_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags);
+ }
+
+ /* Now we can clean the pending TX timestamp requests. */
+ igc_ptp_clear_tx_tstamp(adapter);
wr32(IGC_TSYNCTXCTL, 0);
}
@@ -546,12 +571,23 @@ static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter)
static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ int i;
wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG);
/* Read TXSTMP registers to discard any timestamp previously stored. */
rd32(IGC_TXSTMPL);
rd32(IGC_TXSTMPH);
+
+ /* The hardware is ready to accept TX timestamp requests,
+ * notify the transmit path.
+ */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ set_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags);
+ }
}
/**
@@ -603,6 +639,7 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
return 0;
}
+/* Requires adapter->ptp_tx_lock held by caller. */
static void igc_ptp_tx_timeout(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@@ -610,7 +647,6 @@ static void igc_ptp_tx_timeout(struct igc_adapter *adapter)
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
adapter->tx_hwtstamp_timeouts++;
- clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
/* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */
rd32(IGC_TXSTMPH);
netdev_warn(adapter->netdev, "Tx timestamp timeout\n");
@@ -618,20 +654,20 @@ static void igc_ptp_tx_timeout(struct igc_adapter *adapter)
void igc_ptp_tx_hang(struct igc_adapter *adapter)
{
- bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
- IGC_PTP_TX_TIMEOUT);
+ unsigned long flags;
- if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
- return;
+ spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
- /* If we haven't received a timestamp within the timeout, it is
- * reasonable to assume that it will never occur, so we can unlock the
- * timestamp bit when this occurs.
- */
- if (timeout) {
- cancel_work_sync(&adapter->ptp_tx_work);
- igc_ptp_tx_timeout(adapter);
- }
+ if (!adapter->ptp_tx_skb)
+ goto unlock;
+
+ if (time_is_after_jiffies(adapter->ptp_tx_start + IGC_PTP_TX_TIMEOUT))
+ goto unlock;
+
+ igc_ptp_tx_timeout(adapter);
+
+unlock:
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
}
/**
@@ -641,20 +677,57 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter)
* If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
+ *
+ * Context: Expects adapter->ptp_tx_lock to be held by caller.
*/
static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
{
struct sk_buff *skb = adapter->ptp_tx_skb;
struct skb_shared_hwtstamps shhwtstamps;
struct igc_hw *hw = &adapter->hw;
+ u32 tsynctxctl;
int adjust = 0;
u64 regval;
if (WARN_ON_ONCE(!skb))
return;
- regval = rd32(IGC_TXSTMPL);
- regval |= (u64)rd32(IGC_TXSTMPH) << 32;
+ tsynctxctl = rd32(IGC_TSYNCTXCTL);
+ tsynctxctl &= IGC_TSYNCTXCTL_TXTT_0;
+ if (tsynctxctl) {
+ regval = rd32(IGC_TXSTMPL);
+ regval |= (u64)rd32(IGC_TXSTMPH) << 32;
+ } else {
+ /* There's a bug in the hardware that could cause
+ * missing interrupts for TX timestamping. The issue
+ * is that for new interrupts to be triggered, the
+ * IGC_TXSTMPH_0 register must be read.
+ *
+ * To avoid discarding a valid timestamp that just
+ * happened at the "wrong" time, we need to confirm
+ * that no timestamp was captured; we do that by
+ * assuming that no two timestamps in sequence have
+ * the same nanosecond value.
+ *
+ * So, we read the "low" register, read the "high"
+ * register (to latch a new timestamp) and read the
+ * "low" register again. If the "old" and "new" values
+ * of the "low" register differ, a valid timestamp was
+ * captured, and we can read the "high" register
+ * again.
+ */
+ u32 txstmpl_old, txstmpl_new;
+
+ txstmpl_old = rd32(IGC_TXSTMPL);
+ rd32(IGC_TXSTMPH);
+ txstmpl_new = rd32(IGC_TXSTMPL);
+
+ if (txstmpl_old == txstmpl_new)
+ return;
+
+ regval = txstmpl_new;
+ regval |= (u64)rd32(IGC_TXSTMPH) << 32;
+ }
if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval))
return;
@@ -676,13 +749,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
shhwtstamps.hwtstamp =
ktime_add_ns(shhwtstamps.hwtstamp, adjust);
- /* Clear the lock early before calling skb_tstamp_tx so that
- * applications are not woken up before the lock bit is clear. We use
- * a copy of the skb pointer to ensure other threads can't change it
- * while we're notifying the stack.
- */
adapter->ptp_tx_skb = NULL;
- clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
/* Notify the stack and free the skb after we've unlocked */
skb_tstamp_tx(skb, &shhwtstamps);
@@ -690,27 +757,25 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
}
/**
- * igc_ptp_tx_work
- * @work: pointer to work struct
+ * igc_ptp_tx_tstamp_event - process a TX timestamp interrupt
+ * @adapter: board private structure
*
- * This work function polls the TSYNCTXCTL valid bit to determine when a
- * timestamp has been taken for the current stored skb.
+ * Called when a TX timestamp interrupt fires; retrieves the
+ * captured timestamp and sends it up to the socket.
*/
-static void igc_ptp_tx_work(struct work_struct *work)
+void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter)
{
- struct igc_adapter *adapter = container_of(work, struct igc_adapter,
- ptp_tx_work);
- struct igc_hw *hw = &adapter->hw;
- u32 tsynctxctl;
+ unsigned long flags;
- if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
- return;
+ spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
- tsynctxctl = rd32(IGC_TSYNCTXCTL);
- if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0)))
- return;
+ if (!adapter->ptp_tx_skb)
+ goto unlock;
igc_ptp_tx_hwtstamp(adapter);
+
+unlock:
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
}
/**
@@ -959,8 +1024,8 @@ void igc_ptp_init(struct igc_adapter *adapter)
return;
}
+ spin_lock_init(&adapter->ptp_tx_lock);
spin_lock_init(&adapter->tmreg_lock);
- INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
@@ -1020,10 +1085,7 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
return;
- cancel_work_sync(&adapter->ptp_tx_work);
- dev_kfree_skb_any(adapter->ptp_tx_skb);
- adapter->ptp_tx_skb = NULL;
- clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ igc_ptp_clear_tx_tstamp(adapter);
if (pci_device_is_present(adapter->pdev)) {
igc_ptp_time_save(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5d83c887a3fc..1726297f2e0d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1256,7 +1256,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
ixgbe_desc_unused(tx_ring),
TX_WAKE_THRESHOLD,
- netif_carrier_ok(tx_ring->netdev) &&
+ !netif_carrier_ok(tx_ring->netdev) ||
test_bit(__IXGBE_DOWN, &adapter->state)))
++tx_ring->tx_stats.restart_queue;