Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 24
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ddp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 19
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 13
-rw-r--r--  drivers/net/ethernet/intel/iavf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 12
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 631
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_fdir.c | 773
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_fdir.h | 113
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 31
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 196
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 55
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.c | 488
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.h | 58
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 528
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 78
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 725
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 160
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 64
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 2204
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h | 55
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 82
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 22
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 41
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 43
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 32
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_i225.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 72
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 40
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 18
85 files changed, 6549 insertions, 385 deletions
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 4c0c9433bd60..19cf36360933 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -1183,6 +1183,7 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
break;
case e1000_ms_auto:
phy_data &= ~CR_1000T_MS_ENABLE;
+ break;
default:
break;
}
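
The added break above is part of the tree-wide -Wimplicit-fallthrough cleanup: a case body that does work and then reaches the next label must now end in break, return, or an explicit fallthrough; annotation. A minimal sketch of the rule, with illustrative names rather than the driver's exact register logic (the fm10k_mbx.c hunks further down apply the same fix):

        /* Sketch only: switch hygiene under -Wimplicit-fallthrough.
         * 'fallthrough' is the macro from <linux/compiler_attributes.h>.
         */
        switch (ms_mode) {
        case MS_FORCE_MASTER:
                phy_data |= CR_1000T_MS_VALUE;
                fallthrough;            /* intentional, and now annotated */
        case MS_FORCE_SLAVE:
                phy_data |= CR_1000T_MS_ENABLE;
                break;
        case MS_AUTO:
                phy_data &= ~CR_1000T_MS_ENABLE;
                break;                  /* previously fell through silently */
        default:
                break;
        }
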
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 88faf05e23ba..0b1e890dd583 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
} else {
data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+ if (ret_val)
+ return ret_val;
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 69a2329ea463..db79c4e6413e 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2018 Intel Corporation. */
-#ifndef _E1000_HW_H_
-#define _E1000_HW_H_
+#ifndef _E1000E_HW_H_
+#define _E1000E_HW_H_
#include "regs.h"
#include "defines.h"
@@ -714,4 +714,4 @@ struct e1000_hw {
#include "80003es2lan.h"
#include "ich8lan.h"
-#endif
+#endif /* _E1000E_HW_H_ */
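
The guard rename presumably keeps the e1000e header distinct from the legacy e1000 and igb drivers, whose own e1000_hw.h files use the same _E1000_HW_H_ guard; with a shared guard name, whichever header is included second in a translation unit silently disappears. The per-driver prefix convention, sketched:

        /* e1000e/hw.h: the include guard carries the driver prefix */
        #ifndef _E1000E_HW_H_
        #define _E1000E_HW_H_

        /* ... register and MAC/PHY declarations ... */

        #endif /* _E1000E_HW_H_ */
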
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 0ac8d79a7987..590ad110d383 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2745,7 +2745,7 @@ release:
}
/**
- * e1000_k1_gig_workaround_lv - K1 Si workaround
+ * e1000_k1_workaround_lv - K1 Si workaround
* @hw: pointer to the HW structure
*
* Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
@@ -5220,7 +5220,7 @@ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
}
/**
- * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
* @hw: pointer to the HW structure
*
* Workaround for 82566 power-down on D3 entry:
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e9b82c209c2d..88e9035b75cf 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -25,6 +25,7 @@
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
+#include <linux/suspend.h>
#include "e1000.h"
@@ -5974,19 +5975,23 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ rtnl_lock();
/* don't run the task if already down */
- if (test_bit(__E1000_DOWN, &adapter->state))
+ if (test_bit(__E1000_DOWN, &adapter->state)) {
+ rtnl_unlock();
return;
+ }
if (!(adapter->flags & FLAG_RESTART_NOW)) {
e1000e_dump(adapter);
e_err("Reset adapter unexpectedly\n");
}
e1000e_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
- * e1000_get_stats64 - Get System Network Statistics
+ * e1000e_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
* @stats: rtnl_link_stats64 pointer
*
@@ -6159,7 +6164,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
}
/**
- * e1000e_hwtstamp_ioctl - control hardware time stamping
+ * e1000e_hwtstamp_set - control hardware time stamping
* @netdev: network interface device structure
* @ifr: interface request
*
@@ -6817,7 +6822,7 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
}
/**
- * e1000e_disable_aspm_locked Disable ASPM states.
+ * e1000e_disable_aspm_locked - Disable ASPM states.
* @pdev: pointer to PCI device struct
* @state: bit-mask of ASPM states to disable
*
@@ -6918,6 +6923,12 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
+static __maybe_unused int e1000e_pm_prepare(struct device *dev)
+{
+ return pm_runtime_suspended(dev) &&
+ pm_suspend_via_firmware();
+}
+
static __maybe_unused int e1000e_pm_suspend(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -7626,9 +7637,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e1000_print_device_info(adapter);
- dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);
- if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
+ if (pci_dev_run_wake(pdev) && hw->mac.type != e1000_pch_cnp)
pm_runtime_put_noidle(&pdev->dev);
return 0;
@@ -7851,6 +7862,7 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
static const struct dev_pm_ops e1000_pm_ops = {
#ifdef CONFIG_PM_SLEEP
+ .prepare = e1000e_pm_prepare,
.suspend = e1000e_pm_suspend,
.resume = e1000e_pm_resume,
.freeze = e1000e_pm_freeze,
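
The netdev.c change swaps DPM_FLAG_NO_DIRECT_COMPLETE for DPM_FLAG_SMART_PREPARE and adds a .prepare callback: when .prepare returns a positive value, the PM core may "direct complete" a device that is already runtime-suspended, skipping its suspend/resume callbacks entirely. Here that is permitted only when the transition goes through platform firmware (pm_suspend_via_firmware()). A hedged sketch of the mechanism, using a hypothetical foo_ driver:

        #include <linux/pm.h>
        #include <linux/pm_runtime.h>
        #include <linux/suspend.h>

        /* Returning 1 tells the PM core it may leave a runtime-suspended
         * device untouched across system suspend ("direct complete").
         */
        static int foo_pm_prepare(struct device *dev)
        {
                return pm_runtime_suspended(dev) && pm_suspend_via_firmware();
        }

        static const struct dev_pm_ops foo_pm_ops = {
                .prepare = foo_pm_prepare,
                /* .suspend, .resume, .freeze, ... as usual */
        };

        /* in probe(): opt in to consulting .prepare's return value */
        /* dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_PREPARE); */
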
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index bdd9dc163f15..1db35b2c7750 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -371,7 +371,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
}
/**
- * e1000e_write_phy_reg_igp - Write igp PHY register
+ * __e1000e_write_phy_reg_igp - Write igp PHY register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index f3f671311855..9e79d672f4f1 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -142,7 +142,7 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device,
}
/**
- * e1000e_phc_getsynctime - Reads the current system/device cross timestamp
+ * e1000e_phc_getcrosststamp - Reads the current system/device cross timestamp
* @ptp: ptp clock structure
* @xtstamp: structure containing timestamp
*
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index c45315472245..86397c564dfc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -105,7 +105,7 @@ static int fm10k_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
}
/**
- * fm10k_dcbnl_ieee_getdcbx - get the DCBX configuration for the device
+ * fm10k_dcbnl_getdcbx - get the DCBX configuration for the device
* @dev: netdev interface for the device
*
* Returns that we support only IEEE DCB for this interface
@@ -116,7 +116,7 @@ static u8 fm10k_dcbnl_getdcbx(struct net_device __always_unused *dev)
}
/**
- * fm10k_dcbnl_ieee_setdcbx - get the DCBX configuration for the device
+ * fm10k_dcbnl_setdcbx - get the DCBX configuration for the device
* @dev: netdev interface for the device
* @mode: new mode for this device
*
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 1d27b2fb23af..5c77054d67c6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -185,7 +185,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
}
/**
- * fm10k_dbg_free_q_vector_dir - setup debugfs for the q_vectors
+ * fm10k_dbg_q_vector_exit - setup debugfs for the q_vectors
* @q_vector: q_vector to allocate directories for
**/
void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 247f44f4cb30..3362f26d7f99 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1774,7 +1774,7 @@ static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
}
/**
- * f10k_reset_msix_capability - reset MSI-X capability
+ * fm10k_reset_msix_capability - reset MSI-X capability
* @interface: board private structure to initialize
*
* Reset the MSI-X capability back to its starting state
@@ -1787,7 +1787,7 @@ static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
}
/**
- * f10k_init_msix_capability - configure MSI-X capability
+ * fm10k_init_msix_capability - configure MSI-X capability
* @interface: board private structure to initialize
*
* Attempt to configure the interrupts using the best available
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 8e2e92bf3cd4..30ca9ee1900b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -692,7 +692,7 @@ static bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx)
}
/**
- * fm10k_mbx_deqeueue_rx - Dequeues the message from the head in the Rx FIFO
+ * fm10k_mbx_dequeue_rx - Dequeues the message from the head in the Rx FIFO
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
@@ -1039,6 +1039,7 @@ static s32 fm10k_mbx_create_reply(struct fm10k_hw *hw,
case FM10K_STATE_CLOSED:
/* generate new header based on data */
fm10k_mbx_create_disconnect_hdr(mbx);
+ break;
default:
break;
}
@@ -2017,6 +2018,7 @@ static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
case FM10K_STATE_CONNECT:
/* Update remote value to match local value */
mbx->remote = mbx->local;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index c0780c3624c8..af1b0cde3670 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1417,7 +1417,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
}
/**
- * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF
+ * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
* @hw: pointer to hardware structure
* @stats: pointer to the stats structure to update
*
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index ec19e18305ec..41b813fe07a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -2332,7 +2332,7 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
}
/**
- * i40e_get_vsi_params - get VSI configuration info
+ * i40e_aq_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
@@ -2586,7 +2586,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
}
/**
- * i40e_updatelink_status - update status of the HW network link
+ * i40e_update_link_info - update status of the HW network link
* @hw: pointer to the hw struct
**/
noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
@@ -5059,7 +5059,7 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
}
/**
- * i40e_blink_phy_led
+ * i40e_blink_phy_link_led
* @hw: pointer to the HW structure
* @time: time how long led will blinks in secs
* @interval: gap between LED on and off in msecs
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 243b0d2b7b72..673f341f4c0c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -234,7 +234,7 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
}
/**
- * i40e_parse_ieee_etsrec_tlv
+ * i40e_parse_ieee_tlv
* @tlv: IEEE 802.1Qaz TLV
* @dcbcfg: Local store to update ETS REC data
*
@@ -1588,7 +1588,7 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
}
/**
- * i40e_dcb_hw_rx_ets_bw_config
+ * i40e_dcb_hw_rx_up2tc_config
* @hw: pointer to the hw struct
* @prio_tc: priority to tc assignment indexed by priority
*
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 0345132a0ef5..e32c61909b31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -392,7 +392,7 @@ static void i40e_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
}
/**
- * i40e_dcbnl_set_pg_tc_cfg_tx - Set CEE PG Tx BW config
+ * i40e_dcbnl_set_pg_bwg_cfg_tx - Set CEE PG Tx BW config
* @netdev: the corresponding netdev
* @pgid: the corresponding traffic class
* @bw_pct: the BW percentage for the specified traffic class
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index 5e08f100c413..e1069ae658ad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -77,7 +77,7 @@ static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
}
/**
- * i40e_ddp_does_profiles_ - checks if DDP overlaps with existing one.
+ * i40e_ddp_does_profile_overlap - checks if DDP overlaps with existing one.
* @hw: HW data structure
* @pinfo: DDP profile information structure
*
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d7c13ca9be7d..e8230da29f05 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -651,7 +651,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
}
/**
- * i40e_dbg_dump_stats - handles dump stats write into command datum
+ * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
* @pf: the i40e_pf created in command write
* @estats: the eth stats structure to be dumped
**/
@@ -1638,7 +1638,7 @@ static const struct file_operations i40e_dbg_command_fops = {
static char i40e_dbg_netdev_ops_buf[256] = "";
/**
- * i40e_dbg_netdev_ops - read for netdev_ops datum
+ * i40e_dbg_netdev_ops_read - read for netdev_ops datum
* @filp: the opened file
* @buffer: where to write the data for the user to read
* @count: the size of the user's buffer
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index c70dec65a572..c4c167650b6b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -212,7 +212,7 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
}
/**
- * 40e_add_stat_strings - copy stat strings into ethtool buffer
+ * i40e_add_stat_strings - copy stat strings into ethtool buffer
* @p: ethtool supplied buffer
* @stats: stat definitions array
*
@@ -2368,21 +2368,15 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- char *p = (char *)data;
unsigned int i;
+ u8 *p = data;
- for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_priv_flags[i].flag_string);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++)
+ ethtool_sprintf(&p, i40e_gstrings_priv_flags[i].flag_string);
if (pf->hw.pf_id != 0)
return;
- for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gl_gstrings_priv_flags[i].flag_string);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++)
+ ethtool_sprintf(&p, i40e_gl_gstrings_priv_flags[i].flag_string);
}
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
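
The i40e_get_priv_flag_strings() conversion above replaces the open-coded snprintf-plus-pointer-bump with ethtool_sprintf() from <linux/ethtool.h>, which formats into the ETH_GSTRING_LEN slot and advances the cursor itself. A minimal before/after sketch, assuming a hypothetical names[] table:

        #include <linux/ethtool.h>

        /* before: manual slot arithmetic, easy to forget */
        for (i = 0; i < n; i++) {
                snprintf(p, ETH_GSTRING_LEN, "%s", names[i]);
                p += ETH_GSTRING_LEN;
        }

        /* after: the helper writes the slot and bumps p in one step */
        for (i = 0; i < n; i++)
                ethtool_sprintf(&p, names[i]);
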
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index a3da422ab05b..d6e92ecddfbd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -511,7 +511,7 @@ configure_lan_hmc_out:
}
/**
- * i40e_delete_hmc_object - remove hmc objects
+ * i40e_delete_lan_hmc_object - remove hmc objects
* @hw: pointer to the HW structure
* @info: pointer to i40e_hmc_delete_obj_info struct
*
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 353deae139f9..0f84ed0143e4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2023,7 +2023,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
}
/**
- * i40e_next_entry - Get the next non-broadcast filter from a list
+ * i40e_next_filter - Get the next non-broadcast filter from a list
* @next: pointer to filter in list
*
* Returns the next non-broadcast filter in the list. Required so that we
@@ -3259,6 +3259,17 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
}
/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
+/**
* i40e_configure_rx_ring - Configure a receive ring context
* @ring: The Rx ring to configure
*
@@ -3369,6 +3380,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
else
set_ring_build_skb_enabled(ring);
+ ring->rx_offset = i40e_rx_offset(ring);
+
/* cache tail for quicker writes, and clear the reg before use */
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
@@ -5191,7 +5204,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
}
/**
- * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
+ * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
* @pf: PF being queried
*
* Return a bitmap for enabled traffic classes for this PF.
@@ -9454,7 +9467,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
/**
- * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
+ * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
* @pf: board private structure
**/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
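
The i40e_main.c hunks move i40e_rx_offset() next to i40e_configure_rx_ring() (the matching removal from i40e_txrx.c appears below): the helper reads ring_uses_build_skb(), and that flag is only settled inside i40e_configure_rx_ring(), so caching rx_offset in i40e_setup_rx_descriptors() read it too early. A sketch of the ordering constraint, with hypothetical foo_ names:

        /* headroom depends on the build_skb decision */
        static unsigned int foo_rx_offset(struct foo_ring *rx_ring)
        {
                return ring_uses_build_skb(rx_ring) ? FOO_SKB_PAD : 0;
        }

        static int foo_configure_rx_ring(struct foo_ring *ring)
        {
                /* ... program the Rx context ... */
                set_ring_build_skb_enabled(ring);       /* decision made here */
                ring->rx_offset = foo_rx_offset(ring);  /* so cache it here,
                                                         * not at alloc time */
                return 0;
        }
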
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7164f4ad8120..fe6dca846028 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -4,7 +4,7 @@
#include "i40e_prototype.h"
/**
- * i40e_init_nvm_ops - Initialize NVM function pointers
+ * i40e_init_nvm - Initialize NVM function pointers
* @hw: pointer to the HW structure
*
* Setup the function pointers and the NVM info structure. Should be called
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 7a879614ca55..f1f6fc3744e9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -216,7 +216,7 @@ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
}
/**
- * i40e_ptp_update_latch_events - Read I40E_PRTTSYN_STAT_1 and latch events
+ * i40e_ptp_get_rx_events - Read I40E_PRTTSYN_STAT_1 and latch events
* @pf: the PF data structure
*
* This function reads I40E_PRTTSYN_STAT_1 and updates the corresponding timers
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index e2c5c6d83f25..fc20afc23bfa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1570,17 +1570,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
- * i40e_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
-}
-
-/**
* i40e_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -1608,7 +1597,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
- rx_ring->rx_offset = i40e_rx_offset(rx_ring);
/* XDP RX-queue info only needed for RX rings exposed to XDP */
if (rx_ring->vsi->type == I40E_VSI_MAIN) {
@@ -3345,7 +3333,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
}
/**
- * i40e_create_tx_ctx Build the Tx context descriptor
+ * i40e_create_tx_ctx - Build the Tx context descriptor
* @tx_ring: ring to create the descriptor on
* @cd_type_cmd_tso_mss: Quad Word 1
* @cd_tunneling: Quad Word 0 - bits 0-31
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index fc32c5019b0f..d89c22347d9d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -160,6 +160,13 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ rcu_read_unlock();
+ return result;
+ }
+
switch (act) {
case XDP_PASS:
break;
@@ -167,10 +174,6 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
break;
- case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
- break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
@@ -625,7 +628,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
}
/**
- * i40e_xsk_clean_xdp_ring - Clean the XDP Tx ring on shutdown
+ * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
* @tx_ring: XDP Tx ring
**/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
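
In the AF_XDP zero-copy path, XDP_REDIRECT is the expected verdict for nearly every frame (redirect is how packets reach the XSK socket), so the i40e_xsk.c hunk tests it with likely() ahead of the switch instead of leaving it as one case among equals. The shape, sketched with stand-in FOO_XDP_* result codes:

        act = bpf_prog_run_xdp(xdp_prog, xdp);

        /* hot verdict first, branch-predicted */
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(netdev, xdp, xdp_prog);
                return err ? FOO_XDP_CONSUMED : FOO_XDP_REDIR;
        }

        switch (act) {                  /* cold verdicts keep the switch */
        case XDP_PASS:
                break;
        case XDP_TX:
                /* queue onto the XDP Tx ring */
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
        case XDP_DROP:
                return FOO_XDP_CONSUMED;
        }
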
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index c997063ed728..121e194ee734 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -11,5 +11,5 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
-iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \
+iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 8a65525a7c0d..bda2a900df8e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -37,6 +37,7 @@
#include "iavf_type.h"
#include <linux/avf/virtchnl.h>
#include "iavf_txrx.h"
+#include "iavf_fdir.h"
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "
@@ -300,6 +301,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22)
#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23)
#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
+#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25)
+#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26)
/* OS defined structs */
struct net_device *netdev;
@@ -340,6 +343,8 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_VLAN)
#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+#define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_FDIR_PF)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
@@ -362,6 +367,11 @@ struct iavf_adapter {
/* lock to protect access to the cloud filter list */
spinlock_t cloud_filter_list_lock;
u16 num_cloud_filters;
+
+#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
+ u16 fdir_active_fltr;
+ struct list_head fdir_list_head;
+ spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */
};
@@ -432,6 +442,8 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+void iavf_add_fdir_filter(struct iavf_adapter *adapter);
+void iavf_del_fdir_filter(struct iavf_adapter *adapter);
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
const u8 *macaddr);
#endif /* _IAVF_H_ */
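
The new per-adapter fdir state is a plain list_head guarded by fdir_fltr_lock, taken with the _bh variants because the list is reached both from ethtool (process context) and from the adminq/watchdog path; fdir_active_fltr counts entries against the 128-rule cap. A hedged sketch of a lookup under that lock (hypothetical helper; the driver's own iavf_find_fdir_fltr_by_loc() expects the caller to already hold the lock):

        #include <linux/list.h>
        #include <linux/spinlock.h>

        static bool foo_fdir_loc_in_use(struct iavf_adapter *adapter, u32 loc)
        {
                struct iavf_fdir_fltr *f;
                bool found = false;

                spin_lock_bh(&adapter->fdir_fltr_lock);
                list_for_each_entry(f, &adapter->fdir_list_head, list) {
                        if (f->loc == loc) {
                                found = true;
                                break;
                        }
                }
                spin_unlock_bh(&adapter->fdir_fltr_lock);

                return found;
        }
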
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index c93567f4d0f7..3ebfef737f5c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -828,6 +828,623 @@ static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
}
/**
+ * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool
+ * flow type values
+ * @flow: filter type to be converted
+ *
+ * Returns the corresponding ethtool flow type.
+ */
+static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
+{
+ switch (flow) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ return TCP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ return UDP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ return SCTP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ return AH_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ return ESP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ return IPV4_USER_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ return TCP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ return UDP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ return SCTP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ return AH_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ return ESP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ return IPV6_USER_FLOW;
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ return ETHER_FLOW;
+ default:
+ /* 0 is undefined ethtool flow */
+ return 0;
+ }
+}
+
+/**
+ * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
+ * @eth: Ethtool flow type to be converted
+ *
+ * Returns flow enum
+ */
+static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
+{
+ switch (eth) {
+ case TCP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_TCP;
+ case UDP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_UDP;
+ case SCTP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_SCTP;
+ case AH_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_AH;
+ case ESP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_ESP;
+ case IPV4_USER_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_OTHER;
+ case TCP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_TCP;
+ case UDP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_UDP;
+ case SCTP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_SCTP;
+ case AH_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_AH;
+ case ESP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_ESP;
+ case IPV6_USER_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_OTHER;
+ case ETHER_FLOW:
+ return IAVF_FDIR_FLOW_NON_IP_L2;
+ default:
+ return IAVF_FDIR_FLOW_NONE;
+ }
+}
+
+/**
+ * iavf_is_mask_valid - check mask field set
+ * @mask: full mask to check
+ * @field: field for which mask should be valid
+ *
+ * If the mask is fully set return true. If it is not valid for field return
+ * false.
+ */
+static bool iavf_is_mask_valid(u64 mask, u64 field)
+{
+ return (mask & field) == field;
+}
+
+/**
+ * iavf_parse_rx_flow_user_data - deconstruct user-defined data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: pointer to Flow Director filter for userdef data storage
+ *
+ * Returns 0 on success, negative error value on failure
+ */
+static int
+iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ struct iavf_flex_word *flex;
+ int i, cnt = 0;
+
+ if (!(fsp->flow_type & FLOW_EXT))
+ return 0;
+
+ for (i = 0; i < 2; i++) {
+#define IAVF_USERDEF_FLEX_WORD_M GENMASK(15, 0)
+#define IAVF_USERDEF_FLEX_OFFS_S 16
+#define IAVF_USERDEF_FLEX_OFFS_M GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
+#define IAVF_USERDEF_FLEX_FLTR_M GENMASK(31, 0)
+ u32 value = be32_to_cpu(fsp->h_ext.data[i]);
+ u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
+
+ if (!value || !mask)
+ continue;
+
+ if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
+ return -EINVAL;
+
+ /* 504 is the maximum value for offsets, and offset is measured
+ * from the start of the MAC address.
+ */
+#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
+ flex = &fltr->flex_words[cnt++];
+ flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
+ flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >>
+ IAVF_USERDEF_FLEX_OFFS_S;
+ if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
+ return -EINVAL;
+ }
+
+ fltr->flex_cnt = cnt;
+
+ return 0;
+}
+
+/**
+ * iavf_fill_rx_flow_ext_data - fill the additional data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: pointer to Flow Director filter to get additional data
+ */
+static void
+iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
+ return;
+
+ fsp->flow_type |= FLOW_EXT;
+
+ memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
+ memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
+}
+
+/**
+ * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
+ * @adapter: the VF adapter structure that contains filter list
+ * @cmd: ethtool command data structure to receive the filter data
+ *
+ * Returns 0 as expected for success by ethtool
+ */
+static int
+iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct iavf_fdir_fltr *rule = NULL;
+ int ret = 0;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+
+ rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
+ if (!rule) {
+ ret = -EINVAL;
+ goto release_lock;
+ }
+
+ fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
+
+ memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+ memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
+ fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
+ fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
+ fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
+ fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
+ fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
+ fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
+ fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
+ break;
+ case IPV4_USER_FLOW:
+ fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
+ fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
+ fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
+ fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
+ fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
+ fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
+ fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
+ fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
+ memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
+ fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
+ fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
+ fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
+ memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
+ fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
+ fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
+ fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
+ memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
+ fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
+ fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
+ break;
+ case ETHER_FLOW:
+ fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
+ fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iavf_fill_rx_flow_ext_data(fsp, rule);
+
+ if (rule->action == VIRTCHNL_ACTION_DROP)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
+
+release_lock:
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return ret;
+}
+
+/**
+ * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
+ * @adapter: the VF adapter structure containing the filter list
+ * @cmd: ethtool command data structure
+ * @rule_locs: ethtool array passed in from OS to receive filter IDs
+ *
+ * Returns 0 as expected for success by ethtool
+ */
+static int
+iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct iavf_fdir_fltr *fltr;
+ unsigned int cnt = 0;
+ int val = 0;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ cmd->data = IAVF_MAX_FDIR_FILTERS;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+
+ list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
+ if (cnt == cmd->rule_cnt) {
+ val = -EMSGSIZE;
+ goto release_lock;
+ }
+ rule_locs[cnt] = fltr->loc;
+ cnt++;
+ }
+
+release_lock:
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ if (!val)
+ cmd->rule_cnt = cnt;
+
+ return val;
+}
+
+/**
+ * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: filter structure
+ */
+static int
+iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ u32 flow_type, q_index = 0;
+ enum virtchnl_action act;
+ int err;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ act = VIRTCHNL_ACTION_DROP;
+ } else {
+ q_index = fsp->ring_cookie;
+ if (q_index >= adapter->num_active_queues)
+ return -EINVAL;
+
+ act = VIRTCHNL_ACTION_QUEUE;
+ }
+
+ fltr->action = act;
+ fltr->loc = fsp->location;
+ fltr->q_index = q_index;
+
+ if (fsp->flow_type & FLOW_EXT) {
+ memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
+ sizeof(fltr->ext_data.usr_def));
+ memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
+ sizeof(fltr->ext_mask.usr_def));
+ }
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
+ fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
+ fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
+ fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
+ fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
+ break;
+ case IPV4_USER_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
+ fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
+ fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
+ fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
+ fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
+ fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+ fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
+ fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
+ fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
+ fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
+ fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
+ fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
+ break;
+ case ETHER_FLOW:
+ fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
+ fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
+ break;
+ default:
+ /* not doing un-parsed flow types */
+ return -EINVAL;
+ }
+
+ if (iavf_fdir_is_dup_fltr(adapter, fltr))
+ return -EEXIST;
+
+ err = iavf_parse_rx_flow_user_data(fsp, fltr);
+ if (err)
+ return err;
+
+ return iavf_fill_fdir_add_msg(adapter, fltr);
+}
+
+/**
+ * iavf_add_fdir_ethtool - add Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: command to add Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct iavf_fdir_fltr *fltr;
+ int count = 50;
+ int err;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
+ if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
+ IAVF_MAX_FDIR_FILTERS);
+ return -ENOSPC;
+ }
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
+ dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return -EEXIST;
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
+ &adapter->crit_section)) {
+ if (--count == 0) {
+ kfree(fltr);
+ return -EINVAL;
+ }
+ udelay(1);
+ }
+
+ err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
+ if (err)
+ goto ret;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ iavf_fdir_list_add_fltr(adapter, fltr);
+ adapter->fdir_active_fltr++;
+ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+
+ret:
+ if (err && fltr)
+ kfree(fltr);
+
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return err;
+}
+
+/**
+ * iavf_del_fdir_ethtool - delete Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: command to delete Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct iavf_fdir_fltr *fltr = NULL;
+ int err = 0;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
+ if (fltr) {
+ if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
+ fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ } else {
+ err = -EBUSY;
+ }
+ } else if (adapter->fdir_active_fltr) {
+ err = -EINVAL;
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+
+ return err;
+}
+
+/**
+ * iavf_set_rxnfc - command to set Rx flow rules.
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 for success and negative values for errors
+ */
+static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = iavf_add_fdir_ethtool(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = iavf_del_fdir_ethtool(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
* iavf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -846,6 +1463,19 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = adapter->num_active_queues;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ break;
+ cmd->rule_cnt = adapter->fdir_active_fltr;
+ cmd->data = IAVF_MAX_FDIR_FILTERS;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = iavf_get_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
+ break;
case ETHTOOL_GRXFH:
netdev_info(netdev,
"RSS hash info is not available to vf, use pf.\n");
@@ -1025,6 +1655,7 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.set_coalesce = iavf_set_coalesce,
.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
+ .set_rxnfc = iavf_set_rxnfc,
.get_rxnfc = iavf_get_rxnfc,
.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
.get_rxfh = iavf_get_rxfh,
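
With .set_rxnfc/.get_rxnfc wired up above, VF Flow Director rules are driven through the standard ethtool ntuple interface, e.g. `ethtool -N <vf-netdev> flow-type tcp4 dst-ip 192.168.0.20 dst-port 80 action 2 loc 5`, listed back with `ethtool -n <vf-netdev>`. The 64-bit user-def field carries up to two flex words; as iavf_parse_rx_flow_user_data() unpacks above, each 32-bit half holds a byte offset from the start of the MAC header (at most 504) in bits 31:16 and a 16-bit match word in bits 15:0. A sketch of packing that layout, as a hypothetical userspace helper:

        #include <stdint.h>

        /* one user-def half: offset in bits 31:16, match word in bits 15:0 */
        static uint32_t pack_flex_word(uint16_t offset, uint16_t word)
        {
                /* offset is measured from the first byte of the dst MAC
                 * and must not exceed 504
                 */
                return ((uint32_t)offset << 16) | word;
        }
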
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
new file mode 100644
index 000000000000..3e687189d737
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -0,0 +1,773 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Intel Corporation. */
+
+/* flow director ethtool support for iavf */
+
+#include "iavf.h"
+
+#define GTPU_PORT 2152
+#define NAT_T_ESP_PORT 4500
+#define PFCP_PORT 8805
+
+static const struct in6_addr ipv6_addr_full_mask = {
+ .in6_u = {
+ .u6_addr8 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
+ }
+};
+
+/**
+ * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
+ * @fltr: Flow Director filter data structure
+ */
+static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr)
+{
+ return sizeof(struct ethhdr) +
+ (fltr->ip_ver == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+ sizeof(struct udphdr);
+}
+
+/**
+ * iavf_fill_fdir_gtpu_hdr - fill the GTP-U protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the GTP-U protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
+ struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct virtchnl_proto_hdr *ehdr = NULL; /* Extension Header if it exists */
+ u16 adj_offs, hdr_offs;
+ int i;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(ghdr, GTPU_IP);
+
+ adj_offs = iavf_pkt_udp_no_pay_len(fltr);
+
+ for (i = 0; i < fltr->flex_cnt; i++) {
+#define IAVF_GTPU_HDR_TEID_OFFS0 4
+#define IAVF_GTPU_HDR_TEID_OFFS1 6
+#define IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS 10
+#define IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS 13
+#define IAVF_GTPU_PSC_EXTHDR_TYPE 0x85 /* PDU Session Container Extension Header */
+ if (fltr->flex_words[i].offset < adj_offs)
+ return -EINVAL;
+
+ hdr_offs = fltr->flex_words[i].offset - adj_offs;
+
+ switch (hdr_offs) {
+ case IAVF_GTPU_HDR_TEID_OFFS0:
+ case IAVF_GTPU_HDR_TEID_OFFS1: {
+ __be16 *pay_word = (__be16 *)ghdr->buffer;
+
+ pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word);
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ghdr, GTPU_IP, TEID);
+ }
+ break;
+ case IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS:
+ if ((fltr->flex_words[i].word & 0xff) != IAVF_GTPU_PSC_EXTHDR_TYPE)
+ return -EOPNOTSUPP;
+ if (!ehdr)
+ ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ VIRTCHNL_SET_PROTO_HDR_TYPE(ehdr, GTPU_EH);
+ break;
+ case IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS:
+ if (!ehdr)
+ return -EINVAL;
+ ehdr->buffer[1] = fltr->flex_words[i].word & 0x3F;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ehdr, GTPU_EH, QFI);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_pfcp_hdr - fill the PFCP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the PFCP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ u16 adj_offs, hdr_offs;
+ int i;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
+
+ adj_offs = iavf_pkt_udp_no_pay_len(fltr);
+
+ for (i = 0; i < fltr->flex_cnt; i++) {
+#define IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS 0
+ if (fltr->flex_words[i].offset < adj_offs)
+ return -EINVAL;
+
+ hdr_offs = fltr->flex_words[i].offset - adj_offs;
+
+ switch (hdr_offs) {
+ case IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS:
+ hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_nat_t_esp_hdr - fill the NAT-T-ESP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the NAT-T-ESP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ u16 adj_offs, hdr_offs;
+ u32 spi = 0;
+ int i;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+ adj_offs = iavf_pkt_udp_no_pay_len(fltr);
+
+ for (i = 0; i < fltr->flex_cnt; i++) {
+#define IAVF_NAT_T_ESP_SPI_OFFS0 0
+#define IAVF_NAT_T_ESP_SPI_OFFS1 2
+ if (fltr->flex_words[i].offset < adj_offs)
+ return -EINVAL;
+
+ hdr_offs = fltr->flex_words[i].offset - adj_offs;
+
+ switch (hdr_offs) {
+ case IAVF_NAT_T_ESP_SPI_OFFS0:
+ spi |= fltr->flex_words[i].word << 16;
+ break;
+ case IAVF_NAT_T_ESP_SPI_OFFS1:
+ spi |= fltr->flex_words[i].word;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (!spi)
+ return -EOPNOTSUPP; /* Not support IKE Header Format with SPI 0 */
+
+ *(__be32 *)hdr->buffer = htonl(spi);
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+
+ uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_udp_flex_pay_hdr - fill the UDP payload header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the UDP payload defined protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ int err;
+
+ switch (ntohs(fltr->ip_data.dst_port)) {
+ case GTPU_PORT:
+ err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs);
+ break;
+ case NAT_T_ESP_PORT:
+ err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs);
+ break;
+ case PFCP_PORT:
+ err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * iavf_fill_fdir_ip4_hdr - fill the IPv4 protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the IPv4 protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct iphdr *iph = (struct iphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+ if (fltr->ip_mask.tos == U8_MAX) {
+ iph->tos = fltr->ip_data.tos;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
+ }
+
+ if (fltr->ip_mask.proto == U8_MAX) {
+ iph->protocol = fltr->ip_data.proto;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
+ }
+
+ if (fltr->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) {
+ iph->saddr = fltr->ip_data.v4_addrs.src_ip;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+ }
+
+ if (fltr->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) {
+ iph->daddr = fltr->ip_data.v4_addrs.dst_ip;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+ }
+
+ fltr->ip_ver = 4;
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_ip6_hdr - fill the IPv6 protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the IPv6 protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+ if (fltr->ip_mask.tclass == U8_MAX) {
+ iph->priority = (fltr->ip_data.tclass >> 4) & 0xF;
+ iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
+ }
+
+ if (fltr->ip_mask.proto == U8_MAX) {
+ iph->nexthdr = fltr->ip_data.proto;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
+ }
+
+ if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
+ sizeof(struct in6_addr))) {
+ memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+ }
+
+ if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
+ sizeof(struct in6_addr))) {
+ memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+ }
+
+ fltr->ip_ver = 6;
+
+ return 0;
+}
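
The tclass handling above is the only non-obvious field copy in this helper: in struct ipv6hdr the 8-bit traffic class straddles the 4-bit priority member and the top nibble of flow_lbl[0]. A worked standalone example with an arbitrary value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t tclass = 0xA5;				/* arbitrary TC value */
	uint8_t priority = (tclass >> 4) & 0xF;		/* 0x0A: high nibble */
	uint8_t flow_lbl0 = (tclass << 4) & 0xF0;	/* 0x50: low nibble */

	printf("priority=0x%x flow_lbl[0]=0x%02x\n", priority, flow_lbl0);
	return 0;
}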
+
+/**
+ * iavf_fill_fdir_tcp_hdr - fill the TCP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the TCP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct tcphdr *tcph = (struct tcphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+ if (fltr->ip_mask.src_port == htons(U16_MAX)) {
+ tcph->source = fltr->ip_data.src_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+ }
+
+ if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
+ tcph->dest = fltr->ip_data.dst_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_udp_hdr - fill the UDP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the UDP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct udphdr *udph = (struct udphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+ if (fltr->ip_mask.src_port == htons(U16_MAX)) {
+ udph->source = fltr->ip_data.src_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+ }
+
+ if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
+ udph->dest = fltr->ip_data.dst_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+ }
+
+ if (!fltr->flex_cnt)
+ return 0;
+
+ return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs);
+}
+
+/**
+ * iavf_fill_fdir_sctp_hdr - fill the SCTP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the SCTP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct sctphdr *sctph = (struct sctphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+ if (fltr->ip_mask.src_port == htons(U16_MAX)) {
+ sctph->source = fltr->ip_data.src_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+ }
+
+ if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
+ sctph->dest = fltr->ip_data.dst_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_ah_hdr - fill the AH protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the AH protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
+
+ if (fltr->ip_mask.spi == htonl(U32_MAX)) {
+ ah->spi = fltr->ip_data.spi;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_esp_hdr - fill the ESP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the ESP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+ if (fltr->ip_mask.spi == htonl(U32_MAX)) {
+ esph->spi = fltr->ip_data.spi;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_l4_hdr - fill the L4 protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the L4 protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr;
+ __be32 *l4_4_data;
+
+ if (!fltr->ip_mask.proto) /* IPv4/IPv6 header only */
+ return 0;
+
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ l4_4_data = (__be32 *)hdr->buffer;
+
+ /* L2TPv3 over IP (IP protocol 115) with 'Session ID' */
+ if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(U32_MAX)) {
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
+
+ *l4_4_data = fltr->ip_data.l4_header;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
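
Reaching the L2TPv3 branch requires the ethtool rule to fully mask both the IP protocol (115, L2TPv3 over IP) and the four l4_header bytes carrying the session ID; anything else falls through to -EOPNOTSUPP. A self-contained sketch of that precondition, using tiny stand-in structs rather than the driver's own types (values are arbitrary):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Tiny stand-ins for the fields iavf_fill_fdir_l4_hdr() consults. */
struct ip_spec { uint8_t proto; uint32_t l4_header; };

int main(void)
{
	struct ip_spec data = { .proto = 115, .l4_header = htonl(0x42) };
	struct ip_spec mask = { .proto = 0xff, .l4_header = 0xffffffff };

	if (mask.proto == 0xff && data.proto == 115 &&
	    mask.l4_header == 0xffffffff)
		printf("L2TPV3 SESS_ID filter on 0x%08x\n",
		       (unsigned)ntohl(data.l4_header));
	else
		printf("-EOPNOTSUPP\n");
	return 0;
}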
+
+/**
+ * iavf_fill_fdir_eth_hdr - fill the Ethernet protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the Ethernet protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_eth_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+
+ if (fltr->eth_mask.etype == htons(U16_MAX)) {
+ if (fltr->eth_data.etype == htons(ETH_P_IP) ||
+ fltr->eth_data.etype == htons(ETH_P_IPV6))
+ return -EOPNOTSUPP;
+
+ ehdr->h_proto = fltr->eth_data.etype;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_add_msg - fill the Flow Director filter into virtchnl message
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: Flow Director filter data structure
+ *
+ * Returns 0 if the add Flow Director virtchnl message is filled successfully
+ */
+int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg;
+ struct virtchnl_proto_hdrs *proto_hdrs;
+ int err;
+
+ proto_hdrs = &vc_msg->rule_cfg.proto_hdrs;
+
+ err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* L2 always exists */
+ if (err)
+ return err;
+
+ switch (fltr->flow_type) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ vc_msg->vsi_id = adapter->vsi.id;
+ vc_msg->rule_cfg.action_set.count = 1;
+ vc_msg->rule_cfg.action_set.actions[0].type = fltr->action;
+ vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index;
+
+ return 0;
+}
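
Note the err = fill_a() | fill_b() pattern in the switch above: each helper returns 0 or a negative errno, so the OR is nonzero exactly when either helper fails, although the combined value may no longer be a meaningful errno. A stricter sequential variant would read as follows (a sketch only, not what this patch does):

	err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs);
	if (!err)
		err = iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);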
+
+/**
+ * iavf_fdir_flow_proto_name - get the flow protocol name
+ * @flow_type: Flow Director filter flow type
+ **/
+static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type)
+{
+ switch (flow_type) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ return "TCP";
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ return "UDP";
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ return "SCTP";
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ return "AH";
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ return "ESP";
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ return "Other";
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ return "Ethernet";
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * iavf_print_fdir_fltr - print the Flow Director filter
+ * @adapter: adapter structure
+ * @fltr: Flow Director filter to print
+ **/
+void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type);
+
+ if (!proto)
+ return;
+
+ switch (fltr->flow_type) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n",
+ fltr->loc,
+ &fltr->ip_data.v4_addrs.dst_ip,
+ &fltr->ip_data.v4_addrs.src_ip,
+ proto,
+ ntohs(fltr->ip_data.dst_port),
+ ntohs(fltr->ip_data.src_port));
+ break;
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: SPI %u\n",
+ fltr->loc,
+ &fltr->ip_data.v4_addrs.dst_ip,
+ &fltr->ip_data.v4_addrs.src_ip,
+ proto,
+ ntohl(fltr->ip_data.spi));
+ break;
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 proto: %u L4_bytes: 0x%x\n",
+ fltr->loc,
+ &fltr->ip_data.v4_addrs.dst_ip,
+ &fltr->ip_data.v4_addrs.src_ip,
+ fltr->ip_data.proto,
+ ntohl(fltr->ip_data.l4_header));
+ break;
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n",
+ fltr->loc,
+ &fltr->ip_data.v6_addrs.dst_ip,
+ &fltr->ip_data.v6_addrs.src_ip,
+ proto,
+ ntohs(fltr->ip_data.dst_port),
+ ntohs(fltr->ip_data.src_port));
+ break;
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: SPI %u\n",
+ fltr->loc,
+ &fltr->ip_data.v6_addrs.dst_ip,
+ &fltr->ip_data.v6_addrs.src_ip,
+ proto,
+ ntohl(fltr->ip_data.spi));
+ break;
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 proto: %u L4_bytes: 0x%x\n",
+ fltr->loc,
+ &fltr->ip_data.v6_addrs.dst_ip,
+ &fltr->ip_data.v6_addrs.src_ip,
+ fltr->ip_data.proto,
+ ntohl(fltr->ip_data.l4_header));
+ break;
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u eth_type: 0x%x\n",
+ fltr->loc,
+ ntohs(fltr->eth_data.etype));
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * iavf_fdir_is_dup_fltr - test if filter is already in list
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: Flow Director filter data structure
+ *
+ * Returns true if the filter is found in the list
+ */
+bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ struct iavf_fdir_fltr *tmp;
+ bool ret = false;
+
+ list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
+ if (tmp->flow_type != fltr->flow_type)
+ continue;
+
+ if (!memcmp(&tmp->eth_data, &fltr->eth_data,
+ sizeof(fltr->eth_data)) &&
+ !memcmp(&tmp->ip_data, &fltr->ip_data,
+ sizeof(fltr->ip_data)) &&
+ !memcmp(&tmp->ext_data, &fltr->ext_data,
+ sizeof(fltr->ext_data))) {
+ ret = true;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * iavf_find_fdir_fltr_by_loc - find filter with location
+ * @adapter: pointer to the VF adapter structure
+ * @loc: location to find.
+ *
+ * Returns pointer to Flow Director filter if found or null
+ */
+struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc)
+{
+ struct iavf_fdir_fltr *rule;
+
+ list_for_each_entry(rule, &adapter->fdir_list_head, list)
+ if (rule->loc == loc)
+ return rule;
+
+ return NULL;
+}
+
+/**
+ * iavf_fdir_list_add_fltr - add a new node to the Flow Director filter list
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: filter node to add to structure
+ */
+void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ struct iavf_fdir_fltr *rule, *parent = NULL;
+
+ list_for_each_entry(rule, &adapter->fdir_list_head, list) {
+ if (rule->loc >= fltr->loc)
+ break;
+ parent = rule;
+ }
+
+ if (parent)
+ list_add(&fltr->list, &parent->list);
+ else
+ list_add(&fltr->list, &adapter->fdir_list_head);
+}
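
iavf_fdir_list_add_fltr() keeps the list ordered by ascending loc: the walk stops at the first rule whose location is at or past the new one, and list_add() then links the new node immediately after the last smaller rule (or at the head when none is smaller). A self-contained userspace analogue of the same insertion logic:

#include <stdio.h>

struct rule { unsigned int loc; struct rule *next; };

/* Insert keeping ascending loc order; head is a dummy node. */
static void add_sorted(struct rule *head, struct rule *fltr)
{
	struct rule *parent = head;

	while (parent->next && parent->next->loc < fltr->loc)
		parent = parent->next;
	fltr->next = parent->next;
	parent->next = fltr;
}

int main(void)
{
	struct rule head = { 0, NULL };
	struct rule a = { 10 }, b = { 30 }, c = { 20 };

	add_sorted(&head, &a);
	add_sorted(&head, &b);
	add_sorted(&head, &c);
	for (struct rule *r = head.next; r; r = r->next)
		printf("%u ", r->loc);	/* prints: 10 20 30 */
	printf("\n");
	return 0;
}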
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
new file mode 100644
index 000000000000..2439c970b657
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, Intel Corporation. */
+
+#ifndef _IAVF_FDIR_H_
+#define _IAVF_FDIR_H_
+
+struct iavf_adapter;
+
+/* State of Flow Director filter */
+enum iavf_fdir_fltr_state_t {
+ IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */
+ IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */
+ IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */
+ IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */
+ IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */
+};
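
Read together with the handlers added in iavf_main.c and iavf_virtchnl.c below, these states form a small lifecycle, roughly as follows (my summary as an annotation, not part of the patch):

/*
 * ADD_REQUEST --(virtchnl sent)--> ADD_PENDING --(PF success)--> ACTIVE
 *                                       |                          |
 *                                 (PF failure:               (ethtool del,
 *                                  node freed)                 iavf_down)
 *                                                                  |
 * node freed <--(PF success)-- DEL_PENDING <--(virtchnl sent)-- DEL_REQUEST
 *                                   |
 *                              (PF failure: back to ACTIVE)
 */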
+
+enum iavf_fdir_flow_type {
+ /* NONE - used for undef/error */
+ IAVF_FDIR_FLOW_NONE = 0,
+ IAVF_FDIR_FLOW_IPV4_TCP,
+ IAVF_FDIR_FLOW_IPV4_UDP,
+ IAVF_FDIR_FLOW_IPV4_SCTP,
+ IAVF_FDIR_FLOW_IPV4_AH,
+ IAVF_FDIR_FLOW_IPV4_ESP,
+ IAVF_FDIR_FLOW_IPV4_OTHER,
+ IAVF_FDIR_FLOW_IPV6_TCP,
+ IAVF_FDIR_FLOW_IPV6_UDP,
+ IAVF_FDIR_FLOW_IPV6_SCTP,
+ IAVF_FDIR_FLOW_IPV6_AH,
+ IAVF_FDIR_FLOW_IPV6_ESP,
+ IAVF_FDIR_FLOW_IPV6_OTHER,
+ IAVF_FDIR_FLOW_NON_IP_L2,
+ /* MAX - this must be last and add anything new just above it */
+ IAVF_FDIR_FLOW_PTYPE_MAX,
+};
+
+struct iavf_flex_word {
+ u16 offset;
+ u16 word;
+};
+
+struct iavf_ipv4_addrs {
+ __be32 src_ip;
+ __be32 dst_ip;
+};
+
+struct iavf_ipv6_addrs {
+ struct in6_addr src_ip;
+ struct in6_addr dst_ip;
+};
+
+struct iavf_fdir_eth {
+ __be16 etype;
+};
+
+struct iavf_fdir_ip {
+ union {
+ struct iavf_ipv4_addrs v4_addrs;
+ struct iavf_ipv6_addrs v6_addrs;
+ };
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 l4_header; /* first 4 bytes of the layer 4 header */
+ __be32 spi; /* security parameter index for AH/ESP */
+ union {
+ u8 tos;
+ u8 tclass;
+ };
+ u8 proto;
+};
+
+struct iavf_fdir_extra {
+ u32 usr_def[2];
+};
+
+/* bookkeeping of Flow Director filters */
+struct iavf_fdir_fltr {
+ enum iavf_fdir_fltr_state_t state;
+ struct list_head list;
+
+ enum iavf_fdir_flow_type flow_type;
+
+ struct iavf_fdir_eth eth_data;
+ struct iavf_fdir_eth eth_mask;
+
+ struct iavf_fdir_ip ip_data;
+ struct iavf_fdir_ip ip_mask;
+
+ struct iavf_fdir_extra ext_data;
+ struct iavf_fdir_extra ext_mask;
+
+ enum virtchnl_action action;
+
+ /* flex byte filter data */
+ u8 ip_ver; /* used to adjust the flex offset; 4: IPv4, 6: IPv6 */
+ u8 flex_cnt;
+ struct iavf_flex_word flex_words[2];
+
+ u32 flow_id;
+
+ u32 loc; /* Rule location inside the flow table */
+ u32 q_index;
+
+ struct virtchnl_fdir_add vc_add_msg;
+};
+
+int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc);
+#endif /* _IAVF_FDIR_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index dc5b3c06d1e0..a3268c894d85 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -959,8 +959,9 @@ void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct iavf_vlan_filter *vlf;
- struct iavf_mac_filter *f;
struct iavf_cloud_filter *cf;
+ struct iavf_fdir_fltr *fdir;
+ struct iavf_mac_filter *f;
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
@@ -996,6 +997,13 @@ void iavf_down(struct iavf_adapter *adapter)
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
+ /* remove all Flow Director filters */
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list)
+ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __IAVF_RESETTING) {
/* cancel any current operation */
@@ -1007,6 +1015,7 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
@@ -1629,6 +1638,14 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_add_cloud_filter(adapter);
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
+ iavf_add_fdir_filter(adapter);
+ return IAVF_SUCCESS;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
+ iavf_del_fdir_filter(adapter);
+ return IAVF_SUCCESS;
+ }
return -EAGAIN;
}
@@ -2529,7 +2546,7 @@ validate_bw:
}
/**
- * iavf_validate_channel_config - validate queue mapping info
+ * iavf_validate_ch_config - validate queue mapping info
* @adapter: board private structure
* @mqprio_qopt: queue parameters
*
@@ -3738,10 +3755,12 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->mac_vlan_list_lock);
spin_lock_init(&adapter->cloud_filter_list_lock);
+ spin_lock_init(&adapter->fdir_fltr_lock);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
INIT_LIST_HEAD(&adapter->cloud_filter_list);
+ INIT_LIST_HEAD(&adapter->fdir_list_head);
INIT_WORK(&adapter->reset_task, iavf_reset_task);
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
@@ -3845,6 +3864,7 @@ static void iavf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf, *cftmp;
@@ -3926,6 +3946,13 @@ static void iavf_remove(struct pci_dev *pdev)
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
+ list_del(&fdir->list);
+ kfree(fdir);
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index ffaf2742a2e0..d6cba53a3a21 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2098,7 +2098,7 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
}
/**
- * iavf_create_tx_ctx Build the Tx context descriptor
+ * iavf_create_tx_ctx - Build the Tx context descriptor
* @tx_ring: ring to create the descriptor on
* @cd_type_cmd_tso_mss: Quad Word 1
* @cd_tunneling: Quad Word 0 - bits 0-31
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 647e7fde11b4..3069092468b2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -140,6 +140,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_ADQ |
+ VIRTCHNL_VF_OFFLOAD_FDIR_PF |
VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
@@ -1005,7 +1006,7 @@ iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
}
/**
- * iavf_enable_channel
+ * iavf_enable_channels
* @adapter: adapter structure
*
* Request that the PF enable channels as specified by
@@ -1046,7 +1047,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter)
}
/**
- * iavf_disable_channel
+ * iavf_disable_channels
* @adapter: adapter structure
*
* Request that the PF disable channels that are configured
@@ -1198,6 +1199,101 @@ void iavf_del_cloud_filter(struct iavf_adapter *adapter)
}
/**
+ * iavf_add_fdir_filter - request the PF to add a Flow Director filter
+ * @adapter: the VF adapter structure
+ *
+ * Request that the PF add Flow Director filters as specified
+ * by the user via ethtool.
+ **/
+void iavf_add_fdir_filter(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir;
+ struct virtchnl_fdir_add *f;
+ bool process_fltr = false;
+ int len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = sizeof(struct virtchnl_fdir_add);
+ f = kzalloc(len, GFP_KERNEL);
+ if (!f)
+ return;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
+ process_fltr = true;
+ fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
+ memcpy(f, &fdir->vc_add_msg, len);
+ break;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (!process_fltr) {
+ /* prevent iavf_add_fdir_filter() from being called when there
+ * are no filters to add
+ */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ kfree(f);
+ return;
+ }
+ adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
+ kfree(f);
+}
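
Only one virtchnl command may be outstanding at a time, so callers never invoke this directly; they publish a filter in ADD_REQUEST state and set the aq_required bit so the periodic task sends it later. A hedged sketch of the requesting side, mirroring what the ethtool code added elsewhere in this series is expected to do (exact locking and scheduling details are assumptions):

	/* sketch, assuming caller context in iavf_ethtool.c */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	iavf_fdir_list_add_fltr(adapter, fltr);
	adapter->fdir_active_fltr++;
	fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);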
+
+/**
+ * iavf_del_fdir_filter - request the PF to delete a Flow Director filter
+ * @adapter: the VF adapter structure
+ *
+ * Request that the PF delete Flow Director filters as specified
+ * by the user via ethtool.
+ **/
+void iavf_del_fdir_filter(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir;
+ struct virtchnl_fdir_del f;
+ bool process_fltr = false;
+ int len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = sizeof(struct virtchnl_fdir_del);
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
+ process_fltr = true;
+ memset(&f, 0, len);
+ f.vsi_id = fdir->vc_add_msg.vsi_id;
+ f.flow_id = fdir->flow_id;
+ fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
+ break;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (!process_fltr) {
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ return;
+ }
+
+ adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
+}
+
+/**
* iavf_request_reset
* @adapter: adapter structure
*
@@ -1357,6 +1453,50 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER: {
+ struct iavf_fdir_fltr *fdir, *fdir_tmp;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdir_tmp,
+ &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
+ dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
+ iavf_stat_str(&adapter->hw,
+ v_retval));
+ iavf_print_fdir_fltr(adapter, fdir);
+ if (msglen)
+ dev_err(&adapter->pdev->dev,
+ "%s\n", msg);
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER: {
+ struct iavf_fdir_fltr *fdir;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
+ iavf_stat_str(&adapter->hw,
+ v_retval));
+ iavf_print_fdir_fltr(adapter, fdir);
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),
@@ -1490,6 +1630,58 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER: {
+ struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
+ struct iavf_fdir_fltr *fdir, *fdir_tmp;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdir_tmp,
+ &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
+ if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
+ fdir->loc);
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ fdir->flow_id = add_fltr->flow_id;
+ } else {
+ dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
+ add_fltr->status);
+ iavf_print_fdir_fltr(adapter, fdir);
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ }
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER: {
+ struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
+ struct iavf_fdir_fltr *fdir, *fdir_tmp;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
+ fdir->loc);
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
+ del_fltr->status);
+ iavf_print_fdir_fltr(adapter, fdir);
+ }
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 73da4f71f530..f391691e2c7e 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -26,7 +26,7 @@ ice-y := ice_main.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
+ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 357706444dd5..9bf346133cbd 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -73,7 +73,7 @@
#define ICE_MIN_LAN_TXRX_MSIX 1
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
-#define ICE_FDIR_MSIX 1
+#define ICE_FDIR_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -84,6 +84,8 @@
#define ICE_MAX_LG_RSS_QS 256
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
+/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
+#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
@@ -229,6 +231,7 @@ enum ice_state {
__ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
__ICE_PHY_INIT_COMPLETE,
+ __ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */
__ICE_STATE_NBITS /* must be last */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 3124a3bf519a..1148d768f8ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -275,6 +275,22 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
}
/**
+ * ice_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+{
+ if (ice_ring_uses_build_skb(rx_ring))
+ return ICE_SKB_PAD;
+ else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ return XDP_PACKET_HEADROOM;
+
+ return 0;
+}
+
+/**
* ice_setup_rx_ctx - Configure a receive ring context
* @ring: The Rx ring to configure
*
@@ -413,11 +429,15 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
else
ice_set_ring_build_skb_ena(ring);
+ ring->rx_offset = ice_rx_offset(ring);
+
/* init queue specific tail register */
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
if (ring->xsk_pool) {
+ bool ok;
+
if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
num_bufs, ring->q_index);
@@ -426,8 +446,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
return 0;
}
- err = ice_alloc_rx_bufs_zc(ring, num_bufs);
- if (err)
+ ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
+ if (!ok)
dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
ring->q_index, pf_q);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 3d9475e222cd..1898325e62b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4373,7 +4373,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
}
/**
- * ice_fw_supports_lldp_fltr - check NVM version supports lldp_fltr_ctrl
+ * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
* @hw: pointer to HW struct
*/
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index e42727941ef5..85c9eccfdae8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -834,7 +834,7 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
}
/**
- * ice_get_ieee_dcb_cfg
+ * ice_get_ieee_or_cee_dcb_cfg
* @pi: port information structure
* @dcbx_mode: mode of DCBX (IEEE or CEE)
*
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 2dcfa685b763..4f738425fb44 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -871,68 +871,47 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- char *p = (char *)data;
unsigned int i;
+ u8 *p = data;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- ice_gstrings_vsi_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ICE_VSI_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ ice_gstrings_vsi_stats[i].stat_string);
ice_for_each_alloc_txq(vsi, i) {
- snprintf(p, ETH_GSTRING_LEN,
- "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
}
ice_for_each_alloc_rxq(vsi, i) {
- snprintf(p, ETH_GSTRING_LEN,
- "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
}
if (vsi->type != ICE_VSI_PF)
return;
- for (i = 0; i < ICE_PF_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- ice_gstrings_pf_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ICE_PF_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ ice_gstrings_pf_stats[i].stat_string);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "tx_priority_%u_xon.nic", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "tx_priority_%u_xoff.nic", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
+ ethtool_sprintf(&p, "tx_priority_%u_xoff.nic", i);
}
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "rx_priority_%u_xon.nic", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "rx_priority_%u_xoff.nic", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_priority_%u_xon.nic", i);
+ ethtool_sprintf(&p, "rx_priority_%u_xoff.nic", i);
}
break;
case ETH_SS_TEST:
memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- ice_gstrings_priv_flags[i].name);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
+ ethtool_sprintf(&p, ice_gstrings_priv_flags[i].name);
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 192729546bbf..440964defa4a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1679,6 +1679,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
input->flex_offset = userdata.flex_offset;
}
+ input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
+ input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
+
/* input struct is added to the HW filter list */
ice_fdir_update_list_entry(pf, input, fsp->location);
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 59c0c6a0f8c5..59ef68f072c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -40,6 +40,204 @@ static const u8 ice_fdir_ipv4_pkt[] = {
0x00, 0x00
};
+static const u8 ice_fdir_udp4_gtpu4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
+ 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
+ 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
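
A few fixed bytes in this template are load-bearing; decoding them by array offset (my annotation, not the patch's):

/* [12..13] 0x08 0x00  EtherType IPv4
 * [23]     0x11       IP protocol UDP
 * [34..37] 0x08 0x68  UDP src/dst port 2152 (GTP-U)
 * [42]     0x34       GTPv1 flags: version 1, PT=1, E=1 (ext header present)
 * [43]     0xff       message type G-PDU
 * [53]     0x85       extension header type: PDU session container (carries QFI)
 * [62..]   0x45 0x00  inner IPv4 header
 */

The remaining templates below follow the same shape, varying the inner protocol byte (UDP, TCP, ICMP) or dropping the inner payload entirely.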
+
+static const u8 ice_fdir_tcp4_gtpu4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x58, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
+ 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
+ 0x00, 0x28, 0x00, 0x00, 0x40, 0x00, 0x40, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_icmp4_gtpu4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
+ 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
+ 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_gtpu4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x44, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
+ 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_l2tpv3_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x73,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv6_l2tpv3_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x73, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_esp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00
+};
+
+static const u8 ice_fdir_ipv6_esp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x32, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_ah_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x33,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00
+};
+
+static const u8 ice_fdir_ipv6_ah_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x33, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_nat_t_esp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x94, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv6_nat_t_esp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x11, 0x94, 0x00, 0x00, 0x00, 0x08,
+};
+
+static const u8 ice_fdir_ipv4_pfcp_node_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x2C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x22, 0x65, 0x22, 0x65, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x00, 0x00, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_pfcp_session_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x2C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x22, 0x65, 0x22, 0x65, 0x00, 0x00,
+ 0x00, 0x00, 0x21, 0x00, 0x00, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv6_pfcp_node_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x18, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x65,
+ 0x22, 0x65, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv6_pfcp_session_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x18, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x65,
+ 0x22, 0x65, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_non_ip_l2_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
static const u8 ice_fdir_tcpv6_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
@@ -239,6 +437,111 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt,
},
{
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP,
+ sizeof(ice_fdir_udp4_gtpu4_pkt),
+ ice_fdir_udp4_gtpu4_pkt,
+ sizeof(ice_fdir_udp4_gtpu4_pkt),
+ ice_fdir_udp4_gtpu4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP,
+ sizeof(ice_fdir_tcp4_gtpu4_pkt),
+ ice_fdir_tcp4_gtpu4_pkt,
+ sizeof(ice_fdir_tcp4_gtpu4_pkt),
+ ice_fdir_tcp4_gtpu4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP,
+ sizeof(ice_fdir_icmp4_gtpu4_pkt),
+ ice_fdir_icmp4_gtpu4_pkt,
+ sizeof(ice_fdir_icmp4_gtpu4_pkt),
+ ice_fdir_icmp4_gtpu4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER,
+ sizeof(ice_fdir_ipv4_gtpu4_pkt),
+ ice_fdir_ipv4_gtpu4_pkt,
+ sizeof(ice_fdir_ipv4_gtpu4_pkt),
+ ice_fdir_ipv4_gtpu4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3,
+ sizeof(ice_fdir_ipv4_l2tpv3_pkt), ice_fdir_ipv4_l2tpv3_pkt,
+ sizeof(ice_fdir_ipv4_l2tpv3_pkt), ice_fdir_ipv4_l2tpv3_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3,
+ sizeof(ice_fdir_ipv6_l2tpv3_pkt), ice_fdir_ipv6_l2tpv3_pkt,
+ sizeof(ice_fdir_ipv6_l2tpv3_pkt), ice_fdir_ipv6_l2tpv3_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_ESP,
+ sizeof(ice_fdir_ipv4_esp_pkt), ice_fdir_ipv4_esp_pkt,
+ sizeof(ice_fdir_ipv4_esp_pkt), ice_fdir_ipv4_esp_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_ESP,
+ sizeof(ice_fdir_ipv6_esp_pkt), ice_fdir_ipv6_esp_pkt,
+ sizeof(ice_fdir_ipv6_esp_pkt), ice_fdir_ipv6_esp_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_AH,
+ sizeof(ice_fdir_ipv4_ah_pkt), ice_fdir_ipv4_ah_pkt,
+ sizeof(ice_fdir_ipv4_ah_pkt), ice_fdir_ipv4_ah_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_AH,
+ sizeof(ice_fdir_ipv6_ah_pkt), ice_fdir_ipv6_ah_pkt,
+ sizeof(ice_fdir_ipv6_ah_pkt), ice_fdir_ipv6_ah_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP,
+ sizeof(ice_fdir_ipv4_nat_t_esp_pkt),
+ ice_fdir_ipv4_nat_t_esp_pkt,
+ sizeof(ice_fdir_ipv4_nat_t_esp_pkt),
+ ice_fdir_ipv4_nat_t_esp_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP,
+ sizeof(ice_fdir_ipv6_nat_t_esp_pkt),
+ ice_fdir_ipv6_nat_t_esp_pkt,
+ sizeof(ice_fdir_ipv6_nat_t_esp_pkt),
+ ice_fdir_ipv6_nat_t_esp_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE,
+ sizeof(ice_fdir_ipv4_pfcp_node_pkt),
+ ice_fdir_ipv4_pfcp_node_pkt,
+ sizeof(ice_fdir_ipv4_pfcp_node_pkt),
+ ice_fdir_ipv4_pfcp_node_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION,
+ sizeof(ice_fdir_ipv4_pfcp_session_pkt),
+ ice_fdir_ipv4_pfcp_session_pkt,
+ sizeof(ice_fdir_ipv4_pfcp_session_pkt),
+ ice_fdir_ipv4_pfcp_session_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE,
+ sizeof(ice_fdir_ipv6_pfcp_node_pkt),
+ ice_fdir_ipv6_pfcp_node_pkt,
+ sizeof(ice_fdir_ipv6_pfcp_node_pkt),
+ ice_fdir_ipv6_pfcp_node_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION,
+ sizeof(ice_fdir_ipv6_pfcp_session_pkt),
+ ice_fdir_ipv6_pfcp_session_pkt,
+ sizeof(ice_fdir_ipv6_pfcp_session_pkt),
+ ice_fdir_ipv6_pfcp_session_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NON_IP_L2,
+ sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt,
+ sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt,
+ },
+ {
ICE_FLTR_PTYPE_NONF_IPV6_TCP,
sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt,
sizeof(ice_fdir_tcp6_tun_pkt), ice_fdir_tcp6_tun_pkt,
@@ -374,21 +677,31 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES;
fdir_fltr_ctx.qindex = 0;
+ } else if (input->dest_ctl ==
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER) {
+ fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO;
+ fdir_fltr_ctx.qindex = 0;
} else {
+ if (input->dest_ctl ==
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP)
+ fdir_fltr_ctx.toq = input->q_region;
fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO;
fdir_fltr_ctx.qindex = input->q_index;
}
- fdir_fltr_ctx.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ fdir_fltr_ctx.cnt_ena = input->cnt_ena;
fdir_fltr_ctx.cnt_index = input->cnt_index;
fdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi);
fdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE;
- fdir_fltr_ctx.toq_prio = 3;
+ if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER)
+ fdir_fltr_ctx.toq_prio = 0;
+ else
+ fdir_fltr_ctx.toq_prio = 3;
fdir_fltr_ctx.pcmd = add ? ICE_FXD_FLTR_QW1_PCMD_ADD :
ICE_FXD_FLTR_QW1_PCMD_REMOVE;
fdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET;
fdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;
- fdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
- fdir_fltr_ctx.fdid_prio = 3;
+ fdir_fltr_ctx.comp_report = input->comp_report;
+ fdir_fltr_ctx.fdid_prio = input->fdid_prio;
fdir_fltr_ctx.desc_prof = 1;
fdir_fltr_ctx.desc_prof_prio = 3;
ice_set_fd_desc_val(&fdir_fltr_ctx, fdesc);
@@ -471,6 +784,55 @@ static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr)
}
/**
+ * ice_pkt_insert_u6_qfi - insert a 6-bit QFI value into a memory buffer
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 8 bit value to convert and insert into pkt at offset
+ *
+ * This function inserts the 6-bit GTP-U QFI, preserving the two
+ * high-order bits already present in the packet byte at @offset.
+ */
+static void ice_pkt_insert_u6_qfi(u8 *pkt, int offset, u8 data)
+{
+ u8 ret;
+
+ ret = (data & 0x3F) + (*(pkt + offset) & 0xC0);
+ memcpy(pkt + offset, &ret, sizeof(ret));
+}
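
Worked example of the merge, as a standalone sketch with arbitrary values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t pkt_byte = 0x80;	/* existing byte: top two bits kept */
	uint8_t qfi = 0x2A;		/* 6-bit QFI from the filter */
	uint8_t merged = (qfi & 0x3F) + (pkt_byte & 0xC0);

	printf("0x%02x\n", merged);	/* 0xAA: QFI lands in the low 6 bits */
	return 0;
}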
+
+/**
+ * ice_pkt_insert_u8 - insert a u8 value into a memory buffer.
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 8 bit value to convert and insert into pkt at offset
+ */
+static void ice_pkt_insert_u8(u8 *pkt, int offset, u8 data)
+{
+ memcpy(pkt + offset, &data, sizeof(data));
+}
+
+/**
+ * ice_pkt_insert_u8_tc - insert a u8 value into a memory buffer for IPv6 TC
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 8 bit value to convert and insert into pkt at offset
+ *
+ * This function is designed for inserting the IPv6 Traffic Class (TC)
+ * field, which is not byte-aligned. The value is split into two nibbles,
+ * each nibble is merged with the bits already present in the packet, and
+ * the two resulting bytes are written back one at a time.
+ */
+static void ice_pkt_insert_u8_tc(u8 *pkt, int offset, u8 data)
+{
+ u8 high, low;
+
+ high = (data >> 4) + (*(pkt + offset) & 0xF0);
+ memcpy(pkt + offset, &high, sizeof(high));
+
+ low = (*(pkt + offset + 1) & 0x0F) + ((data & 0x0F) << 4);
+ memcpy(pkt + offset + 1, &low, sizeof(low));
+}
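
Worked example, standalone and with arbitrary values (the packet bytes shown are a default IPv6 first word: version 6, TC 0):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t pkt0 = 0x60, pkt1 = 0x00;	/* bytes straddling the TC field */
	uint8_t tc = 0xA5;
	uint8_t high = (tc >> 4) + (pkt0 & 0xF0);		/* 0x6A */
	uint8_t low = (pkt1 & 0x0F) + ((tc & 0x0F) << 4);	/* 0x50 */

	printf("TC bytes become 0x%02x 0x%02x\n", high, low);
	return 0;
}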
+
+/**
* ice_pkt_insert_u16 - insert a be16 value into a memory buffer
* @pkt: packet buffer
* @offset: offset into buffer
@@ -493,6 +855,16 @@ static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data)
}
/**
+ * ice_pkt_insert_mac_addr - insert a MAC addr into a memory buffer.
+ * @pkt: packet buffer
+ * @addr: MAC address to insert at the beginning of pkt
+ */
+static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr)
+{
+ ether_addr_copy(pkt, addr);
+}
+
+/**
* ice_fdir_get_gen_prgm_pkt - generate a training packet
* @hw: pointer to the hardware structure
* @input: flow director filter data structure
@@ -520,11 +892,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
case IPPROTO_SCTP:
flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
break;
- case IPPROTO_IP:
+ default:
flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
break;
- default:
- return ICE_ERR_PARAM;
}
} else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
switch (input->ip.v6.proto) {
@@ -537,11 +907,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
case IPPROTO_SCTP:
flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
break;
- case IPPROTO_IP:
+ default:
flow = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
break;
- default:
- return ICE_ERR_PARAM;
}
} else {
flow = input->flow_type;
@@ -580,6 +948,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v4.dst_ip);
ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET,
input->ip.v4.dst_port);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
if (frag)
loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
break;
@@ -592,6 +963,11 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v4.dst_ip);
ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,
input->ip.v4.dst_port);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
+ ice_pkt_insert_mac_addr(loc + ETH_ALEN,
+ input->ext_data.src_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
@@ -602,13 +978,87 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v4.dst_ip);
ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET,
input->ip.v4.dst_port);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
input->ip.v4.src_ip);
ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
input->ip.v4.dst_ip);
- ice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
+ ice_pkt_insert_u8(loc, ICE_IPV4_PROTO_OFFSET,
+ input->ip.v4.proto);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_GTPU_TEID_OFFSET,
+ input->gtpu_data.teid);
+ ice_pkt_insert_u6_qfi(loc, ICE_IPV4_GTPU_QFI_OFFSET,
+ input->gtpu_data.qfi);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
+ ice_pkt_insert_u32(loc, ICE_IPV4_L2TPV3_SESS_ID_OFFSET,
+ input->l2tpv3_data.session_id);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
+ ice_pkt_insert_u32(loc, ICE_IPV6_L2TPV3_SESS_ID_OFFSET,
+ input->l2tpv3_data.session_id);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_ESP_SPI_OFFSET,
+ input->ip.v4.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
+ ice_pkt_insert_u32(loc, ICE_IPV6_ESP_SPI_OFFSET,
+ input->ip.v6.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_AH:
+ ice_pkt_insert_u32(loc, ICE_IPV4_AH_SPI_OFFSET,
+ input->ip.v4.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_AH:
+ ice_pkt_insert_u32(loc, ICE_IPV6_AH_SPI_OFFSET,
+ input->ip.v6.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_NAT_T_ESP_SPI_OFFSET,
+ input->ip.v4.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV6_NAT_T_ESP_SPI_OFFSET,
+ input->ip.v6.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
+ case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
+ ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
+ case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
+ ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NON_IP_L2:
+ ice_pkt_insert_u16(loc, ICE_MAC_ETHTYPE_OFFSET,
+ input->ext_data.ether_type);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
@@ -619,6 +1069,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v6.src_port);
ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET,
input->ip.v6.dst_port);
+ ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
+ ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
@@ -629,6 +1082,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v6.src_port);
ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET,
input->ip.v6.dst_port);
+ ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
+ ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
@@ -639,12 +1095,20 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v6.src_port);
ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET,
input->ip.v6.dst_port);
+ ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
+ ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
input->ip.v6.src_ip);
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
input->ip.v6.dst_ip);
+ ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
+ ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
+ ice_pkt_insert_u8(loc, ICE_IPV6_PROTO_OFFSET,
+ input->ip.v6.proto);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
default:
return ICE_ERR_PARAM;
@@ -671,7 +1135,7 @@ bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
}
/**
- * ice_fdir_find_by_idx - find filter with idx
+ * ice_fdir_find_fltr_by_idx - find filter with idx
* @hw: pointer to hardware structure
* @fltr_idx: index to find.
*
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index 1c587766daab..d2d40e18ae8a 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -25,6 +25,25 @@
#define ICE_IPV6_UDP_DST_PORT_OFFSET 56
#define ICE_IPV6_SCTP_SRC_PORT_OFFSET 54
#define ICE_IPV6_SCTP_DST_PORT_OFFSET 56
+#define ICE_MAC_ETHTYPE_OFFSET 12
+#define ICE_IPV4_TOS_OFFSET 15
+#define ICE_IPV4_TTL_OFFSET 22
+#define ICE_IPV6_TC_OFFSET 14
+#define ICE_IPV6_HLIM_OFFSET 21
+#define ICE_IPV6_PROTO_OFFSET 20
+#define ICE_IPV4_GTPU_TEID_OFFSET 46
+#define ICE_IPV4_GTPU_QFI_OFFSET 56
+#define ICE_IPV4_L2TPV3_SESS_ID_OFFSET 34
+#define ICE_IPV6_L2TPV3_SESS_ID_OFFSET 54
+#define ICE_IPV4_ESP_SPI_OFFSET 34
+#define ICE_IPV6_ESP_SPI_OFFSET 54
+#define ICE_IPV4_AH_SPI_OFFSET 38
+#define ICE_IPV6_AH_SPI_OFFSET 58
+#define ICE_IPV4_NAT_T_ESP_SPI_OFFSET 42
+#define ICE_IPV6_NAT_T_ESP_SPI_OFFSET 62
+
+#define ICE_FDIR_MAX_FLTRS 16384
+
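
The offsets above are byte positions into the generated dummy packet, counted from the start of an untagged Ethernet frame. A quick sanity check as a standalone sketch (illustrative only, not part of the patch): the IPv4 TOS byte is the second byte of the IP header and TTL the ninth, so each offset is the 14-byte MAC header plus the in-header position.

#include <assert.h>

#define MAC_HDR_SZ              14      /* untagged Ethernet header */
#define IPV4_TOS_OFFSET         15      /* ICE_IPV4_TOS_OFFSET      */
#define IPV4_TTL_OFFSET         22      /* ICE_IPV4_TTL_OFFSET      */

int main(void)
{
        assert(MAC_HDR_SZ + 1 == IPV4_TOS_OFFSET);      /* TOS: byte 1 */
        assert(MAC_HDR_SZ + 8 == IPV4_TTL_OFFSET);      /* TTL: byte 8 */
        return 0;
}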
/* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF
* requests that the packet not be fragmented. MF indicates that a packet has
* been fragmented.
@@ -34,6 +53,8 @@
enum ice_fltr_prgm_desc_dest {
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP,
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER,
};
enum ice_fltr_prgm_desc_fd_status {
@@ -86,6 +107,7 @@ struct ice_fdir_v4 {
u8 tos;
u8 ip_ver;
u8 proto;
+ u8 ttl;
};
#define ICE_IPV6_ADDR_LEN_AS_U32 4
@@ -99,10 +121,35 @@ struct ice_fdir_v6 {
__be32 sec_parm_idx; /* security parameter index */
u8 tc;
u8 proto;
+ u8 hlim;
+};
+
+struct ice_fdir_udp_gtp {
+ u8 flags;
+ u8 msg_type;
+ __be16 rsrvd_len;
+ __be32 teid;
+ __be16 rsrvd_seq_nbr;
+ u8 rsrvd_n_pdu_nbr;
+ u8 rsrvd_next_ext_type;
+ u8 rsvrd_ext_len;
+ u8 pdu_type:4,
+ spare:4;
+ u8 ppp:1,
+ rqi:1,
+ qfi:6;
+ u32 rsvrd;
+ u8 next_ext;
+};
+
+struct ice_fdir_l2tpv3 {
+ __be32 session_id;
};
struct ice_fdir_extra {
u8 dst_mac[ETH_ALEN]; /* dest MAC address */
+ u8 src_mac[ETH_ALEN]; /* src MAC address */
+ __be16 ether_type; /* for NON_IP_L2 */
u32 usr_def[2]; /* user data */
__be16 vlan_type; /* VLAN ethertype */
__be16 vlan_tag; /* VLAN tag info */
@@ -117,11 +164,19 @@ struct ice_fdir_fltr {
struct ice_fdir_v6 v6;
} ip, mask;
+ struct ice_fdir_udp_gtp gtpu_data;
+ struct ice_fdir_udp_gtp gtpu_mask;
+
+ struct ice_fdir_l2tpv3 l2tpv3_data;
+ struct ice_fdir_l2tpv3 l2tpv3_mask;
+
struct ice_fdir_extra ext_data;
struct ice_fdir_extra ext_mask;
/* flex byte filter data */
__be16 flex_word;
+ /* queue region size (=2^q_region) */
+ u8 q_region;
u16 flex_offset;
u16 flex_fltr;
@@ -129,9 +184,12 @@ struct ice_fdir_fltr {
u16 q_index;
u16 dest_vsi;
u8 dest_ctl;
+ u8 cnt_ena;
u8 fltr_status;
u16 cnt_index;
u32 fltr_id;
+ u8 fdid_prio;
+ u8 comp_report;
};
/* Dummy packet filter definition structure */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 5e1fd30c0a0f..afe77f7a3199 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -2361,18 +2361,82 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
}
/**
- * ice_find_prof_id - find profile ID for a given field vector
+ * ice_prof_has_mask_idx - determine if profile index masking is identical
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @prof: profile to check
+ * @idx: profile index to check
+ * @mask: mask to match
+ */
+static bool
+ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
+ u16 mask)
+{
+ bool expect_no_mask = false;
+ bool found = false;
+ bool match = false;
+ u16 i;
+
+ /* If mask is 0x0000 or 0xffff, then there is no masking */
+ if (mask == 0 || mask == 0xffff)
+ expect_no_mask = true;
+
+ /* Scan the enabled masks on this profile, for the specified idx */
+ for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
+ hw->blk[blk].masks.count; i++)
+ if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
+ if (hw->blk[blk].masks.masks[i].in_use &&
+ hw->blk[blk].masks.masks[i].idx == idx) {
+ found = true;
+ if (hw->blk[blk].masks.masks[i].mask == mask)
+ match = true;
+ break;
+ }
+
+ if (expect_no_mask) {
+ if (found)
+ return false;
+ } else {
+ if (!match)
+ return false;
+ }
+
+ return true;
+}
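
The expect_no_mask test above encodes the key invariant: per-word masks of 0x0000 and 0xffff are trivial (keep nothing or keep everything) and must not occupy one of the scarce GLQF mask entries, while any other value must be present with an exact match. A minimal sketch of that acceptance rule:

#include <stdbool.h>
#include <stdint.h>

/* Trivial masks require that no entry exists for the word; non-trivial
 * masks require an in-use entry carrying exactly the wanted value.
 */
static bool word_mask_ok(uint16_t wanted, bool entry_found,
                         uint16_t entry_mask)
{
        if (wanted == 0 || wanted == 0xffff)
                return !entry_found;
        return entry_found && entry_mask == wanted;
}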
+
+/**
+ * ice_prof_has_mask - determine if profile masking is identical
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @prof: profile to check
+ * @masks: masks to match
+ */
+static bool
+ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
+{
+ u16 i;
+
+ /* es->mask_ena[prof] will have the mask */
+ for (i = 0; i < hw->blk[blk].es.fvw; i++)
+ if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_find_prof_id_with_mask - find profile ID for a given field vector
* @hw: pointer to the hardware structure
* @blk: HW block
* @fv: field vector to search for
+ * @masks: masks for FV
* @prof_id: receives the profile ID
*/
static enum ice_status
-ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
- struct ice_fv_word *fv, u8 *prof_id)
+ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
+ struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
- u16 off;
u8 i;
	/* For FD, we don't want to re-use an existing profile with the same
@@ -2382,11 +2446,15 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
return ICE_ERR_DOES_NOT_EXIST;
for (i = 0; i < (u8)es->count; i++) {
- off = i * es->fvw;
+ u16 off = i * es->fvw;
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
continue;
+ /* check if masks settings are the same for this profile */
+ if (masks && !ice_prof_has_mask(hw, blk, i, masks))
+ continue;
+
*prof_id = i;
return 0;
}
@@ -2438,20 +2506,22 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
* ice_alloc_tcam_ent - allocate hardware TCAM entry
* @hw: pointer to the HW struct
* @blk: the block to allocate the TCAM for
+ * @btm: true to allocate from bottom of table, false to allocate from top
* @tcam_idx: pointer to variable to receive the TCAM entry
*
* This function allocates a new entry in a Profile ID TCAM for a specific
* block.
*/
static enum ice_status
-ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
+ u16 *tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
return ICE_ERR_PARAM;
- return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
+ return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
}
/**
@@ -2537,6 +2607,330 @@ ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
}
/**
+ * ice_write_prof_mask_reg - write profile mask register
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @mask_idx: mask index
+ * @idx: index of the FV which will use the mask
+ * @mask: the 16-bit mask
+ */
+static void
+ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
+ u16 idx, u16 mask)
+{
+ u32 offset;
+ u32 val;
+
+ switch (blk) {
+ case ICE_BLK_RSS:
+ offset = GLQF_HMASK(mask_idx);
+ val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
+ val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
+ break;
+ case ICE_BLK_FD:
+ offset = GLQF_FDMASK(mask_idx);
+ val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
+ val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
+ blk);
+ return;
+ }
+
+ wr32(hw, offset, val);
+ ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
+ blk, idx, offset, val);
+}
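
A worked example of the packing above, as a sketch only; the authoritative shift/mask macros live in ice_hw_autogen.h and are assumed here to place the FV word index in the low bits and the 16-bit mask value in bits 31:16.

#include <stdint.h>

#define FDMASK_MSK_INDEX_S      0
#define FDMASK_MSK_INDEX_M      (0x1Fu << FDMASK_MSK_INDEX_S)
#define FDMASK_MASK_S           16
#define FDMASK_MASK_M           (0xFFFFu << FDMASK_MASK_S)

/* idx 3 with mask 0x00ff packs to 0x00ff0003 under this assumed layout */
static uint32_t pack_fdmask(uint16_t idx, uint16_t mask)
{
        uint32_t val = ((uint32_t)idx << FDMASK_MSK_INDEX_S) &
                       FDMASK_MSK_INDEX_M;

        val |= ((uint32_t)mask << FDMASK_MASK_S) & FDMASK_MASK_M;
        return val;
}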
+
+/**
+ * ice_write_prof_mask_enable_res - write profile mask enable register
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof_id: profile ID
+ * @enable_mask: enable mask
+ */
+static void
+ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
+ u16 prof_id, u32 enable_mask)
+{
+ u32 offset;
+
+ switch (blk) {
+ case ICE_BLK_RSS:
+ offset = GLQF_HMASK_SEL(prof_id);
+ break;
+ case ICE_BLK_FD:
+ offset = GLQF_FDMASK_SEL(prof_id);
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
+ blk);
+ return;
+ }
+
+ wr32(hw, offset, enable_mask);
+ ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
+ blk, prof_id, offset, enable_mask);
+}
+
+/**
+ * ice_init_prof_masks - initialize profile masks
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ */
+static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 per_pf;
+ u16 i;
+
+ mutex_init(&hw->blk[blk].masks.lock);
+
+ per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
+
+ hw->blk[blk].masks.count = per_pf;
+ hw->blk[blk].masks.first = hw->pf_id * per_pf;
+
+ memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
+
+ for (i = hw->blk[blk].masks.first;
+ i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
+ ice_write_prof_mask_reg(hw, blk, i, 0, 0);
+}
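
The arithmetic statically partitions the ICE_PROF_MASK_COUNT (32) global entries evenly across PFs, so no cross-function locking is needed. A worked sketch of the range computation:

#include <stdint.h>

/* e.g. num_funcs = 8 gives 32 / 8 = 4 entries per PF, so PF 2 owns
 * indices 8..11.
 */
static void pf_mask_range(uint16_t pf_id, uint16_t num_funcs,
                          uint16_t *first, uint16_t *count)
{
        *count = 32 / num_funcs;
        *first = pf_id * *count;
}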
+
+/**
+ * ice_init_all_prof_masks - initialize all prof masks
+ * @hw: pointer to the HW struct
+ */
+static void ice_init_all_prof_masks(struct ice_hw *hw)
+{
+ ice_init_prof_masks(hw, ICE_BLK_RSS);
+ ice_init_prof_masks(hw, ICE_BLK_FD);
+}
+
+/**
+ * ice_alloc_prof_mask - allocate profile mask
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @idx: index of FV which will use the mask
+ * @mask: the 16-bit mask
+ * @mask_idx: variable to receive the mask index
+ */
+static enum ice_status
+ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
+ u16 *mask_idx)
+{
+ bool found_unused = false, found_copy = false;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ u16 unused_idx = 0, copy_idx = 0;
+ u16 i;
+
+ if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->blk[blk].masks.lock);
+
+ for (i = hw->blk[blk].masks.first;
+ i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
+ if (hw->blk[blk].masks.masks[i].in_use) {
+ /* if mask is in use and it exactly duplicates the
+			 * desired mask and index, then it can be reused
+ */
+ if (hw->blk[blk].masks.masks[i].mask == mask &&
+ hw->blk[blk].masks.masks[i].idx == idx) {
+ found_copy = true;
+ copy_idx = i;
+ break;
+ }
+ } else {
+ /* save off unused index, but keep searching in case
+ * there is an exact match later on
+ */
+ if (!found_unused) {
+ found_unused = true;
+ unused_idx = i;
+ }
+ }
+
+ if (found_copy)
+ i = copy_idx;
+ else if (found_unused)
+ i = unused_idx;
+ else
+ goto err_ice_alloc_prof_mask;
+
+ /* update mask for a new entry */
+ if (found_unused) {
+ hw->blk[blk].masks.masks[i].in_use = true;
+ hw->blk[blk].masks.masks[i].mask = mask;
+ hw->blk[blk].masks.masks[i].idx = idx;
+ hw->blk[blk].masks.masks[i].ref = 0;
+ ice_write_prof_mask_reg(hw, blk, i, idx, mask);
+ }
+
+ hw->blk[blk].masks.masks[i].ref++;
+ *mask_idx = i;
+ status = 0;
+
+err_ice_alloc_prof_mask:
+ mutex_unlock(&hw->blk[blk].masks.lock);
+
+ return status;
+}
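
Since an exact (idx, mask) duplicate is preferred over an unused slot, profiles that mask the same word identically end up sharing one hardware entry, and the ref count decides when the entry is really cleared. A hypothetical call sequence illustrating the sharing:

/* ice_alloc_prof_mask(hw, ICE_BLK_FD, 5, 0x00ff, &a);  a == N, ref 1
 * ice_alloc_prof_mask(hw, ICE_BLK_FD, 5, 0x00ff, &b);  b == N, ref 2
 * ice_free_prof_mask(hw, ICE_BLK_FD, a);               ref 1, entry kept
 * ice_free_prof_mask(hw, ICE_BLK_FD, b);               ref 0, entry cleared
 */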
+
+/**
+ * ice_free_prof_mask - free profile mask
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @mask_idx: index of mask
+ */
+static enum ice_status
+ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
+{
+ if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
+ return ICE_ERR_PARAM;
+
+ if (!(mask_idx >= hw->blk[blk].masks.first &&
+ mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ mutex_lock(&hw->blk[blk].masks.lock);
+
+ if (!hw->blk[blk].masks.masks[mask_idx].in_use)
+ goto exit_ice_free_prof_mask;
+
+ if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
+ hw->blk[blk].masks.masks[mask_idx].ref--;
+ goto exit_ice_free_prof_mask;
+ }
+
+ /* remove mask */
+ hw->blk[blk].masks.masks[mask_idx].in_use = false;
+ hw->blk[blk].masks.masks[mask_idx].mask = 0;
+ hw->blk[blk].masks.masks[mask_idx].idx = 0;
+
+ /* update mask as unused entry */
+ ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
+ mask_idx);
+ ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
+
+exit_ice_free_prof_mask:
+ mutex_unlock(&hw->blk[blk].masks.lock);
+
+ return 0;
+}
+
+/**
+ * ice_free_prof_masks - free all profile masks for a profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof_id: profile ID
+ */
+static enum ice_status
+ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
+{
+ u32 mask_bm;
+ u16 i;
+
+ if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
+ return ICE_ERR_PARAM;
+
+ mask_bm = hw->blk[blk].es.mask_ena[prof_id];
+ for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
+ if (mask_bm & BIT(i))
+ ice_free_prof_mask(hw, blk, i);
+
+ return 0;
+}
+
+/**
+ * ice_shutdown_prof_masks - clear masks and release the masking lock
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ *
+ * This should be called before unloading the driver
+ */
+static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ mutex_lock(&hw->blk[blk].masks.lock);
+
+ for (i = hw->blk[blk].masks.first;
+ i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
+ ice_write_prof_mask_reg(hw, blk, i, 0, 0);
+
+ hw->blk[blk].masks.masks[i].in_use = false;
+ hw->blk[blk].masks.masks[i].idx = 0;
+ hw->blk[blk].masks.masks[i].mask = 0;
+ }
+
+ mutex_unlock(&hw->blk[blk].masks.lock);
+ mutex_destroy(&hw->blk[blk].masks.lock);
+}
+
+/**
+ * ice_shutdown_all_prof_masks - clear all masks and release their locks
+ * @hw: pointer to the HW struct
+ *
+ * This should be called before unloading the driver
+ */
+static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
+{
+ ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
+ ice_shutdown_prof_masks(hw, ICE_BLK_FD);
+}
+
+/**
+ * ice_update_prof_masking - set registers according to masking
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof_id: profile ID
+ * @masks: masks
+ */
+static enum ice_status
+ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
+ u16 *masks)
+{
+ bool err = false;
+ u32 ena_mask = 0;
+ u16 idx;
+ u16 i;
+
+ /* Only support FD and RSS masking, otherwise nothing to be done */
+ if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
+ return 0;
+
+ for (i = 0; i < hw->blk[blk].es.fvw; i++)
+ if (masks[i] && masks[i] != 0xFFFF) {
+ if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
+ ena_mask |= BIT(idx);
+ } else {
+				/* not enough mask entries */
+ err = true;
+ break;
+ }
+ }
+
+ if (err) {
+		/* free any mask entries we have allocated */
+ for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
+ if (ena_mask & BIT(i))
+ ice_free_prof_mask(hw, blk, i);
+
+ return ICE_ERR_OUT_OF_RANGE;
+ }
+
+ /* enable the masks for this profile */
+ ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
+
+ /* store enabled masks with profile so that they can be freed later */
+ hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
+
+ return 0;
+}
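
Only non-trivial masks consume hardware entries: an all-ones mask is an ordinary full-width match and an all-zeroes mask means the word is ignored entirely, so neither needs a GLQF slot. The selection rule used by the loop above, as a one-line sketch:

#include <stdbool.h>
#include <stdint.h>

/* True when a per-word mask needs one of the shared GLQF entries */
static bool needs_hw_mask(uint16_t mask)
{
        return mask != 0 && mask != 0xFFFF;
}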
+
+/**
* ice_write_es - write an extraction sequence to hardware
* @hw: pointer to the HW struct
* @blk: the block in which to write the extraction sequence
@@ -2575,6 +2969,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
ice_write_es(hw, blk, prof_id, NULL);
+ ice_free_prof_masks(hw, blk, prof_id);
return ice_free_prof_id(hw, blk, prof_id);
}
}
@@ -2937,6 +3332,7 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
}
list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
@@ -2944,6 +3340,7 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), r);
}
mutex_destroy(&hw->rss_locks);
+ ice_shutdown_all_prof_masks(hw);
memset(hw->blk, 0, sizeof(hw->blk));
}
@@ -2997,6 +3394,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
memset(es->written, 0, es->count * sizeof(*es->written));
+ memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
}
}
@@ -3010,6 +3408,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
mutex_init(&hw->rss_locks);
INIT_LIST_HEAD(&hw->rss_list_head);
+ ice_init_all_prof_masks(hw);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
@@ -3112,6 +3511,11 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
sizeof(*es->written), GFP_KERNEL);
if (!es->written)
goto err;
+
+ es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
+ sizeof(*es->mask_ena), GFP_KERNEL);
+ if (!es->mask_ena)
+ goto err;
}
return 0;
@@ -3711,22 +4115,79 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
return 0;
}
+/* The entries here need to match the order of enum ice_ptype_attrib_type */
+static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
+ { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
+ { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
+ { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
+ { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
+};
+
+/**
+ * ice_get_ptype_attrib_info - get PTYPE attribute information
+ * @type: attribute type
+ * @info: pointer to variable to receive the attribute information
+ */
+static void
+ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
+ struct ice_ptype_attrib_info *info)
+{
+ *info = ice_ptype_attributes[type];
+}
+
+/**
+ * ice_add_prof_attrib - add any PTG with attributes to profile
+ * @prof: pointer to the profile to which PTG entries will be added
+ * @ptg: PTG to be added
+ * @ptype: PTYPE that needs to be looked up
+ * @attr: array of attributes that will be considered
+ * @attr_cnt: number of elements in the attribute array
+ */
+static enum ice_status
+ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
+ const struct ice_ptype_attributes *attr, u16 attr_cnt)
+{
+ bool found = false;
+ u16 i;
+
+ for (i = 0; i < attr_cnt; i++)
+ if (attr[i].ptype == ptype) {
+ found = true;
+
+ prof->ptg[prof->ptg_cnt] = ptg;
+ ice_get_ptype_attrib_info(attr[i].attrib,
+ &prof->attr[prof->ptg_cnt]);
+
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ return ICE_ERR_MAX_LIMIT;
+ }
+
+ if (!found)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ return 0;
+}
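
A PTYPE may appear several times in an attribute table (once per GTP-U flavor in the ice_flow.c tables), so a single call can add multiple PTG entries; ICE_ERR_DOES_NOT_EXIST tells the caller to fall back to a plain entry with no attributes. A standalone model of the scan, using a reduced hypothetical struct:

#include <stddef.h>
#include <stdint.h>

struct ptype_attr { uint16_t ptype; int attrib; };      /* reduced model */

/* Count matching entries; zero hits corresponds to the
 * ICE_ERR_DOES_NOT_EXIST fallback above.
 */
static size_t attr_hits(const struct ptype_attr *attr, size_t cnt,
                        uint16_t ptype)
{
        size_t i, hits = 0;

        for (i = 0; i < cnt; i++)
                if (attr[i].ptype == ptype)
                        hits++;
        return hits;
}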
+
/**
* ice_add_prof - add profile
* @hw: pointer to the HW struct
* @blk: hardware block
* @id: profile tracking ID
* @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @attr: array of attributes
+ * @attr_cnt: number of elements in attr array
* @es: extraction sequence (length of array is determined by the block)
+ * @masks: mask for extraction sequence
*
- * This function registers a profile, which matches a set of PTGs with a
+ * This function registers a profile, which matches a set of PTYPEs with a
* particular extraction sequence. While the hardware profile is allocated
* it will not be written until the first call to ice_add_flow that specifies
* the ID value used here.
*/
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- struct ice_fv_word *es)
+ const struct ice_ptype_attributes *attr, u16 attr_cnt,
+ struct ice_fv_word *es, u16 *masks)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@@ -3740,7 +4201,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* search for existing profile */
- status = ice_find_prof_id(hw, blk, es, &prof_id);
+ status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
if (status) {
/* allocate profile ID */
status = ice_alloc_prof_id(hw, blk, &prof_id);
@@ -3758,6 +4219,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
if (status)
goto err_ice_add_prof;
}
+ status = ice_update_prof_masking(hw, blk, prof_id, masks);
+ if (status)
+ goto err_ice_add_prof;
/* and write new es */
ice_write_es(hw, blk, prof_id, es);
@@ -3792,7 +4256,6 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
BITS_PER_BYTE) {
u16 ptype;
u8 ptg;
- u8 m;
ptype = byte * BITS_PER_BYTE + bit;
@@ -3807,15 +4270,25 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
continue;
set_bit(ptg, ptgs_used);
- prof->ptg[prof->ptg_cnt] = ptg;
-
- if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+			/* Check to see if there are any attributes for
+ * this PTYPE, and add them if found.
+ */
+ status = ice_add_prof_attrib(prof, ptg, ptype,
+ attr, attr_cnt);
+ if (status == ICE_ERR_MAX_LIMIT)
break;
+ if (status) {
+ /* This is simple a PTYPE/PTG with no
+				/* This is simply a PTYPE/PTG with no
+ */
+ prof->ptg[prof->ptg_cnt] = ptg;
+ prof->attr[prof->ptg_cnt].flags = 0;
+ prof->attr[prof->ptg_cnt].mask = 0;
- /* nothing left in byte, then exit */
- m = ~(u8)((1 << (bit + 1)) - 1);
- if (!(ptypes[byte] & m))
- break;
+ if (++prof->ptg_cnt >=
+ ICE_MAX_PTG_PER_PROFILE)
+ break;
+ }
}
bytes--;
@@ -4326,7 +4799,12 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
}
/* for re-enabling, reallocate a TCAM */
- status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
+ /* for entries with empty attribute masks, allocate entry from
+ * the bottom of the TCAM table; otherwise, allocate from the
+ * top of the table in order to give it higher priority
+ */
+ status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
+ &tcam->tcam_idx);
if (status)
return status;
@@ -4336,8 +4814,8 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
return ICE_ERR_NO_MEMORY;
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
- tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
- nm_msk);
+ tcam->ptg, vsig, 0, tcam->attr.flags,
+ vl_msk, dc_msk, nm_msk);
if (status)
goto err_ice_prof_tcam_ena_dis;
@@ -4485,7 +4963,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
}
/* allocate the TCAM entry index */
- status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
+ /* for entries with empty attribute masks, allocate entry from
+ * the bottom of the TCAM table; otherwise, allocate from the
+ * top of the table in order to give it higher priority
+ */
+ status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
+ &tcam_idx);
if (status) {
devm_kfree(ice_hw_to_dev(hw), p);
goto err_ice_add_prof_id_vsig;
@@ -4494,6 +4977,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
t->tcam[i].ptg = map->ptg[i];
t->tcam[i].prof_id = map->prof_id;
t->tcam[i].tcam_idx = tcam_idx;
+ t->tcam[i].attr = map->attr[i];
t->tcam[i].in_use = true;
p->type = ICE_TCAM_ADD;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 20deddb807c5..8a58e79729b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -27,7 +27,8 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- struct ice_fv_word *es);
+ const struct ice_ptype_attributes *attr, u16 attr_cnt,
+ struct ice_fv_word *es, u16 *masks);
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 24063c1351b2..abc156ce9d8c 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -190,6 +190,64 @@ enum ice_sect {
ICE_SECT_COUNT
};
+#define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331
+#define ICE_MAC_IPV4_GTPU_IPV4_PAY 332
+#define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 333
+#define ICE_MAC_IPV4_GTPU_IPV4_TCP 334
+#define ICE_MAC_IPV4_GTPU_IPV4_ICMP 335
+#define ICE_MAC_IPV6_GTPU_IPV4_FRAG 336
+#define ICE_MAC_IPV6_GTPU_IPV4_PAY 337
+#define ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY 338
+#define ICE_MAC_IPV6_GTPU_IPV4_TCP 339
+#define ICE_MAC_IPV6_GTPU_IPV4_ICMP 340
+#define ICE_MAC_IPV4_GTPU_IPV6_FRAG 341
+#define ICE_MAC_IPV4_GTPU_IPV6_PAY 342
+#define ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY 343
+#define ICE_MAC_IPV4_GTPU_IPV6_TCP 344
+#define ICE_MAC_IPV4_GTPU_IPV6_ICMPV6 345
+#define ICE_MAC_IPV6_GTPU_IPV6_FRAG 346
+#define ICE_MAC_IPV6_GTPU_IPV6_PAY 347
+#define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348
+#define ICE_MAC_IPV6_GTPU_IPV6_TCP 349
+#define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350
+
+/* Attributes that can modify PTYPE definitions.
+ *
+ * These values represent special attributes for PTYPEs, which resolve into
+ * metadata packet flag definitions that the TCAM can use to identify a
+ * PTYPE with specific characteristics.
+ */
+enum ice_ptype_attrib_type {
+ /* GTP PTYPEs */
+ ICE_PTYPE_ATTR_GTP_PDU_EH,
+ ICE_PTYPE_ATTR_GTP_SESSION,
+ ICE_PTYPE_ATTR_GTP_DOWNLINK,
+ ICE_PTYPE_ATTR_GTP_UPLINK,
+};
+
+struct ice_ptype_attrib_info {
+ u16 flags;
+ u16 mask;
+};
+
+/* TCAM flag definitions */
+#define ICE_GTP_PDU BIT(14)
+#define ICE_GTP_PDU_LINK BIT(13)
+
+/* GTP attributes */
+#define ICE_GTP_PDU_FLAG_MASK (ICE_GTP_PDU)
+#define ICE_GTP_PDU_EH ICE_GTP_PDU
+
+#define ICE_GTP_FLAGS_MASK (ICE_GTP_PDU | ICE_GTP_PDU_LINK)
+#define ICE_GTP_SESSION 0
+#define ICE_GTP_DOWNLINK ICE_GTP_PDU
+#define ICE_GTP_UPLINK (ICE_GTP_PDU | ICE_GTP_PDU_LINK)
+
+struct ice_ptype_attributes {
+ u16 ptype;
+ enum ice_ptype_attrib_type attrib;
+};
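
These flag/mask pairs follow the usual TCAM value/mask rule, matching when (pkt_flags & mask) == flags: under ICE_GTP_FLAGS_MASK, SESSION needs both bits clear, DOWNLINK needs only ICE_GTP_PDU set, and UPLINK needs both bits set, while ICE_GTP_PDU_FLAG_MASK covers only the PDU bit, so PDU_EH matches any PDU session container regardless of direction. The predicate, as a sketch:

#include <stdbool.h>
#include <stdint.h>

/* Standard value/mask compare assumed for the metadata flag TCAM */
static bool attr_matches(uint16_t pkt_flags, uint16_t flags, uint16_t mask)
{
        return (pkt_flags & mask) == flags;
}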
+
/* package labels */
struct ice_label {
__le16 value;
@@ -335,6 +393,7 @@ struct ice_es {
u16 count;
u16 fvw;
u16 *ref_count;
+ u32 *mask_ena;
struct list_head prof_map;
struct ice_fv_word *t;
struct mutex prof_map_lock; /* protect access to profiles list */
@@ -372,12 +431,14 @@ struct ice_prof_map {
u8 prof_id;
u8 ptg_cnt;
u8 ptg[ICE_MAX_PTG_PER_PROFILE];
+ struct ice_ptype_attrib_info attr[ICE_MAX_PTG_PER_PROFILE];
};
#define ICE_INVALID_TCAM 0xFFFF
struct ice_tcam_inf {
u16 tcam_idx;
+ struct ice_ptype_attrib_info attr;
u8 ptg;
u8 prof_id;
u8 in_use;
@@ -478,6 +539,21 @@ struct ice_prof_redir {
u16 count;
};
+struct ice_mask {
+ u16 mask; /* 16-bit mask */
+ u16 idx; /* index */
+ u16 ref; /* reference count */
+ u8 in_use; /* non-zero if used */
+};
+
+struct ice_masks {
+ struct mutex lock; /* lock to protect this structure */
+ u16 first; /* first mask owned by the PF */
+ u16 count; /* number of masks owned by the PF */
+#define ICE_PROF_MASK_COUNT 32
+ struct ice_mask masks[ICE_PROF_MASK_COUNT];
+};
+
/* Tables per block */
struct ice_blk_info {
struct ice_xlt1 xlt1;
@@ -485,6 +561,7 @@ struct ice_blk_info {
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;
+ struct ice_masks masks;
u8 overwrite; /* set to true to allow overwrite of table entries */
u8 is_list_init;
};
@@ -513,6 +590,7 @@ struct ice_chs_chg {
u16 vsig;
u16 orig_vsig;
u16 tcam_idx;
+ struct ice_ptype_attrib_info attr;
};
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index 89a0cef20506..8e8bfc6fa2b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -9,18 +9,50 @@ struct ice_flow_field_info {
enum ice_flow_seg_hdr hdr;
s16 off; /* Offset from start of a protocol header, in bits */
u16 size; /* Size of fields in bits */
+ u16 mask; /* 16-bit mask for field */
};
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
.hdr = _hdr, \
.off = (_offset_bytes) * BITS_PER_BYTE, \
.size = (_size_bytes) * BITS_PER_BYTE, \
+ .mask = 0, \
+}
+
+#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
+ .hdr = _hdr, \
+ .off = (_offset_bytes) * BITS_PER_BYTE, \
+ .size = (_size_bytes) * BITS_PER_BYTE, \
+ .mask = _mask, \
}
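
A worked expansion of the masked variant, using the IPv4 DSCP entry from the table below:

/* ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc) expands to */
struct ice_flow_field_info dscp_fld = {
        .hdr  = ICE_FLOW_SEG_HDR_IPV4,
        .off  = 0,              /* 0 bits into the IPv4 header */
        .size = 8,              /* one byte                    */
        .mask = 0x00fc,         /* the six DSCP bits of TOS    */
};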
/* Table containing properties of supported protocol header fields */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
+ /* Ether */
+ /* ICE_FLOW_FIELD_IDX_ETH_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_ETH_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_S_VLAN */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_C_VLAN */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)),
/* IPv4 / IPv6 */
+ /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc),
+ /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, 1, 0x0ff0),
+ /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0xff00),
+ /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0x00ff),
+ /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0x00ff),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0xff00),
/* ICE_FLOW_FIELD_IDX_IPV4_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
/* ICE_FLOW_FIELD_IDX_IPV4_DA */
@@ -42,21 +74,111 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
+ /* ARP */
+ /* ICE_FLOW_FIELD_IDX_ARP_SIP */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
+ /* ICE_FLOW_FIELD_IDX_ARP_DIP */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)),
+ /* ICE_FLOW_FIELD_IDX_ARP_SHA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_ARP_DHA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_ARP_OP */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)),
+ /* ICMP */
+ /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1),
+ /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1),
/* GRE */
/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
sizeof_field(struct gre_full_hdr, key)),
+ /* GTP */
+ /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12, sizeof(__be32)),
+ /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12, sizeof(__be32)),
+ /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12, sizeof(__be32)),
+ /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16),
+ 0x3f00),
+ /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)),
+ /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)),
+ /* PPPoE */
+ /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)),
+ /* PFCP */
+ /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12, sizeof(__be64)),
+ /* L2TPv3 */
+ /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0, sizeof(__be32)),
+ /* ESP */
+ /* ICE_FLOW_FIELD_IDX_ESP_SPI */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0, sizeof(__be32)),
+ /* AH */
+ /* ICE_FLOW_FIELD_IDX_AH_SPI */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)),
+ /* NAT_T_ESP */
+ /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
*
- * Packet types for packets with an Outer/First/Single IPv4 header
+ * Packet types for packets with an Outer/First/Single MAC header
+ */
+static const u32 ice_ptypes_mac_ofos[] = {
+ 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
+ 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
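
Each of these tables is 32 words of 32 bits, i.e. 1024 bits with one bit per PTYPE, consumed through bitmap_and() against params->ptypes. A standalone membership check under the same ordering (word ptype / 32, bit ptype % 32, as kernel bitmaps lay out on little-endian):

#include <stdbool.h>
#include <stdint.h>

static bool ptype_in_table(const uint32_t tbl[32], uint16_t ptype)
{
        return (tbl[ptype / 32] >> (ptype % 32)) & 1;
}
/* Word 0 of ice_ptypes_mac_ofos is 0xFDC00846, so PTYPE 1 (bit 1) is
 * present while PTYPE 0 (bit 0) is not.
 */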
+
+/* Packet types for packets with an Innermost/Last MAC VLAN header */
+static const u32 ice_ptypes_macvlan_il[] = {
+ 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
+ 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
+ * include IPv4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos[] = {
0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000155, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv4 header, includes
+ * IPv4 other PTYPEs
+ */
+static const u32 ice_ptypes_ipv4_ofos_all[] = {
+ 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000155, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -67,7 +189,7 @@ static const u32 ice_ptypes_ipv4_ofos[] = {
static const u32 ice_ptypes_ipv4_il[] = {
0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
0x0000000E, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -75,12 +197,28 @@ static const u32 ice_ptypes_ipv4_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-/* Packet types for packets with an Outer/First/Single IPv6 header */
+/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
+ * include IPv6 other PTYPEs
+ */
static const u32 ice_ptypes_ipv6_ofos[] = {
0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
+ 0x00000000, 0x03F00000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header, includes
+ * IPv6 other PTYPEs
+ */
+static const u32 ice_ptypes_ipv6_ofos_all[] = {
+ 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
+ 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -91,7 +229,7 @@ static const u32 ice_ptypes_ipv6_ofos[] = {
static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
0x00000770, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -111,6 +249,18 @@ static const u32 ice_ipv4_ofos_no_l4[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outermost/First ARP header */
+static const u32 ice_ptypes_arp_of[] = {
+ 0x00000800, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ipv4_il_no_l4[] = {
0x60000000, 0x18043008, 0x80000002, 0x6010c021,
@@ -153,7 +303,7 @@ static const u32 ice_ipv6_il_no_l4[] = {
static const u32 ice_ptypes_udp_il[] = {
0x81000000, 0x20204040, 0x04000010, 0x80810102,
0x00000040, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00410000, 0x90842000, 0x00000007,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -165,7 +315,7 @@ static const u32 ice_ptypes_udp_il[] = {
static const u32 ice_ptypes_tcp_il[] = {
0x04000000, 0x80810102, 0x10000040, 0x02040408,
0x00000102, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00820000, 0x21084000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -177,6 +327,18 @@ static const u32 ice_ptypes_tcp_il[] = {
static const u32 ice_ptypes_sctp_il[] = {
0x08000000, 0x01020204, 0x20000081, 0x04080810,
0x00000204, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x01040000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First ICMP header */
+static const u32 ice_ptypes_icmp_of[] = {
+ 0x10000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -185,6 +347,18 @@ static const u32 ice_ptypes_sctp_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Innermost/Last ICMP header */
+static const u32 ice_ptypes_icmp_il[] = {
+ 0x00000000, 0x02040408, 0x40000102, 0x08101020,
+ 0x00000408, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x42108000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
@@ -197,6 +371,218 @@ static const u32 ice_ptypes_gre_of[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Innermost/Last MAC header */
+static const u32 ice_ptypes_mac_il[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for GTPC */
+static const u32 ice_ptypes_gtpc[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000180, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for GTPC with TEID */
+static const u32 ice_ptypes_gtpc_tid[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000060, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for GTPU */
+static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
+};
+
+static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+};
+
+static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
+};
+
+static const u32 ice_ptypes_gtpu[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for PPPoE */
+static const u32 ice_ptypes_pppoe[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with PFCP NODE header */
+static const u32 ice_ptypes_pfcp_node[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x80000000, 0x00000002,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with PFCP SESSION header */
+static const u32 ice_ptypes_pfcp_session[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000005,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for L2TPv3 */
+static const u32 ice_ptypes_l2tpv3[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000300,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for ESP */
+static const u32 ice_ptypes_esp[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000003, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for AH */
+static const u32 ice_ptypes_ah[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with NAT_T ESP header */
+static const u32 ice_ptypes_nat_t_esp[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000030, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 ice_ptypes_mac_non_ip_ofos[] = {
+ 0x00000846, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Manage parameters and info used during the creation of a flow profile */
struct ice_flow_prof_params {
enum ice_block blk;
@@ -208,12 +594,30 @@ struct ice_flow_prof_params {
* This will give us the direction flags.
*/
struct ice_fv_word es[ICE_MAX_FV_WORDS];
+	/* optional attributes to attach to matching PTYPEs */
+ const struct ice_ptype_attributes *attr;
+ u16 attr_cnt;
+
+ u16 mask[ICE_MAX_FV_WORDS];
DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};
+#define ICE_FLOW_RSS_HDRS_INNER_MASK \
+ (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
+ ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
+ ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
+ ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
+ ICE_FLOW_SEG_HDR_NAT_T_ESP)
+
+#define ICE_FLOW_SEG_HDRS_L2_MASK \
+ (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
- (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
+ (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
+ ICE_FLOW_SEG_HDR_SCTP)
+/* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */
+#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
/**
@@ -243,8 +647,11 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC 14
+#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
+#define ICE_FLOW_PROT_HDR_SZ_ARP 28
+#define ICE_FLOW_PROT_HDR_SZ_ICMP 8
#define ICE_FLOW_PROT_HDR_SZ_TCP 20
#define ICE_FLOW_PROT_HDR_SZ_UDP 8
#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
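
Worked arithmetic for the common cases ice_flow_calc_seg_sz() assembles from these constants: an untagged IPv4/TCP segment is 14 + 20 + 20 = 54 bytes, a VLAN-tagged one is 16 + 20 + 20 = 56, and an untagged IPv6/UDP segment is 14 + 40 + 8 = 62.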
@@ -256,16 +663,27 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
*/
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
{
- u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;
+ u16 sz;
+
+ /* L2 headers */
+ sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
+ ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3 headers */
if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
+ sz += ICE_FLOW_PROT_HDR_SZ_ARP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
+ /* An L3 header is required if L4 is specified */
+ return 0;
/* L4 headers */
- if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
+ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
+ sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
sz += ICE_FLOW_PROT_HDR_SZ_TCP;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
sz += ICE_FLOW_PROT_HDR_SZ_UDP;
@@ -298,8 +716,39 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
hdrs = prof->segs[i].hdrs;
+ if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
+ src = !i ? (const unsigned long *)ice_ptypes_mac_ofos :
+ (const unsigned long *)ice_ptypes_mac_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
+ src = (const unsigned long *)ice_ptypes_macvlan_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
+ bitmap_and(params->ptypes, params->ptypes,
+ (const unsigned long *)ice_ptypes_arp_of,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
- !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
+ (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
+ src = i ? (const unsigned long *)ice_ptypes_ipv4_il :
+ (const unsigned long *)ice_ptypes_ipv4_ofos_all;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
+ src = i ? (const unsigned long *)ice_ptypes_ipv6_il :
+ (const unsigned long *)ice_ptypes_ipv6_ofos_all;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
(const unsigned long *)ice_ipv4_il_no_l4;
bitmap_and(params->ptypes, params->ptypes, src,
@@ -310,7 +759,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
- !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
(const unsigned long *)ice_ipv6_il_no_l4;
bitmap_and(params->ptypes, params->ptypes, src,
@@ -322,6 +771,20 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
ICE_FLOW_PTYPE_MAX);
}
+ if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
+ src = (const unsigned long *)ice_ptypes_mac_non_ip_ofos;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
+ src = (const unsigned long *)ice_ptypes_pppoe;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else {
+ src = (const unsigned long *)ice_ptypes_pppoe;
+ bitmap_andnot(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
src = (const unsigned long *)ice_ptypes_udp_il;
bitmap_and(params->ptypes, params->ptypes, src,
@@ -334,12 +797,89 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
src = (const unsigned long *)ice_ptypes_sctp_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
+ src = !i ? (const unsigned long *)ice_ptypes_icmp_of :
+ (const unsigned long *)ice_ptypes_icmp_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
if (!i) {
src = (const unsigned long *)ice_ptypes_gre_of;
bitmap_and(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
}
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
+ src = (const unsigned long *)ice_ptypes_gtpc;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
+ src = (const unsigned long *)ice_ptypes_gtpc_tid;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
+ src = (const unsigned long *)ice_ptypes_gtpu;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+
+ /* Attributes for GTP packet with downlink */
+ params->attr = ice_attr_gtpu_down;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
+ src = (const unsigned long *)ice_ptypes_gtpu;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+
+ /* Attributes for GTP packet with uplink */
+ params->attr = ice_attr_gtpu_up;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
+ src = (const unsigned long *)ice_ptypes_gtpu;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+
+ /* Attributes for GTP packet with Extension Header */
+ params->attr = ice_attr_gtpu_eh;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
+ src = (const unsigned long *)ice_ptypes_gtpu;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
+ src = (const unsigned long *)ice_ptypes_l2tpv3;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
+ src = (const unsigned long *)ice_ptypes_esp;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
+ src = (const unsigned long *)ice_ptypes_ah;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
+ src = (const unsigned long *)ice_ptypes_nat_t_esp;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
+ if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
+ src = (const unsigned long *)ice_ptypes_pfcp_node;
+ else
+ src = (const unsigned long *)ice_ptypes_pfcp_session;
+
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else {
+ src = (const unsigned long *)ice_ptypes_pfcp_node;
+ bitmap_andnot(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+
+ src = (const unsigned long *)ice_ptypes_pfcp_session;
+ bitmap_andnot(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
}
}
@@ -352,6 +892,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
* @params: information about the flow to be processed
* @seg: packet segment index of the field to be extracted
* @fld: ID of field to be extracted
+ * @match: bit field of all fields in this segment's match criteria
*
* This function determines the protocol ID, offset, and size of the given
* field. It then allocates one or more extraction sequence entries for the
@@ -359,17 +900,73 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
*/
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
- u8 seg, enum ice_flow_field fld)
+ u8 seg, enum ice_flow_field fld, u64 match)
{
+ enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
u8 fv_words = hw->blk[params->blk].es.fvw;
struct ice_flow_fld_info *flds;
u16 cnt, ese_bits, i;
+ u16 sib_mask = 0;
+ u16 mask;
u16 off;
flds = params->prof->segs[seg].fields;
switch (fld) {
+ case ICE_FLOW_FIELD_IDX_ETH_DA:
+ case ICE_FLOW_FIELD_IDX_ETH_SA:
+ case ICE_FLOW_FIELD_IDX_S_VLAN:
+ case ICE_FLOW_FIELD_IDX_C_VLAN:
+ prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_ETH_TYPE:
+ prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV4_TTL:
+ case ICE_FLOW_FIELD_IDX_IPV4_PROT:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+
+ /* TTL and PROT share the same extraction sequence entry;
+ * each is considered a sibling of the other.
+ */
+ if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
+ sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
+ else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
+ sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
+
+ /* If the sibling field is also included, that field's
+ * mask needs to be included.
+ */
+ if (match & BIT(sib))
+ sib_mask = ice_flds_info[sib].mask;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_TTL:
+ case ICE_FLOW_FIELD_IDX_IPV6_PROT:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+
+ /* TTL and PROT share the same extraction sequence entry;
+ * each is considered a sibling of the other.
+ */
+ if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
+ sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
+ else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
+ sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
+
+ /* If the sibling field is also included, that field's
+ * mask needs to be included.
+ */
+ if (match & BIT(sib))
+ sib_mask = ice_flds_info[sib].mask;
+ break;
case ICE_FLOW_FIELD_IDX_IPV4_SA:
case ICE_FLOW_FIELD_IDX_IPV4_DA:
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
@@ -380,6 +977,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
break;
case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
+ case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
prot_id = ICE_PROT_TCP_IL;
break;
case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
@@ -390,6 +988,49 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
prot_id = ICE_PROT_SCTP_IL;
break;
+ case ICE_FLOW_FIELD_IDX_GTPC_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
+ /* GTP is accessed through UDP OF protocol */
+ prot_id = ICE_PROT_UDP_OF;
+ break;
+ case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
+ prot_id = ICE_PROT_PPPOE;
+ break;
+ case ICE_FLOW_FIELD_IDX_PFCP_SEID:
+ prot_id = ICE_PROT_UDP_IL_OR_S;
+ break;
+ case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
+ prot_id = ICE_PROT_L2TPV3;
+ break;
+ case ICE_FLOW_FIELD_IDX_ESP_SPI:
+ prot_id = ICE_PROT_ESP_F;
+ break;
+ case ICE_FLOW_FIELD_IDX_AH_SPI:
+ prot_id = ICE_PROT_ESP_2;
+ break;
+ case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
+ prot_id = ICE_PROT_UDP_IL_OR_S;
+ break;
+ case ICE_FLOW_FIELD_IDX_ARP_SIP:
+ case ICE_FLOW_FIELD_IDX_ARP_DIP:
+ case ICE_FLOW_FIELD_IDX_ARP_SHA:
+ case ICE_FLOW_FIELD_IDX_ARP_DHA:
+ case ICE_FLOW_FIELD_IDX_ARP_OP:
+ prot_id = ICE_PROT_ARP_OF;
+ break;
+ case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
+ case ICE_FLOW_FIELD_IDX_ICMP_CODE:
+ /* ICMP type and code share the same extraction seq. entry */
+ prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
+ ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
+ sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
+ ICE_FLOW_FIELD_IDX_ICMP_CODE :
+ ICE_FLOW_FIELD_IDX_ICMP_TYPE;
+ break;
case ICE_FLOW_FIELD_IDX_GRE_KEYID:
prot_id = ICE_PROT_GRE_OF;
break;
@@ -407,6 +1048,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
ICE_FLOW_FV_EXTRACT_SZ;
flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
flds[fld].xtrct.idx = params->es_cnt;
+ flds[fld].xtrct.mask = ice_flds_info[fld].mask;
/* Adjust the next field-entry index after accommodating the number of
* entries this field consumes
@@ -416,24 +1058,34 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* Fill in the extraction sequence entries needed for this field */
off = flds[fld].xtrct.off;
+ mask = flds[fld].xtrct.mask;
for (i = 0; i < cnt; i++) {
- u8 idx;
-
- /* Make sure the number of extraction sequence required
- * does not exceed the block's capability
+ /* Only consume an extraction sequence entry if there is no
+ * sibling field associated with this field, or the sibling
+ * entry does not already extract the word shared with this
+ * field.
+ */
- if (params->es_cnt >= fv_words)
- return ICE_ERR_MAX_LIMIT;
+ if (sib == ICE_FLOW_FIELD_IDX_MAX ||
+ flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
+ flds[sib].xtrct.off != off) {
+ u8 idx;
- /* some blocks require a reversed field vector layout */
- if (hw->blk[params->blk].es.reverse)
- idx = fv_words - params->es_cnt - 1;
- else
- idx = params->es_cnt;
+ /* Make sure the number of extraction sequence required
+ * does not exceed the block's capability
+ */
+ if (params->es_cnt >= fv_words)
+ return ICE_ERR_MAX_LIMIT;
- params->es[idx].prot_id = prot_id;
- params->es[idx].off = off;
- params->es_cnt++;
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = prot_id;
+ params->es[idx].off = off;
+ params->mask[idx] = mask | sib_mask;
+ params->es_cnt++;
+ }
off += ICE_FLOW_FV_EXTRACT_SZ;
}
@@ -533,14 +1185,15 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
u8 i;
for (i = 0; i < prof->segs_cnt; i++) {
- u8 j;
+ u64 match = params->prof->segs[i].match;
+ enum ice_flow_field j;
- for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
+ for_each_set_bit(j, (unsigned long *)&match,
ICE_FLOW_FIELD_IDX_MAX) {
- status = ice_flow_xtract_fld(hw, params, i,
- (enum ice_flow_field)j);
+ status = ice_flow_xtract_fld(hw, params, i, j, match);
if (status)
return status;
+ clear_bit(j, (unsigned long *)&match);
}
/* Process raw matching bytes */
@@ -751,7 +1404,8 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
- params->es);
+ params->attr, params->attr_cnt, params->es,
+ params->mask);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1158,6 +1812,9 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
seg->raws_cnt++;
}
+#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
+ (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
+
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
@@ -1165,7 +1822,8 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
- (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
+ (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
+ ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
/**
@@ -1193,7 +1851,8 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
ICE_FLOW_SET_HDRS(segs, flow_hdr);
- if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
+ if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
+ ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return ICE_ERR_PARAM;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 829f90b1e998..eec9def8ffca 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -30,6 +30,80 @@
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_FLOW_HASH_GTP_TEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
+
+#define ICE_FLOW_HASH_GTP_IPV4_TEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
+#define ICE_FLOW_HASH_GTP_IPV6_TEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
+
+#define ICE_FLOW_HASH_GTP_U_TEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))
+
+#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
+#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)
+
+#define ICE_FLOW_HASH_GTP_U_EH_TEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))
+
+#define ICE_FLOW_HASH_GTP_U_EH_QFI \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))
+
+#define ICE_FLOW_HASH_GTP_U_IPV4_EH \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
+ ICE_FLOW_HASH_GTP_U_EH_QFI)
+#define ICE_FLOW_HASH_GTP_U_IPV6_EH \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
+ ICE_FLOW_HASH_GTP_U_EH_QFI)
+
+#define ICE_FLOW_HASH_PPPOE_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))
+
+#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
+ (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
+#define ICE_FLOW_HASH_PPPOE_TCP_ID \
+ (ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
+#define ICE_FLOW_HASH_PPPOE_UDP_ID \
+ (ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
+
+#define ICE_FLOW_HASH_PFCP_SEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID))
+#define ICE_FLOW_HASH_PFCP_IPV4_SEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID)
+#define ICE_FLOW_HASH_PFCP_IPV6_SEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID)
+
+#define ICE_FLOW_HASH_L2TPV3_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
+#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
+
+#define ICE_FLOW_HASH_ESP_SPI \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI))
+#define ICE_FLOW_HASH_ESP_IPV4_SPI \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI)
+#define ICE_FLOW_HASH_ESP_IPV6_SPI \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI)
+
+#define ICE_FLOW_HASH_AH_SPI \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI))
+#define ICE_FLOW_HASH_AH_IPV4_SPI \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI)
+#define ICE_FLOW_HASH_AH_IPV6_SPI \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI)
+
+#define ICE_FLOW_HASH_NAT_T_ESP_SPI \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI))
+#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
+#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
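+/* Usage sketch, assuming the existing ice_add_rss_cfg() helper (call site
+ * illustrative, not part of this patch): hash IPv4 NAT-T ESP flows over
+ * the IP addresses plus the ESP SPI:
+ *
+ *	ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI,
+ *			ICE_FLOW_SEG_HDR_NAT_T_ESP);
+ */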
+
/* Protocol header fields within a packet segment. A segment consists of one or
* more protocol headers that make up a logical group of protocol headers. Each
* logical group of protocol headers encapsulates or is encapsulated using/by
@@ -38,16 +112,66 @@
*/
enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_NONE = 0x00000000,
+ ICE_FLOW_SEG_HDR_ETH = 0x00000001,
+ ICE_FLOW_SEG_HDR_VLAN = 0x00000002,
ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
+ ICE_FLOW_SEG_HDR_ARP = 0x00000010,
+ ICE_FLOW_SEG_HDR_ICMP = 0x00000020,
ICE_FLOW_SEG_HDR_TCP = 0x00000040,
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
ICE_FLOW_SEG_HDR_GRE = 0x00000200,
+ ICE_FLOW_SEG_HDR_GTPC = 0x00000400,
+ ICE_FLOW_SEG_HDR_GTPC_TEID = 0x00000800,
+ ICE_FLOW_SEG_HDR_GTPU_IP = 0x00001000,
+ ICE_FLOW_SEG_HDR_GTPU_EH = 0x00002000,
+ ICE_FLOW_SEG_HDR_GTPU_DWN = 0x00004000,
+ ICE_FLOW_SEG_HDR_GTPU_UP = 0x00008000,
+ ICE_FLOW_SEG_HDR_PPPOE = 0x00010000,
+ ICE_FLOW_SEG_HDR_PFCP_NODE = 0x00020000,
+ ICE_FLOW_SEG_HDR_PFCP_SESSION = 0x00040000,
+ ICE_FLOW_SEG_HDR_L2TPV3 = 0x00080000,
+ ICE_FLOW_SEG_HDR_ESP = 0x00100000,
+ ICE_FLOW_SEG_HDR_AH = 0x00200000,
+ ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000,
+ ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000,
+ /* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
+ * ICE_FLOW_SEG_HDR_IPV6, which includes the IPv4/IPv6 "other" PTYPEs
+ */
+ ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000,
};
+/* These segments all have the same PTYPEs, but are otherwise distinguished by
+ * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
+ *
+ *                            gtp_eh_pdu  gtp_eh_pdu_link
+ * ICE_FLOW_SEG_HDR_GTPU_IP        0             0
+ * ICE_FLOW_SEG_HDR_GTPU_EH        1         don't care
+ * ICE_FLOW_SEG_HDR_GTPU_DWN       1             0
+ * ICE_FLOW_SEG_HDR_GTPU_UP        1             1
+ */
+#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
+ ICE_FLOW_SEG_HDR_GTPU_EH | \
+ ICE_FLOW_SEG_HDR_GTPU_DWN | \
+ ICE_FLOW_SEG_HDR_GTPU_UP)
+#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \
+ ICE_FLOW_SEG_HDR_PFCP_SESSION)
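+/* Illustrative check (not part of this patch): because the four GTP-U
+ * segment types share PTYPEs, the composite mask above is the natural test
+ * for "any GTP-U variant":
+ *
+ *	if (seg->hdrs & ICE_FLOW_SEG_HDR_GTPU)
+ *		; // segment is GTPU_IP, _EH, _DWN, or _UP
+ */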
+
enum ice_flow_field {
+ /* L2 */
+ ICE_FLOW_FIELD_IDX_ETH_DA,
+ ICE_FLOW_FIELD_IDX_ETH_SA,
+ ICE_FLOW_FIELD_IDX_S_VLAN,
+ ICE_FLOW_FIELD_IDX_C_VLAN,
+ ICE_FLOW_FIELD_IDX_ETH_TYPE,
/* L3 */
+ ICE_FLOW_FIELD_IDX_IPV4_DSCP,
+ ICE_FLOW_FIELD_IDX_IPV6_DSCP,
+ ICE_FLOW_FIELD_IDX_IPV4_TTL,
+ ICE_FLOW_FIELD_IDX_IPV4_PROT,
+ ICE_FLOW_FIELD_IDX_IPV6_TTL,
+ ICE_FLOW_FIELD_IDX_IPV6_PROT,
ICE_FLOW_FIELD_IDX_IPV4_SA,
ICE_FLOW_FIELD_IDX_IPV4_DA,
ICE_FLOW_FIELD_IDX_IPV6_SA,
@@ -59,9 +183,42 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+ ICE_FLOW_FIELD_IDX_TCP_FLAGS,
+ /* ARP */
+ ICE_FLOW_FIELD_IDX_ARP_SIP,
+ ICE_FLOW_FIELD_IDX_ARP_DIP,
+ ICE_FLOW_FIELD_IDX_ARP_SHA,
+ ICE_FLOW_FIELD_IDX_ARP_DHA,
+ ICE_FLOW_FIELD_IDX_ARP_OP,
+ /* ICMP */
+ ICE_FLOW_FIELD_IDX_ICMP_TYPE,
+ ICE_FLOW_FIELD_IDX_ICMP_CODE,
/* GRE */
ICE_FLOW_FIELD_IDX_GRE_KEYID,
- /* The total number of enums must not exceed 64 */
+ /* GTPC_TEID */
+ ICE_FLOW_FIELD_IDX_GTPC_TEID,
+ /* GTPU_IP */
+ ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
+ /* GTPU_EH */
+ ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
+ ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
+ /* GTPU_UP */
+ ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
+ /* GTPU_DWN */
+ ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
+ /* PPPoE */
+ ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
+ /* PFCP */
+ ICE_FLOW_FIELD_IDX_PFCP_SEID,
+ /* L2TPv3 */
+ ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
+ /* ESP */
+ ICE_FLOW_FIELD_IDX_ESP_SPI,
+ /* AH */
+ ICE_FLOW_FIELD_IDX_AH_SPI,
+ /* NAT_T ESP */
+ ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
+ /* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
@@ -138,6 +295,7 @@ struct ice_flow_seg_xtrct {
u16 off; /* Starting offset of the field in header in bytes */
u8 idx; /* Index of FV entry used */
u8 disp; /* Displacement of field in bits fr. FV entry's start */
+ u16 mask; /* Mask for field */
};
enum ice_flow_fld_match_type {
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 093a1818a392..67b5b9b9d009 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -306,8 +306,23 @@
#define GLQF_FD_SIZE_FD_BSIZE_S 16
#define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16)
#define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512))
+#define GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4))
+#define GLQF_FDMASK_MAX_INDEX 31
+#define GLQF_FDMASK_MSK_INDEX_S 0
+#define GLQF_FDMASK_MSK_INDEX_M ICE_M(0x1F, 0)
+#define GLQF_FDMASK_MASK_S 16
+#define GLQF_FDMASK_MASK_M ICE_M(0xFFFF, 16)
#define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4))
#define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512))
+#define GLQF_HMASK(_i) (0x0040FC00 + ((_i) * 4))
+#define GLQF_HMASK_MAX_INDEX 31
+#define GLQF_HMASK_MSK_INDEX_S 0
+#define GLQF_HMASK_MSK_INDEX_M ICE_M(0x1F, 0)
+#define GLQF_HMASK_MASK_S 16
+#define GLQF_HMASK_MASK_M ICE_M(0xFFFF, 16)
+#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4))
+#define GLQF_HMASK_SEL_MAX_INDEX 127
+#define GLQF_HMASK_SEL_MASK_SEL_S 0
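+/* Sketch (register usage assumed from the shift/mask names above, not taken
+ * from this patch): composing a Flow Director field mask register value from
+ * a field-vector index and a 16-bit mask:
+ *
+ *	u32 val = ((idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M) |
+ *		  ((msk << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M);
+ *	wr32(hw, GLQF_FDMASK(i), val);
+ */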
#define PFQF_FD_ENA 0x0043A000
#define PFQF_FD_ENA_FD_ENA_M BIT(0)
#define PFQF_FD_SIZE 0x00460100
@@ -369,6 +384,9 @@
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
+#define VSIQF_FD_CNT_FD_BCNT_S 16
+#define VSIQF_FD_CNT_FD_BCNT_M ICE_M(0x3FFF, 16)
+#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4))
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define PFPM_APM 0x000B8080
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 4ec24c3e813f..21329ed3087e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -55,6 +55,7 @@ struct ice_fltr_desc {
#define ICE_FXD_FLTR_QW0_COMP_REPORT_M \
(0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S)
#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW 0x2ULL
#define ICE_FXD_FLTR_QW0_FD_SPACE_S 14
#define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S)
@@ -128,6 +129,7 @@ struct ice_fltr_desc {
#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25
#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S)
#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL
+#define ICE_FXD_FLTR_QW1_FDID_PRI_THREE 0x3ULL
#define ICE_FXD_FLTR_QW1_FDID_MDID_S 28
#define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S)
@@ -138,6 +140,26 @@ struct ice_fltr_desc {
(0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S)
#define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL
+/* definition for FD filter programming status descriptor WB format */
+#define ICE_FXD_FLTR_WB_QW1_DD_S 0
+#define ICE_FXD_FLTR_WB_QW1_DD_M (0x1ULL << ICE_FXD_FLTR_WB_QW1_DD_S)
+#define ICE_FXD_FLTR_WB_QW1_DD_YES 0x1ULL
+
+#define ICE_FXD_FLTR_WB_QW1_PROG_ID_S 1
+#define ICE_FXD_FLTR_WB_QW1_PROG_ID_M \
+ (0x3ULL << ICE_FXD_FLTR_WB_QW1_PROG_ID_S)
+#define ICE_FXD_FLTR_WB_QW1_PROG_ADD 0x0ULL
+#define ICE_FXD_FLTR_WB_QW1_PROG_DEL 0x1ULL
+
+#define ICE_FXD_FLTR_WB_QW1_FAIL_S 4
+#define ICE_FXD_FLTR_WB_QW1_FAIL_M (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_S)
+#define ICE_FXD_FLTR_WB_QW1_FAIL_YES 0x1ULL
+
+#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S 5
+#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M \
+ (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S)
+#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL
+
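+/* Sketch: decoding a written-back programming status quadword, where qw1 is
+ * an assumed local holding the descriptor's QW1 value (not from this patch):
+ *
+ *	bool done = (qw1 & ICE_FXD_FLTR_WB_QW1_DD_M) ==
+ *			ICE_FXD_FLTR_WB_QW1_DD_YES;
+ *	bool fail = ((qw1 & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
+ *		     ICE_FXD_FLTR_WB_QW1_FAIL_S) ==
+ *			ICE_FXD_FLTR_WB_QW1_FAIL_YES;
+ */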
struct ice_rx_ptype_decoded {
u32 ptype:10;
u32 known:1;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 8d4e2ad4328d..c345432fac72 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -343,6 +343,9 @@ static int ice_vsi_clear(struct ice_vsi *vsi)
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
pf->next_vsi = vsi->idx;
+ if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL &&
+ vsi->vf_id != ICE_INVAL_VFID)
+ pf->next_vsi = vsi->idx;
ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
@@ -454,8 +457,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
goto unlock_pf;
}
- if (vsi->type == ICE_VSI_CTRL) {
- /* Use the last VSI slot as the index for the control VSI */
+ if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) {
+ /* Use the last VSI slot as the index for PF control VSI */
vsi->idx = pf->num_alloc_vsi - 1;
pf->ctrl_vsi_idx = vsi->idx;
pf->vsi[vsi->idx] = vsi;
@@ -468,6 +471,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
pf->next_vsi);
}
+
+ if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID)
+ pf->vf[vf_id].ctrl_vsi_idx = vsi->idx;
goto unlock_pf;
err_rings:
@@ -506,7 +512,7 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
if (!b_val)
return -EPERM;
- if (vsi->type != ICE_VSI_PF)
+ if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF))
return -EPERM;
if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
@@ -517,6 +523,13 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
/* each VSI gets same "best_effort" quota */
vsi->num_bfltr = b_val;
+ if (vsi->type == ICE_VSI_VF) {
+ vsi->num_gfltr = 0;
+
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+ }
+
return 0;
}
@@ -856,7 +869,8 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
u8 dflt_q_group, dflt_q_prio;
u16 dflt_q, report_q, val;
- if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL)
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
+ vsi->type != ICE_VSI_VF)
return;
val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
@@ -1179,7 +1193,24 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
- base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx);
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
+ struct ice_vf *vf;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ vf = &pf->vf[i];
+ if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
+ break;
+ }
+ }
+ if (i == pf->num_alloc_vfs)
+ base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
+ ICE_RES_VF_CTRL_VEC_ID);
+ } else {
+ base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
+ vsi->idx);
+ }
if (base < 0) {
dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
@@ -2308,7 +2339,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- if (vsi_type == ICE_VSI_VF)
+ if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
else
vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
@@ -2323,7 +2354,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (vsi->type == ICE_VSI_PF)
vsi->ethtype = ETH_P_PAUSE;
- if (vsi->type == ICE_VSI_VF)
+ if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL)
vsi->vf_id = vf_id;
ice_alloc_fd_res(vsi);
@@ -2770,7 +2801,24 @@ int ice_vsi_release(struct ice_vsi *vsi)
* many interrupts each VF needs. SR-IOV MSIX resources are also
* cleared in the same manner.
*/
- if (vsi->type != ICE_VSI_VF) {
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
+ struct ice_vf *vf;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ vf = &pf->vf[i];
+ if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI)
+ break;
+ }
+ if (i == pf->num_alloc_vfs) {
+ /* No other VFs with a control VSI are left; reclaim SW
+ * interrupts back to the common pool
+ */
+ ice_free_res(pf->irq_tracker, vsi->base_vector,
+ ICE_RES_VF_CTRL_VEC_ID);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ }
+ } else if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 2c23c8f468a5..f318d7f607e4 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1044,7 +1044,7 @@ struct ice_aq_task {
};
/**
- * ice_wait_for_aq_event - Wait for an AdminQ event from firmware
+ * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
* @opcode: the opcode to wait for
* @timeout: how long to wait, in jiffies
@@ -2071,6 +2071,7 @@ static void ice_service_task(struct work_struct *work)
ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
ice_sync_arfs_fltrs(pf);
+ ice_flush_fdir_ctx(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
@@ -2082,6 +2083,7 @@ static void ice_service_task(struct work_struct *work)
test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
+ test_bit(__ICE_FD_VF_FLUSH_CTX, pf->state) ||
test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
@@ -2220,8 +2222,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
- q_vector->name, q_vector);
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
+ err = devm_request_irq(dev, irq_num, vsi->irq_handler,
+ IRQF_SHARED, q_vector->name,
+ q_vector);
+ else
+ err = devm_request_irq(dev, irq_num, vsi->irq_handler,
+ 0, q_vector->name, q_vector);
if (err) {
netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
err);
@@ -4314,7 +4321,7 @@ static void ice_set_wake(struct ice_pf *pf)
}
/**
- * ice_setup_magic_mc_wake - setup device to wake on multicast magic packet
+ * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
* @pf: pointer to the PF struct
*
* Issue firmware command to enable multicast magic wake, making
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 7f4c1ec1eff2..199aa5b71540 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -13,6 +13,9 @@
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
ICE_PROT_MAC_OF_OR_S = 1,
+ ICE_PROT_MAC_IL = 4,
+ ICE_PROT_ETYPE_OL = 9,
+ ICE_PROT_ETYPE_IL = 10,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
ICE_PROT_IPV6_OF_OR_S = 40,
@@ -21,7 +24,14 @@ enum ice_prot_id {
ICE_PROT_UDP_OF = 52,
ICE_PROT_UDP_IL_OR_S = 53,
ICE_PROT_GRE_OF = 64,
+ ICE_PROT_ESP_F = 88,
+ ICE_PROT_ESP_2 = 89,
ICE_PROT_SCTP_IL = 96,
+ ICE_PROT_ICMP_IL = 98,
+ ICE_PROT_ICMPV6_IL = 100,
+ ICE_PROT_PPPOE = 103,
+ ICE_PROT_L2TPV3 = 104,
+ ICE_PROT_ARP_OF = 118,
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2403cb38b93c..f890337cc24a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1857,7 +1857,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
}
/**
- * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry
+ * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
* @pi: port information structure
* @vsi_handle: software VSI handle
*
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index d4bfa7905652..c71f2fbbb262 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -444,22 +444,6 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
}
/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
-{
- if (ice_ring_uses_build_skb(rx_ring))
- return ICE_SKB_PAD;
- else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
- return XDP_PACKET_HEADROOM;
-
- return 0;
-}
-
-/**
* ice_setup_rx_ring - Allocate the Rx descriptors
* @rx_ring: the Rx ring to set up
*
@@ -493,7 +477,6 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
- rx_ring->rx_offset = ice_rx_offset(rx_ring);
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
@@ -1114,6 +1097,11 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
dma_rmb();
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
+ struct ice_vsi *ctrl_vsi = rx_ring->vsi;
+
+ if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
+ ctrl_vsi->vf_id != ICE_INVAL_VFID)
+ ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
continue;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 02b12736ea80..207f6ee3a7f6 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -143,6 +143,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
case ICE_RX_PTYPE_INNER_PROT_UDP:
case ICE_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a6cb0c35748c..2893143d9e62 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -192,6 +192,24 @@ enum ice_fltr_ptype {
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP,
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP,
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER,
+ ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER,
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3,
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3,
+ ICE_FLTR_PTYPE_NONF_IPV4_ESP,
+ ICE_FLTR_PTYPE_NONF_IPV6_ESP,
+ ICE_FLTR_PTYPE_NONF_IPV4_AH,
+ ICE_FLTR_PTYPE_NONF_IPV6_AH,
+ ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP,
+ ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP,
+ ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE,
+ ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION,
+ ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE,
+ ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION,
+ ICE_FLTR_PTYPE_NON_IP_L2,
ICE_FLTR_PTYPE_FRAG_IPV4,
ICE_FLTR_PTYPE_NONF_IPV6_UDP,
ICE_FLTR_PTYPE_NONF_IPV6_TCP,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
new file mode 100644
index 000000000000..1f4ba38b1599
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -0,0 +1,2204 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+#include "ice_flow.h"
+
+#define to_fltr_conf_from_desc(p) \
+ container_of(p, struct virtchnl_fdir_fltr_conf, input)
+
+#define ICE_FLOW_PROF_TYPE_S 0
+#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
+#define ICE_FLOW_PROF_VSI_S 32
+#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
+
+/* Flow profile ID format:
+ * [0:31] - flow type, flow + tun_offs
+ * [32:63] - VSI index
+ */
+#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
+ ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
+ (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
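+/* Worked example: for VSI index 5, an IPv4/TCP flow and a tunnel offset of
+ * ICE_FLTR_PTYPE_MAX (values illustrative), the macro packs flow + tun_offs
+ * into bits [0:31] and the VSI index into bits [32:63]:
+ *
+ *	u64 id = ICE_FLOW_PROF_FD(5, ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+ *				  ICE_FLTR_PTYPE_MAX);
+ */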
+
+#define GTPU_TEID_OFFSET 4
+#define GTPU_EH_QFI_OFFSET 1
+#define GTPU_EH_QFI_MASK 0x3F
+#define PFCP_S_OFFSET 0
+#define PFCP_S_MASK 0x1
+#define PFCP_PORT_NR 8805
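+/* Sketch (byte layout assumed from the offsets above): extracting the 6-bit
+ * QFI from a GTP-U PDU session container, where "eh" is a hypothetical
+ * pointer to the extension header payload:
+ *
+ *	u8 qfi = eh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
+ */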
+
+#define FDIR_INSET_FLAG_ESP_S 0
+#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
+#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
+#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
+
+enum ice_fdir_tunnel_type {
+ ICE_FDIR_TUNNEL_TYPE_NONE = 0,
+ ICE_FDIR_TUNNEL_TYPE_GTPU,
+ ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
+};
+
+struct virtchnl_fdir_fltr_conf {
+ struct ice_fdir_fltr input;
+ enum ice_fdir_tunnel_type ttype;
+ u64 inset_flag;
+ u32 flow_id;
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_GTPU_EH,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_PFCP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_PFCP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+struct virtchnl_fdir_pattern_match_item {
+ enum virtchnl_proto_hdr_type *list;
+ u64 input_set;
+ u64 *meta;
+};
+
+static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
+ {vc_pattern_ipv4, 0, NULL},
+ {vc_pattern_ipv4_tcp, 0, NULL},
+ {vc_pattern_ipv4_udp, 0, NULL},
+ {vc_pattern_ipv4_sctp, 0, NULL},
+ {vc_pattern_ipv6, 0, NULL},
+ {vc_pattern_ipv6_tcp, 0, NULL},
+ {vc_pattern_ipv6_udp, 0, NULL},
+ {vc_pattern_ipv6_sctp, 0, NULL},
+};
+
+static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
+ {vc_pattern_ipv4, 0, NULL},
+ {vc_pattern_ipv4_tcp, 0, NULL},
+ {vc_pattern_ipv4_udp, 0, NULL},
+ {vc_pattern_ipv4_sctp, 0, NULL},
+ {vc_pattern_ipv6, 0, NULL},
+ {vc_pattern_ipv6_tcp, 0, NULL},
+ {vc_pattern_ipv6_udp, 0, NULL},
+ {vc_pattern_ipv6_sctp, 0, NULL},
+ {vc_pattern_ether, 0, NULL},
+ {vc_pattern_ipv4_gtpu, 0, NULL},
+ {vc_pattern_ipv4_gtpu_eh, 0, NULL},
+ {vc_pattern_ipv4_l2tpv3, 0, NULL},
+ {vc_pattern_ipv6_l2tpv3, 0, NULL},
+ {vc_pattern_ipv4_esp, 0, NULL},
+ {vc_pattern_ipv6_esp, 0, NULL},
+ {vc_pattern_ipv4_ah, 0, NULL},
+ {vc_pattern_ipv6_ah, 0, NULL},
+ {vc_pattern_ipv4_nat_t_esp, 0, NULL},
+ {vc_pattern_ipv6_nat_t_esp, 0, NULL},
+ {vc_pattern_ipv4_pfcp, 0, NULL},
+ {vc_pattern_ipv6_pfcp, 0, NULL},
+};
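+/* Sketch: every pattern list above is terminated by VIRTCHNL_PROTO_HDR_NONE;
+ * a hypothetical helper (not part of this patch) to measure one:
+ *
+ *	static int vc_pattern_len(const enum virtchnl_proto_hdr_type *list)
+ *	{
+ *		int n = 0;
+ *
+ *		while (list[n] != VIRTCHNL_PROTO_HDR_NONE)
+ *			n++;
+ *		return n;
+ *	}
+ */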
+
+struct virtchnl_fdir_inset_map {
+ enum virtchnl_proto_hdr_field field;
+ enum ice_flow_field fld;
+ u64 flag;
+ u64 mask;
+};
+
+static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
+ {VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
+ {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
+ FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
+ {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
+ FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
+ {VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
+ {VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
+ {VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
+};
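+/* Sketch: a hypothetical lookup mirroring how ice_vc_fdir_parse_flow_fld()
+ * walks this map (the ESP entries additionally compare inset flags, which
+ * this simplified helper ignores):
+ *
+ *	static enum ice_flow_field
+ *	vc_fld_lookup(enum virtchnl_proto_hdr_field field)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < ARRAY_SIZE(fdir_inset_map); i++)
+ *			if (fdir_inset_map[i].field == field)
+ *				return fdir_inset_map[i].fld;
+ *		return ICE_FLOW_FIELD_IDX_MAX;
+ *	}
+ */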
+
+/**
+ * ice_vc_fdir_param_check
+ * @vf: pointer to the VF structure
+ * @vsi_id: VF-relative VSI ID
+ *
+ * Check that the VSI ID is valid and that the PF and VF are in a valid state
+ *
+ * Return: 0 on success, and -EINVAL on error.
+ */
+static int
+ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EINVAL;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ return -EINVAL;
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
+ return -EINVAL;
+
+ if (vsi_id != vf->lan_vsi_num)
+ return -EINVAL;
+
+ if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
+ return -EINVAL;
+
+ if (!pf->vsi[vf->lan_vsi_idx])
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ice_vf_start_ctrl_vsi
+ * @vf: pointer to the VF structure
+ *
+ * Allocate the control VSI on first use and open its port for the VF
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *ctrl_vsi;
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ return -EEXIST;
+
+ ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
+ if (!ctrl_vsi) {
+ dev_dbg(dev, "Could not setup control VSI for VF %d\n",
+ vf->vf_id);
+ return -ENOMEM;
+ }
+
+ err = ice_vsi_open_ctrl(ctrl_vsi);
+ if (err) {
+ dev_dbg(dev, "Could not open control VSI for VF %d\n",
+ vf->vf_id);
+ goto err_vsi_open;
+ }
+
+ return 0;
+
+err_vsi_open:
+ ice_vsi_release(ctrl_vsi);
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ pf->vsi[vf->ctrl_vsi_idx] = NULL;
+ vf->ctrl_vsi_idx = ICE_NO_VSI;
+ }
+ return err;
+}
+
+/**
+ * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
+ * @vf: pointer to the VF structure
+ * @flow: filter flow type
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+
+ if (!fdir->fdir_prof) {
+ fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
+ ICE_FLTR_PTYPE_MAX,
+ sizeof(*fdir->fdir_prof),
+ GFP_KERNEL);
+ if (!fdir->fdir_prof)
+ return -ENOMEM;
+ }
+
+ if (!fdir->fdir_prof[flow]) {
+ fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
+ sizeof(**fdir->fdir_prof),
+ GFP_KERNEL);
+ if (!fdir->fdir_prof[flow])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_free_prof - free profile for this filter flow type
+ * @vf: pointer to the VF structure
+ * @flow: filter flow type
+ */
+static void
+ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+
+ if (!fdir->fdir_prof)
+ return;
+
+ if (!fdir->fdir_prof[flow])
+ return;
+
+ devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
+ fdir->fdir_prof[flow] = NULL;
+}
+
+/**
+ * ice_vc_fdir_free_prof_all - free all profiles for this VF
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+ enum ice_fltr_ptype flow;
+
+ if (!fdir->fdir_prof)
+ return;
+
+ for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
+ ice_vc_fdir_free_prof(vf, flow);
+
+ devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
+ fdir->fdir_prof = NULL;
+}
+
+/**
+ * ice_vc_fdir_parse_flow_fld
+ * @proto_hdr: virtual channel protocol filter header
+ * @conf: FDIR configuration for each filter
+ * @fld: field type array
+ * @fld_cnt: field counter
+ *
+ * Parse the virtual channel filter header and store the matched fields in
+ * the field type array
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
+ struct virtchnl_fdir_fltr_conf *conf,
+ enum ice_flow_field *fld, int *fld_cnt)
+{
+ struct virtchnl_proto_hdr hdr;
+ u32 i;
+
+ memcpy(&hdr, proto_hdr, sizeof(hdr));
+
+ for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
+ VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
+ if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
+ if (fdir_inset_map[i].mask &&
+ ((fdir_inset_map[i].mask & conf->inset_flag) !=
+ fdir_inset_map[i].flag))
+ continue;
+
+ fld[*fld_cnt] = fdir_inset_map[i].fld;
+ *fld_cnt += 1;
+ if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
+ return -EINVAL;
+ VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
+ fdir_inset_map[i].field);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_set_flow_fld
+ * @vf: pointer to the VF structure
+ * @fltr: virtual channel add cmd buffer
+ * @conf: FDIR configuration for each filter
+ * @seg: array of one or more packet segments that describe the flow
+ *
+ * Parse the field vector from the virtual channel add msg buffer and store
+ * it in the flow's packet segment fields
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ struct virtchnl_fdir_fltr_conf *conf,
+ struct ice_flow_seg_info *seg)
+{
+ struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
+ enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct virtchnl_proto_hdrs *proto;
+ int fld_cnt = 0;
+ int i;
+
+ proto = &rule->proto_hdrs;
+ for (i = 0; i < proto->count; i++) {
+ struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
+ int ret;
+
+ ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
+ if (ret)
+ return ret;
+ }
+
+ if (fld_cnt == 0) {
+ dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < fld_cnt; i++)
+ ice_flow_set_fld(seg, fld[i],
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
+ * @vf: pointer to the VF structure
+ * @conf: FDIR configuration for each filter
+ * @seg: array of one or more packet segments that describe the flow
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ struct ice_flow_seg_info *seg)
+{
+ enum ice_fltr_ptype flow = conf->input.flow_type;
+ enum ice_fdir_tunnel_type ttype = conf->ttype;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+
+ switch (flow) {
+ case ICE_FLTR_PTYPE_NON_IP_L2:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_AH:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
+ if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ } else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ } else {
+ dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
+ flow, vf->vf_id);
+ return -EINVAL;
+ }
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_AH:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ default:
+ dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n",
+ flow, vf->vf_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_rem_prof - remove profile for this filter flow type
+ * @vf: pointer to the VF structure
+ * @flow: filter flow type
+ * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
+ */
+static void
+ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+ struct ice_fd_hw_prof *vf_prof;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vf_vsi;
+ struct device *dev;
+ struct ice_hw *hw;
+ u64 prof_id;
+ int i;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
+ return;
+
+ vf_prof = fdir->fdir_prof[flow];
+
+ vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vf_vsi) {
+ dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
+ return;
+ }
+
+ if (!fdir->prof_entry_cnt[flow][tun])
+ return;
+
+ prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
+ flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
+
+ for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
+ if (vf_prof->entry_h[i][tun]) {
+ u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
+
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_FD,
+ vf_prof->entry_h[i][tun]);
+ vf_prof->entry_h[i][tun] = 0;
+ }
+
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+ devm_kfree(dev, vf_prof->fdir_seg[tun]);
+ vf_prof->fdir_seg[tun] = NULL;
+
+ for (i = 0; i < vf_prof->cnt; i++)
+ vf_prof->vsi_h[i] = 0;
+
+ fdir->prof_entry_cnt[flow][tun] = 0;
+}
+
+/**
+ * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
+{
+ enum ice_fltr_ptype flow;
+
+ for (flow = ICE_FLTR_PTYPE_NONF_NONE;
+ flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ ice_vc_fdir_rem_prof(vf, flow, 0);
+ ice_vc_fdir_rem_prof(vf, flow, 1);
+ }
+}
+
+/**
+ * ice_vc_fdir_write_flow_prof
+ * @vf: pointer to the VF structure
+ * @flow: filter flow type
+ * @seg: array of one or more packet segments that describe the flow
+ * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
+ *
+ * Write the flow's profile config and packet segment into the hardware
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
+ struct ice_flow_seg_info *seg, int tun)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+ struct ice_vsi *vf_vsi, *ctrl_vsi;
+ struct ice_flow_seg_info *old_seg;
+ struct ice_flow_prof *prof = NULL;
+ struct ice_fd_hw_prof *vf_prof;
+ enum ice_status status;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ u64 entry1_h = 0;
+ u64 entry2_h = 0;
+ u64 prof_id;
+ int ret;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vf_vsi)
+ return -EINVAL;
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi)
+ return -EINVAL;
+
+ vf_prof = fdir->fdir_prof[flow];
+ old_seg = vf_prof->fdir_seg[tun];
+ if (old_seg) {
+ if (!memcmp(old_seg, seg, sizeof(*seg))) {
+ dev_dbg(dev, "Duplicated profile for VF %d!\n",
+ vf->vf_id);
+ return -EEXIST;
+ }
+
+ if (fdir->fdir_fltr_cnt[flow][tun]) {
+ ret = -EINVAL;
+ dev_dbg(dev, "Input set conflicts for VF %d\n",
+ vf->vf_id);
+ goto err_exit;
+ }
+
+ /* remove previously allocated profile */
+ ice_vc_fdir_rem_prof(vf, flow, tun);
+ }
+
+ prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
+ tun ? ICE_FLTR_PTYPE_MAX : 0);
+
+ status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ tun + 1, &prof);
+ ret = ice_status_to_errno(status);
+ if (ret) {
+ dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
+ flow, vf->vf_id);
+ goto err_exit;
+ }
+
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
+ ret = ice_status_to_errno(status);
+ if (ret) {
+ dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
+ flow, vf->vf_id);
+ goto err_prof;
+ }
+
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
+ ret = ice_status_to_errno(status);
+ if (ret) {
+ dev_dbg(dev,
+ "Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
+ flow, vf->vf_id);
+ goto err_entry_1;
+ }
+
+ vf_prof->fdir_seg[tun] = seg;
+ vf_prof->cnt = 0;
+ fdir->prof_entry_cnt[flow][tun] = 0;
+
+ vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
+ vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
+ vf_prof->cnt++;
+ fdir->prof_entry_cnt[flow][tun]++;
+
+ vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
+ vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
+ vf_prof->cnt++;
+ fdir->prof_entry_cnt[flow][tun]++;
+
+ return 0;
+
+err_entry_1:
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+ ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
+err_prof:
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+err_exit:
+ return ret;
+}
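+
+/* Sketch of the profile ID scheme used above (as implied by the
+ * ICE_FLOW_PROF_FD() calls): the VF VSI number and flow type are packed
+ * into a single u64, and tunnel profiles are offset by ICE_FLTR_PTYPE_MAX
+ * so each flow type gets distinct non-tunnel and tunnel IDs:
+ *
+ *	non-tunnel: ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow, 0)
+ *	tunnel:     ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow, ICE_FLTR_PTYPE_MAX)
+ *
+ * Two entries are then added per profile: one steering to the VF VSI and
+ * one to the control VSI that receives programming status descriptors.
+ */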
+
+/**
+ * ice_vc_fdir_config_input_set - config FDIR filter input set
+ * @vf: pointer to the VF structure
+ * @fltr: virtual channel add cmd buffer
+ * @conf: FDIR configuration for each filter
+ * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
+ *
+ * Config the input set type and value for virtual channel add msg buffer
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ struct virtchnl_fdir_fltr_conf *conf, int tun)
+{
+ struct ice_fdir_fltr *input = &conf->input;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_flow_seg_info *seg;
+ enum ice_fltr_ptype flow;
+ int ret;
+
+ flow = input->flow_type;
+ ret = ice_vc_fdir_alloc_prof(vf, flow);
+ if (ret) {
+ dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
+ return ret;
+ }
+
+ seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
+ if (!seg)
+ return -ENOMEM;
+
+ ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
+ if (ret) {
+ dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
+ if (ret) {
+ dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
+ if (ret == -EEXIST) {
+ devm_kfree(dev, seg);
+ } else if (ret) {
+ dev_dbg(dev, "Write flow profile for VF %d failed\n",
+ vf->vf_id);
+ goto err_exit;
+ }
+
+ return 0;
+
+err_exit:
+ devm_kfree(dev, seg);
+ return ret;
+}
+
+/**
+ * ice_vc_fdir_match_pattern - match virtual channel filter header pattern
+ * @fltr: virtual channel add cmd buffer
+ * @type: virtual channel protocol filter header type
+ *
+ * Match the filter's header types against the given type list.
+ *
+ * Return: true if the patterns match, false otherwise.
+ */
+static bool
+ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
+ enum virtchnl_proto_hdr_type *type)
+{
+ struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
+ int i = 0;
+
+ while ((i < proto->count) &&
+ (*type == proto->proto_hdr[i].type) &&
+ (*type != VIRTCHNL_PROTO_HDR_NONE)) {
+ type++;
+ i++;
+ }
+
+ return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
+}
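+
+/* Illustration: a rule sent as ETH -> IPV4 -> TCP matches a white list
+ * entry shaped like the (assumed) table entry below, because the walk
+ * stops on the VIRTCHNL_PROTO_HDR_NONE terminator with i == proto->count:
+ *
+ *	static const enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
+ *		VIRTCHNL_PROTO_HDR_ETH, VIRTCHNL_PROTO_HDR_IPV4,
+ *		VIRTCHNL_PROTO_HDR_TCP, VIRTCHNL_PROTO_HDR_NONE,
+ *	};
+ */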
+
+/**
+ * ice_vc_fdir_get_pattern - get white list pattern
+ * @vf: pointer to the VF info
+ * @len: filter list length
+ *
+ * Return: pointer to allowed filter list
+ */
+static const struct virtchnl_fdir_pattern_match_item *
+ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
+{
+ const struct virtchnl_fdir_pattern_match_item *item;
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
+ sizeof(hw->active_pkg_name))) {
+ item = vc_fdir_pattern_comms;
+ *len = ARRAY_SIZE(vc_fdir_pattern_comms);
+ } else {
+ item = vc_fdir_pattern_os;
+ *len = ARRAY_SIZE(vc_fdir_pattern_os);
+ }
+
+ return item;
+}
+
+/**
+ * ice_vc_fdir_search_pattern - search for a matching supported pattern
+ * @vf: pointer to the VF info
+ * @fltr: virtual channel add cmd buffer
+ *
+ * Search the supported pattern list for a pattern matching the filter
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
+{
+ const struct virtchnl_fdir_pattern_match_item *pattern;
+ int len, i;
+
+ pattern = ice_vc_fdir_get_pattern(vf, &len);
+
+ for (i = 0; i < len; i++)
+ if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
+ return 0;
+
+ return -EINVAL;
+}
+
+/**
+ * ice_vc_fdir_parse_pattern - parse FDIR filter pattern
+ * @vf: pointer to the VF info
+ * @fltr: virtual channel add cmd buffer
+ * @conf: FDIR configuration for each filter
+ *
+ * Parse the virtual channel filter's pattern and store it in conf
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
+ enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
+ enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_fdir_fltr *input = &conf->input;
+ int i;
+
+ if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
+ dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
+ proto->count, vf->vf_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < proto->count; i++) {
+ struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
+ struct ip_esp_hdr *esph;
+ struct ip_auth_hdr *ah;
+ struct sctphdr *sctph;
+ struct ipv6hdr *ip6h;
+ struct udphdr *udph;
+ struct tcphdr *tcph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ u8 s_field;
+ u8 *rawh;
+
+ switch (hdr->type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ eth = (struct ethhdr *)hdr->buffer;
+ input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
+
+ if (hdr->field_selector)
+ input->ext_data.ether_type = eth->h_proto;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ iph = (struct iphdr *)hdr->buffer;
+ l3 = VIRTCHNL_PROTO_HDR_IPV4;
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+
+ if (hdr->field_selector) {
+ input->ip.v4.src_ip = iph->saddr;
+ input->ip.v4.dst_ip = iph->daddr;
+ input->ip.v4.tos = iph->tos;
+ input->ip.v4.proto = iph->protocol;
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ip6h = (struct ipv6hdr *)hdr->buffer;
+ l3 = VIRTCHNL_PROTO_HDR_IPV6;
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+
+ if (hdr->field_selector) {
+ memcpy(input->ip.v6.src_ip,
+ ip6h->saddr.in6_u.u6_addr8,
+ sizeof(ip6h->saddr));
+ memcpy(input->ip.v6.dst_ip,
+ ip6h->daddr.in6_u.u6_addr8,
+ sizeof(ip6h->daddr));
+ input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
+ (ip6h->flow_lbl[0] >> 4);
+ input->ip.v6.proto = ip6h->nexthdr;
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ tcph = (struct tcphdr *)hdr->buffer;
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
+ input->ip.v4.src_port = tcph->source;
+ input->ip.v4.dst_port = tcph->dest;
+ } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
+ input->ip.v6.src_port = tcph->source;
+ input->ip.v6.dst_port = tcph->dest;
+ }
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ udph = (struct udphdr *)hdr->buffer;
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
+ input->ip.v4.src_port = udph->source;
+ input->ip.v4.dst_port = udph->dest;
+ } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
+ input->ip.v6.src_port = udph->source;
+ input->ip.v6.dst_port = udph->dest;
+ }
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ sctph = (struct sctphdr *)hdr->buffer;
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
+ input->ip.v4.src_port = sctph->source;
+ input->ip.v4.dst_port = sctph->dest;
+ } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
+ input->ip.v6.src_port = sctph->source;
+ input->ip.v6.dst_port = sctph->dest;
+ }
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
+
+ if (hdr->field_selector)
+ input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
+ break;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ esph = (struct ip_esp_hdr *)hdr->buffer;
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
+ l4 == VIRTCHNL_PROTO_HDR_UDP)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
+ l4 == VIRTCHNL_PROTO_HDR_UDP)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
+ l4 == VIRTCHNL_PROTO_HDR_NONE)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
+ l4 == VIRTCHNL_PROTO_HDR_NONE)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
+
+ if (l4 == VIRTCHNL_PROTO_HDR_UDP)
+ conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
+ else
+ conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->ip.v4.sec_parm_idx = esph->spi;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->ip.v6.sec_parm_idx = esph->spi;
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_AH:
+ ah = (struct ip_auth_hdr *)hdr->buffer;
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->ip.v4.sec_parm_idx = ah->spi;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->ip.v6.sec_parm_idx = ah->spi;
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ rawh = (u8 *)hdr->buffer;
+ s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ rawh = (u8 *)hdr->buffer;
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
+
+ if (hdr->field_selector)
+ input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
+ conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ rawh = (u8 *)hdr->buffer;
+
+ if (hdr->field_selector)
+ input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
+ conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
+ break;
+ default:
+ dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n",
+ hdr->type, vf->vf_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
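+
+/* Example walk: for a proto_hdrs list of ETH -> IPV4 -> UDP, the loop
+ * leaves l3 == VIRTCHNL_PROTO_HDR_IPV4 and sets
+ * input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP, copying addresses and
+ * ports out of the header buffers only when the corresponding
+ * field_selector bits are set.
+ */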
+
+/**
+ * ice_vc_fdir_parse_action - parse FDIR filter action
+ * @vf: pointer to the VF info
+ * @fltr: virtual channel add cmd buffer
+ * @conf: FDIR configuration for each filter
+ *
+ * Parse the virtual channel filter's action and store it in conf
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_fdir_fltr *input = &conf->input;
+ u32 dest_num = 0;
+ u32 mark_num = 0;
+ int i;
+
+ if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
+ dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
+ as->count, vf->vf_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < as->count; i++) {
+ struct virtchnl_filter_action *action = &as->actions[i];
+
+ switch (action->type) {
+ case VIRTCHNL_ACTION_PASSTHRU:
+ dest_num++;
+ input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
+ break;
+ case VIRTCHNL_ACTION_DROP:
+ dest_num++;
+ input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+ break;
+ case VIRTCHNL_ACTION_QUEUE:
+ dest_num++;
+ input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ input->q_index = action->act_conf.queue.index;
+ break;
+ case VIRTCHNL_ACTION_Q_REGION:
+ dest_num++;
+ input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
+ input->q_index = action->act_conf.queue.index;
+ input->q_region = action->act_conf.queue.region;
+ break;
+ case VIRTCHNL_ACTION_MARK:
+ mark_num++;
+ input->fltr_id = action->act_conf.mark_id;
+ input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
+ break;
+ default:
+ dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
+ action->type, vf->vf_id);
+ return -EINVAL;
+ }
+ }
+
+ if (dest_num == 0 || dest_num >= 2) {
+ dev_dbg(dev, "Invalid destination action for VF %d\n",
+ vf->vf_id);
+ return -EINVAL;
+ }
+
+ if (mark_num >= 2) {
+ dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
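+
+/* Example (hypothetical values): a valid action set pairs exactly one
+ * destination action with at most one mark action, e.g.
+ *
+ *	as->count = 2;
+ *	as->actions[0].type = VIRTCHNL_ACTION_QUEUE;
+ *	as->actions[0].act_conf.queue.index = 3;
+ *	as->actions[1].type = VIRTCHNL_ACTION_MARK;
+ *	as->actions[1].act_conf.mark_id = 0x1234;
+ */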
+
+/**
+ * ice_vc_validate_fdir_fltr - validate the virtual channel filter
+ * @vf: pointer to the VF info
+ * @fltr: virtual channel add cmd buffer
+ * @conf: FDIR configuration for each filter
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ int ret;
+
+ ret = ice_vc_fdir_search_pattern(vf, fltr);
+ if (ret)
+ return ret;
+
+ ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
+ if (ret)
+ return ret;
+
+ return ice_vc_fdir_parse_action(vf, fltr, conf);
+}
+
+/**
+ * ice_vc_fdir_comp_rules - check if two filter rules have the same value
+ * @conf_a: FDIR configuration for filter a
+ * @conf_b: FDIR configuration for filter b
+ *
+ * Return: true if the two rules carry the same value, false otherwise.
+ */
+static bool
+ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
+ struct virtchnl_fdir_fltr_conf *conf_b)
+{
+ struct ice_fdir_fltr *a = &conf_a->input;
+ struct ice_fdir_fltr *b = &conf_b->input;
+
+ if (conf_a->ttype != conf_b->ttype)
+ return false;
+ if (a->flow_type != b->flow_type)
+ return false;
+ if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
+ return false;
+ if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
+ return false;
+ if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
+ return false;
+ if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
+ return false;
+ if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
+ return false;
+ if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
+ return false;
+ if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
+ return false;
+ if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_fdir_is_dup_fltr - check for duplicate FDIR filter
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ *
+ * Check if there is an existing rule with the same conf value
+ *
+ * Return: true if a duplicate exists, false otherwise.
+ */
+static bool
+ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
+{
+ struct ice_fdir_fltr *desc;
+ bool ret;
+
+ list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
+ struct virtchnl_fdir_fltr_conf *node =
+ to_fltr_conf_from_desc(desc);
+
+ ret = ice_vc_fdir_comp_rules(node, conf);
+ if (ret)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_vc_fdir_insert_entry - insert FDIR conf entry into list
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @id: pointer to ID value allocated by driver
+ *
+ * Insert FDIR conf entry into list and allocate ID for this filter
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_insert_entry(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf, u32 *id)
+{
+ struct ice_fdir_fltr *input = &conf->input;
+ int i;
+
+ /* alloc ID corresponding with conf */
+ i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
+ ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
+ if (i < 0)
+ return -EINVAL;
+ *id = i;
+
+ list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @id: filter rule's ID
+ */
+static void
+ice_vc_fdir_remove_entry(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf, u32 id)
+{
+ struct ice_fdir_fltr *input = &conf->input;
+
+ idr_remove(&vf->fdir.fdir_rule_idr, id);
+ list_del(&input->fltr_node);
+}
+
+/**
+ * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
+ * @vf: pointer to the VF info
+ * @id: filter rule's ID
+ *
+ * Return: pointer to the filter conf entry on success, NULL otherwise.
+ */
+static struct virtchnl_fdir_fltr_conf *
+ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
+{
+ return idr_find(&vf->fdir.fdir_rule_idr, id);
+}
+
+/**
+ * ice_vc_fdir_flush_entry - remove all FDIR conf entries
+ * @vf: pointer to the VF info
+ */
+static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
+{
+ struct virtchnl_fdir_fltr_conf *conf;
+ struct ice_fdir_fltr *desc, *temp;
+
+ list_for_each_entry_safe(desc, temp,
+ &vf->fdir.fdir_rule_list, fltr_node) {
+ conf = to_fltr_conf_from_desc(desc);
+ list_del(&desc->fltr_node);
+ devm_kfree(ice_pf_to_dev(vf->pf), conf);
+ }
+}
+
+/**
+ * ice_vc_fdir_write_fltr - write filter rule into hardware
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @add: true implies add rule, false implies del rule
+ * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ bool add, bool is_tun)
+{
+ struct ice_fdir_fltr *input = &conf->input;
+ struct ice_vsi *vsi, *ctrl_vsi;
+ struct ice_fltr_desc desc;
+ enum ice_status status;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int ret;
+ u8 *pkt;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
+ return -EINVAL;
+ }
+
+ input->dest_vsi = vsi->idx;
+ input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi) {
+ dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
+ return -EINVAL;
+ }
+
+ pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ ice_fdir_get_prgm_desc(hw, input, &desc, add);
+ status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ ret = ice_status_to_errno(status);
+ if (ret) {
+ dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
+ vf->vf_id, input->flow_type);
+ goto err_free_pkt;
+ }
+
+ ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
+ if (ret)
+ goto err_free_pkt;
+
+ return 0;
+
+err_free_pkt:
+ devm_kfree(dev, pkt);
+ return ret;
+}
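+
+/* The programming flow above, in short:
+ *
+ *	ice_fdir_get_prgm_desc(hw, input, &desc, add);	// build descriptor
+ *	ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); // dummy pkt
+ *	ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);	// post on ctrl VSI
+ *
+ * Completion is reported asynchronously on the control VSI Rx queue and
+ * picked up by ice_vc_fdir_irq_handler().
+ */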
+
+/**
+ * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
+ * @t: pointer to timer_list
+ */
+static void ice_vf_fdir_timer(struct timer_list *t)
+{
+ struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
+ struct ice_vf_fdir_ctx *ctx_done;
+ struct ice_vf_fdir *fdir;
+ unsigned long flags;
+ struct ice_vf *vf;
+ struct ice_pf *pf;
+
+ fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
+ vf = container_of(fdir, struct ice_vf, fdir);
+ ctx_done = &fdir->ctx_done;
+ pf = vf->pf;
+ spin_lock_irqsave(&fdir->ctx_lock, flags);
+ if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
+ spin_unlock_irqrestore(&fdir->ctx_lock, flags);
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
+
+ ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
+ ctx_done->conf = ctx_irq->conf;
+ ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
+ ctx_done->v_opcode = ctx_irq->v_opcode;
+ spin_unlock_irqrestore(&fdir->ctx_lock, flags);
+
+ set_bit(__ICE_FD_VF_FLUSH_CTX, pf->state);
+ ice_service_task_schedule(pf);
+}
+
+/**
+ * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
+ * @ctrl_vsi: pointer to a VF's CTRL VSI
+ * @rx_desc: pointer to FDIR Rx queue descriptor
+ */
+void
+ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ struct ice_pf *pf = ctrl_vsi->back;
+ struct ice_vf_fdir_ctx *ctx_done;
+ struct ice_vf_fdir_ctx *ctx_irq;
+ struct ice_vf_fdir *fdir;
+ unsigned long flags;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ vf = &pf->vf[ctrl_vsi->vf_id];
+
+ fdir = &vf->fdir;
+ ctx_done = &fdir->ctx_done;
+ ctx_irq = &fdir->ctx_irq;
+ dev = ice_pf_to_dev(pf);
+ spin_lock_irqsave(&fdir->ctx_lock, flags);
+ if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
+ spin_unlock_irqrestore(&fdir->ctx_lock, flags);
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
+
+ ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
+ ctx_done->conf = ctx_irq->conf;
+ ctx_done->stat = ICE_FDIR_CTX_IRQ;
+ ctx_done->v_opcode = ctx_irq->v_opcode;
+ memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
+ spin_unlock_irqrestore(&fdir->ctx_lock, flags);
+
+ ret = del_timer(&ctx_irq->rx_tmr);
+ if (!ret)
+ dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
+
+ set_bit(__ICE_FD_VF_FLUSH_CTX, pf->state);
+ ice_service_task_schedule(pf);
+}
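+
+/* Both the timer and the IRQ path complete a request the same way: the
+ * ctx_irq context is invalidated under ctx_lock, ctx_done takes over with
+ * stat set to ICE_FDIR_CTX_TIMEOUT or ICE_FDIR_CTX_IRQ respectively, and
+ * the service task is scheduled to run ice_flush_fdir_ctx().
+ */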
+
+/**
+ * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
+ * @vf: pointer to the VF info
+ */
+static void ice_vf_fdir_dump_info(struct ice_vf *vf)
+{
+ struct ice_vsi *vf_vsi;
+ u32 fd_size, fd_cnt;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ u16 vsi_num;
+
+ pf = vf->pf;
+ hw = &pf->hw;
+ dev = ice_pf_to_dev(pf);
+ vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
+
+ fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
+ fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
+ dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x",
+ vf->vf_id,
+ (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
+ (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
+ (fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
+ (fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
+}
+
+/**
+ * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
+ * @vf: pointer to the VF info
+ * @ctx: FDIR context info for post processing
+ * @status: virtchnl FDIR program status
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
+ enum virtchnl_fdir_prgm_status *status)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ u32 stat_err, error, prog_id;
+ int ret;
+
+ stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
+ if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
+ ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
+ ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
+ if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
+ ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
+ dev_err(dev, "VF %d: Desc show add, but ctx not",
+ vf->vf_id);
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
+ ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
+ dev_err(dev, "VF %d: Desc show del, but ctx not",
+ vf->vf_id);
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
+ ICE_FXD_FLTR_WB_QW1_FAIL_S;
+ if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
+ if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
+ dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
+ vf->vf_id);
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ } else {
+ dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
+ vf->vf_id);
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
+ }
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
+ ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
+ if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
+ dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ *status = VIRTCHNL_FDIR_SUCCESS;
+
+ return 0;
+
+err_exit:
+ ice_vf_fdir_dump_info(vf);
+ return ret;
+}
+
+/**
+ * ice_vc_add_fdir_fltr_post - post process the FDIR add command
+ * @vf: pointer to the VF structure
+ * @ctx: FDIR context info for post processing
+ * @status: virtchnl FDIR program status
+ * @success: true implies success, false implies failure
+ *
+ * Post process for flow director add command. If success, then do post process
+ * and send back success msg by virtchnl. Otherwise, do context reversion and
+ * send back failure msg by virtchnl.
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
+ enum virtchnl_fdir_prgm_status status,
+ bool success)
+{
+ struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum virtchnl_status_code v_ret;
+ struct virtchnl_fdir_add *resp;
+ int ret, len, is_tun;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ len = sizeof(*resp);
+ resp = kzalloc(len, GFP_KERNEL);
+ if (!resp) {
+ len = 0;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
+ goto err_exit;
+ }
+
+ if (!success)
+ goto err_exit;
+
+ is_tun = 0;
+ resp->status = status;
+ resp->flow_id = conf->flow_id;
+ vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+ kfree(resp);
+
+ dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
+ vf->vf_id, conf->flow_id,
+ (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
+ "add" : "del");
+ return ret;
+
+err_exit:
+ if (resp)
+ resp->status = status;
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ devm_kfree(dev, conf);
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+ kfree(resp);
+ return ret;
+}
+
+/**
+ * ice_vc_del_fdir_fltr_post - post process the FDIR del command
+ * @vf: pointer to the VF structure
+ * @ctx: FDIR context info for post processing
+ * @status: virtchnl FDIR program status
+ * @success: true implies success, false implies failure
+ *
+ * Post process for flow director del command. If success, then do post process
+ * and send back success msg by virtchnl. Otherwise, do context reversion and
+ * send back failure msg by virtchnl.
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
+ enum virtchnl_fdir_prgm_status status,
+ bool success)
+{
+ struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum virtchnl_status_code v_ret;
+ struct virtchnl_fdir_del *resp;
+ int ret, len, is_tun;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ len = sizeof(*resp);
+ resp = kzalloc(len, GFP_KERNEL);
+ if (!resp) {
+ len = 0;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
+ goto err_exit;
+ }
+
+ if (!success)
+ goto err_exit;
+
+ is_tun = 0;
+ resp->status = status;
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+ kfree(resp);
+
+ dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
+ vf->vf_id, conf->flow_id,
+ (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
+ "add" : "del");
+ devm_kfree(dev, conf);
+ return ret;
+
+err_exit:
+ if (resp)
+ resp->status = status;
+ if (success)
+ devm_kfree(dev, conf);
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+ kfree(resp);
+ return ret;
+}
+
+/**
+ * ice_flush_fdir_ctx - flush all pending FDIR context events
+ * @pf: pointer to the PF structure
+ *
+ * Flush all the pending events on the ctx_done list and process them.
+ */
+void ice_flush_fdir_ctx(struct ice_pf *pf)
+{
+ int i;
+
+ if (!test_and_clear_bit(__ICE_FD_VF_FLUSH_CTX, pf->state))
+ return;
+
+ ice_for_each_vf(pf, i) {
+ struct device *dev = ice_pf_to_dev(pf);
+ enum virtchnl_fdir_prgm_status status;
+ struct ice_vf *vf = &pf->vf[i];
+ struct ice_vf_fdir_ctx *ctx;
+ unsigned long flags;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ continue;
+
+ if (vf->ctrl_vsi_idx == ICE_NO_VSI)
+ continue;
+
+ ctx = &vf->fdir.ctx_done;
+ spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
+ if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+
+ WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
+ if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
+ status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
+ dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
+ vf->vf_id);
+ goto err_exit;
+ }
+
+ ret = ice_vf_verify_rx_desc(vf, ctx, &status);
+ if (ret)
+ goto err_exit;
+
+ if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
+ ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
+ else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
+ ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
+ else
+ dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
+
+ spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
+ ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+ continue;
+err_exit:
+ if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
+ ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
+ else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
+ ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
+ else
+ dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
+
+ spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
+ ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+ }
+}
+
+/**
+ * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
+ * @vf: pointer to the VF structure
+ * @conf: FDIR configuration for each filter
+ * @v_opcode: virtual channel operation code
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
+ enum virtchnl_ops v_opcode)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vf_fdir_ctx *ctx;
+ unsigned long flags;
+
+ ctx = &vf->fdir.ctx_irq;
+ spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
+ if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
+ (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+ dev_dbg(dev, "VF %d: Last request is still in progress\n",
+ vf->vf_id);
+ return -EBUSY;
+ }
+ ctx->flags |= ICE_VF_FDIR_CTX_VALID;
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+
+ ctx->conf = conf;
+ ctx->v_opcode = v_opcode;
+ ctx->stat = ICE_FDIR_CTX_READY;
+ timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);
+
+ mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
+
+ return 0;
+}
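+
+/* Each request is guarded by a one-shot timer armed for roughly 10 ms;
+ * if the control VSI interrupt never arrives, ice_vf_fdir_timer() fires
+ * and completes the context with ICE_FDIR_CTX_TIMEOUT instead.
+ */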
+
+/**
+ * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
+{
+ struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
+ unsigned long flags;
+
+ del_timer(&ctx->rx_tmr);
+ spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
+ ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+}
+
+/**
+ * ice_vc_add_fdir_fltr - add an FDIR filter for VF by the msg buffer
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Return: 0 on success, and other on error.
+ */
+int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
+ struct virtchnl_fdir_add *stat = NULL;
+ struct virtchnl_fdir_fltr_conf *conf;
+ enum virtchnl_status_code v_ret;
+ struct device *dev;
+ struct ice_pf *pf;
+ int is_tun = 0;
+ int len = 0;
+ int ret;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ ret = ice_vf_start_ctrl_vsi(vf);
+ if (ret && (ret != -EEXIST)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_exit;
+ }
+
+ stat = kzalloc(sizeof(*stat), GFP_KERNEL);
+ if (!stat) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ len = sizeof(*stat);
+ ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
+ dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
+ goto err_free_conf;
+ }
+
+ if (fltr->validate_only) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_SUCCESS;
+ devm_kfree(dev, conf);
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
+ v_ret, (u8 *)stat, len);
+ goto exit;
+ }
+
+ ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
+ dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_free_conf;
+ }
+
+ ret = ice_vc_fdir_is_dup_fltr(vf, conf);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
+ dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
+ vf->vf_id);
+ goto err_free_conf;
+ }
+
+ ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
+ goto err_free_conf;
+ }
+
+ ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
+ goto err_free_conf;
+ }
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_rem_entry;
+ }
+
+exit:
+ kfree(stat);
+ return ret;
+
+err_rem_entry:
+ ice_vc_fdir_clear_irq_ctx(vf);
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+err_free_conf:
+ devm_kfree(dev, conf);
+err_exit:
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
+ (u8 *)stat, len);
+ kfree(stat);
+ return ret;
+}
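+
+/* For context, the VF side (iavf) would drive this handler with a
+ * virtchnl message along these (hypothetical) lines:
+ *
+ *	struct virtchnl_fdir_add fltr = {};
+ *
+ *	fltr.vsi_id = adapter->vsi_res->vsi_id;
+ *	fltr.validate_only = 0;
+ *	// fill fltr.rule_cfg.proto_hdrs and fltr.rule_cfg.action_set
+ *	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER,
+ *			 (u8 *)&fltr, sizeof(fltr));
+ */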
+
+/**
+ * ice_vc_del_fdir_fltr - delete an FDIR filter for VF by the msg buffer
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Return: 0 on success, and other on error.
+ */
+int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
+ struct virtchnl_fdir_del *stat = NULL;
+ struct virtchnl_fdir_fltr_conf *conf;
+ enum virtchnl_status_code v_ret;
+ struct device *dev;
+ struct ice_pf *pf;
+ int is_tun = 0;
+ int len = 0;
+ int ret;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ stat = kzalloc(sizeof(*stat), GFP_KERNEL);
+ if (!stat) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ len = sizeof(*stat);
+
+ conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
+ if (!conf) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
+ dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
+ vf->vf_id, fltr->flow_id);
+ goto err_exit;
+ }
+
+ /* Just return failure when ctrl_vsi idx is invalid */
+ if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_del_tmr;
+ }
+
+ kfree(stat);
+
+ return ret;
+
+err_del_tmr:
+ ice_vc_fdir_clear_irq_ctx(vf);
+err_exit:
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
+ (u8 *)stat, len);
+ kfree(stat);
+ return ret;
+}
+
+/**
+ * ice_vf_fdir_init - init FDIR resource for VF
+ * @vf: pointer to the VF info
+ */
+void ice_vf_fdir_init(struct ice_vf *vf)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+
+ idr_init(&fdir->fdir_rule_idr);
+ INIT_LIST_HEAD(&fdir->fdir_rule_list);
+
+ spin_lock_init(&fdir->ctx_lock);
+ fdir->ctx_irq.flags = 0;
+ fdir->ctx_done.flags = 0;
+}
+
+/**
+ * ice_vf_fdir_exit - destroy FDIR resource for VF
+ * @vf: pointer to the VF info
+ */
+void ice_vf_fdir_exit(struct ice_vf *vf)
+{
+ ice_vc_fdir_flush_entry(vf);
+ idr_destroy(&vf->fdir.fdir_rule_idr);
+ ice_vc_fdir_rem_prof_all(vf);
+ ice_vc_fdir_free_prof_all(vf);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
new file mode 100644
index 000000000000..f4e629f4c09b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_FDIR_H_
+#define _ICE_VIRTCHNL_FDIR_H_
+
+struct ice_vf;
+struct ice_pf;
+
+enum ice_fdir_ctx_stat {
+ ICE_FDIR_CTX_READY,
+ ICE_FDIR_CTX_IRQ,
+ ICE_FDIR_CTX_TIMEOUT,
+};
+
+struct ice_vf_fdir_ctx {
+ struct timer_list rx_tmr;
+ enum virtchnl_ops v_opcode;
+ enum ice_fdir_ctx_stat stat;
+ union ice_32b_rx_flex_desc rx_desc;
+#define ICE_VF_FDIR_CTX_VALID BIT(0)
+ u32 flags;
+
+ void *conf;
+};
+
+/* VF FDIR information structure */
+struct ice_vf_fdir {
+ u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+ int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+ struct ice_fd_hw_prof **fdir_prof;
+
+ struct idr fdir_rule_idr;
+ struct list_head fdir_rule_list;
+
+ spinlock_t ctx_lock; /* protects FDIR context info */
+ struct ice_vf_fdir_ctx ctx_irq;
+ struct ice_vf_fdir_ctx ctx_done;
+};
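+
+/* Context lifecycle: ice_vc_fdir_set_irq_ctx() marks ctx_irq valid when a
+ * request is posted; the control VSI IRQ (or the guard timer) then moves
+ * the request to ctx_done, which ice_flush_fdir_ctx() consumes from the
+ * service task.
+ */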
+
+#ifdef CONFIG_PCI_IOV
+int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg);
+int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg);
+void ice_vf_fdir_init(struct ice_vf *vf);
+void ice_vf_fdir_exit(struct ice_vf *vf);
+void
+ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
+ union ice_32b_rx_flex_desc *rx_desc);
+void ice_flush_fdir_ctx(struct ice_pf *pf);
+#else
+static inline void
+ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, union ice_32b_rx_flex_desc *rx_desc) { }
+static inline void ice_flush_fdir_ctx(struct ice_pf *pf) { }
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VIRTCHNL_FDIR_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 1f38a8d0c525..78679ece2e08 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -202,6 +202,25 @@ static void ice_vf_vsi_release(struct ice_vf *vf)
}
/**
+ * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
+ * @vf: VF that control VSI is being invalidated on
+ */
+static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->ctrl_vsi_idx = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
+ * @vf: VF that control VSI is being released on
+ */
+static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
+{
+ ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
+ ice_vf_ctrl_invalidate_vsi(vf);
+}
+
+/**
* ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info
*/
@@ -214,6 +233,10 @@ static void ice_free_vf_res(struct ice_vf *vf)
* accessing the VF's VSI after it's freed or invalidated.
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_vf_fdir_exit(vf);
+ /* free VF control VSI */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
/* free VSI and disconnect it from the parent uplink */
if (vf->lan_vsi_idx != ICE_NO_VSI) {
@@ -560,6 +583,28 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
}
/**
+ * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
+ * @vf: VF to setup control VSI for
+ *
+ * Returns a pointer to the allocated VSI struct on success,
+ * or NULL on failure.
+ */
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
+ ice_vf_ctrl_invalidate_vsi(vf);
+ }
+
+ return vsi;
+}
+
+/**
* ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
* @pf: pointer to PF structure
* @vf: pointer to VF that the first MSIX vector index is being calculated for
@@ -1256,6 +1301,13 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
+ ice_vf_fdir_exit(vf);
+ /* clean VF control VSI when resetting VFs since it should be
+ * set up only when VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_invalidate_vsi(vf);
+
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
@@ -1374,6 +1426,13 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev_err(dev, "disabling promiscuous mode failed\n");
}
+ ice_vf_fdir_exit(vf);
+ /* clean VF control VSI when resetting VF since it should be set up
+ * only when VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
+
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi_with_release(vf);
ice_vf_post_vsi_rebuild(vf);
@@ -1532,7 +1591,7 @@ teardown:
}
/**
- * ice_set_dflt_settings - set VF defaults during initialization/creation
+ * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
* @pf: PF holding reference to all VFs for default configuration
*/
static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
@@ -1549,6 +1608,12 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
vf->spoofchk = true;
vf->num_vf_qs = pf->num_qps_per_vf;
+
+ /* ctrl_vsi_idx will be set to a valid value only when VF
+ * creates its first FDIR rule.
+ */
+ ice_vf_ctrl_invalidate_vsi(vf);
+ ice_vf_fdir_init(vf);
}
}
@@ -1848,7 +1913,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
*
* send msg to VF
*/
-static int
+int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
@@ -1996,6 +2061,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
+
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
@@ -2084,7 +2152,7 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
*
* check for the valid VSI ID
*/
-static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
@@ -3816,6 +3884,12 @@ error_handler:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
err = ice_vc_dis_vlan_stripping(vf);
break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER:
+ err = ice_vc_add_fdir_fltr(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER:
+ err = ice_vc_del_fdir_fltr(vf, msg);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
@@ -4108,7 +4182,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
}
/**
- * ice_print_vfs_mdd_event - print VFs malicious driver detect event
+ * ice_print_vfs_mdd_events - print VFs malicious driver detect event
* @pf: pointer to the PF structure
*
* Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 0f519fba3770..46abc5388fc7 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -4,6 +4,7 @@
#ifndef _ICE_VIRTCHNL_PF_H_
#define _ICE_VIRTCHNL_PF_H_
#include "ice.h"
+#include "ice_virtchnl_fdir.h"
/* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */
#define ICE_MAX_VLAN_PER_VF 8
@@ -70,6 +71,8 @@ struct ice_vf {
u16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
+ u16 ctrl_vsi_idx;
+ struct ice_vf_fdir fdir;
/* first vector index of this VF in the PF space */
int first_vector_idx;
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
@@ -138,6 +141,11 @@ void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
+int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 83f3c9574ed1..17ab8ef024ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -358,18 +358,18 @@ xsk_pool_if_up:
* This function allocates a number of Rx buffers from the fill ring
* or the internal recycle mechanism and places them on the Rx ring.
*
- * Returns false if all allocations were successful, true if any fail.
+ * Returns true if all allocations were successful, false if any fail.
*/
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
struct ice_rx_buf *rx_buf;
- bool ret = false;
+ bool ok = true;
dma_addr_t dma;
if (!count)
- return false;
+ return true;
rx_desc = ICE_RX_DESC(rx_ring, ntu);
rx_buf = &rx_ring->rx_buf[ntu];
@@ -377,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
do {
rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!rx_buf->xdp) {
- ret = true;
+ ok = false;
break;
}
@@ -402,7 +402,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
ice_release_rx_desc(rx_ring, ntu);
}
- return ret;
+ return ok;
}
/**
@@ -473,6 +473,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ rcu_read_unlock();
+ return result;
+ }
+
switch (act) {
case XDP_PASS:
break;
@@ -480,10 +488,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
result = ice_xmit_xdp_buff(xdp, xdp_ring);
break;
- case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
- break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 5d87957b2627..44111f65afc7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2007 - 2018 Intel Corporation. */
-#ifndef _E1000_HW_H_
-#define _E1000_HW_H_
+#ifndef _E1000_IGB_HW_H_
+#define _E1000_IGB_HW_H_
#include <linux/types.h>
#include <linux/delay.h>
@@ -551,4 +551,4 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
-#endif /* _E1000_HW_H_ */
+#endif /* _E1000_IGB_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 33cceb77e960..29383112bc19 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -441,7 +441,7 @@ out_no_read:
}
/**
- * e1000_init_mbx_params_pf - set initial values for pf mailbox
+ * igb_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
*
* Initializes the hw->mbx struct to correct values for pf mailbox
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 8c8eb82e6272..a018000f7db9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -836,6 +836,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
break;
case e1000_ms_auto:
data &= ~CR_1000T_MS_ENABLE;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index aaa954aae574..7bda8c5edea5 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -748,8 +748,8 @@ void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
- struct sk_buff *skb);
+int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+ struct sk_buff *skb);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 28baf203459a..7545da216d8b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2347,35 +2347,23 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
IGB_TEST_LEN*ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
- memcpy(p, igb_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
- memcpy(p, igb_gstrings_net_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ igb_gstrings_stats[i].stat_string);
+ for (i = 0; i < IGB_NETDEV_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ igb_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_restart", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "tx_queue_%u_restart", i);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_drops", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_csum_err", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_alloc_failed", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "rx_queue_%u_drops", i);
+ ethtool_sprintf(&p, "rx_queue_%u_csum_err", i);
+ ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i);
}
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
@@ -3022,6 +3010,7 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
break;
case ETHTOOL_SRXCLSRLDEL:
ret = igb_del_ethtool_nfc_entry(adapter, cmd);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cb0d07ff2492..c9e8c65a3cfe 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2037,7 +2037,7 @@ static void igb_power_down_link(struct igb_adapter *adapter)
}
/**
- * Detect and switch function for Media Auto Sense
+ * igb_check_swap_media - Detect and switch function for Media Auto Sense
* @adapter: address of the board private structure
**/
static void igb_check_swap_media(struct igb_adapter *adapter)
@@ -3114,7 +3114,7 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)
return 0;
/* Initialize the i2c bus which is controlled by the registers.
- * This bus will use the i2c_algo_bit structue that implements
+ * This bus will use the i2c_algo_bit structure that implements
* the protocol through toggling of the 4 bits in the register.
*/
adapter->i2c_adap.owner = THIS_MODULE;
@@ -4019,7 +4019,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
}
/**
- * igb_open - Called when a network interface is made active
+ * __igb_open - Called when a network interface is made active
* @netdev: network interface device structure
* @resuming: indicates whether we are in a resume call
*
@@ -4137,7 +4137,7 @@ int igb_open(struct net_device *netdev)
}
/**
- * igb_close - Disables a network interface
+ * __igb_close - Disables a network interface
* @netdev: network interface device structure
* @suspending: indicates we are in a suspend call
*
@@ -5855,7 +5855,7 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
*/
if (tx_ring->launchtime_enable) {
ts = ktime_to_timespec64(first->skb->tstamp);
- first->skb->tstamp = ktime_set(0, 0);
+ skb_txtime_consumed(first->skb);
context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
} else {
context_desc->seqnum_seed = 0;
@@ -8213,7 +8213,8 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+ int rx_buf_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -8224,7 +8225,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define IGB_LAST_OFFSET \
@@ -8300,9 +8301,10 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
return NULL;
if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);
- xdp->data += IGB_TS_HDR_LEN;
- size -= IGB_TS_HDR_LEN;
+ if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
+ xdp->data += IGB_TS_HDR_LEN;
+ size -= IGB_TS_HDR_LEN;
+ }
}
/* Determine available headroom for copy */
@@ -8363,8 +8365,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
- __skb_pull(skb, IGB_TS_HDR_LEN);
+ if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
+ __skb_pull(skb, IGB_TS_HDR_LEN);
}
/* update buffer offset */
@@ -8613,11 +8615,17 @@ static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
}
static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
- const unsigned int size)
+ const unsigned int size, int *rx_buf_pgcnt)
{
struct igb_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ *rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetchw(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
@@ -8633,9 +8641,9 @@ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
}
static void igb_put_rx_buffer(struct igb_ring *rx_ring,
- struct igb_rx_buffer *rx_buffer)
+ struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
{
- if (igb_can_reuse_rx_page(rx_buffer)) {
+ if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -8663,6 +8671,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
u32 frame_sz = 0;
+ int rx_buf_pgcnt;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -8692,7 +8701,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
*/
dma_rmb();
- rx_buffer = igb_get_rx_buffer(rx_ring, size);
+ rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@@ -8735,7 +8744,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
break;
}
- igb_put_rx_buffer(rx_ring, rx_buffer);
+ igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
cleaned_count++;
/* fetch next buffer in frame if non-eop */
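The rx_buf_pgcnt thread through igb_get_rx_buffer()/igb_put_rx_buffer() samples page_count() exactly once, before the buffer is handed up the stack, so igb_can_reuse_rx_page() judges a stable snapshot instead of re-reading a refcount that another context may have just changed. The test itself, isolated as a sketch:

#include <stdbool.h>

/* Reuse test for PAGE_SIZE < 8192: the driver is the sole owner iff no
 * reference beyond its own pagecnt_bias remains. pgcnt_snapshot is the
 * page_count() value captured in igb_get_rx_buffer(). */
static bool half_page_reusable(int pgcnt_snapshot, unsigned int pagecnt_bias)
{
	if ((pgcnt_snapshot - pagecnt_bias) > 1)
		return false;	/* someone else still holds the page */
	return true;
}

On PAGE_SIZE >= 8192 builds the snapshot is passed as 0 and the offset-based test in the #else branch decides instead.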
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7cc5428c3b3d..ba61fe9bfaf4 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -856,6 +856,9 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
dev_kfree_skb_any(skb);
}
+#define IGB_RET_PTP_DISABLED 1
+#define IGB_RET_PTP_INVALID 2
+
/**
* igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
* @q_vector: Pointer to interrupt specific structure
@@ -864,19 +867,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
*
* This function is meant to retrieve a timestamp from the first buffer of an
* incoming frame. The value is stored in little endian format starting on
- * byte 8.
+ * byte 8.
+ *
+ * Returns: 0 on success, nonzero on failure
**/
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
- struct sk_buff *skb)
+int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+ struct sk_buff *skb)
{
- __le64 *regval = (__le64 *)va;
struct igb_adapter *adapter = q_vector->adapter;
+ __le64 *regval = (__le64 *)va;
int adjust = 0;
+ if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
+ return IGB_RET_PTP_DISABLED;
+
/* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
* Field: Reserved Reserved SYSTIML SYSTIMH
*/
+
+ /* check reserved dwords are zero, be/le doesn't matter for zero */
+ if (regval[0])
+ return IGB_RET_PTP_INVALID;
+
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
le64_to_cpu(regval[1]));
@@ -896,6 +909,8 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
}
skb_hwtstamps(skb)->hwtstamp =
ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+
+ return 0;
}
/**
@@ -906,13 +921,15 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
* This function is meant to retrieve a timestamp from the internal registers
* of the adapter and store it in the skb.
**/
-void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
- struct sk_buff *skb)
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
- u64 regval;
int adjust = 0;
+ u64 regval;
+
+ if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
+ return;
/* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
@@ -1008,6 +1025,7 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
+ break;
case HWTSTAMP_TX_ON:
break;
default:
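igb_ptp_rx_pktstamp() now reports failure instead of returning void: IGB_RET_PTP_DISABLED when timestamping is off, IGB_RET_PTP_INVALID when the reserved half of the 16-byte prepended header is nonzero, and the rx-clean callers above only strip IGB_TS_HDR_LEN on success. A userspace-style parse of the same header, assuming a little-endian host (the hardware writes little endian); names here are illustrative:

#include <stdint.h>
#include <string.h>

/* Dwords 0-1 of the prepended header are reserved and must be zero;
 * SYSTIML/SYSTIMH follow in bytes 8..15. */
static int parse_igb_rx_tstamp(const uint8_t hdr[16], uint64_t *systim)
{
	uint64_t reserved, stamp;

	memcpy(&reserved, hdr, sizeof(reserved));
	memcpy(&stamp, hdr + 8, sizeof(stamp));

	if (reserved)		/* stale buffer: no valid timestamp */
		return -1;	/* cf. IGB_RET_PTP_INVALID */

	*systim = stamp;	/* already host order on little endian */
	return 0;
}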
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 5d2809dfd06a..1b08a7dc7bc4 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -547,7 +547,7 @@ void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
struct sk_buff *skb);
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index b909f00a79e6..35ed997af075 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -441,11 +441,6 @@
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_1000 0x0040
-#define MII_CR_SPEED_100 0x2000
-#define MII_CR_SPEED_10 0x0000
/* PHY Status Register */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 824a6c454bca..8722294ab90c 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1711,6 +1711,9 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
Autoneg);
}
+ /* Set pause flow control settings */
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
switch (hw->fc.requested_mode) {
case igc_fc_full:
ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
@@ -1725,9 +1728,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
Asym_Pause);
break;
default:
- ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Asym_Pause);
+ break;
}
status = pm_runtime_suspended(&adapter->pdev->dev) ?
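The pause-reporting fix marks Pause as supported unconditionally and stops advertising pause modes the user never asked for (the old default case advertised both Pause and Asym_Pause). For reference, ethtool_link_ksettings_add_link_mode() is a thin bitmap helper; roughly, as a sketch rather than the exact kernel expansion:

/* Set one ETHTOOL_LINK_MODE_* bit in the named bitmap (supported or
 * advertising) of the ksettings struct. */
#define add_link_mode_sketch(cmd, map, mode)				\
	__set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT,			\
		  (cmd)->link_modes.map)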
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 7ec04e48860c..cc83bb5c15e8 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -6,7 +6,7 @@
#include "igc_hw.h"
/**
- * igc_get_hw_semaphore_i225 - Acquire hardware semaphore
+ * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Acquire the necessary semaphores for exclusive access to the EEPROM.
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 7ac9597ddb84..baa45a1f3a65 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -941,7 +941,7 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
ktime_t txtime = first->skb->tstamp;
- first->skb->tstamp = ktime_set(0, 0);
+ skb_txtime_consumed(first->skb);
context_desc->launch_time = igc_tx_launchtime(adapter,
txtime);
} else {
@@ -3580,7 +3580,7 @@ void igc_up(struct igc_adapter *adapter)
netif_tx_start_all_queues(adapter->netdev);
/* start the watchdog. */
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
schedule_work(&adapter->watchdog_task);
}
@@ -3831,10 +3831,19 @@ static void igc_reset_task(struct work_struct *work)
adapter = container_of(work, struct igc_adapter, reset_task);
+ rtnl_lock();
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IGC_DOWN, &adapter->state) ||
+ test_bit(__IGC_RESETTING, &adapter->state)) {
+ rtnl_unlock();
+ return;
+ }
+
igc_rings_dump(adapter);
igc_regs_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igc_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -4000,7 +4009,7 @@ static irqreturn_t igc_msix_other(int irq, void *data)
}
if (icr & IGC_ICR_LSC) {
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__IGC_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
@@ -4378,7 +4387,7 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
}
if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
if (!test_bit(__IGC_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
@@ -4420,7 +4429,7 @@ static irqreturn_t igc_intr(int irq, void *data)
}
if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__IGC_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
@@ -4574,7 +4583,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
netif_tx_start_all_queues(netdev);
/* start the watchdog. */
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
schedule_work(&adapter->watchdog_task);
return IGC_SUCCESS;
@@ -4915,7 +4924,7 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
{
struct igc_mac_info *mac = &adapter->hw.mac;
- mac->autoneg = 0;
+ mac->autoneg = false;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
* for the switch() below to work
@@ -4937,13 +4946,13 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
mac->forced_speed_duplex = ADVERTISE_100_FULL;
break;
case SPEED_1000 + DUPLEX_FULL:
- mac->autoneg = 1;
+ mac->autoneg = true;
adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
goto err_inval;
case SPEED_2500 + DUPLEX_FULL:
- mac->autoneg = 1;
+ mac->autoneg = true;
adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
break;
case SPEED_2500 + DUPLEX_HALF: /* not supported */
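Beyond the int-to-bool cleanups (get_link_status and autoneg are logically flags), the substantive igc_main.c change is serializing igc_reset_task() under rtnl_lock and re-checking __IGC_DOWN/__IGC_RESETTING under the lock, since either bit may have been set between schedule_work() and the work actually running. The guard pattern, reduced to a sketch:

#include <linux/rtnetlink.h>

/* Hypothetical reduction: a work item that must not race ndo_stop takes
 * rtnl and re-validates adapter state under it before touching rings. */
static void guarded_reset(struct igc_adapter *adapter)
{
	rtnl_lock();
	if (test_bit(__IGC_DOWN, &adapter->state) ||
	    test_bit(__IGC_RESETTING, &adapter->state)) {
		rtnl_unlock();
		return;		/* teardown already in progress */
	}
	igc_reinit_locked(adapter);
	rtnl_unlock();
}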
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index ac0b9c85da7c..545f4d0e67cf 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -152,46 +152,54 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
}
/**
- * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
* @q_vector: Pointer to interrupt specific structure
* @va: Pointer to address containing Rx buffer
* @skb: Buffer containing timestamp and packet
*
- * This function is meant to retrieve the first timestamp from the
- * first buffer of an incoming frame. The value is stored in little
- * endian format starting on byte 0. There's a second timestamp
- * starting on byte 8.
- **/
-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+ * This function retrieves the timestamp saved in the beginning of packet
+ * buffer. While two timestamps are available, one in timer0 reference and the
+ * other in timer1 reference, this function considers only the timestamp in
+ * timer0 reference.
+ */
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
struct sk_buff *skb)
{
struct igc_adapter *adapter = q_vector->adapter;
- __le64 *regval = (__le64 *)va;
- int adjust = 0;
-
- /* The timestamp is recorded in little endian format.
- * DWORD: | 0 | 1 | 2 | 3
- * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High
+ u64 regval;
+ int adjust;
+
+ /* Timestamps are saved in little endian at the beginning of the packet
+ * buffer following the layout:
+ *
+ * DWORD: | 0 | 1 | 2 | 3 |
+ * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
+ *
+ * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
+ * part of the timestamp.
*/
- igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
- le64_to_cpu(regval[0]));
-
- /* adjust timestamp for the RX latency based on link speed */
- if (adapter->hw.mac.type == igc_i225) {
- switch (adapter->link_speed) {
- case SPEED_10:
- adjust = IGC_I225_RX_LATENCY_10;
- break;
- case SPEED_100:
- adjust = IGC_I225_RX_LATENCY_100;
- break;
- case SPEED_1000:
- adjust = IGC_I225_RX_LATENCY_1000;
- break;
- case SPEED_2500:
- adjust = IGC_I225_RX_LATENCY_2500;
- break;
- }
+ regval = le32_to_cpu(va[2]);
+ regval |= (u64)le32_to_cpu(va[3]) << 32;
+ igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+
+ /* Adjust timestamp for the RX latency based on link speed */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGC_I225_RX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGC_I225_RX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGC_I225_RX_LATENCY_1000;
+ break;
+ case SPEED_2500:
+ adjust = IGC_I225_RX_LATENCY_2500;
+ break;
+ default:
+ adjust = 0;
+ netdev_warn_once(adapter->netdev, "Imprecise timestamp\n");
+ break;
}
skb_hwtstamps(skb)->hwtstamp =
ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
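With the corrected layout, Timer0's stamp occupies dwords 2 and 3 of the packet-buffer header (SYSTIML nanoseconds, SYSTIMH seconds), not dwords 0 and 1 as the old comment claimed. The 64-bit reassembly the hunk performs, isolated (little-endian input, illustrative helper name):

#include <stdint.h>

/* Rebuild the raw Timer0 register pair exactly as the driver now does:
 * index 2 is SYSTIML (low dword), index 3 is SYSTIMH (high dword). */
static uint64_t igc_timer0_regval(const uint32_t le_dwords[4])
{
	return (uint64_t)le_dwords[2] |
	       ((uint64_t)le_dwords[3] << 32);
}

The explicit default case also means an unrecognized link speed now warns once instead of silently applying no latency adjustment.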
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 8d3798a32f0e..e324e42fab2d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1351,7 +1351,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
}
/**
- * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
+ * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
* @hw: pointer to hardware structure
* @input: unique input dword
* @common: compressed common input dword
@@ -1542,6 +1542,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
switch (input_mask->formatted.vm_pool & 0x7F) {
case 0x0:
fdirm |= IXGBE_FDIRM_POOL;
+ break;
case 0x7F:
break;
default:
@@ -1557,6 +1558,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
hw_dbg(hw, " Error on src/dst port mask\n");
return IXGBE_ERR_CONFIG;
}
+ break;
case IXGBE_ATR_L4TYPE_MASK:
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 62ddb452f862..03ccbe6b66d2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -93,6 +93,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
default:
break;
}
+ break;
default:
break;
}
@@ -2707,7 +2708,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_enable_rx_buff - Enables the receive data path
+ * ixgbe_enable_rx_buff_generic - Enables the receive data path
* @hw: pointer to hardware structure
*
* Enables the receive data path
@@ -3029,14 +3030,14 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/**
+ * ixgbe_set_vmdq_san_mac_generic - Associate VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ *
 * This function should only be invoked in the IOV mode.
 * In IOV mode, Default pool is next pool after the number of
 * VFs advertised and not 0.
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
- *
- * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
- * @hw: pointer to hardware struct
- * @vmdq: VMDq pool index
**/
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
@@ -3896,7 +3897,7 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
}
/**
- * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
* @hw: pointer to hardware structure
*
* Returns the thermal sensor data structure
@@ -4054,8 +4055,7 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw,
}
/**
- * ixgbe_get_oem_prod_version Etrack ID from EEPROM
- *
+ * ixgbe_get_oem_prod_version - Etrack ID from EEPROM
* @hw: pointer to hardware structure
* @nvm_ver: pointer to output structure
*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index a280aa34ca1d..4ceaca0f6ce3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1368,45 +1368,33 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
- char *p = (char *)data;
unsigned int i;
+ u8 *p = data;
switch (stringset) {
case ETH_SS_TEST:
- for (i = 0; i < IXGBE_TEST_LEN; i++) {
- memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IXGBE_TEST_LEN; i++)
+ ethtool_sprintf(&p, ixgbe_gstrings_test[i]);
break;
case ETH_SS_STATS:
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- memcpy(p, ixgbe_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ ixgbe_gstrings_stats[i].stat_string);
for (i = 0; i < netdev->num_tx_queues; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
}
for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
}
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
- sprintf(p, "tx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_pb_%u_pxon", i);
+ ethtool_sprintf(&p, "tx_pb_%u_pxoff", i);
}
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
- sprintf(p, "rx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_pb_%u_pxon", i);
+ ethtool_sprintf(&p, "rx_pb_%u_pxoff", i);
}
/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index df389a11d3af..0218f6c9b925 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -132,6 +132,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
else
*tx = (tc + 4) << 4; /* 96, 112 */
}
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 730c28a1a204..7ba1c2985ef7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -225,7 +225,7 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_check_from_parent - Determine whether PCIe info should come from parent
+ * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
* @hw: hw specific details
*
* This function is used by probe to determine whether a device's PCI-Express
@@ -4118,6 +4118,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
#endif
}
+ ring->rx_offset = ixgbe_rx_offset(ring);
+
if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
@@ -6156,7 +6158,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_eee_capable - helper function to determine EEE support on X550
+ * ixgbe_set_eee_capable - helper function to determine EEE support on X550
* @adapter: board private structure
*/
static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
@@ -6578,7 +6580,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
- rx_ring->rx_offset = ixgbe_rx_offset(rx_ring);
/* XDP RX-queue info */
if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
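Moving the ring->rx_offset assignment out of ixgbe_setup_rx_resources() and into ixgbe_configure_rx_ring() matters because ixgbe_rx_offset() keys off whether the ring uses build_skb, a decision that is only final at configure time (an attached XDP program can change it); computing the offset at allocation could capture a stale answer. The dependency, sketched with an illustrative wrapper:

/* IXGBE_SKB_PAD and ring_uses_build_skb() are the real names behind
 * this; the wrapper exists only for the sketch. */
static unsigned int rx_offset_sketch(bool uses_build_skb)
{
	return uses_build_skb ? IXGBE_SKB_PAD : 0;
}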
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index fc389eecdd2b..73bc170d1ae9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -461,12 +461,13 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without
- * the SWFW lock
+ * ixgbe_read_phy_reg_mdi - read PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
* @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register without the SWFW lock
**/
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
u16 *phy_data)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 22a874eee2e8..23ddfd79fc8b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -999,6 +999,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
+ break;
case HWTSTAMP_TX_ON:
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 4b93ba149ec5..d5cfb51ff648 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -701,7 +701,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
}
/**
- * ixgbe_release_nvm_semaphore - Release hardware semaphore
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
* @hw: pointer to hardware structure
*
* This function clears hardware semaphore bits.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 5e339afa682a..9724ffb16518 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1248,7 +1248,7 @@ static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
}
/**
- * ixgbe_fw_recovery_mode - Check FW NVM recovery mode
+ * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
* @hw: pointer to hardware structure
*
* Returns true if in FW NVM recovery mode.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 3771857cf887..91ad5b902673 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -104,6 +104,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+ rcu_read_unlock();
+ return result;
+ }
+
switch (act) {
case XDP_PASS:
break;
@@ -115,10 +122,6 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
}
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
break;
- case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
- break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
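Peeling XDP_REDIRECT off with likely() before the switch reflects that redirect is the dominant verdict on an AF_XDP zero-copy queue, so the hot path becomes a single predicted-taken branch rather than a jump through the whole dispatch. A hypothetical reduction with invented result codes:

#include <linux/bpf.h>		/* enum xdp_action */
#include <linux/compiler.h>	/* likely() */

enum zc_result { ZC_PASS, ZC_TX, ZC_REDIR, ZC_CONSUMED };	/* illustrative */

static enum zc_result run_xdp_zc_sketch(u32 act)
{
	if (likely(act == XDP_REDIRECT))
		return ZC_REDIR;	/* hot path */

	switch (act) {
	case XDP_PASS:
		return ZC_PASS;
	case XDP_TX:
		return ZC_TX;
	case XDP_ABORTED:
	case XDP_DROP:
	default:
		return ZC_CONSUMED;
	}
}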
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 449d7d5b280d..ba2ed8a43d2d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2633,6 +2633,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
adapter->num_rx_queues = rss;
adapter->num_tx_queues = rss;
adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index bfe6dfcec4ab..5fc347abab3c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -121,9 +121,11 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
}
/**
+ * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
+ * @hw: pointer to private hardware struct
+ *
* Hyper-V variant; the VF/PF communication is through the PCI
* config space.
- * @hw: pointer to private hardware struct
*/
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
@@ -513,9 +515,11 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
}
/**
- * Hyper-V variant - just a stub.
+ * ixgbevf_hv_update_mc_addr_list_vf - stub
* @hw: unused
* @netdev: unused
+ *
+ * Hyper-V variant - just a stub.
*/
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
struct net_device *netdev)
@@ -564,9 +568,11 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
}
/**
- * Hyper-V variant - just a stub.
+ * ixgbevf_hv_update_xcast_mode - stub
* @hw: unused
* @xcast_mode: unused
+ *
+ * Hyper-V variant - just a stub.
*/
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
@@ -608,7 +614,7 @@ mbx_err:
}
/**
- * Hyper-V variant - just a stub.
+ * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub.
* @hw: unused
* @vlan: unused
* @vind: unused
@@ -726,11 +732,13 @@ out:
}
/**
- * Hyper-V variant; there is no mailbox communication.
+ * ixgbevf_hv_check_mac_link_vf - check link
* @hw: pointer to private hardware struct
* @speed: pointer to link speed
* @link_up: true if link is up, false otherwise
* @autoneg_wait_to_complete: unused
+ *
+ * Hyper-V variant; there is no mailbox communication.
*/
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,