author	Larysa Zaremba <larysa.zaremba@intel.com>	2024-06-04 00:42:34 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-06-21 15:40:07 +0300
commit	2e3325ea08f05ffe2000c711ce6956b68d4a747d (patch)
tree	e897af8fc2d4df57d1ec2b0d6914361ec3969430 /drivers
parent	4dbe27806893134f4aa4463236803de76ba13b2e (diff)
download	linux-2e3325ea08f05ffe2000c711ce6956b68d4a747d.tar.xz
ice: map XDP queues to vectors in ice_vsi_map_rings_to_vectors()
[ Upstream commit f3df4044254c98128890b512bf19cc05588f1fe5 ]

ice_pf_dcb_recfg() re-maps queues to vectors with
ice_vsi_map_rings_to_vectors(), which does not restore the previous
state for XDP queues. This leads to no AF_XDP traffic after rebuild.

Map XDP queues to vectors in ice_vsi_map_rings_to_vectors().
Also, move the code around, so XDP queues are mapped independently
only through .ndo_bpf().

Fixes: 6624e780a577 ("ice: split ice_vsi_setup into smaller functions")
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-5-e3563aa89b0c@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
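At a glance, the patch gives the XDP queue-to-vector mapping a single
owner per configuration path. The sketch below is not driver source:
the helper names are taken from the patch, but the call chain is
condensed from the commit message and the hunks that follow.

/* Who maps XDP rings after this patch (abridged sketch):
 *
 * rebuild / DCB-reconfig path:
 *   ice_pf_dcb_recfg()
 *     -> ice_vsi_map_rings_to_vectors()
 *          -> ice_map_xdp_rings()        (only if XDP is enabled)
 *
 * .ndo_bpf() path, full (non-reset) configuration only:
 *   ice_prepare_xdp_rings() with cfg_type != ICE_XDP_CFG_PART
 *     -> ice_map_xdp_rings()
 */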
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h      |  1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c |  3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c  | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 96
4 files changed, 68 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a3286964d6c3..8e40f26aa506 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -942,6 +942,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
enum ice_xdp_cfg cfg_type);
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index a545a7917e4f..9d23a436d2a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -860,6 +860,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
}
rx_rings_rem -= rx_rings_per_v;
}
+
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_map_xdp_rings(vsi);
}
/**
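The new ice_map_xdp_rings() (added in ice_main.c below) deliberately
follows the same distribution scheme as ice_vsi_map_rings_to_vectors()
above: the remaining rings are spread evenly over the remaining vectors
with DIV_ROUND_UP. A self-contained userspace sketch of that arithmetic
(the counts 4 vectors / 6 XDP rings are made up for illustration; it
prints the resulting split of 2, 2, 1, 1):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int num_q_vectors = 4, num_xdp_txq = 6;	/* example counts */
	int xdp_rings_rem = num_xdp_txq;

	for (int v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		/* spread the remaining rings over the remaining vectors */
		int per_v = DIV_ROUND_UP(xdp_rings_rem,
					 num_q_vectors - v_idx);
		int q_base = num_xdp_txq - xdp_rings_rem;

		printf("vector %d takes XDP rings %d..%d\n",
		       v_idx, q_base, q_base + per_v - 1);
		xdp_rings_rem -= per_v;
	}
	return 0;
}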
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 5de7c50b439e..acf732ce04ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2323,13 +2323,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
if (ret)
goto unroll_vector_base;
- ice_vsi_map_rings_to_vectors(vsi);
-
- /* Associate q_vector rings to napi */
- ice_vsi_set_napi_queues(vsi);
-
- vsi->stat_offsets_loaded = false;
-
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
@@ -2340,6 +2333,13 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
goto unroll_vector_base;
}
+ ice_vsi_map_rings_to_vectors(vsi);
+
+ /* Associate q_vector rings to napi */
+ ice_vsi_set_napi_queues(vsi);
+
+ vsi->stat_offsets_loaded = false;
+
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
if (vsi->type != ICE_VSI_CTRL)
/* Do not exit if configuring RSS had an issue, at
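The net effect of the two ice_lib.c hunks is a reordering inside
ice_vsi_cfg_def(): the XDP Tx resources must exist before
ice_vsi_map_rings_to_vectors() runs, because the mapping now also walks
vsi->xdp_rings. An abridged sketch of the resulting order (error paths
and the exact XDP ring setup call, which is outside the shown hunk
context, are elided):

/* resulting order in ice_vsi_cfg_def() (abridged, error paths elided) */
if (ice_is_xdp_ena_vsi(vsi)) {
	ice_vsi_determine_xdp_res(vsi);
	/* ... set up the XDP rings (elided) ... */
}

ice_vsi_map_rings_to_vectors(vsi);	/* now also maps XDP rings */
ice_vsi_set_napi_queues(vsi);		/* associate q_vector rings to napi */
vsi->stat_offsets_loaded = false;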
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index b53fe27dbed7..10fef2e726b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2670,6 +2670,60 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
bpf_prog_put(old_prog);
}
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *ring;
+
+ if (static_key_enabled(&ice_xdp_locking_key))
+ return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+ q_vector = vsi->rx_rings[qid]->q_vector;
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (ice_ring_is_xdp(ring))
+ return ring;
+
+ return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+ int xdp_rings_rem = vsi->num_xdp_txq;
+ int v_idx, q_idx;
+
+ /* follow the logic from ice_vsi_map_rings_to_vectors */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ int xdp_rings_per_v, q_id, q_base;
+
+ xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+ vsi->num_q_vectors - v_idx);
+ q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = xdp_ring;
+ }
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
+ ice_for_each_rxq(vsi, q_idx) {
+ vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+ q_idx);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+}
+
/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
@@ -2682,7 +2736,6 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- int xdp_rings_rem = vsi->num_xdp_txq;
struct ice_pf *pf = vsi->back;
struct ice_qs_cfg xdp_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
@@ -2695,8 +2748,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
.mapping_mode = ICE_VSI_MAP_CONTIG
};
struct device *dev;
- int i, v_idx;
- int status;
+ int status, i;
dev = ice_pf_to_dev(pf);
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2715,42 +2767,6 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
if (ice_xdp_alloc_setup_rings(vsi))
goto clear_xdp_rings;
- /* follow the logic from ice_vsi_map_rings_to_vectors */
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- int xdp_rings_per_v, q_id, q_base;
-
- xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
- vsi->num_q_vectors - v_idx);
- q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
- xdp_ring->q_vector = q_vector;
- xdp_ring->next = q_vector->tx.tx_ring;
- q_vector->tx.tx_ring = xdp_ring;
- }
- xdp_rings_rem -= xdp_rings_per_v;
- }
-
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key)) {
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- } else {
- struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx) {
- if (ice_ring_is_xdp(ring)) {
- vsi->rx_rings[i]->xdp_ring = ring;
- break;
- }
- }
- }
- ice_tx_xsk_pool(vsi, i);
- }
-
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
@@ -2758,6 +2774,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
if (cfg_type == ICE_XDP_CFG_PART)
return 0;
+ ice_map_xdp_rings(vsi);
+
/* tell the Tx scheduler that right now we have
* additional queues
*/
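Putting the ice_main.c changes together: the mapping moves below the
early return taken in the reset path, so during a rebuild only
ice_vsi_map_rings_to_vectors() performs it, while a plain .ndo_bpf()
attach still maps here. An abridged sketch of the resulting control
flow (allocation, error handling, and the scheduler update are elided;
the trailing return value is assumed):

/* abridged flow of ice_prepare_xdp_rings() after this patch */
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
			  enum ice_xdp_cfg cfg_type)
{
	/* ... allocate and set up vsi->xdp_rings (elided) ... */

	/* reset path: ice_vsi_rebuild() maps the rings through
	 * ice_vsi_map_rings_to_vectors(), so stop before the mapping
	 * and before the Tx scheduler update
	 */
	if (cfg_type == ICE_XDP_CFG_PART)
		return 0;

	ice_map_xdp_rings(vsi);

	/* ... tell the Tx scheduler about the additional queues ... */
	return 0;
}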