author		Shailend Chand <shailend@google.com>	2024-05-02 02:25:44 +0300
committer	David S. Miller <davem@davemloft.net>	2024-05-05 16:35:34 +0300
commit		864616d97a4505a719d5f67c29d31776038d39ef (patch)
tree		46edda4c0c5b33f89f0d3d6ddba8963f41a3cd33 /drivers/net/ethernet/google
parent		5abc37bdcbc5a32f7c5cb21f5c0334cbf0e81752 (diff)
gve: Make gve_turnup work for nonempty queues
gVNIC has a requirement that all queues have to be quiesced before any
queue is operated on (created or destroyed). To enable the
implementation of future ndo hooks that work on a single queue, we need
to evolve gve_turnup to account for queues already having some
unprocessed descriptors in the ring.

Say rxq 4 is being stopped and started via the queue api. Due to gve's
requirement of quiescence, queues 0 through 3 are not processing their
rings while queue 4 is being toggled. Once they are made live, these
queues need to be poked to cause them to check their rings for
descriptors that were written during their brief period of quiescence.

Tested-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: Shailend Chand <shailend@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
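To make the "poke the queue" pattern concrete before the diff, here is a
condensed sketch of what gve_turnup now does per notify block. It is an
illustration, not the driver's actual code: the helper name
turnup_one_block is invented here, and the GQI/DQO interrupt-unmask
branch shown in full in the diff below is elided to a comment.

	/* Hypothetical condensation of the loop body gve_turnup runs for
	 * each tx/rx notify block after this patch; priv is only used by
	 * the elided unmask step.
	 */
	static void turnup_one_block(struct gve_priv *priv,
				     struct gve_notify_block *block)
	{
		napi_enable(&block->napi);

		/* ... unmask the block's interrupt: a doorbell write on
		 * GQI, ITR programming on DQO (see the diff below) ...
		 */

		/* Descriptors the NIC wrote while the queue was quiesced,
		 * i.e. before this barrier, are drained by the one-off
		 * schedule below; descriptors written after the barrier
		 * raise interrupts of their own.
		 */
		mb();
		napi_schedule(&block->napi);
	}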
Diffstat (limited to 'drivers/net/ethernet/google')
-rw-r--r--	drivers/net/ethernet/google/gve/gve_main.c | 14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 469a914c71d6..ef902b72b9a9 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1979,6 +1979,13 @@ static void gve_turnup(struct gve_priv *priv)
 			gve_set_itr_coalesce_usecs_dqo(priv, block,
 						       priv->tx_coalesce_usecs);
 		}
+
+		/* Any descs written by the NIC before this barrier will be
+		 * handled by the one-off napi schedule below. Whereas any
+		 * descs after the barrier will generate interrupts.
+		 */
+		mb();
+		napi_schedule(&block->napi);
 	}
 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -1994,6 +2001,13 @@ static void gve_turnup(struct gve_priv *priv)
 			gve_set_itr_coalesce_usecs_dqo(priv, block,
 						       priv->rx_coalesce_usecs);
 		}
+
+		/* Any descs written by the NIC before this barrier will be
+		 * handled by the one-off napi schedule below. Whereas any
+		 * descs after the barrier will generate interrupts.
+		 */
+		mb();
+		napi_schedule(&block->napi);
 	}
 
 	gve_set_napi_enabled(priv);
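As the new comment spells out, the mb() splits responsibility cleanly
between the two completion paths: descriptors the NIC wrote before the
barrier are drained by the explicit napi_schedule(), while descriptors
written after it generate fresh interrupts, so nothing can fall into the
gap between quiescence ending and the interrupt being re-armed.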