author	Rushil Gupta <rushilg@google.com>	2023-08-05 00:34:41 +0300
committer	David S. Miller <davem@davemloft.net>	2023-08-06 10:34:36 +0300
commit	66ce8e6b49df401854f0c98bed50a65e4167825b (patch)
tree	e272500da4e99e2466ca0c2d4e9e0466e77f1637	/drivers/net/ethernet/google/gve/gve_main.c
parent	16fd753995f740fb968edaf5fd57ffb96020102e (diff)
download	linux-66ce8e6b49df401854f0c98bed50a65e4167825b.tar.xz
gve: Control path for DQO-QPL
GVE supports QPL ("queue-page-list") mode, where all data is communicated through a set of pre-registered pages. Add this mode to the DQO descriptor format.

Add checks, ABI changes, and device options to support QPL mode for DQO in addition to GQI. Also, use the pages-per-qpl value supplied by the device option to control the size of the queue-page-list.

Signed-off-by: Rushil Gupta <rushilg@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
Signed-off-by: Bailey Forrest <bcf@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
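The diff below replaces direct priv->queue_format comparisons with the gve_is_gqi()/gve_is_qpl() helpers, whose definitions live in gve.h rather than in this file. A minimal sketch of what those predicates look like, assuming the queue-format constants used elsewhere in the driver (GVE_GQI_RDA_FORMAT, GVE_GQI_QPL_FORMAT, GVE_DQO_QPL_FORMAT); the exact definitions are outside this diff:

/* Sketch only: the real helpers are defined in gve.h, not in this patch hunk.
 * gve_is_gqi() is true for both GQI descriptor formats; gve_is_qpl() is true
 * whenever data moves through a pre-registered queue-page-list, which with
 * this patch now includes the DQO-QPL format as well as GQI-QPL.
 */
static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
	       priv->queue_format == GVE_GQI_QPL_FORMAT;
}

static inline bool gve_is_qpl(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
	       priv->queue_format == GVE_DQO_QPL_FORMAT;
}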
Diffstat (limited to 'drivers/net/ethernet/google/gve/gve_main.c')
-rw-r--r--	drivers/net/ethernet/google/gve/gve_main.c	20
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index e6f1711d9be0..5704b5f57cd0 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -31,7 +31,6 @@
// Minimum amount of time between queue kicks in msec (10 seconds)
#define MIN_TX_TIMEOUT_GAP (1000 * 10)
-#define DQO_TX_MAX 0x3FFFF
char gve_driver_name[] = "gve";
const char gve_version_str[] = GVE_VERSION;
@@ -494,7 +493,7 @@ static int gve_setup_device_resources(struct gve_priv *priv)
goto abort_with_stats_report;
}
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (!gve_is_gqi(priv)) {
priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
GFP_KERNEL);
if (!priv->ptype_lut_dqo) {
@@ -1083,11 +1082,12 @@ free_qpls:
static int gve_alloc_qpls(struct gve_priv *priv)
{
int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+ int page_count;
int start_id;
int i, j;
int err;
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+ if (!gve_is_qpl(priv))
return 0;
priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL);
@@ -1095,17 +1095,25 @@ static int gve_alloc_qpls(struct gve_priv *priv)
return -ENOMEM;
start_id = gve_tx_start_qpl_id(priv);
+ page_count = priv->tx_pages_per_qpl;
for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
err = gve_alloc_queue_page_list(priv, i,
- priv->tx_pages_per_qpl);
+ page_count);
if (err)
goto free_qpls;
}
start_id = gve_rx_start_qpl_id(priv);
+
+ /* For GQI_QPL number of pages allocated have 1:1 relationship with
+ * number of descriptors. For DQO, number of pages required are
+ * more than descriptors (because of out of order completions).
+ */
+ page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ?
+ priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
err = gve_alloc_queue_page_list(priv, i,
- priv->rx_data_slot_cnt);
+ page_count);
if (err)
goto free_qpls;
}
@@ -2051,7 +2059,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
/* Big TCP is only supported on DQ*/
if (!gve_is_gqi(priv))
- netif_set_tso_max_size(priv->dev, DQO_TX_MAX);
+ netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
priv->num_registered_pages = 0;
priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
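After this change, gve_alloc_qpls() sizes TX QPLs from tx_pages_per_qpl in every QPL mode, while the RX page count depends on the queue format, as the added comment in the hunk above explains. A hypothetical helper that factors out that RX selection (rx_qpl_page_count() is not part of the driver; it only restates the logic added above for illustration):

/* Hypothetical helper, not in the driver: mirrors the RX page-count
 * selection added in gve_alloc_qpls(). GQI-QPL keeps a 1:1 mapping
 * between pages and RX descriptors, so rx_data_slot_cnt is enough;
 * DQO-QPL completes buffers out of order, so the larger, device-supplied
 * rx_pages_per_qpl is used instead.
 */
static int rx_qpl_page_count(const struct gve_priv *priv)
{
	if (priv->queue_format == GVE_GQI_QPL_FORMAT)
		return priv->rx_data_slot_cnt;
	return priv->rx_pages_per_qpl;
}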