Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h          |  35
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c   | 146
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h   |  48
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c  |  85
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c     |  30
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c       |   2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c   |   7
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c   |   4
8 files changed, 235 insertions, 122 deletions
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 4814c96d5fe7..e97633b68e25 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -50,6 +50,10 @@
/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024
+/* Default minimum ring size */
+#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
+#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
+
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
#define GVE_MAX_RX_BUFFER_SIZE 4096
@@ -63,7 +67,6 @@
#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
#define DQO_QPL_DEFAULT_TX_PAGES 512
-#define DQO_QPL_DEFAULT_RX_PAGES 2048
/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX 0x3FFFF
@@ -621,11 +624,6 @@ struct gve_qpl_config {
unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};
-struct gve_options_dqo_rda {
- u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
- u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
-};
-
struct gve_irq_db {
__be32 index;
} ____cacheline_aligned;
@@ -718,9 +716,13 @@ struct gve_priv {
u16 num_event_counters;
u16 tx_desc_cnt; /* num desc per ring */
u16 rx_desc_cnt; /* num desc per ring */
+ u16 max_tx_desc_cnt;
+ u16 max_rx_desc_cnt;
+ u16 min_tx_desc_cnt;
+ u16 min_rx_desc_cnt;
+ bool modify_ring_size_enabled;
+ bool default_min_ring_size;
u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
- u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
- u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
struct bpf_prog *xdp_prog; /* XDP BPF program */
@@ -792,7 +794,6 @@ struct gve_priv {
u64 link_speed;
bool up_before_suspend; /* True if dev was up before suspend */
- struct gve_options_dqo_rda options_dqo_rda;
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
@@ -1044,6 +1045,14 @@ static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
return gve_get_rx_qpl_id(tx_cfg, 0);
}
+static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
+{
+ /* For DQO, page count should be more than ring size for
+ * out-of-order completions. Set it to two times of ring size.
+ */
+ return 2 * rx_desc_cnt;
+}
+
/* Returns a pointer to the next available tx qpl in the list of qpls */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
@@ -1150,6 +1159,14 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+int gve_adjust_config(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config);
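The gve.h hunk above replaces the device-suggested rx_pages_per_qpl with gve_get_rx_pages_per_qpl_dqo(): because DQO completes buffers out of order, a QPL-mode RX queue registers more pages than it has descriptors, and this patch settles on twice the ring size. A minimal user-space sketch of that rule, compiled independently of the driver (the function and variable names below are illustrative, not the driver's):

#include <stdio.h>

/* Sizing rule mirrored from gve_get_rx_pages_per_qpl_dqo(): DQO completes
 * buffers out of order, so a QPL-mode RX queue keeps twice as many pages
 * as it has descriptors. */
static unsigned int rx_pages_per_qpl_dqo(unsigned int rx_desc_cnt)
{
	return 2 * rx_desc_cnt;
}

int main(void)
{
	unsigned int ring_sizes[] = { 512, 1024, 2048 };
	unsigned int i;

	for (i = 0; i < sizeof(ring_sizes) / sizeof(ring_sizes[0]); i++)
		printf("ring %u -> %u QPL pages\n",
		       ring_sizes[i], rx_pages_per_qpl_dqo(ring_sizes[i]));
	return 0;
}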
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index ae12ac38e18b..b2b619aa2310 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -32,6 +32,8 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc
return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}
+#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE 8
+
static
void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_descriptor *device_descriptor,
@@ -41,7 +43,8 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
- struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -165,6 +168,27 @@ void gve_parse_device_option(struct gve_priv *priv,
"Buffer Sizes");
*dev_op_buffer_sizes = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_MODIFY_RING:
+ if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Modify Ring", (int)sizeof(**dev_op_modify_ring),
+ GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_modify_ring)) {
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring");
+ }
+
+ *dev_op_modify_ring = (void *)(option + 1);
+
+ /* device has not provided min ring size */
+ if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
+ priv->default_min_ring_size = true;
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -183,7 +207,8 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
- struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -204,7 +229,8 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
- dev_op_dqo_qpl, dev_op_buffer_sizes);
+ dev_op_dqo_qpl, dev_op_buffer_sizes,
+ dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -565,6 +591,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be64(tx->q_resources_bus),
.tx_ring_addr = cpu_to_be64(tx->bus),
.ntfy_id = cpu_to_be32(tx->ntfy_id),
+ .tx_ring_size = cpu_to_be16(priv->tx_desc_cnt),
};
if (gve_is_gqi(priv)) {
@@ -573,24 +600,17 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
} else {
- u16 comp_ring_size;
u32 qpl_id = 0;
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT)
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
- comp_ring_size =
- priv->options_dqo_rda.tx_comp_ring_entries;
- } else {
+ else
qpl_id = tx->dqo.qpl->id;
- comp_ring_size = priv->tx_desc_cnt;
- }
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_tx_queue.tx_ring_size =
- cpu_to_be16(priv->tx_desc_cnt);
cmd.create_tx_queue.tx_comp_ring_addr =
cpu_to_be64(tx->complq_bus_dqo);
cmd.create_tx_queue.tx_comp_ring_size =
- cpu_to_be16(comp_ring_size);
+ cpu_to_be16(priv->tx_desc_cnt);
}
return gve_adminq_issue_cmd(priv, &cmd);
@@ -621,6 +641,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
.queue_id = cpu_to_be32(queue_index),
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
+ .rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
};
if (gve_is_gqi(priv)) {
@@ -635,20 +656,13 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
- u16 rx_buff_ring_entries;
u32 qpl_id = 0;
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT)
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
- rx_buff_ring_entries =
- priv->options_dqo_rda.rx_buff_ring_entries;
- } else {
+ else
qpl_id = rx->dqo.qpl->id;
- rx_buff_ring_entries = priv->rx_desc_cnt;
- }
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_rx_queue.rx_ring_size =
- cpu_to_be16(priv->rx_desc_cnt);
cmd.create_rx_queue.rx_desc_ring_addr =
cpu_to_be64(rx->dqo.complq.bus);
cmd.create_rx_queue.rx_data_ring_addr =
@@ -656,7 +670,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_rx_queue.packet_buffer_size =
cpu_to_be16(priv->data_buffer_size_dqo);
cmd.create_rx_queue.rx_buff_ring_size =
- cpu_to_be16(rx_buff_ring_entries);
+ cpu_to_be16(priv->rx_desc_cnt);
cmd.create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
if (priv->header_split_enabled)
@@ -745,31 +759,17 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
return gve_adminq_kick_and_wait(priv);
}
-static int gve_set_desc_cnt(struct gve_priv *priv,
- struct gve_device_descriptor *descriptor)
+static void gve_set_default_desc_cnt(struct gve_priv *priv,
+ const struct gve_device_descriptor *descriptor)
{
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- return 0;
-}
-static int
-gve_set_desc_cnt_dqo(struct gve_priv *priv,
- const struct gve_device_descriptor *descriptor,
- const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
-{
- priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
- priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
-
- if (priv->queue_format == GVE_DQO_QPL_FORMAT)
- return 0;
-
- priv->options_dqo_rda.tx_comp_ring_entries =
- be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
- priv->options_dqo_rda.rx_buff_ring_entries =
- be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
-
- return 0;
+ /* set default ranges */
+ priv->max_tx_desc_cnt = priv->tx_desc_cnt;
+ priv->max_rx_desc_cnt = priv->rx_desc_cnt;
+ priv->min_tx_desc_cnt = priv->tx_desc_cnt;
+ priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}
static void gve_enable_supported_features(struct gve_priv *priv,
@@ -779,7 +779,9 @@ static void gve_enable_supported_features(struct gve_priv *priv,
const struct gve_device_option_dqo_qpl
*dev_op_dqo_qpl,
const struct gve_device_option_buffer_sizes
- *dev_op_buffer_sizes)
+ *dev_op_buffer_sizes,
+ const struct gve_device_option_modify_ring
+ *dev_op_modify_ring)
{
/* Before control reaches this point, the page-size-capped max MTU from
* the gve_device_descriptor field has already been stored in
@@ -796,12 +798,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
if (dev_op_dqo_qpl) {
priv->tx_pages_per_qpl =
be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
- priv->rx_pages_per_qpl =
- be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
if (priv->tx_pages_per_qpl == 0)
priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
- if (priv->rx_pages_per_qpl == 0)
- priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
}
if (dev_op_buffer_sizes &&
@@ -814,12 +812,33 @@ static void gve_enable_supported_features(struct gve_priv *priv,
"BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
priv->max_rx_buffer_size, priv->header_buf_size);
}
+
+ /* Read and store ring size ranges given by device */
+ if (dev_op_modify_ring &&
+ (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
+ priv->modify_ring_size_enabled = true;
+
+ /* max ring size for DQO QPL should not be overwritten because of device limit */
+ if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
+ priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
+ priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
+ }
+ if (priv->default_min_ring_size) {
+ /* If device hasn't provided minimums, use default minimums */
+ priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
+ priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
+ } else {
+ priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
+ priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
+ }
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
+ struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
@@ -851,9 +870,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
&dev_op_gqi_qpl, &dev_op_dqo_rda,
- &dev_op_jumbo_frames,
- &dev_op_dqo_qpl,
- &dev_op_buffer_sizes);
+ &dev_op_jumbo_frames, &dev_op_dqo_qpl,
+ &dev_op_buffer_sizes,
+ &dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -888,15 +907,13 @@ int gve_adminq_describe_device(struct gve_priv *priv)
dev_info(&priv->pdev->dev,
"Driver is running with GQI QPL queue format.\n");
}
- if (gve_is_gqi(priv)) {
- err = gve_set_desc_cnt(priv, descriptor);
- } else {
- /* DQO supports LRO. */
+
+ /* set default descriptor counts */
+ gve_set_default_desc_cnt(priv, descriptor);
+
+ /* DQO supports LRO. */
+ if (!gve_is_gqi(priv))
priv->dev->hw_features |= NETIF_F_LRO;
- err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
- }
- if (err)
- goto free_device_descriptor;
priv->max_registered_pages =
be64_to_cpu(descriptor->max_registered_pages);
@@ -912,18 +929,11 @@ int gve_adminq_describe_device(struct gve_priv *priv)
mac = descriptor->mac;
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
- priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
-
- if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
- dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
- priv->rx_data_slot_cnt);
- priv->rx_desc_cnt = priv->rx_data_slot_cnt;
- }
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
- dev_op_buffer_sizes);
+ dev_op_buffer_sizes, dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
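The admin-queue changes above add parsing for GVE_DEV_OPT_ID_MODIFY_RING: an option shorter than 8 bytes is rejected, an exactly 8-byte option carries only the maximums and makes the driver fall back to GVE_DEFAULT_MIN_TX_RING_SIZE and GVE_DEFAULT_MIN_RX_RING_SIZE, and the full 12-byte option supplies both bounds. The host-endian sketch below mirrors that decision under a simplified option layout; it is not the driver's parser, and the names are stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Host-endian stand-in for struct gve_device_option_modify_ring; the real
 * option uses big-endian fields and arrives via the admin queue. */
struct modify_ring_opt {
	uint32_t supported_features_mask;
	uint16_t max_rx_ring_size;
	uint16_t max_tx_ring_size;
	uint16_t min_rx_ring_size;	/* only in the 12-byte variant */
	uint16_t min_tx_ring_size;
};

#define OPT_LEN_NO_MIN	8	/* 8-byte option: device gave no minimums */
#define DEFAULT_MIN_TX	256
#define DEFAULT_MIN_RX	512

struct ring_limits {
	uint16_t min_tx, max_tx, min_rx, max_rx;
};

/* Parse step sketched from gve_parse_device_option(): too-short options are
 * rejected, and the 8-byte variant falls back to the driver defaults. */
static bool parse_modify_ring(const struct modify_ring_opt *opt,
			      uint16_t opt_len, struct ring_limits *out)
{
	if (opt_len < OPT_LEN_NO_MIN)
		return false;

	out->max_rx = opt->max_rx_ring_size;
	out->max_tx = opt->max_tx_ring_size;
	if (opt_len == OPT_LEN_NO_MIN) {
		out->min_tx = DEFAULT_MIN_TX;
		out->min_rx = DEFAULT_MIN_RX;
	} else {
		out->min_rx = opt->min_rx_ring_size;
		out->min_tx = opt->min_tx_ring_size;
	}
	return true;
}

int main(void)
{
	struct modify_ring_opt opt = {
		.max_rx_ring_size = 4096,
		.max_tx_ring_size = 4096,
	};
	struct ring_limits lim;

	if (parse_modify_ring(&opt, OPT_LEN_NO_MIN, &lim))
		printf("tx [%u, %u], rx [%u, %u]\n",
		       (unsigned)lim.min_tx, (unsigned)lim.max_tx,
		       (unsigned)lim.min_rx, (unsigned)lim.max_rx);
	return 0;
}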
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 5ac972e45ff8..beedf2353847 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -103,8 +103,7 @@ static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
struct gve_device_option_dqo_rda {
__be32 supported_features_mask;
- __be16 tx_comp_ring_entries;
- __be16 rx_buff_ring_entries;
+ __be32 reserved;
};
static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
@@ -134,6 +133,16 @@ struct gve_device_option_buffer_sizes {
static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
+struct gve_device_option_modify_ring {
+ __be32 supported_featured_mask;
+ __be16 max_rx_ring_size;
+ __be16 max_tx_ring_size;
+ __be16 min_rx_ring_size;
+ __be16 min_tx_ring_size;
+};
+
+static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -143,28 +152,31 @@ static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
* the device for read/write and data is copied from/to SKBs.
*/
enum gve_dev_opt_id {
- GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
- GVE_DEV_OPT_ID_GQI_RDA = 0x2,
- GVE_DEV_OPT_ID_GQI_QPL = 0x3,
- GVE_DEV_OPT_ID_DQO_RDA = 0x4,
- GVE_DEV_OPT_ID_DQO_QPL = 0x7,
- GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
- GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
+ GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
+ GVE_DEV_OPT_ID_GQI_RDA = 0x2,
+ GVE_DEV_OPT_ID_GQI_QPL = 0x3,
+ GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+ GVE_DEV_OPT_ID_MODIFY_RING = 0x6,
+ GVE_DEV_OPT_ID_DQO_QPL = 0x7,
+ GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
+ GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
};
enum gve_dev_opt_req_feat_mask {
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
};
enum gve_sup_feature_mask {
- GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
- GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
+ GVE_SUP_MODIFY_RING_MASK = 1 << 0,
+ GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
+ GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
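The header also introduces GVE_SUP_MODIFY_RING_MASK (bit 0) alongside the existing jumbo-frames and buffer-sizes bits. As in gve_enable_supported_features(), an option only takes effect when its block is present in the descriptor and the descriptor's supported_features_mask sets the matching bit. A small sketch of that gating (the helper name feature_enabled is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Feature bits as declared in enum gve_sup_feature_mask above. */
#define SUP_MODIFY_RING_MASK	(1u << 0)
#define SUP_JUMBO_FRAMES_MASK	(1u << 2)
#define SUP_BUFFER_SIZES_MASK	(1u << 4)

/* A device option counts as enabled only when its option block was present
 * AND the descriptor's supported features mask sets its bit. */
static bool feature_enabled(unsigned int supported_mask,
			    unsigned int feature_bit, bool option_present)
{
	return option_present && (supported_mask & feature_bit);
}

int main(void)
{
	unsigned int mask = SUP_MODIFY_RING_MASK | SUP_BUFFER_SIZES_MASK;

	printf("modify ring:  %d\n",
	       feature_enabled(mask, SUP_MODIFY_RING_MASK, true));
	printf("jumbo frames: %d\n",
	       feature_enabled(mask, SUP_JUMBO_FRAMES_MASK, true));
	return 0;
}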
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index dbe05402d40b..815dead31673 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -490,8 +490,8 @@ static void gve_get_ringparam(struct net_device *netdev,
{
struct gve_priv *priv = netdev_priv(netdev);
- cmd->rx_max_pending = priv->rx_desc_cnt;
- cmd->tx_max_pending = priv->tx_desc_cnt;
+ cmd->rx_max_pending = priv->max_rx_desc_cnt;
+ cmd->tx_max_pending = priv->max_tx_desc_cnt;
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
@@ -503,20 +503,93 @@ static void gve_get_ringparam(struct net_device *netdev,
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
+static int gve_adjust_ring_sizes(struct gve_priv *priv,
+ u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+ struct gve_qpl_config new_qpl_cfg;
+ int err;
+
+ /* get current queue configuration */
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+
+ /* copy over the new ring_size from ethtool */
+ tx_alloc_cfg.ring_size = new_tx_desc_cnt;
+ rx_alloc_cfg.ring_size = new_rx_desc_cnt;
+
+ /* qpl_cfg is not read-only, it contains a map that gets updated as
+ * rings are allocated, which is why we cannot use the yet unreleased
+ * one in priv.
+ */
+ qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+
+ if (netif_running(priv->dev)) {
+ err = gve_adjust_config(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+ }
+
+ /* Set new ring_size for the next up */
+ priv->tx_desc_cnt = new_tx_desc_cnt;
+ priv->rx_desc_cnt = new_rx_desc_cnt;
+
+ return 0;
+}
+
+static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt)
+{
+ /* check for valid range */
+ if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
+ new_tx_desc_cnt > priv->max_tx_desc_cnt ||
+ new_rx_desc_cnt < priv->min_rx_desc_cnt ||
+ new_rx_desc_cnt > priv->max_rx_desc_cnt) {
+ dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
+ dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int gve_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *cmd,
struct kernel_ethtool_ringparam *kernel_cmd,
struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
+ u16 new_tx_cnt, new_rx_cnt;
+ int err;
+
+ err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+ if (err)
+ return err;
- if (priv->tx_desc_cnt != cmd->tx_pending ||
- priv->rx_desc_cnt != cmd->rx_pending) {
- dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n");
+ if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
+ return 0;
+
+ if (!priv->modify_ring_size_enabled) {
+ dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
return -EOPNOTSUPP;
}
- return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+ new_tx_cnt = cmd->tx_pending;
+ new_rx_cnt = cmd->rx_pending;
+
+ if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
+ return -EINVAL;
+
+ return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
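The ethtool path above turns gve_set_ringparam() into a real resize: apply the TCP data-split setting, return early when nothing changes, reject the request if the device never advertised the modify-ring option, validate that both counts are powers of two inside the advertised bounds, and only then reallocate the rings and record the new sizes for the next bring-up. A condensed user-space sketch of that flow (the struct, callback, and helper names are stand-ins, and the data-split step is omitted):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver state and the reallocation step;
 * none of these names exist in the driver itself. */
struct ring_state {
	unsigned int tx_cnt, rx_cnt;
	unsigned int min_tx, max_tx, min_rx, max_rx;
	bool modify_ring_supported;
};

/* Same constraints as gve_validate_req_ring_size(): within the advertised
 * range and a power of two. */
static bool ring_req_valid(unsigned int req, unsigned int min, unsigned int max)
{
	return req >= min && req <= max && (req & (req - 1)) == 0;
}

static int set_ringparam(struct ring_state *st, unsigned int tx_req,
			 unsigned int rx_req,
			 int (*apply)(unsigned int, unsigned int))
{
	int err;

	if (tx_req == st->tx_cnt && rx_req == st->rx_cnt)
		return 0;			/* nothing to change */
	if (!st->modify_ring_supported)
		return -EOPNOTSUPP;		/* device lacks the option */
	if (!ring_req_valid(tx_req, st->min_tx, st->max_tx) ||
	    !ring_req_valid(rx_req, st->min_rx, st->max_rx))
		return -EINVAL;

	err = apply(tx_req, rx_req);		/* reallocate rings if up */
	if (err)
		return err;

	st->tx_cnt = tx_req;			/* remembered for the next up */
	st->rx_cnt = rx_req;
	return 0;
}

static int fake_apply(unsigned int tx, unsigned int rx)
{
	printf("reallocating rings: tx=%u rx=%u\n", tx, rx);
	return 0;
}

int main(void)
{
	struct ring_state st = {
		.tx_cnt = 256, .rx_cnt = 512,
		.min_tx = 256, .max_tx = 4096,
		.min_rx = 512, .max_rx = 4096,
		.modify_ring_supported = true,
	};

	return set_ringparam(&st, 512, 1024, fake_apply);
}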
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 166bd827a6d7..a515e5af843c 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1103,13 +1103,13 @@ free_qpls:
return err;
}
-static int gve_alloc_qpls(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
+static int gve_alloc_qpls(struct gve_priv *priv, struct gve_qpls_alloc_cfg *cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
int rx_start_id, tx_num_qpls, rx_num_qpls;
struct gve_queue_page_list *qpls;
- int page_count;
+ u32 page_count;
int err;
if (cfg->raw_addressing)
@@ -1141,8 +1141,12 @@ static int gve_alloc_qpls(struct gve_priv *priv,
/* For GQI_QPL number of pages allocated have 1:1 relationship with
* number of descriptors. For DQO, number of pages required are
* more than descriptors (because of out of order completions).
+ * Set it to twice the number of descriptors.
*/
- page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
+ if (cfg->is_gqi)
+ page_count = rx_alloc_cfg->ring_size;
+ else
+ page_count = gve_get_rx_pages_per_qpl_dqo(rx_alloc_cfg->ring_size);
rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
if (err)
@@ -1310,10 +1314,10 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->rx = priv->rx;
}
-static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
@@ -1363,7 +1367,7 @@ static int gve_queues_mem_alloc(struct gve_priv *priv,
{
int err;
- err = gve_alloc_qpls(priv, qpls_alloc_cfg);
+ err = gve_alloc_qpls(priv, qpls_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
return err;
@@ -1863,10 +1867,10 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-static int gve_adjust_config(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+int gve_adjust_config(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 20f5a9e7fae9..cd727e55ae0f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -240,7 +240,7 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
int idx)
{
struct device *hdev = &priv->pdev->dev;
- u32 slots = priv->rx_data_slot_cnt;
+ u32 slots = cfg->ring_size;
int filled_pages;
size_t bytes;
int err;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8e8071308aeb..15108407b54f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -178,7 +178,7 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
return err;
} else {
idx = rx->dqo.next_qpl_page_idx;
- if (idx >= priv->rx_pages_per_qpl) {
+ if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
net_err_ratelimited("%s: Out of QPL pages\n",
priv->dev->name);
return -ENOMEM;
@@ -305,8 +305,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
size_t size;
int i;
- const u32 buffer_queue_slots = cfg->raw_addressing ?
- priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size;
+ const u32 buffer_queue_slots = cfg->ring_size;
const u32 completion_queue_slots = cfg->ring_size;
netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -322,7 +321,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
rx->dqo.num_buf_states = cfg->raw_addressing ?
min_t(s16, S16_MAX, buffer_queue_slots * 4) :
- priv->rx_pages_per_qpl;
+ gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
sizeof(rx->dqo.buf_states[0]),
GFP_KERNEL);
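In gve_rx_dqo.c the buffer-state pool is now derived from the ring size itself: raw addressing keeps up to four outstanding buffers per buffer-queue slot, capped at S16_MAX, while QPL mode is bounded by the QPL page count, which this patch fixes at twice the ring size. A sketch of that arithmetic (the function name is illustrative):

#include <stdio.h>

#define S16_MAX 32767

/* Buffer-state sizing from gve_rx_alloc_ring_dqo(): raw addressing tracks up
 * to four in-flight buffers per buffer-queue slot (capped at S16_MAX), while
 * QPL mode is bounded by the QPL page count, i.e. 2x the ring size. */
static unsigned int num_buf_states(int raw_addressing, unsigned int ring_size)
{
	if (raw_addressing) {
		unsigned int n = ring_size * 4;

		return n > S16_MAX ? S16_MAX : n;
	}
	return 2 * ring_size;	/* matches gve_get_rx_pages_per_qpl_dqo() */
}

int main(void)
{
	printf("RDA: %u buf states, QPL: %u buf states (ring_size=2048)\n",
	       num_buf_states(1, 2048), num_buf_states(0, 2048));
	return 0;
}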
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index bc34b6cd3a3e..70f29b90a982 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -295,9 +295,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
/* Queue sizes must be a power of 2 */
tx->mask = cfg->ring_size - 1;
- tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
- priv->options_dqo_rda.tx_comp_ring_entries - 1 :
- tx->mask;
+ tx->dqo.complq_mask = tx->mask;
/* The max number of pending packets determines the maximum number of
* descriptors which maybe written to the completion queue.
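Finally, gve_tx_dqo.c drops the separately sized completion ring and reuses tx->mask for the completion queue, which works because both rings are now the same power-of-2 size. A tiny sketch of the index wrapping that mask enables:

#include <stdio.h>

/* With a power-of-2 ring size, "index & mask" replaces a modulo when
 * wrapping ring indices; after this patch the completion queue reuses the
 * TX mask since both rings share one size. */
int main(void)
{
	unsigned int ring_size = 1024;		/* must be a power of 2 */
	unsigned int mask = ring_size - 1;	/* analogue of tx->mask */
	unsigned int seq;

	for (seq = 1021; seq < 1027; seq++)
		printf("descriptor %u -> slot %u\n", seq, seq & mask);
	return 0;
}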