author     Jakub Kicinski <kuba@kernel.org>  2023-12-05 05:04:08 +0300
committer  Jakub Kicinski <kuba@kernel.org>  2023-12-05 05:04:09 +0300
commit     a90d56049acc45802f67cd7d4c058ac45b1bc26f
tree       be114090f7ef16761647fd78573ca7b4fe6a40e8
parent     3706f141e5639d3034bd83448e929b8462362651
parent     e3b57ffdb3256e4ff2cf83be0dc076c4b07f92b9
Merge branch 'introduce-queue-and-napi-support-in-netdev-genl-was-introduce-napi-queues-support'
Amritha Nambiar says:

====================
Introduce queue and NAPI support in netdev-genl (Was: Introduce NAPI queues support)

Add the capability to export the following via netdev-genl interface:
- queue information supported by the device
- NAPI information supported by the device

Introduce support for associating queue and NAPI instance. Extend the
netdev_genl generic netlink family for netdev with queue and NAPI data.

The queue parameters exposed are:
- queue index
- queue type
- ifindex
- NAPI id associated with the queue

Additional rx and tx queue parameters can be exposed in follow up patches
by stashing them in netdev queue structures. XDP queue type can also be
supported in future.

The NAPI fields exposed are:
- NAPI id
- NAPI device ifindex
- Interrupt number associated with the NAPI instance
- PID for the NAPI thread

This series only supports 'get' ability for retrieving certain queue and
NAPI attributes. The 'set' ability for configuring queue and associated
NAPI instance via netdev-genl will be submitted as a separate patch series.

Previous discussion at:
https://lore.kernel.org/netdev/c8476530638a5f4381d64db0e024ed49c2db3b02.camel@gmail.com/T/#m00999652a8b4731fbdb7bf698d2e3666c65a60e7

$ ./cli.py --spec netdev.yaml --do queue-get --json='{"ifindex": 12, "id": 0, "type": 0}'
{'id': 0, 'ifindex': 12, 'napi-id': 593, 'type': 'rx'}

$ ./cli.py --spec netdev.yaml --do queue-get --json='{"ifindex": 12, "id": 0, "type": 1}'
{'id': 0, 'ifindex': 12, 'napi-id': 593, 'type': 'tx'}

$ ./cli.py --spec netdev.yaml --dump queue-get --json='{"ifindex": 12}'
[{'id': 0, 'ifindex': 12, 'napi-id': 593, 'type': 'rx'},
 {'id': 1, 'ifindex': 12, 'napi-id': 594, 'type': 'rx'},
 {'id': 2, 'ifindex': 12, 'napi-id': 595, 'type': 'rx'},
 {'id': 3, 'ifindex': 12, 'napi-id': 596, 'type': 'rx'},
 {'id': 0, 'ifindex': 12, 'napi-id': 593, 'type': 'tx'},
 {'id': 1, 'ifindex': 12, 'napi-id': 594, 'type': 'tx'},
 {'id': 2, 'ifindex': 12, 'napi-id': 595, 'type': 'tx'},
 {'id': 3, 'ifindex': 12, 'napi-id': 596, 'type': 'tx'}]

$ ./cli.py --spec netdev.yaml --do napi-get --json='{"id": 593}'
{'id': 593, 'ifindex': 12, 'irq': 291, 'pid': 3727}

$ ./cli.py --spec netdev.yaml --dump napi-get --json='{"ifindex": 12}'
[{'id': 596, 'ifindex': 12, 'irq': 294, 'pid': 3724},
 {'id': 595, 'ifindex': 12, 'irq': 293, 'pid': 3725},
 {'id': 594, 'ifindex': 12, 'irq': 292, 'pid': 3726},
 {'id': 593, 'ifindex': 12, 'irq': 291, 'pid': 3727}]
====================

Link: https://lore.kernel.org/r/170147307026.5260.9300080745237900261.stgit@anambiarhost.jf.intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
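The same queries can be issued from C through the user-space library generated by this series (tools/net/ynl/generated/netdev-user.[ch]). Below is a minimal sketch that dumps the queues of ifindex 12 -- the interface used in the CLI examples above -- and prints each queue's NAPI id. It assumes the ynl helper library from tools/net/ynl/lib (ynl_sock_create(), ynl_sock_destroy(), ynl_dump_foreach(), the ynl_netdev_family descriptor); the include paths depend on the local ynl build setup.

#include <stdio.h>
#include <ynl.h>
#include "netdev-user.h"

int main(void)
{
	struct netdev_queue_get_req_dump *req;
	struct netdev_queue_get_list *queues;
	struct ynl_error yerr;
	struct ynl_sock *ys;

	ys = ynl_sock_create(&ynl_netdev_family, &yerr);
	if (!ys)
		return 1;

	/* Dump request filtered to a single interface (ifindex 12). */
	req = netdev_queue_get_req_dump_alloc();
	netdev_queue_get_req_dump_set_ifindex(req, 12);

	queues = netdev_queue_get_dump(ys, req);
	netdev_queue_get_req_dump_free(req);
	if (!queues) {
		ynl_sock_destroy(ys);
		return 1;
	}

	/* Each entry mirrors one line of the CLI dump output above;
	 * napi-id is reported for queues with a configured NAPI.
	 */
	ynl_dump_foreach(queues, q)
		printf("queue %u (%s) -> napi %u\n",
		       q->id, netdev_queue_type_str(q->type), q->napi_id);

	netdev_queue_get_list_free(queues);
	ynl_sock_destroy(ys);
	return 0;
}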
-rw-r--r--  Documentation/netlink/specs/netdev.yaml      94
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c    14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c    12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c     69
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h      4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c     4
-rw-r--r--  include/linux/netdevice.h                    14
-rw-r--r--  include/net/netdev_rx_queue.h                 4
-rw-r--r--  include/uapi/linux/netdev.h                  27
-rw-r--r--  net/core/dev.c                               41
-rw-r--r--  net/core/dev.h                                2
-rw-r--r--  net/core/netdev-genl-gen.c                   50
-rw-r--r--  net/core/netdev-genl-gen.h                    5
-rw-r--r--  net/core/netdev-genl.c                      331
-rw-r--r--  tools/include/uapi/linux/netdev.h            27
-rw-r--r--  tools/net/ynl/generated/netdev-user.c       289
-rw-r--r--  tools/net/ynl/generated/netdev-user.h       178
17 files changed, 1159 insertions(+), 6 deletions(-)
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index eef6358ec587..f2c76d103bd8 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -66,6 +66,10 @@ definitions:
name: tx-checksum
doc:
L3 checksum HW offload is supported by the driver.
+ -
+ name: queue-type
+ type: enum
+ entries: [ rx, tx ]
attribute-sets:
-
@@ -209,6 +213,54 @@ attribute-sets:
name: recycle-released-refcnt
type: uint
+ -
+ name: napi
+ attributes:
+ -
+ name: ifindex
+ doc: ifindex of the netdevice to which NAPI instance belongs.
+ type: u32
+ checks:
+ min: 1
+ -
+ name: id
+ doc: ID of the NAPI instance.
+ type: u32
+ -
+ name: irq
+ doc: The associated interrupt vector number for the napi
+ type: u32
+ -
+ name: pid
+ doc: PID of the napi thread, if NAPI is configured to operate in
+ threaded mode. If NAPI is not in threaded mode (i.e. uses normal
+ softirq context), the attribute will be absent.
+ type: u32
+ -
+ name: queue
+ attributes:
+ -
+ name: id
+ doc: Queue index; most queue types are indexed like a C array, with
+ indexes starting at 0 and ending at queue count - 1. Queue indexes
+ are scoped to an interface and queue type.
+ type: u32
+ -
+ name: ifindex
+ doc: ifindex of the netdevice to which the queue belongs.
+ type: u32
+ checks:
+ min: 1
+ -
+ name: type
+ doc: Queue type as rx, tx. Each queue type defines a separate ID space.
+ type: u32
+ enum: queue-type
+ -
+ name: napi-id
+ doc: ID of the NAPI instance which services this queue.
+ type: u32
+
operations:
list:
-
@@ -307,6 +359,48 @@ operations:
dump:
reply: *pp-stats-reply
config-cond: page-pool-stats
+ -
+ name: queue-get
+ doc: Get queue information from the kernel.
+ Only configured queues will be reported (as opposed to all available
+ hardware queues).
+ attribute-set: queue
+ do:
+ request:
+ attributes:
+ - ifindex
+ - type
+ - id
+ reply: &queue-get-op
+ attributes:
+ - id
+ - type
+ - napi-id
+ - ifindex
+ dump:
+ request:
+ attributes:
+ - ifindex
+ reply: *queue-get-op
+ -
+ name: napi-get
+ doc: Get information about NAPI instances configured on the system.
+ attribute-set: napi
+ do:
+ request:
+ attributes:
+ - id
+ reply: &napi-get-op
+ attributes:
+ - id
+ - ifindex
+ - irq
+ - pid
+ dump:
+ request:
+ attributes:
+ - ifindex
+ reply: *napi-get-op
mcast-groups:
list:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a405c89b00d3..92a54113f872 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4008,6 +4008,9 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
+ netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
bpf_prog_add(bp->xdp_prog, 1);
rxr->xdp_prog = bp->xdp_prog;
@@ -4084,6 +4087,11 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (i >= bp->tx_nr_rings_xdp)
+ netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
+ NETDEV_QUEUE_TYPE_TX,
+ &txr->bnapi->napi);
}
return 0;
@@ -9930,6 +9938,7 @@ static int bnxt_request_irq(struct bnxt *bp)
if (rc)
break;
+ netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
@@ -9957,6 +9966,11 @@ static void bnxt_del_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
+ for (i = 0; i < bp->rx_nr_rings; i++)
+ netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
+ netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 7fa43827a3f0..edad5f9ab16c 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -189,10 +189,18 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ if (vsi->netdev)
+ netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, NULL);
tx_ring->q_vector = NULL;
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ }
+ ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+ if (vsi->netdev)
+ netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, NULL);
rx_ring->q_vector = NULL;
+ }
/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d826b5afa143..626577c7d5b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2452,6 +2452,10 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+
+ /* Associate q_vector rings to napi */
+ ice_vsi_set_napi_queues(vsi, true);
+
vsi->stat_offsets_loaded = false;
if (ice_is_xdp_ena_vsi(vsi)) {
@@ -2932,6 +2936,71 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/**
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @dev: device to which NAPI and queue belong
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ * @locked: is the rtnl_lock already held
+ *
+ * Set the napi instance for the queue
+ */
+static void
+ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi,
+ bool locked)
+{
+ if (!locked)
+ rtnl_lock();
+ netif_queue_set_napi(dev, queue_index, type, napi);
+ if (!locked)
+ rtnl_unlock();
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ * @locked: is the rtnl_lock already held
+ *
+ * Associate the q_vector napi with all the queue[s] on the vector
+ */
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+ locked);
+
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+ locked);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_vsi_set_napi_queues
+ * @vsi: VSI pointer
+ * @locked: is the rtnl_lock already held
+ *
+ * Associate queue[s] with napi for all vectors
+ */
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
+{
+ int i;
+
+ if (!vsi->netdev)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
+}
+
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index f24f5d1e6f9c..71bd27244941 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -91,6 +91,10 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked);
+
int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 43ba3e55b8c1..9e3d3919307b 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3375,9 +3375,11 @@ static void ice_napi_add(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- ice_for_each_q_vector(vsi, v_idx)
+ ice_for_each_q_vector(vsi, v_idx) {
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll);
+ ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+ }
}
/**
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c2d74bc112dd..5551177e024e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -382,6 +382,7 @@ struct napi_struct {
/* control-path-only fields follow */
struct list_head dev_list;
struct hlist_node napi_hash_node;
+ int irq;
};
enum {
@@ -665,6 +666,10 @@ struct netdev_queue {
#ifdef CONFIG_XDP_SOCKETS
struct xsk_buff_pool *pool;
#endif
+ /* NAPI instance for the queue
+ * Readers and writers must hold RTNL
+ */
+ struct napi_struct *napi;
/*
* write-mostly part
*/
@@ -2657,6 +2662,15 @@ static inline void *netdev_priv(const struct net_device *dev)
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
+void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type,
+ struct napi_struct *napi);
+
+static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
+{
+ napi->irq = irq;
+}
+
/* Default NAPI poll() weight
* Device drivers are strongly advised to not use bigger value
*/
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index cdcafb30d437..aa1716fb0e53 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -21,6 +21,10 @@ struct netdev_rx_queue {
#ifdef CONFIG_XDP_SOCKETS
struct xsk_buff_pool *pool;
#endif
+ /* NAPI instance for the queue
+ * Readers and writers must hold RTNL
+ */
+ struct napi_struct *napi;
} ____cacheline_aligned_in_smp;
/*
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index 6244c0164976..424c5e28f495 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -62,6 +62,11 @@ enum netdev_xsk_flags {
NETDEV_XSK_FLAGS_TX_CHECKSUM = 2,
};
+enum netdev_queue_type {
+ NETDEV_QUEUE_TYPE_RX,
+ NETDEV_QUEUE_TYPE_TX,
+};
+
enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
@@ -105,6 +110,26 @@ enum {
};
enum {
+ NETDEV_A_NAPI_IFINDEX = 1,
+ NETDEV_A_NAPI_ID,
+ NETDEV_A_NAPI_IRQ,
+ NETDEV_A_NAPI_PID,
+
+ __NETDEV_A_NAPI_MAX,
+ NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1)
+};
+
+enum {
+ NETDEV_A_QUEUE_ID = 1,
+ NETDEV_A_QUEUE_IFINDEX,
+ NETDEV_A_QUEUE_TYPE,
+ NETDEV_A_QUEUE_NAPI_ID,
+
+ __NETDEV_A_QUEUE_MAX,
+ NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
@@ -114,6 +139,8 @@ enum {
NETDEV_CMD_PAGE_POOL_DEL_NTF,
NETDEV_CMD_PAGE_POOL_CHANGE_NTF,
NETDEV_CMD_PAGE_POOL_STATS_GET,
+ NETDEV_CMD_QUEUE_GET,
+ NETDEV_CMD_NAPI_GET,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/net/core/dev.c b/net/core/dev.c
index 3950ced396b5..1d720fc59161 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -165,7 +165,6 @@ static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
struct net_device *dev,
struct netlink_ext_ack *extack);
-static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -6139,7 +6138,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
EXPORT_SYMBOL(napi_complete_done);
/* must be called under rcu_read_lock(), as we dont take a reference */
-static struct napi_struct *napi_by_id(unsigned int napi_id)
+struct napi_struct *napi_by_id(unsigned int napi_id)
{
unsigned int hash = napi_id % HASH_SIZE(napi_hash);
struct napi_struct *napi;
@@ -6400,6 +6399,43 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
}
EXPORT_SYMBOL(dev_set_threaded);
+/**
+ * netif_queue_set_napi - Associate queue with the napi
+ * @dev: device to which NAPI and queue belong
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context, pass NULL to clear previously set NAPI
+ *
+ * Set queue with its corresponding napi context. This should be done after
+ * registering the NAPI handler for the queue-vector and the queues have been
+ * mapped to the corresponding interrupt vector.
+ */
+void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi)
+{
+ struct netdev_rx_queue *rxq;
+ struct netdev_queue *txq;
+
+ if (WARN_ON_ONCE(napi && !napi->dev))
+ return;
+ if (dev->reg_state >= NETREG_REGISTERED)
+ ASSERT_RTNL();
+
+ switch (type) {
+ case NETDEV_QUEUE_TYPE_RX:
+ rxq = __netif_get_rx_queue(dev, queue_index);
+ rxq->napi = napi;
+ return;
+ case NETDEV_QUEUE_TYPE_TX:
+ txq = netdev_get_tx_queue(dev, queue_index);
+ txq->napi = napi;
+ return;
+ default:
+ return;
+ }
+}
+EXPORT_SYMBOL(netif_queue_set_napi);
+
void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{
@@ -6435,6 +6471,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
*/
if (dev->threaded && napi_kthread_create(napi))
dev->threaded = 0;
+ netif_napi_set_irq(napi, -1);
}
EXPORT_SYMBOL(netif_napi_add_weight);
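Taken together with the bnxt and ice changes above, the expected driver pattern is: add the NAPI, record its IRQ with netif_napi_set_irq(), then bind each RX/TX queue to the NAPI with netif_queue_set_napi() (under RTNL once the device is registered), and clear the binding with a NULL napi on teardown. A minimal, hypothetical sketch -- struct my_ring/my_priv and the one-combined-ring-per-vector layout are illustrative, not part of this series:

#include <linux/netdevice.h>

/* Illustrative driver-private structures -- not part of this series. */
struct my_ring {
	struct napi_struct napi;
	int irq;
};

struct my_priv {
	struct net_device *netdev;
	struct my_ring *rings;
	unsigned int num_rings;
};

/* Open path: runs under RTNL (ndo_open), after netif_napi_add() and
 * after the interrupt vectors have been requested.
 */
static void my_drv_link_queues_to_napi(struct my_priv *priv)
{
	unsigned int i;

	for (i = 0; i < priv->num_rings; i++) {
		struct my_ring *ring = &priv->rings[i];

		netif_napi_set_irq(&ring->napi, ring->irq);
		netif_queue_set_napi(priv->netdev, i, NETDEV_QUEUE_TYPE_RX,
				     &ring->napi);
		netif_queue_set_napi(priv->netdev, i, NETDEV_QUEUE_TYPE_TX,
				     &ring->napi);
	}
}

/* Close/teardown path: clear the associations before deleting the NAPIs. */
static void my_drv_unlink_queues_from_napi(struct my_priv *priv)
{
	unsigned int i;

	for (i = 0; i < priv->num_rings; i++) {
		netif_queue_set_napi(priv->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
		netif_queue_set_napi(priv->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
	}
}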
diff --git a/net/core/dev.h b/net/core/dev.h
index 5aa45f0fd4ae..7795b8ad841d 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -145,4 +145,6 @@ void xdp_do_check_flushed(struct napi_struct *napi);
#else
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif
+
+struct napi_struct *napi_by_id(unsigned int napi_id);
#endif
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index dccd8c3a141e..be7f2ebd61b2 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -46,6 +46,28 @@ static const struct nla_policy netdev_page_pool_stats_get_nl_policy[NETDEV_A_PAG
};
#endif /* CONFIG_PAGE_POOL_STATS */
+/* NETDEV_CMD_QUEUE_GET - do */
+static const struct nla_policy netdev_queue_get_do_nl_policy[NETDEV_A_QUEUE_TYPE + 1] = {
+ [NETDEV_A_QUEUE_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+ [NETDEV_A_QUEUE_TYPE] = NLA_POLICY_MAX(NLA_U32, 1),
+ [NETDEV_A_QUEUE_ID] = { .type = NLA_U32, },
+};
+
+/* NETDEV_CMD_QUEUE_GET - dump */
+static const struct nla_policy netdev_queue_get_dump_nl_policy[NETDEV_A_QUEUE_IFINDEX + 1] = {
+ [NETDEV_A_QUEUE_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
+/* NETDEV_CMD_NAPI_GET - do */
+static const struct nla_policy netdev_napi_get_do_nl_policy[NETDEV_A_NAPI_ID + 1] = {
+ [NETDEV_A_NAPI_ID] = { .type = NLA_U32, },
+};
+
+/* NETDEV_CMD_NAPI_GET - dump */
+static const struct nla_policy netdev_napi_get_dump_nl_policy[NETDEV_A_NAPI_IFINDEX + 1] = {
+ [NETDEV_A_NAPI_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
/* Ops table for netdev */
static const struct genl_split_ops netdev_nl_ops[] = {
{
@@ -88,6 +110,34 @@ static const struct genl_split_ops netdev_nl_ops[] = {
.flags = GENL_CMD_CAP_DUMP,
},
#endif /* CONFIG_PAGE_POOL_STATS */
+ {
+ .cmd = NETDEV_CMD_QUEUE_GET,
+ .doit = netdev_nl_queue_get_doit,
+ .policy = netdev_queue_get_do_nl_policy,
+ .maxattr = NETDEV_A_QUEUE_TYPE,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NETDEV_CMD_QUEUE_GET,
+ .dumpit = netdev_nl_queue_get_dumpit,
+ .policy = netdev_queue_get_dump_nl_policy,
+ .maxattr = NETDEV_A_QUEUE_IFINDEX,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = NETDEV_CMD_NAPI_GET,
+ .doit = netdev_nl_napi_get_doit,
+ .policy = netdev_napi_get_do_nl_policy,
+ .maxattr = NETDEV_A_NAPI_ID,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NETDEV_CMD_NAPI_GET,
+ .dumpit = netdev_nl_napi_get_dumpit,
+ .policy = netdev_napi_get_dump_nl_policy,
+ .maxattr = NETDEV_A_NAPI_IFINDEX,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
};
static const struct genl_multicast_group netdev_nl_mcgrps[] = {
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
index 649e4b46eccf..a47f2bcbe4fa 100644
--- a/net/core/netdev-genl-gen.h
+++ b/net/core/netdev-genl-gen.h
@@ -23,6 +23,11 @@ int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb,
struct genl_info *info);
int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info);
+int netdev_nl_queue_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info);
+int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
enum {
NETDEV_NLGRP_MGMT,
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 10f2124e9e23..fd98936da3ae 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -7,8 +7,25 @@
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>
+#include <net/netdev_rx_queue.h>
+#include <net/busy_poll.h>
#include "netdev-genl-gen.h"
+#include "dev.h"
+
+struct netdev_nl_dump_ctx {
+ unsigned long ifindex;
+ unsigned int rxq_idx;
+ unsigned int txq_idx;
+ unsigned int napi_id;
+};
+
+static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
+{
+ NL_ASSERT_DUMP_CTX_FITS(struct netdev_nl_dump_ctx);
+
+ return (struct netdev_nl_dump_ctx *)cb->ctx;
+}
static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
@@ -122,12 +139,13 @@ err_free_msg:
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
struct net *net = sock_net(skb->sk);
struct net_device *netdev;
int err = 0;
rtnl_lock();
- for_each_netdev_dump(net, netdev, cb->args[0]) {
+ for_each_netdev_dump(net, netdev, ctx->ifindex) {
err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
if (err < 0)
break;
@@ -140,6 +158,317 @@ int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
+static int
+netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
+ const struct genl_info *info)
+{
+ void *hdr;
+ pid_t pid;
+
+ if (WARN_ON_ONCE(!napi->dev))
+ return -EINVAL;
+ if (!(napi->dev->flags & IFF_UP))
+ return 0;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (napi->napi_id >= MIN_NAPI_ID &&
+ nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
+ goto nla_put_failure;
+
+ if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
+ goto nla_put_failure;
+
+ if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
+ goto nla_put_failure;
+
+ if (napi->thread) {
+ pid = task_pid_nr(napi->thread);
+ if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
+ goto nla_put_failure;
+ }
+
+ genlmsg_end(rsp, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
+}
+
+int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct napi_struct *napi;
+ struct sk_buff *rsp;
+ u32 napi_id;
+ int err;
+
+ if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
+ return -EINVAL;
+
+ napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);
+
+ rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ rtnl_lock();
+
+ napi = napi_by_id(napi_id);
+ if (napi)
+ err = netdev_nl_napi_fill_one(rsp, napi, info);
+ else
+ err = -EINVAL;
+
+ rtnl_unlock();
+
+ if (err)
+ goto err_free_msg;
+
+ return genlmsg_reply(rsp, info);
+
+err_free_msg:
+ nlmsg_free(rsp);
+ return err;
+}
+
+static int
+netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
+ const struct genl_info *info,
+ struct netdev_nl_dump_ctx *ctx)
+{
+ struct napi_struct *napi;
+ int err = 0;
+
+ if (!(netdev->flags & IFF_UP))
+ return err;
+
+ list_for_each_entry(napi, &netdev->napi_list, dev_list) {
+ if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
+ continue;
+
+ err = netdev_nl_napi_fill_one(rsp, napi, info);
+ if (err)
+ return err;
+ ctx->napi_id = napi->napi_id;
+ }
+ return err;
+}
+
+int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
+ const struct genl_info *info = genl_info_dump(cb);
+ struct net *net = sock_net(skb->sk);
+ struct net_device *netdev;
+ u32 ifindex = 0;
+ int err = 0;
+
+ if (info->attrs[NETDEV_A_NAPI_IFINDEX])
+ ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);
+
+ rtnl_lock();
+ if (ifindex) {
+ netdev = __dev_get_by_index(net, ifindex);
+ if (netdev)
+ err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
+ else
+ err = -ENODEV;
+ } else {
+ for_each_netdev_dump(net, netdev, ctx->ifindex) {
+ err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
+ if (err < 0)
+ break;
+ ctx->napi_id = 0;
+ }
+ }
+ rtnl_unlock();
+
+ if (err != -EMSGSIZE)
+ return err;
+
+ return skb->len;
+}
+
+static int
+netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
+ u32 q_idx, u32 q_type, const struct genl_info *info)
+{
+ struct netdev_rx_queue *rxq;
+ struct netdev_queue *txq;
+ void *hdr;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
+ nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
+ nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
+ goto nla_put_failure;
+
+ switch (q_type) {
+ case NETDEV_QUEUE_TYPE_RX:
+ rxq = __netif_get_rx_queue(netdev, q_idx);
+ if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
+ rxq->napi->napi_id))
+ goto nla_put_failure;
+ break;
+ case NETDEV_QUEUE_TYPE_TX:
+ txq = netdev_get_tx_queue(netdev, q_idx);
+ if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
+ txq->napi->napi_id))
+ goto nla_put_failure;
+ }
+
+ genlmsg_end(rsp, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
+}
+
+static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
+ u32 q_type)
+{
+ switch (q_type) {
+ case NETDEV_QUEUE_TYPE_RX:
+ if (q_id >= netdev->real_num_rx_queues)
+ return -EINVAL;
+ return 0;
+ case NETDEV_QUEUE_TYPE_TX:
+ if (q_id >= netdev->real_num_tx_queues)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
+ u32 q_type, const struct genl_info *info)
+{
+ int err = 0;
+
+ if (!(netdev->flags & IFF_UP))
+ return err;
+
+ err = netdev_nl_queue_validate(netdev, q_idx, q_type);
+ if (err)
+ return err;
+
+ return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
+}
+
+int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ u32 q_id, q_type, ifindex;
+ struct net_device *netdev;
+ struct sk_buff *rsp;
+ int err;
+
+ if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
+ return -EINVAL;
+
+ q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
+ q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
+ ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);
+
+ rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ rtnl_lock();
+
+ netdev = __dev_get_by_index(genl_info_net(info), ifindex);
+ if (netdev)
+ err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
+ else
+ err = -ENODEV;
+
+ rtnl_unlock();
+
+ if (err)
+ goto err_free_msg;
+
+ return genlmsg_reply(rsp, info);
+
+err_free_msg:
+ nlmsg_free(rsp);
+ return err;
+}
+
+static int
+netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
+ const struct genl_info *info,
+ struct netdev_nl_dump_ctx *ctx)
+{
+ int err = 0;
+ int i;
+
+ if (!(netdev->flags & IFF_UP))
+ return err;
+
+ for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
+ err = netdev_nl_queue_fill_one(rsp, netdev, i,
+ NETDEV_QUEUE_TYPE_RX, info);
+ if (err)
+ return err;
+ ctx->rxq_idx = i++;
+ }
+ for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
+ err = netdev_nl_queue_fill_one(rsp, netdev, i,
+ NETDEV_QUEUE_TYPE_TX, info);
+ if (err)
+ return err;
+ ctx->txq_idx = i++;
+ }
+
+ return err;
+}
+
+int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
+ const struct genl_info *info = genl_info_dump(cb);
+ struct net *net = sock_net(skb->sk);
+ struct net_device *netdev;
+ u32 ifindex = 0;
+ int err = 0;
+
+ if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
+ ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);
+
+ rtnl_lock();
+ if (ifindex) {
+ netdev = __dev_get_by_index(net, ifindex);
+ if (netdev)
+ err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
+ else
+ err = -ENODEV;
+ } else {
+ for_each_netdev_dump(net, netdev, ctx->ifindex) {
+ err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
+ if (err < 0)
+ break;
+ ctx->rxq_idx = 0;
+ ctx->txq_idx = 0;
+ }
+ }
+ rtnl_unlock();
+
+ if (err != -EMSGSIZE)
+ return err;
+
+ return skb->len;
+}
+
static int netdev_genl_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index 6244c0164976..424c5e28f495 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -62,6 +62,11 @@ enum netdev_xsk_flags {
NETDEV_XSK_FLAGS_TX_CHECKSUM = 2,
};
+enum netdev_queue_type {
+ NETDEV_QUEUE_TYPE_RX,
+ NETDEV_QUEUE_TYPE_TX,
+};
+
enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
@@ -105,6 +110,26 @@ enum {
};
enum {
+ NETDEV_A_NAPI_IFINDEX = 1,
+ NETDEV_A_NAPI_ID,
+ NETDEV_A_NAPI_IRQ,
+ NETDEV_A_NAPI_PID,
+
+ __NETDEV_A_NAPI_MAX,
+ NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1)
+};
+
+enum {
+ NETDEV_A_QUEUE_ID = 1,
+ NETDEV_A_QUEUE_IFINDEX,
+ NETDEV_A_QUEUE_TYPE,
+ NETDEV_A_QUEUE_NAPI_ID,
+
+ __NETDEV_A_QUEUE_MAX,
+ NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
@@ -114,6 +139,8 @@ enum {
NETDEV_CMD_PAGE_POOL_DEL_NTF,
NETDEV_CMD_PAGE_POOL_CHANGE_NTF,
NETDEV_CMD_PAGE_POOL_STATS_GET,
+ NETDEV_CMD_QUEUE_GET,
+ NETDEV_CMD_NAPI_GET,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/tools/net/ynl/generated/netdev-user.c b/tools/net/ynl/generated/netdev-user.c
index 3b9dee94d4ce..ed8bcb855a1d 100644
--- a/tools/net/ynl/generated/netdev-user.c
+++ b/tools/net/ynl/generated/netdev-user.c
@@ -23,6 +23,8 @@ static const char * const netdev_op_strmap[] = {
[NETDEV_CMD_PAGE_POOL_DEL_NTF] = "page-pool-del-ntf",
[NETDEV_CMD_PAGE_POOL_CHANGE_NTF] = "page-pool-change-ntf",
[NETDEV_CMD_PAGE_POOL_STATS_GET] = "page-pool-stats-get",
+ [NETDEV_CMD_QUEUE_GET] = "queue-get",
+ [NETDEV_CMD_NAPI_GET] = "napi-get",
};
const char *netdev_op_str(int op)
@@ -76,6 +78,18 @@ const char *netdev_xsk_flags_str(enum netdev_xsk_flags value)
return netdev_xsk_flags_strmap[value];
}
+static const char * const netdev_queue_type_strmap[] = {
+ [0] = "rx",
+ [1] = "tx",
+};
+
+const char *netdev_queue_type_str(enum netdev_queue_type value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(netdev_queue_type_strmap))
+ return NULL;
+ return netdev_queue_type_strmap[value];
+}
+
/* Policies */
struct ynl_policy_attr netdev_page_pool_info_policy[NETDEV_A_PAGE_POOL_MAX + 1] = {
[NETDEV_A_PAGE_POOL_ID] = { .name = "id", .type = YNL_PT_UINT, },
@@ -135,6 +149,30 @@ struct ynl_policy_nest netdev_page_pool_stats_nest = {
.table = netdev_page_pool_stats_policy,
};
+struct ynl_policy_attr netdev_queue_policy[NETDEV_A_QUEUE_MAX + 1] = {
+ [NETDEV_A_QUEUE_ID] = { .name = "id", .type = YNL_PT_U32, },
+ [NETDEV_A_QUEUE_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
+ [NETDEV_A_QUEUE_TYPE] = { .name = "type", .type = YNL_PT_U32, },
+ [NETDEV_A_QUEUE_NAPI_ID] = { .name = "napi-id", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest netdev_queue_nest = {
+ .max_attr = NETDEV_A_QUEUE_MAX,
+ .table = netdev_queue_policy,
+};
+
+struct ynl_policy_attr netdev_napi_policy[NETDEV_A_NAPI_MAX + 1] = {
+ [NETDEV_A_NAPI_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
+ [NETDEV_A_NAPI_ID] = { .name = "id", .type = YNL_PT_U32, },
+ [NETDEV_A_NAPI_IRQ] = { .name = "irq", .type = YNL_PT_U32, },
+ [NETDEV_A_NAPI_PID] = { .name = "pid", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest netdev_napi_nest = {
+ .max_attr = NETDEV_A_NAPI_MAX,
+ .table = netdev_napi_policy,
+};
+
/* Common nested types */
void netdev_page_pool_info_free(struct netdev_page_pool_info *obj)
{
@@ -174,6 +212,16 @@ int netdev_page_pool_info_parse(struct ynl_parse_arg *yarg,
return MNL_CB_ERROR;
dst->_present.ifindex = 1;
dst->ifindex = mnl_attr_get_u32(attr);
+ } else if (type == NETDEV_A_NAPI_IRQ) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.irq = 1;
+ dst->irq = mnl_attr_get_u32(attr);
+ } else if (type == NETDEV_A_NAPI_PID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.pid = 1;
+ dst->pid = mnl_attr_get_u32(attr);
}
}
@@ -617,6 +665,247 @@ free_list:
return NULL;
}
+/* ============== NETDEV_CMD_QUEUE_GET ============== */
+/* NETDEV_CMD_QUEUE_GET - do */
+void netdev_queue_get_req_free(struct netdev_queue_get_req *req)
+{
+ free(req);
+}
+
+void netdev_queue_get_rsp_free(struct netdev_queue_get_rsp *rsp)
+{
+ free(rsp);
+}
+
+int netdev_queue_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct netdev_queue_get_rsp *dst;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == NETDEV_A_QUEUE_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.id = 1;
+ dst->id = mnl_attr_get_u32(attr);
+ } else if (type == NETDEV_A_QUEUE_TYPE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.type = 1;
+ dst->type = mnl_attr_get_u32(attr);
+ } else if (type == NETDEV_A_QUEUE_NAPI_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.napi_id = 1;
+ dst->napi_id = mnl_attr_get_u32(attr);
+ } else if (type == NETDEV_A_QUEUE_IFINDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.ifindex = 1;
+ dst->ifindex = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct netdev_queue_get_rsp *
+netdev_queue_get(struct ynl_sock *ys, struct netdev_queue_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct netdev_queue_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, NETDEV_CMD_QUEUE_GET, 1);
+ ys->req_policy = &netdev_queue_nest;
+ yrs.yarg.rsp_policy = &netdev_queue_nest;
+
+ if (req->_present.ifindex)
+ mnl_attr_put_u32(nlh, NETDEV_A_QUEUE_IFINDEX, req->ifindex);
+ if (req->_present.type)
+ mnl_attr_put_u32(nlh, NETDEV_A_QUEUE_TYPE, req->type);
+ if (req->_present.id)
+ mnl_attr_put_u32(nlh, NETDEV_A_QUEUE_ID, req->id);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = netdev_queue_get_rsp_parse;
+ yrs.rsp_cmd = NETDEV_CMD_QUEUE_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ netdev_queue_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* NETDEV_CMD_QUEUE_GET - dump */
+void netdev_queue_get_list_free(struct netdev_queue_get_list *rsp)
+{
+ struct netdev_queue_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp);
+ }
+}
+
+struct netdev_queue_get_list *
+netdev_queue_get_dump(struct ynl_sock *ys,
+ struct netdev_queue_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct netdev_queue_get_list);
+ yds.cb = netdev_queue_get_rsp_parse;
+ yds.rsp_cmd = NETDEV_CMD_QUEUE_GET;
+ yds.rsp_policy = &netdev_queue_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, NETDEV_CMD_QUEUE_GET, 1);
+ ys->req_policy = &netdev_queue_nest;
+
+ if (req->_present.ifindex)
+ mnl_attr_put_u32(nlh, NETDEV_A_QUEUE_IFINDEX, req->ifindex);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ netdev_queue_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== NETDEV_CMD_NAPI_GET ============== */
+/* NETDEV_CMD_NAPI_GET - do */
+void netdev_napi_get_req_free(struct netdev_napi_get_req *req)
+{
+ free(req);
+}
+
+void netdev_napi_get_rsp_free(struct netdev_napi_get_rsp *rsp)
+{
+ free(rsp);
+}
+
+int netdev_napi_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct netdev_napi_get_rsp *dst;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == NETDEV_A_NAPI_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.id = 1;
+ dst->id = mnl_attr_get_u32(attr);
+ } else if (type == NETDEV_A_NAPI_IFINDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.ifindex = 1;
+ dst->ifindex = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct netdev_napi_get_rsp *
+netdev_napi_get(struct ynl_sock *ys, struct netdev_napi_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct netdev_napi_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, NETDEV_CMD_NAPI_GET, 1);
+ ys->req_policy = &netdev_napi_nest;
+ yrs.yarg.rsp_policy = &netdev_napi_nest;
+
+ if (req->_present.id)
+ mnl_attr_put_u32(nlh, NETDEV_A_NAPI_ID, req->id);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = netdev_napi_get_rsp_parse;
+ yrs.rsp_cmd = NETDEV_CMD_NAPI_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ netdev_napi_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* NETDEV_CMD_NAPI_GET - dump */
+void netdev_napi_get_list_free(struct netdev_napi_get_list *rsp)
+{
+ struct netdev_napi_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp);
+ }
+}
+
+struct netdev_napi_get_list *
+netdev_napi_get_dump(struct ynl_sock *ys, struct netdev_napi_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct netdev_napi_get_list);
+ yds.cb = netdev_napi_get_rsp_parse;
+ yds.rsp_cmd = NETDEV_CMD_NAPI_GET;
+ yds.rsp_policy = &netdev_napi_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, NETDEV_CMD_NAPI_GET, 1);
+ ys->req_policy = &netdev_napi_nest;
+
+ if (req->_present.ifindex)
+ mnl_attr_put_u32(nlh, NETDEV_A_NAPI_IFINDEX, req->ifindex);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ netdev_napi_get_list_free(yds.first);
+ return NULL;
+}
+
static const struct ynl_ntf_info netdev_ntf_info[] = {
[NETDEV_CMD_DEV_ADD_NTF] = {
.alloc_sz = sizeof(struct netdev_dev_get_ntf),
diff --git a/tools/net/ynl/generated/netdev-user.h b/tools/net/ynl/generated/netdev-user.h
index cc3d80d1cf8c..3830cf2ab6b8 100644
--- a/tools/net/ynl/generated/netdev-user.h
+++ b/tools/net/ynl/generated/netdev-user.h
@@ -20,6 +20,7 @@ const char *netdev_op_str(int op);
const char *netdev_xdp_act_str(enum netdev_xdp_act value);
const char *netdev_xdp_rx_metadata_str(enum netdev_xdp_rx_metadata value);
const char *netdev_xsk_flags_str(enum netdev_xsk_flags value);
+const char *netdev_queue_type_str(enum netdev_queue_type value);
/* Common nested types */
struct netdev_page_pool_info {
@@ -261,4 +262,181 @@ netdev_page_pool_stats_get_list_free(struct netdev_page_pool_stats_get_list *rsp
struct netdev_page_pool_stats_get_list *
netdev_page_pool_stats_get_dump(struct ynl_sock *ys);
+/* ============== NETDEV_CMD_QUEUE_GET ============== */
+/* NETDEV_CMD_QUEUE_GET - do */
+struct netdev_queue_get_req {
+ struct {
+ __u32 ifindex:1;
+ __u32 type:1;
+ __u32 id:1;
+ } _present;
+
+ __u32 ifindex;
+ enum netdev_queue_type type;
+ __u32 id;
+};
+
+static inline struct netdev_queue_get_req *netdev_queue_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct netdev_queue_get_req));
+}
+void netdev_queue_get_req_free(struct netdev_queue_get_req *req);
+
+static inline void
+netdev_queue_get_req_set_ifindex(struct netdev_queue_get_req *req,
+ __u32 ifindex)
+{
+ req->_present.ifindex = 1;
+ req->ifindex = ifindex;
+}
+static inline void
+netdev_queue_get_req_set_type(struct netdev_queue_get_req *req,
+ enum netdev_queue_type type)
+{
+ req->_present.type = 1;
+ req->type = type;
+}
+static inline void
+netdev_queue_get_req_set_id(struct netdev_queue_get_req *req, __u32 id)
+{
+ req->_present.id = 1;
+ req->id = id;
+}
+
+struct netdev_queue_get_rsp {
+ struct {
+ __u32 id:1;
+ __u32 type:1;
+ __u32 napi_id:1;
+ __u32 ifindex:1;
+ } _present;
+
+ __u32 id;
+ enum netdev_queue_type type;
+ __u32 napi_id;
+ __u32 ifindex;
+};
+
+void netdev_queue_get_rsp_free(struct netdev_queue_get_rsp *rsp);
+
+/*
+ * Get queue information from the kernel. Only configured queues will be reported (as opposed to all available hardware queues).
+ */
+struct netdev_queue_get_rsp *
+netdev_queue_get(struct ynl_sock *ys, struct netdev_queue_get_req *req);
+
+/* NETDEV_CMD_QUEUE_GET - dump */
+struct netdev_queue_get_req_dump {
+ struct {
+ __u32 ifindex:1;
+ } _present;
+
+ __u32 ifindex;
+};
+
+static inline struct netdev_queue_get_req_dump *
+netdev_queue_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct netdev_queue_get_req_dump));
+}
+void netdev_queue_get_req_dump_free(struct netdev_queue_get_req_dump *req);
+
+static inline void
+netdev_queue_get_req_dump_set_ifindex(struct netdev_queue_get_req_dump *req,
+ __u32 ifindex)
+{
+ req->_present.ifindex = 1;
+ req->ifindex = ifindex;
+}
+
+struct netdev_queue_get_list {
+ struct netdev_queue_get_list *next;
+ struct netdev_queue_get_rsp obj __attribute__((aligned(8)));
+};
+
+void netdev_queue_get_list_free(struct netdev_queue_get_list *rsp);
+
+struct netdev_queue_get_list *
+netdev_queue_get_dump(struct ynl_sock *ys,
+ struct netdev_queue_get_req_dump *req);
+
+/* ============== NETDEV_CMD_NAPI_GET ============== */
+/* NETDEV_CMD_NAPI_GET - do */
+struct netdev_napi_get_req {
+ struct {
+ __u32 id:1;
+ } _present;
+
+ __u32 id;
+};
+
+static inline struct netdev_napi_get_req *netdev_napi_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct netdev_napi_get_req));
+}
+void netdev_napi_get_req_free(struct netdev_napi_get_req *req);
+
+static inline void
+netdev_napi_get_req_set_id(struct netdev_napi_get_req *req, __u32 id)
+{
+ req->_present.id = 1;
+ req->id = id;
+}
+
+struct netdev_napi_get_rsp {
+ struct {
+ __u32 id:1;
+ __u32 ifindex:1;
+ __u32 irq:1;
+ __u32 pid:1;
+ } _present;
+
+ __u32 id;
+ __u32 ifindex;
+ __u32 irq;
+ __u32 pid;
+};
+
+void netdev_napi_get_rsp_free(struct netdev_napi_get_rsp *rsp);
+
+/*
+ * Get information about NAPI instances configured on the system.
+ */
+struct netdev_napi_get_rsp *
+netdev_napi_get(struct ynl_sock *ys, struct netdev_napi_get_req *req);
+
+/* NETDEV_CMD_NAPI_GET - dump */
+struct netdev_napi_get_req_dump {
+ struct {
+ __u32 ifindex:1;
+ } _present;
+
+ __u32 ifindex;
+};
+
+static inline struct netdev_napi_get_req_dump *
+netdev_napi_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct netdev_napi_get_req_dump));
+}
+void netdev_napi_get_req_dump_free(struct netdev_napi_get_req_dump *req);
+
+static inline void
+netdev_napi_get_req_dump_set_ifindex(struct netdev_napi_get_req_dump *req,
+ __u32 ifindex)
+{
+ req->_present.ifindex = 1;
+ req->ifindex = ifindex;
+}
+
+struct netdev_napi_get_list {
+ struct netdev_napi_get_list *next;
+ struct netdev_napi_get_rsp obj __attribute__((aligned(8)));
+};
+
+void netdev_napi_get_list_free(struct netdev_napi_get_list *rsp);
+
+struct netdev_napi_get_list *
+netdev_napi_get_dump(struct ynl_sock *ys, struct netdev_napi_get_req_dump *req);
+
#endif /* _LINUX_NETDEV_GEN_H */
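For completeness, the 'do' side of the generated API works the same way. A short sketch of looking up a single NAPI instance by id (593 is the id from the cover-letter example; the same ynl socket helpers are assumed as in the sketch after the cover letter):

#include <stdio.h>
#include <ynl.h>
#include "netdev-user.h"

/* Query one NAPI instance by id and print its ifindex, irq and pid. */
static int show_napi(struct ynl_sock *ys, unsigned int napi_id)
{
	struct netdev_napi_get_req *req;
	struct netdev_napi_get_rsp *rsp;

	req = netdev_napi_get_req_alloc();
	netdev_napi_get_req_set_id(req, napi_id);

	rsp = netdev_napi_get(ys, req);
	netdev_napi_get_req_free(req);
	if (!rsp)
		return -1;

	printf("napi %u: ifindex %u", rsp->id, rsp->ifindex);
	if (rsp->_present.irq)
		printf(" irq %u", rsp->irq);
	if (rsp->_present.pid)		/* only reported for threaded NAPI */
		printf(" pid %u", rsp->pid);
	printf("\n");

	netdev_napi_get_rsp_free(rsp);
	return 0;
}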