Diffstat (limited to 'net/xdp')
-rw-r--r--  net/xdp/xsk.c            46
-rw-r--r--  net/xdp/xsk_buff_pool.c   1
-rw-r--r--  net/xdp/xsk_diag.c       17
-rw-r--r--  net/xdp/xsk_queue.h       6
-rw-r--r--  net/xdp/xskmap.c          3
5 files changed, 63 insertions, 10 deletions
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 3700266229f6..c3231620d210 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -123,7 +123,7 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
addr = xp_get_handle(xskb);
err = xskq_prod_reserve_desc(xs->rx, addr, len);
if (err) {
- xs->rx_dropped++;
+ xs->rx_queue_full++;
return err;
}
@@ -274,8 +274,10 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
rcu_read_lock();
list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
- if (!xskq_cons_peek_desc(xs->tx, desc, umem))
+ if (!xskq_cons_peek_desc(xs->tx, desc, umem)) {
+ xs->tx->queue_empty_descs++;
continue;
+ }
/* This is the backpressure mechanism for the Tx path.
* Reserve space in the completion queue and only proceed
@@ -387,6 +389,8 @@ static int xsk_generic_xmit(struct sock *sk)
sent_frame = true;
}
+ xs->tx->queue_empty_descs++;
+
out:
if (sent_frame)
sk->sk_write_space(sk);
@@ -698,7 +702,7 @@ struct xdp_umem_reg_v1 {
};
static int xsk_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen)
+ sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
@@ -716,7 +720,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
if (optlen < sizeof(entries))
return -EINVAL;
- if (copy_from_user(&entries, optval, sizeof(entries)))
+ if (copy_from_sockptr(&entries, optval, sizeof(entries)))
return -EFAULT;
mutex_lock(&xs->mutex);
@@ -743,7 +747,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
else if (optlen < sizeof(mr))
mr_size = sizeof(struct xdp_umem_reg_v1);
- if (copy_from_user(&mr, optval, mr_size))
+ if (copy_from_sockptr(&mr, optval, mr_size))
return -EFAULT;
mutex_lock(&xs->mutex);
@@ -770,7 +774,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
struct xsk_queue **q;
int entries;
- if (copy_from_user(&entries, optval, sizeof(entries)))
+ if (copy_from_sockptr(&entries, optval, sizeof(entries)))
return -EFAULT;
mutex_lock(&xs->mutex);
@@ -812,6 +816,12 @@ static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
ring->desc = offsetof(struct xdp_umem_ring, desc);
}
+struct xdp_statistics_v1 {
+ __u64 rx_dropped;
+ __u64 rx_invalid_descs;
+ __u64 tx_invalid_descs;
+};
+
static int xsk_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -830,20 +840,36 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case XDP_STATISTICS:
{
- struct xdp_statistics stats;
+ struct xdp_statistics stats = {};
+ bool extra_stats = true;
+ size_t stats_size;
- if (len < sizeof(stats))
+ if (len < sizeof(struct xdp_statistics_v1)) {
return -EINVAL;
+ } else if (len < sizeof(stats)) {
+ extra_stats = false;
+ stats_size = sizeof(struct xdp_statistics_v1);
+ } else {
+ stats_size = sizeof(stats);
+ }
mutex_lock(&xs->mutex);
stats.rx_dropped = xs->rx_dropped;
+ if (extra_stats) {
+ stats.rx_ring_full = xs->rx_queue_full;
+ stats.rx_fill_ring_empty_descs =
+ xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+ stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
+ } else {
+ stats.rx_dropped += xs->rx_queue_full;
+ }
stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
mutex_unlock(&xs->mutex);
- if (copy_to_user(optval, &stats, sizeof(stats)))
+ if (copy_to_user(optval, &stats, stats_size))
return -EFAULT;
- if (put_user(sizeof(stats), optlen))
+ if (put_user(stats_size, optlen))
return -EFAULT;
return 0;
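
Userspace can detect at run time whether the extended counters are present: the
kernel writes the size it actually filled back through optlen, and when the
caller passes only the old, shorter struct, rx_queue_full is folded into
rx_dropped so existing binaries keep seeing those events as drops. A minimal
caller-side sketch (the helper name and fd are illustrative; the fd is assumed
to come from socket(AF_XDP, SOCK_RAW, 0), and error handling is trimmed):

	#include <linux/if_xdp.h>
	#include <stdio.h>
	#include <sys/socket.h>

	static void dump_xsk_stats(int fd)
	{
		struct xdp_statistics stats = {};
		socklen_t optlen = sizeof(stats);

		if (getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen)) {
			perror("getsockopt(XDP_STATISTICS)");
			return;
		}

		/* Present on all kernels. */
		printf("rx_dropped:       %llu\n", stats.rx_dropped);
		printf("rx_invalid_descs: %llu\n", stats.rx_invalid_descs);
		printf("tx_invalid_descs: %llu\n", stats.tx_invalid_descs);

		/* Older kernels write back only the three original __u64
		 * counters; the ring counters below are valid only if the
		 * kernel filled the full struct.
		 */
		if (optlen >= sizeof(struct xdp_statistics)) {
			printf("rx_ring_full:             %llu\n",
			       stats.rx_ring_full);
			printf("rx_fill_ring_empty_descs: %llu\n",
			       stats.rx_fill_ring_empty_descs);
			printf("tx_ring_empty_descs:      %llu\n",
			       stats.tx_ring_empty_descs);
		}
	}
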
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 08b80669f649..a2044c245215 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -189,6 +189,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
for (;;) {
if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
+ pool->fq->queue_empty_descs++;
xp_release(xskb);
return NULL;
}
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index 0163b26aaf63..21e9c2d123ee 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -76,6 +76,19 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
return err;
}
+static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
+{
+ struct xdp_diag_stats du = {};
+
+ du.n_rx_dropped = xs->rx_dropped;
+ du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
+ du.n_rx_full = xs->rx_queue_full;
+ du.n_fill_ring_empty = xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+ du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
+ du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
+ return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
+}
+
static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
struct xdp_diag_req *req,
struct user_namespace *user_ns,
@@ -118,6 +131,10 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
goto out_nlmsg_trim;
+ if ((req->xdiag_show & XDP_SHOW_STATS) &&
+ xsk_diag_put_stats(xs, nlskb))
+ goto out_nlmsg_trim;
+
mutex_unlock(&xs->mutex);
nlmsg_end(nlskb, nlh);
return 0;
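
The same counters are also reachable without owning the socket, through the
sock_diag netlink interface, when the request sets XDP_SHOW_STATS. A rough
sketch of such a query, assuming the xdp_diag_req/xdp_diag_msg layouts from
linux/xdp_diag.h (NLMSG_ERROR handling and fd cleanup trimmed):

	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>
	#include <linux/sock_diag.h>
	#include <linux/xdp_diag.h>
	#include <stdio.h>
	#include <sys/socket.h>

	static void dump_all_xsk_stats(void)
	{
		struct {
			struct nlmsghdr nlh;
			struct xdp_diag_req req;
		} msg = {
			.nlh = {
				.nlmsg_len = sizeof(msg),
				.nlmsg_type = SOCK_DIAG_BY_FAMILY,
				.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
			},
			.req = {
				.sdiag_family = AF_XDP,
				.xdiag_show = XDP_SHOW_STATS,
			},
		};
		char buf[8192];
		ssize_t n;
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

		if (fd < 0 || send(fd, &msg, sizeof(msg), 0) < 0)
			return;

		while ((n = recv(fd, buf, sizeof(buf), 0)) > 0) {
			struct nlmsghdr *h = (struct nlmsghdr *)buf;

			for (; NLMSG_OK(h, n); h = NLMSG_NEXT(h, n)) {
				struct xdp_diag_msg *m = NLMSG_DATA(h);
				struct rtattr *a = (struct rtattr *)(m + 1);
				int alen = h->nlmsg_len -
					   NLMSG_LENGTH(sizeof(*m));

				if (h->nlmsg_type == NLMSG_DONE)
					return;
				for (; RTA_OK(a, alen); a = RTA_NEXT(a, alen)) {
					struct xdp_diag_stats *st = RTA_DATA(a);

					if (a->rta_type != XDP_DIAG_STATS)
						continue;
					printf("ino %u: rx_full %llu fill_empty %llu tx_empty %llu\n",
					       m->xdiag_ino, st->n_rx_full,
					       st->n_fill_ring_empty,
					       st->n_tx_ring_empty);
				}
			}
		}
	}
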
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 5b5d24d2dd37..bf42cfd74b89 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -38,6 +38,7 @@ struct xsk_queue {
u32 cached_cons;
struct xdp_ring *ring;
u64 invalid_descs;
+ u64 queue_empty_descs;
};
/* The structure of the shared state of the rings are the same as the
@@ -354,6 +355,11 @@ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
return q ? q->invalid_descs : 0;
}
+static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
+{
+ return q ? q->queue_empty_descs : 0;
+}
+
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 1dc7208c71ba..8367adbbe9df 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -254,6 +254,7 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
spin_unlock_bh(&map->lock);
}
+static int xsk_map_btf_id;
const struct bpf_map_ops xsk_map_ops = {
.map_alloc = xsk_map_alloc,
.map_free = xsk_map_free,
@@ -264,4 +265,6 @@ const struct bpf_map_ops xsk_map_ops = {
.map_update_elem = xsk_map_update_elem,
.map_delete_elem = xsk_map_delete_elem,
.map_check_btf = map_check_no_btf,
+ .map_btf_name = "xsk_map",
+ .map_btf_id = &xsk_map_btf_id,
};
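
For reference, xsk_map is the map type XDP programs redirect packets into;
registering a BTF id here lets the kernel resolve struct xsk_map in vmlinux
BTF, as other map types already do. A minimal sketch of the BPF-program side,
using libbpf's BTF-defined map syntax (map and section names are illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_XSKMAP);
		__uint(max_entries, 64);
		__type(key, __u32);
		__type(value, __u32);
	} xsks_map SEC(".maps");

	SEC("xdp")
	int xsk_redirect(struct xdp_md *ctx)
	{
		/* Entries are AF_XDP socket fds installed from userspace;
		 * redirect traffic arriving on this Rx queue to its socket.
		 * Returns XDP_REDIRECT on success, XDP_ABORTED otherwise.
		 */
		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
	}

	char _license[] SEC("license") = "GPL";
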