author	Jakub Kicinski <kuba@kernel.org>	2022-06-18 04:30:01 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2022-06-18 04:30:01 +0300
commit	582573f1b23df1cf7458e665e2aa3acd0551fd2c (patch)
tree	69f7f069dad9006f8c87c29e144ad2e445752f67 /net
parent	b4a028c4d031c27704ad73b1195ca69a1206941e (diff)
parent	c0f3bb4054ef036e5f67e27f2e3cad9e6512cf00 (diff)
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2022-06-17

We've added 12 non-merge commits during the last 4 day(s) which contain
a total of 14 files changed, 305 insertions(+), 107 deletions(-).

The main changes are:

1) Fix x86 JIT tailcall count offset on BPF-2-BPF call, from Jakub Sitnicki.

2) Fix a kprobe_multi link bug which misplaces BPF cookies, from Jiri Olsa.

3) Fix an infinite loop when processing a module's BTF, from Kumar Kartikeya Dwivedi.

4) Fix getting a rethook only in RCU available context, from Masami Hiramatsu.

5) Fix request socket refcount leak in sk lookup helpers, from Jon Maxwell.

6) Fix xsk xmit behavior which wrongly adds skb to already full cq, from Ciara Loftus.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  rethook: Reject getting a rethook if RCU is not watching
  fprobe, samples: Add use_trace option and show hit/missed counter
  bpf, docs: Update some of the JIT/maintenance entries
  selftest/bpf: Fix kprobe_multi bench test
  bpf: Force cookies array to follow symbols sorting
  ftrace: Keep address offset in ftrace_lookup_symbols
  selftests/bpf: Shuffle cookies symbols in kprobe multi test
  selftests/bpf: Test tail call counting with bpf2bpf and data on stack
  bpf, x86: Fix tail call count offset calculation on bpf2bpf call
  bpf: Limit maximum modifier chain length in btf_check_type_tags
  bpf: Fix request_sock leak in sk lookup helpers
  xsk: Fix generic transmit when completion queue reservation fails
====================

Link: https://lore.kernel.org/r/20220617202119.2421-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--	net/core/filter.c	34
-rw-r--r--	net/xdp/xsk.c	16
2 files changed, 37 insertions(+), 13 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 5af58eb48587..5d16d66727fc 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6516,10 +6516,21 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 					   ifindex, proto, netns_id, flags);
 	if (sk) {
-		sk = sk_to_full_sk(sk);
-		if (!sk_fullsock(sk)) {
+		struct sock *sk2 = sk_to_full_sk(sk);
+
+		/* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
+		 * sock refcnt is decremented to prevent a request_sock leak.
+		 */
+		if (!sk_fullsock(sk2))
+			sk2 = NULL;
+		if (sk2 != sk) {
 			sock_gen_put(sk);
-			return NULL;
+			/* Ensure there is no need to bump sk2 refcnt */
+			if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
+				WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
+				return NULL;
+			}
+			sk = sk2;
 		}
 	}
@@ -6553,10 +6564,21 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 					 flags);
 	if (sk) {
-		sk = sk_to_full_sk(sk);
-		if (!sk_fullsock(sk)) {
+		struct sock *sk2 = sk_to_full_sk(sk);
+
+		/* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
+		 * sock refcnt is decremented to prevent a request_sock leak.
+		 */
+		if (!sk_fullsock(sk2))
+			sk2 = NULL;
+		if (sk2 != sk) {
 			sock_gen_put(sk);
-			return NULL;
+			/* Ensure there is no need to bump sk2 refcnt */
+			if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
+				WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
+				return NULL;
+			}
+			sk = sk2;
 		}
 	}
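The comment in the hunks above carries the reasoning: sk_to_full_sk() can hand back the request_sock's listener, so the reference on the original socket must always be dropped, and a different socket may only be returned if it needs no reference of its own (SOCK_RCU_FREE). A minimal userspace model of that control flow is sketched below; struct mock_sock, mock_to_full_sk(), mock_put() and the rcu_free flag are illustrative stand-ins, not kernel APIs.

/* Standalone sketch of the refcount handling in the fix above. */
#include <stdbool.h>
#include <stdio.h>

struct mock_sock {
	int refcnt;                 /* reference held by the caller */
	bool fullsock;              /* full socket vs. request_sock mini-socket */
	bool rcu_free;              /* freed via RCU, so no refcount needed */
	struct mock_sock *listener; /* parent listener of a request_sock */
};

/* Mirrors sk_to_full_sk(): a request_sock resolves to its listener. */
static struct mock_sock *mock_to_full_sk(struct mock_sock *sk)
{
	return sk->fullsock ? sk : sk->listener;
}

static void mock_put(struct mock_sock *sk)
{
	if (--sk->refcnt == 0)
		printf("freed %p\n", (void *)sk);
}

/* The fixed lookup tail: always drop the original reference when the
 * resolved socket differs, and only return sockets that need no refcount.
 */
static struct mock_sock *lookup_tail(struct mock_sock *sk)
{
	struct mock_sock *sk2 = mock_to_full_sk(sk);

	if (!sk2 || !sk2->fullsock)
		sk2 = NULL;
	if (sk2 != sk) {
		mock_put(sk);            /* prevents the request_sock leak */
		if (sk2 && !sk2->rcu_free)
			return NULL;     /* would need a refcount we cannot take */
		sk = sk2;
	}
	return sk;
}

int main(void)
{
	struct mock_sock listener = { .refcnt = 1, .fullsock = true, .rcu_free = true };
	struct mock_sock req = { .refcnt = 1, .fullsock = false, .listener = &listener };

	/* The request_sock reference is dropped; the RCU-freed listener is returned. */
	printf("result: %p\n", (void *)lookup_tail(&req));
	return 0;
}

The key property the sketch demonstrates is that the caller's reference on the original socket is released on every path where a different (or no) socket is returned, which is exactly what the old code missed for request_socks.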
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 19ac872a6624..09002387987e 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -538,12 +538,6 @@ static int xsk_generic_xmit(struct sock *sk)
 			goto out;
 		}
-		skb = xsk_build_skb(xs, &desc);
-		if (IS_ERR(skb)) {
-			err = PTR_ERR(skb);
-			goto out;
-		}
-
 		/* This is the backpressure mechanism for the Tx path.
 		 * Reserve space in the completion queue and only proceed
 		 * if there is space in it. This avoids having to implement
@@ -552,11 +546,19 @@ static int xsk_generic_xmit(struct sock *sk)
 		spin_lock_irqsave(&xs->pool->cq_lock, flags);
 		if (xskq_prod_reserve(xs->pool->cq)) {
 			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
-			kfree_skb(skb);
 			goto out;
 		}
 		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+		skb = xsk_build_skb(xs, &desc);
+		if (IS_ERR(skb)) {
+			err = PTR_ERR(skb);
+			spin_lock_irqsave(&xs->pool->cq_lock, flags);
+			xskq_prod_cancel(xs->pool->cq);
+			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+			goto out;
+		}
+
 		err = __dev_direct_xmit(skb, xs->queue_id);
 		if (err == NETDEV_TX_BUSY) {
 			/* Tell user-space to retry the send */
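The reordering above makes the completion-queue reservation the first step, so a failed skb build has a reserved slot to give back (xskq_prod_cancel()) instead of an skb being queued against an already full cq. A standalone sketch of that ordering follows; mock_cq, cq_reserve(), cq_cancel() and build_skb() are illustrative stand-ins, not the xsk APIs.

/* Standalone sketch: reserve a completion-queue slot before building the
 * skb, and cancel the reservation if the build fails.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_cq {
	unsigned int prod, cons, size;
};

static int cq_reserve(struct mock_cq *cq)
{
	if (cq->prod - cq->cons >= cq->size)
		return -1;       /* ring full: apply backpressure, build nothing */
	cq->prod++;
	return 0;
}

static void cq_cancel(struct mock_cq *cq)
{
	cq->prod--;              /* undo the reservation taken above */
}

static void *build_skb(bool fail)
{
	static int dummy;
	return fail ? NULL : &dummy;
}

/* Per-descriptor transmit step in the fixed order. */
static int xmit_one(struct mock_cq *cq, bool alloc_fails)
{
	void *skb;

	if (cq_reserve(cq))
		return -1;       /* full cq: nothing to clean up */

	skb = build_skb(alloc_fails);
	if (!skb) {
		cq_cancel(cq);   /* a failed build must not leak a cq slot */
		return -1;
	}
	/* ... hand skb to the driver ... */
	return 0;
}

int main(void)
{
	struct mock_cq cq = { .size = 2 };
	int ok = xmit_one(&cq, false);     /* succeeds, keeps its cq slot */
	int failed = xmit_one(&cq, true);  /* skb build fails, slot is returned */

	printf("ok=%d failed=%d reserved=%u\n", ok, failed, cq.prod);
	return 0;
}

The invariant the sketch maintains is the same one the patch restores: every successfully built skb owns exactly one reserved completion-queue slot, and every failure path returns whatever it had already taken.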