author		Lorenz Bauer <lmb@cloudflare.com>	2020-01-10 16:23:36 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2020-01-23 10:22:49 +0300
commit		4921b2b1caaf720f30979b13f73e2fc3cd0182d1 (patch)
tree		d5c13bc0ff4937c6140d738219d5f6af9b71fa5c /net/core
parent		a6c89cdf72c10e56e71693bed0fa5f74c784edb5 (diff)
download	linux-4921b2b1caaf720f30979b13f73e2fc3cd0182d1.tar.xz
net: bpf: Don't leak time wait and request sockets
commit 2e012c74823629d9db27963c79caa3f5b2010746 upstream.

It's possible to leak time wait and request sockets via the following
BPF pseudo code:

  sk = bpf_skc_lookup_tcp(...)
  if (sk)
    bpf_sk_release(sk)

If sk->sk_state is TCP_NEW_SYN_RECV or TCP_TIME_WAIT the refcount taken
by bpf_skc_lookup_tcp is not undone by bpf_sk_release. This is because
sk_flags is re-used for other data in both kinds of sockets. The check
!sock_flag(sk, SOCK_RCU_FREE) therefore returns a bogus result.

Check that sk_flags is valid by calling sk_fullsock. Skip checking
SOCK_RCU_FREE if we already know that sk is not a full socket.

Fixes: edbf8c01de5a ("bpf: add skc_lookup_tcp helper")
Fixes: f7355a6c0497 ("bpf: Check sk_fullsock() before returning from bpf_sk_lookup()")
Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/20200110132336.26099-1-lmb@cloudflare.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
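The pseudo code above corresponds roughly to the tc/BPF program sketched
below. It is only an illustration of the affected pattern, not part of the
patch: the section name, program name, hard-coded tuple values and the
libbpf headers are assumptions, and it needs a clang/libbpf toolchain to
build and load. On a kernel without this fix, such a lookup hitting a
request or time-wait socket left the reference taken by bpf_skc_lookup_tcp
undropped.

  #include <linux/bpf.h>
  #include <linux/pkt_cls.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_endian.h>

  SEC("tc")
  int skc_lookup_then_release(struct __sk_buff *skb)
  {
  	/* Hard-coded tuple purely for illustration
  	 * (127.0.0.1:12345 -> 127.0.0.1:80). */
  	struct bpf_sock_tuple tuple = {
  		.ipv4.saddr = bpf_htonl(0x7f000001),
  		.ipv4.daddr = bpf_htonl(0x7f000001),
  		.ipv4.sport = bpf_htons(12345),
  		.ipv4.dport = bpf_htons(80),
  	};
  	struct bpf_sock *sk;

  	/* Unlike bpf_sk_lookup_tcp(), this helper may also return request
  	 * (TCP_NEW_SYN_RECV) and time-wait (TCP_TIME_WAIT) sockets. */
  	sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
  				BPF_F_CURRENT_NETNS, 0);
  	if (sk)
  		/* The verifier requires this release; before the fix it did
  		 * not drop the reference for request/time-wait sockets. */
  		bpf_sk_release(sk);

  	return TC_ACT_OK;
  }

  char _license[] SEC("license") = "GPL";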
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/filter.c	9
1 file changed, 4 insertions, 5 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index a0b68cbda7bc..1a78d64096bb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5306,8 +5306,7 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 	if (sk) {
 		sk = sk_to_full_sk(sk);
 		if (!sk_fullsock(sk)) {
-			if (!sock_flag(sk, SOCK_RCU_FREE))
-				sock_gen_put(sk);
+			sock_gen_put(sk);
 			return NULL;
 		}
 	}
@@ -5344,8 +5343,7 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 	if (sk) {
 		sk = sk_to_full_sk(sk);
 		if (!sk_fullsock(sk)) {
-			if (!sock_flag(sk, SOCK_RCU_FREE))
-				sock_gen_put(sk);
+			sock_gen_put(sk);
 			return NULL;
 		}
 	}
@@ -5412,7 +5410,8 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
 
 BPF_CALL_1(bpf_sk_release, struct sock *, sk)
 {
-	if (!sock_flag(sk, SOCK_RCU_FREE))
+	/* Only full sockets have sk->sk_flags. */
+	if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE))
 		sock_gen_put(sk);
 	return 0;
 }
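The comment added to bpf_sk_release ("Only full sockets have sk->sk_flags")
comes down to socket layout: the word that sk_flags maps to is shared, via a
union, with per-type data of request and time-wait sockets. The sketch below
is an illustrative, heavily trimmed excerpt in the spirit of
include/net/sock.h from kernels of this period; it is not part of the patch
and the exact layout can differ between versions.

  /* Trimmed sketch, not from this patch: most fields omitted. */
  struct sock_common {
  	/* ... */
  	union {
  		unsigned long	skc_flags;	/* full sockets            */
  		unsigned long	skc_listener;	/* request_sock            */
  		unsigned long	skc_tw_dr;	/* inet_timewait_death_row */
  	};
  	/* ... */
  };

  /* struct sock aliases this word as sk_flags, which sock_flag() reads: */
  #define sk_flags	__sk_common.skc_flags

For a TCP_NEW_SYN_RECV or TCP_TIME_WAIT socket, sock_flag(sk, SOCK_RCU_FREE)
therefore tests a bit of skc_listener or skc_tw_dr instead of the real flags,
which is why the fixed bpf_sk_release checks sk_fullsock(sk) before
consulting the flag.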