author:    David S. Miller <davem@davemloft.net>  2019-05-16 04:28:44 +0300
committer: David S. Miller <davem@davemloft.net>  2019-05-16 04:28:44 +0300
commit:    c7d5ec26ea4adf450d9ab2b794e7735761a93af1
tree:      a2c0e405c6cd4c90dcd3eda665e1488a98564341 /net
parent:    858f5017446764e8bca0b29589a3b164186ae471
parent:    5fa2ca7c4a3fc176f31b495e1a704862d8188b53
download:  linux-c7d5ec26ea4adf450d9ab2b794e7735761a93af1.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2019-05-16

The following pull request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a use after free in __dev_map_entry_free(), from Eric.

2) Several sockmap related bug fixes: fix a splat in strparser if it was
   never initialized, remove duplicate ingress msg list purging which can
   race, fix msg->sg.size accounting upon skb to msg conversion, and last
   but not least fix a timeout bug in tcp_bpf_wait_data(), from John.

3) Fix the LRU map to avoid messing with eviction heuristics upon syscall
   lookup: a map walk from the user space side would otherwise mark all
   map entries, so that subsequent updates evict just recently created
   entries, from Daniel.

4) Don't bail out when libbpf feature probing fails. Also various smaller
   fixes to the flow_dissector test, from Stanislav.

5) Fix missing brackets for BTF_INT_OFFSET() in the UAPI header, from Gary.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
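For item 5, the hazard of an unparenthesized macro argument is easy to reproduce
in isolation. The sketch below is a hypothetical stand-in, not the actual
include/uapi/linux/btf.h change (which lives outside net/ and is therefore not
part of the diff shown on this page); it only mimics the mask-and-shift shape of
BTF_INT_OFFSET() to show why the extra brackets matter once a caller passes an
expression rather than a plain variable.

#include <stdio.h>

/* Hypothetical macros for illustration only, not the UAPI definitions. */
#define EXTRACT_OFFSET_UNBRACKETED(val)	((val & 0x00ff0000) >> 16)
#define EXTRACT_OFFSET_BRACKETED(val)	(((val) & 0x00ff0000) >> 16)

int main(void)
{
	unsigned int a = 0x01000000, b = 0x00340000;

	/* With an expression argument, the unbracketed form regroups as
	 * (a | (b & 0x00ff0000)) >> 16 because '&' binds tighter than '|',
	 * leaking a's high bits into the result.
	 */
	printf("unbracketed: 0x%x\n", EXTRACT_OFFSET_UNBRACKETED(a | b)); /* 0x134 */
	printf("bracketed:   0x%x\n", EXTRACT_OFFSET_BRACKETED(a | b));   /* 0x34  */
	return 0;
}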
Diffstat (limited to 'net')
 -rw-r--r--  net/core/skmsg.c    | 7
 -rw-r--r--  net/ipv4/tcp_bpf.c  | 7
 2 files changed, 9 insertions, 5 deletions
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index cc94d921476c..93bffaad2135 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -411,6 +411,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
 	sk_mem_charge(sk, skb->len);
 	copied = skb->len;
 	msg->sg.start = 0;
+	msg->sg.size = copied;
 	msg->sg.end = num_sge == MAX_MSG_FRAGS ? 0 : num_sge;
 	msg->skb = skb;
@@ -554,8 +555,10 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
 	struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
 	/* No sk_callback_lock since already detached. */
-	strp_stop(&psock->parser.strp);
-	strp_done(&psock->parser.strp);
+
+	/* Parser has been stopped */
+	if (psock->progs.skb_parser)
+		strp_done(&psock->parser.strp);
 	cancel_work_sync(&psock->work);
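The guard added in the hunk above only runs strp_done() when a parser program
was actually attached, since tearing down a strparser that was never initialized
is what triggered the splat. A rough user-space analogue of that "only destroy
what was set up" pattern follows; all names are hypothetical, and the real code
keys the check off psock->progs.skb_parser rather than a boolean flag.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct parser {
	bool running;
};

struct fake_psock {
	struct parser *parser;		/* only allocated if a parser prog exists */
	bool have_parser_prog;
};

static void parser_done(struct parser *p)
{
	/* Analogue of strp_done(): must never see an uninitialized parser. */
	if (!p) {
		fprintf(stderr, "splat: tearing down a parser that was never set up\n");
		abort();
	}
	p->running = false;
	free(p);
}

static void fake_psock_destroy(struct fake_psock *ps)
{
	/* Mirror of the fix: skip parser teardown unless one was attached. */
	if (ps->have_parser_prog)
		parser_done(ps->parser);
}

int main(void)
{
	struct fake_psock ps = { .parser = NULL, .have_parser_prog = false };

	fake_psock_destroy(&ps);	/* safe: the guard skips parser_done() */
	puts("destroyed without touching the parser");
	return 0;
}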
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 1bb7321a256d..3d1e15401384 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -27,7 +27,10 @@ static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
 			     int flags, long timeo, int *err)
 {
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
-	int ret;
+	int ret = 0;
+
+	if (!timeo)
+		return ret;
 	add_wait_queue(sk_sleep(sk), &wait);
 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
@@ -528,8 +531,6 @@ static void tcp_bpf_remove(struct sock *sk, struct sk_psock *psock)
 {
 	struct sk_psock_link *link;
-	sk_psock_cork_free(psock);
-	__sk_psock_purge_ingress_msg(psock);
 	while ((link = sk_psock_link_pop(psock))) {
 		sk_psock_unlink(sk, link);
 		sk_psock_free_link(link);
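The first tcp_bpf.c hunk above makes tcp_bpf_wait_data() return immediately for
a non-blocking caller (timeo == 0) instead of registering on the wait queue at
all. Below is a minimal user-space analogue of that early-return pattern, with
hypothetical names and a crude polling loop standing in for the kernel's wait
queue machinery.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for "data queued on the socket". */
static bool data_available;

/* Analogue of the fixed tcp_bpf_wait_data(): a zero timeout means the caller
 * is non-blocking, so report "no data" right away instead of waiting,
 * mirroring the added "if (!timeo) return ret;" check.
 */
static int wait_for_data(long timeo_ms)
{
	int ret = 0;

	if (!timeo_ms)
		return ret;

	while (timeo_ms-- > 0 && !data_available)
		usleep(1000);	/* 1 ms poll instead of a real wait queue */

	return data_available ? 1 : ret;
}

int main(void)
{
	printf("non-blocking caller: %d\n", wait_for_data(0));	/* returns at once */
	data_available = true;
	printf("blocking caller:     %d\n", wait_for_data(10));	/* sees the data */
	return 0;
}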