author		Jakub Kicinski <kuba@kernel.org>	2023-09-01 04:44:24 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2023-09-01 04:44:24 +0300
commit		ddaa935d33fcd37961a71291b2eedf294ee8b924 (patch)
tree		eb9bababbd7dd9295ca6bcb4ee01baf94ff5a069 /net/core
parent		8aae7625ff3f0bd5484d01f1b8d5af82e44bec2d (diff)
parent		be8e754cbfac698d6304bb8382c8d18ac74424d3 (diff)
download	linux-ddaa935d33fcd37961a71291b2eedf294ee8b924.tar.xz
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2023-08-31

We've added 15 non-merge commits during the last 3 day(s) which contain
a total of 17 files changed, 468 insertions(+), 97 deletions(-).

The main changes are:

1) BPF selftest fixes: one flake and one related to clang18 testing,
   from Yonghong Song.

2) Fix a d_path BPF selftest failure after fast-forward from Linus'
   tree, from Jiri Olsa.

3) Fix a preempt_rt splat in sockmap when using raw_spin_lock_t,
   from John Fastabend.

4) Fix a xsk_diag_fill use-after-free race during socket cleanup,
   from Magnus Karlsson.

5) Fix xsk_build_skb to address a buggy dereference of an ERR_PTR(),
   from Tirthendu Sarkar.

6) Fix a bpftool build warning when compiled with -Wtype-limits,
   from Yafang Shao.

7) Several misc fixes and cleanups in standardization docs,
   from David Vernet.

8) Fix BPF selftest install to consider no_alu32/cpuv4/bpf-gcc
   flavors, from Björn Töpel.

9) Annotate a data race in bpf_long_memcpy for KCSAN,
   from Daniel Borkmann.

10) Extend documentation with a description for CO-RE relocations,
    from Eduard Zingerman.

11) Fix several invalid escape sequence warnings in bpf_doc.py script,
    from Vishal Chourasia.

12) Fix the instruction set doc wrt offset of BPF-to-BPF call,
    from Will Hawkins.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Include build flavors for install target
  bpf: Annotate bpf_long_memcpy with data_race
  selftests/bpf: Fix d_path test
  bpf, docs: Fix invalid escape sequence warnings in bpf_doc.py
  xsk: Fix xsk_diag use-after-free error during socket cleanup
  bpf, docs: s/eBPF/BPF in standards documents
  bpf, docs: Add abi.rst document to standardization subdirectory
  bpf, docs: Move linux-notes.rst to root bpf docs tree
  bpf, sockmap: Fix preempt_rt splat when using raw_spin_lock_t
  docs/bpf: Add description for CO-RE relocations
  bpf, docs: Correct source of offset for program-local call
  selftests/bpf: Fix flaky cgroup_iter_sleepable subtest
  xsk: Fix xsk_build_skb() error: 'skb' dereferencing possible ERR_PTR()
  bpftool: Fix build warnings with -Wtype-limits
  bpf: Prevent inlining of bpf_fentry_test7()
====================

Link: https://lore.kernel.org/r/20230831210019.14417-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
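For context on the sockmap hunks below (item 3 in the pull request): under
CONFIG_PREEMPT_RT, spinlock_t is a sleeping lock built on rt_mutex, while
raw_spinlock_t remains a true spinning lock taken with preemption disabled.
Anything that may sleep on RT -- including acquiring another spinlock_t --
is forbidden inside a raw_spinlock_t critical section and triggers the
"BUG: sleeping function called from invalid context" splat this patch
addresses. A minimal sketch of the rule, using hypothetical names
(demo_bucket, demo_del) rather than code taken from the patch:

#include <linux/spinlock.h>
#include <linux/rculist.h>

/* Hypothetical stand-in for the bpf_shtab_bucket in the diff below. */
struct demo_bucket {
	struct hlist_head head;
	spinlock_t lock;	/* sleeping lock on RT; was raw_spinlock_t */
};

static void demo_del(struct demo_bucket *b, struct hlist_node *node)
{
	/*
	 * On PREEMPT_RT this acquisition may sleep, which is acceptable
	 * here: the map update/delete paths run in contexts where
	 * sleeping is allowed, so a plain spinlock_t suffices and the
	 * raw variant was never required.
	 */
	spin_lock_bh(&b->lock);
	hlist_del_rcu(node);
	spin_unlock_bh(&b->lock);
}

The conversion in the diff is correspondingly mechanical: each
raw_spin_lock_bh()/raw_spin_unlock_bh() pair becomes
spin_lock_bh()/spin_unlock_bh(), and the two lock fields change type from
raw_spinlock_t to spinlock_t.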
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/sock_map.c	36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 8f07fea39d9e..cb11750b1df5 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -18,7 +18,7 @@ struct bpf_stab {
struct bpf_map map;
struct sock **sks;
struct sk_psock_progs progs;
- raw_spinlock_t lock;
+ spinlock_t lock;
};
#define SOCK_CREATE_FLAG_MASK \
@@ -44,7 +44,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&stab->map, attr);
- raw_spin_lock_init(&stab->lock);
+ spin_lock_init(&stab->lock);
stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
sizeof(struct sock *),
@@ -411,7 +411,7 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
struct sock *sk;
int err = 0;
- raw_spin_lock_bh(&stab->lock);
+ spin_lock_bh(&stab->lock);
sk = *psk;
if (!sk_test || sk_test == sk)
sk = xchg(psk, NULL);
@@ -421,7 +421,7 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
else
err = -EINVAL;
- raw_spin_unlock_bh(&stab->lock);
+ spin_unlock_bh(&stab->lock);
return err;
}
@@ -487,7 +487,7 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
psock = sk_psock(sk);
WARN_ON_ONCE(!psock);
- raw_spin_lock_bh(&stab->lock);
+ spin_lock_bh(&stab->lock);
osk = stab->sks[idx];
if (osk && flags == BPF_NOEXIST) {
ret = -EEXIST;
@@ -501,10 +501,10 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
stab->sks[idx] = sk;
if (osk)
sock_map_unref(osk, &stab->sks[idx]);
- raw_spin_unlock_bh(&stab->lock);
+ spin_unlock_bh(&stab->lock);
return 0;
out_unlock:
- raw_spin_unlock_bh(&stab->lock);
+ spin_unlock_bh(&stab->lock);
if (psock)
sk_psock_put(sk, psock);
out_free:
@@ -835,7 +835,7 @@ struct bpf_shtab_elem {
struct bpf_shtab_bucket {
struct hlist_head head;
- raw_spinlock_t lock;
+ spinlock_t lock;
};
struct bpf_shtab {
@@ -910,7 +910,7 @@ static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
* is okay since it's going away only after RCU grace period.
* However, we need to check whether it's still present.
*/
- raw_spin_lock_bh(&bucket->lock);
+ spin_lock_bh(&bucket->lock);
elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
elem->key, map->key_size);
if (elem_probe && elem_probe == elem) {
@@ -918,7 +918,7 @@ static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
sock_map_unref(elem->sk, elem);
sock_hash_free_elem(htab, elem);
}
- raw_spin_unlock_bh(&bucket->lock);
+ spin_unlock_bh(&bucket->lock);
}
static long sock_hash_delete_elem(struct bpf_map *map, void *key)
@@ -932,7 +932,7 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
hash = sock_hash_bucket_hash(key, key_size);
bucket = sock_hash_select_bucket(htab, hash);
- raw_spin_lock_bh(&bucket->lock);
+ spin_lock_bh(&bucket->lock);
elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
if (elem) {
hlist_del_rcu(&elem->node);
@@ -940,7 +940,7 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
sock_hash_free_elem(htab, elem);
ret = 0;
}
- raw_spin_unlock_bh(&bucket->lock);
+ spin_unlock_bh(&bucket->lock);
return ret;
}
@@ -1000,7 +1000,7 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
hash = sock_hash_bucket_hash(key, key_size);
bucket = sock_hash_select_bucket(htab, hash);
- raw_spin_lock_bh(&bucket->lock);
+ spin_lock_bh(&bucket->lock);
elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
if (elem && flags == BPF_NOEXIST) {
ret = -EEXIST;
@@ -1026,10 +1026,10 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
sock_map_unref(elem->sk, elem);
sock_hash_free_elem(htab, elem);
}
- raw_spin_unlock_bh(&bucket->lock);
+ spin_unlock_bh(&bucket->lock);
return 0;
out_unlock:
- raw_spin_unlock_bh(&bucket->lock);
+ spin_unlock_bh(&bucket->lock);
sk_psock_put(sk, psock);
out_free:
sk_psock_free_link(link);
@@ -1115,7 +1115,7 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
for (i = 0; i < htab->buckets_num; i++) {
INIT_HLIST_HEAD(&htab->buckets[i].head);
- raw_spin_lock_init(&htab->buckets[i].lock);
+ spin_lock_init(&htab->buckets[i].lock);
}
return &htab->map;
@@ -1147,11 +1147,11 @@ static void sock_hash_free(struct bpf_map *map)
* exists, psock exists and holds a ref to socket. That
* lets us to grab a socket ref too.
*/
- raw_spin_lock_bh(&bucket->lock);
+ spin_lock_bh(&bucket->lock);
hlist_for_each_entry(elem, &bucket->head, node)
sock_hold(elem->sk);
hlist_move_list(&bucket->head, &unlink_list);
- raw_spin_unlock_bh(&bucket->lock);
+ spin_unlock_bh(&bucket->lock);
/* Process removed entries out of atomic context to
* block for socket lock before deleting the psock's