author		Jakub Kicinski <kuba@kernel.org>	2021-11-10 02:44:47 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2021-11-10 02:44:48 +0300
commit		fceb07950a7aac43d52d8c6ef580399a8b9b68fe (patch)
tree		90953da74ef7db8fea74c57844931e79c3e9f2de /tools
parent		9758aba8542bb43029d077303d05df1d00a8dbb5 (diff)
parent		b2c4618162ec615a15883a804cce7e27afecfa58 (diff)
download	linux-fceb07950a7aac43d52d8c6ef580399a8b9b68fe.tar.xz
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2021-11-09

We've added 7 non-merge commits during the last 3 day(s) which contain
a total of 10 files changed, 174 insertions(+), 48 deletions(-).

The main changes are:

1) Various sockmap fixes, from John and Jussi.

2) Fix out-of-bound issue with bpf_pseudo_func, from Martin.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf, sockmap: sk_skb data_end access incorrect when src_reg = dst_reg
  bpf: sockmap, strparser, and tls are reusing qdisc_skb_cb and colliding
  bpf, sockmap: Fix race in ingress receive verdict with redirect to self
  bpf, sockmap: Remove unhash handler for BPF sockmap usage
  bpf, sockmap: Use stricter sk state checks in sk_lookup_assign
  bpf: selftest: Trigger a DCE on the whole subprog
  bpf: Stop caching subprog index in the bpf_pseudo_func insn
====================

Link: https://lore.kernel.org/r/20211109215702.38350-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
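The selftest commit in this series ("bpf: selftest: Trigger a DCE on the whole subprog") relies on the verifier's dead code elimination: a const volatile global is placed in the program's read-only .rodata map, its value is fixed once the map is frozen at load time, so a branch guarded by it can be proven dead and the subprog called only from that branch is eliminated rather than verified. A minimal sketch of that pattern, separate from the diff below (program and section names are illustrative, not taken from this commit):

/* Hedged sketch of the const-volatile DCE pattern; not part of this merge. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Lives in .rodata; the verifier sees the load-time value as a constant. */
const volatile int feature_enabled = 0;

SEC("tc")
int prog(struct __sk_buff *skb)
{
	/* With feature_enabled == 0 at load time, this branch is provably
	 * dead and the verifier eliminates it (and anything only it calls).
	 */
	if (feature_enabled)
		return 1;
	return 0;
}

char _license[] SEC("license") = "GPL";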
Diffstat (limited to 'tools')
-rw-r--r--	tools/testing/selftests/bpf/progs/for_each_array_map_elem.c	12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
index df918b2469da..52f6995ff29c 100644
--- a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
+++ b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
@@ -23,6 +23,16 @@ struct callback_ctx {
int output;
};
+const volatile int bypass_unused = 1;
+
+static __u64
+unused_subprog(struct bpf_map *map, __u32 *key, __u64 *val,
+	       struct callback_ctx *data)
+{
+	data->output = 0;
+	return 1;
+}
+
static __u64
check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
struct callback_ctx *data)
@@ -54,6 +64,8 @@ int test_pkt_access(struct __sk_buff *skb)
data.output = 0;
bpf_for_each_map_elem(&arraymap, check_array_elem, &data, 0);
+	if (!bypass_unused)
+		bpf_for_each_map_elem(&arraymap, unused_subprog, &data, 0);
arraymap_output = data.output;
bpf_for_each_map_elem(&percpu_map, check_percpu_elem, (void *)0, 0);
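The added subprog above is only reachable if userspace clears bypass_unused before load; with the default value of 1 the verifier proves the guarded call dead and, with the fix earlier in this series, eliminates the whole unused subprog instead of tripping over it. A hedged userspace sketch, assuming a bpftool-generated skeleton (the <name>__open/load/destroy identifiers follow the usual skeleton naming convention and are not taken from this commit):

/* Hedged sketch: drive the bypass_unused knob from userspace via a
 * bpftool-generated skeleton; the actual selftest harness may differ.
 */
#include "for_each_array_map_elem.skel.h"

int run(void)
{
	struct for_each_array_map_elem *skel;
	int err;

	skel = for_each_array_map_elem__open();
	if (!skel)
		return -1;

	/* .rodata is writable only between open() and load(). Leaving the
	 * flag at 1 keeps unused_subprog unreachable, so the verifier must
	 * dead-code-eliminate it; setting it to 0 would force full
	 * verification of the subprog instead.
	 */
	skel->rodata->bypass_unused = 1;

	err = for_each_array_map_elem__load(skel);
	if (err)
		goto out;

	/* ... attach and trigger test_pkt_access ... */
out:
	for_each_array_map_elem__destroy(skel);
	return err;
}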