From c21de5fc5ffde70b21e4f370aebad79a7d7bdc0d Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Tue, 11 Jul 2023 16:24:00 -0700
Subject: selftests/bpf: extend existing map resize tests for per-cpu use case

Add a per-cpu array resizing use case and demonstrate how
bpf_get_smp_processor_id() can be used to directly access proper data
with no extra checks.

Signed-off-by: Andrii Nakryiko
Link: https://lore.kernel.org/r/20230711232400.1658562-2-andrii@kernel.org
Signed-off-by: Alexei Starovoitov
---
 tools/testing/selftests/bpf/prog_tests/global_map_resize.c | 14 +++++++++++---
 tools/testing/selftests/bpf/progs/test_global_map_resize.c |  8 ++++++--
 2 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/global_map_resize.c b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c
index fd41425d2e5c..56b5baef35c8 100644
--- a/tools/testing/selftests/bpf/prog_tests/global_map_resize.c
+++ b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c
@@ -22,7 +22,7 @@ static void global_map_resize_bss_subtest(void)
 	struct test_global_map_resize *skel;
 	struct bpf_map *map;
 	const __u32 desired_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2;
-	size_t array_len, actual_sz;
+	size_t array_len, actual_sz, new_sz;
 
 	skel = test_global_map_resize__open();
 	if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
@@ -42,6 +42,10 @@ static void global_map_resize_bss_subtest(void)
 	if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
 		goto teardown;
 
+	new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus();
+	err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz);
+	ASSERT_OK(err, "percpu_arr_resize");
+
 	/* set the expected number of elements based on the resized array */
 	array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->bss->array[0]);
 	if (!ASSERT_GT(array_len, 1, "array_len"))
@@ -84,11 +88,11 @@ teardown:
 
 static void global_map_resize_data_subtest(void)
 {
-	int err;
 	struct test_global_map_resize *skel;
 	struct bpf_map *map;
 	const __u32 desired_sz = sysconf(_SC_PAGE_SIZE) * 2;
-	size_t array_len, actual_sz;
+	size_t array_len, actual_sz, new_sz;
+	int err;
 
 	skel = test_global_map_resize__open();
 	if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
@@ -108,6 +112,10 @@ static void global_map_resize_data_subtest(void)
 	if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
 		goto teardown;
 
+	new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus();
+	err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz);
+	ASSERT_OK(err, "percpu_arr_resize");
+
 	/* set the expected number of elements based on the resized array */
 	array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->data_custom->my_array[0]);
 	if (!ASSERT_GT(array_len, 1, "array_len"))
diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
index 2588f2384246..1fbb73d3e5d5 100644
--- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c
+++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
@@ -29,13 +29,16 @@ int my_int SEC(".data.non_array");
 int my_array_first[1] SEC(".data.array_not_last");
 int my_int_last SEC(".data.array_not_last");
 
+int percpu_arr[1] SEC(".data.percpu_arr");
+
 SEC("tp/syscalls/sys_enter_getpid")
 int bss_array_sum(void *ctx)
 {
 	if (pid != (bpf_get_current_pid_tgid() >> 32))
 		return 0;
 
-	sum = 0;
+	/* this will be zero, we just rely on verifier not rejecting this */
+	sum = percpu_arr[bpf_get_smp_processor_id()];
 	for (size_t i = 0; i < bss_array_len; ++i)
 		sum += array[i];
 
@@ -49,7 +52,8 @@ int data_array_sum(void *ctx)
 	if (pid != (bpf_get_current_pid_tgid() >> 32))
 		return 0;
 
-	sum = 0;
+	/* this will be zero, we just rely on verifier not rejecting this */
+	sum = percpu_arr[bpf_get_smp_processor_id()];
 	for (size_t i = 0; i < data_array_len; ++i)
 		sum += my_array[i];
 
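
A note on the userspace flow the test exercises: resizing a global-data map only works before the skeleton is loaded, so the test computes one percpu_arr slot per possible CPU and calls bpf_map__set_value_size() on the map backing the ".data.percpu_arr" section between open and load. The sketch below restates that sequence outside the test harness. It is illustrative only and not part of the patch; the skeleton type, map, and section names are taken from the selftest above, and error handling is reduced to a single error path.

#include <errno.h>
#include <bpf/libbpf.h>

#include "test_global_map_resize.skel.h"

/* Hypothetical helper (not part of the patch) mirroring the selftest flow:
 * grow the map backing the .data.percpu_arr section to one slot per
 * possible CPU, which must happen after __open() but before __load().
 */
static int percpu_arr_resize_example(void)
{
	struct test_global_map_resize *skel;
	size_t new_sz;
	int nr_cpus, err;

	skel = test_global_map_resize__open();
	if (!skel)
		return -errno;

	nr_cpus = libbpf_num_possible_cpus();
	if (nr_cpus < 0) {
		err = nr_cpus;
		goto out;
	}

	/* one percpu_arr[] element per possible CPU */
	new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * nr_cpus;
	err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz);
	if (!err)
		err = test_global_map_resize__load(skel);
out:
	test_global_map_resize__destroy(skel);
	return err;
}

On the BPF program side, as the two hunks above show, each CPU can then index its own slot directly with percpu_arr[bpf_get_smp_processor_id()], with no extra checks needed.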