author     Alexei Starovoitov <ast@kernel.org>      2022-09-03 00:10:46 +0300
committer  Daniel Borkmann <daniel@iogearbox.net>   2022-09-05 16:33:05 +0300
commit     89dc8d0c38e0df27e580876a1681a55c686a51ff (patch)
tree       bc6607523f6ca1f49178bc1a7d5af506a53b1047 /samples/bpf
parent     37521bffdd2d1efcb1dbdfd3ee89584c8943421c (diff)
download   linux-89dc8d0c38e0df27e580876a1681a55c686a51ff.tar.xz
samples/bpf: Reduce syscall overhead in map_perf_test.
Make map_perf_test for preallocated and non-preallocated hash map
spend more time inside bpf program to focus performance analysis
on the speed of update/lookup/delete operations performed by bpf program.

It makes 'perf report' of bpf_mem_alloc look like:
 11.76%  map_perf_test  [k] _raw_spin_lock_irqsave
 11.26%  map_perf_test  [k] htab_map_update_elem
  9.70%  map_perf_test  [k] _raw_spin_lock
  9.47%  map_perf_test  [k] htab_map_delete_elem
  8.57%  map_perf_test  [k] memcpy_erms
  5.58%  map_perf_test  [k] alloc_htab_elem
  4.09%  map_perf_test  [k] __htab_map_lookup_elem
  3.44%  map_perf_test  [k] syscall_exit_to_user_mode
  3.13%  map_perf_test  [k] lookup_nulls_elem_raw
  3.05%  map_perf_test  [k] migrate_enable
  3.04%  map_perf_test  [k] memcmp
  2.67%  map_perf_test  [k] unit_free
  2.39%  map_perf_test  [k] lookup_elem_raw

Reduce default iteration count as well to make 'map_perf_test' quick enough
even on debug kernels.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220902211058.60789-5-alexei.starovoitov@gmail.com
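To make the pattern explicit outside of diff context, here is a minimal, self-contained sketch of the batching idea the patch applies: one kprobe invocation drives ten map update/lookup/delete rounds, so the fixed syscall entry/exit cost is amortized across many map operations. The map name demo_hash, its sizing, and the attach point are illustrative placeholders, not the exact sample code, which defines several maps and stress functions.

/* Sketch only: mirrors the patched stress_hmap() below, with a
 * hypothetical map and attach point.
 */
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, __u32);
	__type(value, long);
	__uint(max_entries, 1024);
} demo_hash SEC(".maps");

SEC("kprobe/__x64_sys_getuid")
int batch_map_ops(struct pt_regs *ctx)
{
	__u32 key = bpf_get_current_pid_tgid(); /* lower 32 bits: pid */
	long init_val = 1;
	long *value;
	int i;

	/* Ten rounds per invocation, so the profile is dominated by
	 * map-op cost rather than by syscall entry/exit.
	 */
	for (i = 0; i < 10; i++) {
		bpf_map_update_elem(&demo_hash, &key, &init_val, BPF_ANY);
		value = bpf_map_lookup_elem(&demo_hash, &key);
		if (value)
			bpf_map_delete_elem(&demo_hash, &key);
	}
	return 0;
}

char _license[] SEC("license") = "GPL";

The same structure appears four times in the diff below, once per map flavor.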
Diffstat (limited to 'samples/bpf')
-rw-r--r--  samples/bpf/map_perf_test_kern.c  44
-rw-r--r--  samples/bpf/map_perf_test_user.c   2
2 files changed, 29 insertions(+), 17 deletions(-)
diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index 8773f22b6a98..7342c5b2f278 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -108,11 +108,14 @@ int stress_hmap(struct pt_regs *ctx)
 	u32 key = bpf_get_current_pid_tgid();
 	long init_val = 1;
 	long *value;
+	int i;
 
-	bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
-	value = bpf_map_lookup_elem(&hash_map, &key);
-	if (value)
-		bpf_map_delete_elem(&hash_map, &key);
+	for (i = 0; i < 10; i++) {
+		bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
+		value = bpf_map_lookup_elem(&hash_map, &key);
+		if (value)
+			bpf_map_delete_elem(&hash_map, &key);
+	}
 
 	return 0;
 }
@@ -123,11 +126,14 @@ int stress_percpu_hmap(struct pt_regs *ctx)
 	u32 key = bpf_get_current_pid_tgid();
 	long init_val = 1;
 	long *value;
+	int i;
 
-	bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
-	value = bpf_map_lookup_elem(&percpu_hash_map, &key);
-	if (value)
-		bpf_map_delete_elem(&percpu_hash_map, &key);
+	for (i = 0; i < 10; i++) {
+		bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
+		value = bpf_map_lookup_elem(&percpu_hash_map, &key);
+		if (value)
+			bpf_map_delete_elem(&percpu_hash_map, &key);
+	}
 
 	return 0;
 }
@@ -137,11 +143,14 @@ int stress_hmap_alloc(struct pt_regs *ctx)
 	u32 key = bpf_get_current_pid_tgid();
 	long init_val = 1;
 	long *value;
+	int i;
 
-	bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
-	value = bpf_map_lookup_elem(&hash_map_alloc, &key);
-	if (value)
-		bpf_map_delete_elem(&hash_map_alloc, &key);
+	for (i = 0; i < 10; i++) {
+		bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
+		value = bpf_map_lookup_elem(&hash_map_alloc, &key);
+		if (value)
+			bpf_map_delete_elem(&hash_map_alloc, &key);
+	}
 
 	return 0;
 }
@@ -151,11 +160,14 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx)
 	u32 key = bpf_get_current_pid_tgid();
 	long init_val = 1;
 	long *value;
+	int i;
 
-	bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
-	value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
-	if (value)
-		bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
+	for (i = 0; i < 10; i++) {
+		bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
+		value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
+		if (value)
+			bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
+	}
 
 	return 0;
 }
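Note that the constant-bound for loop added to each stress function needs no #pragma unroll: the verifier has accepted bounded loops since Linux 5.3, so the ten iterations are verified as-is.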
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index b6fc174ab1f2..1bb53f4b29e1 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -72,7 +72,7 @@ static int test_flags = ~0;
 static uint32_t num_map_entries;
 static uint32_t inner_lru_hash_size;
 static int lru_hash_lookup_test_entries = 32;
-static uint32_t max_cnt = 1000000;
+static uint32_t max_cnt = 10000;
 
 static int check_test_flags(enum test_type t)
 {
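The two changes work together: max_cnt drops from 1,000,000 syscall-triggered runs to 10,000, while each run now performs 10 update/lookup/delete rounds inside the bpf program. A test therefore issues 100x fewer syscalls yet still executes 10,000 x 10 = 100,000 map-operation rounds, which is why the 'perf report' above is dominated by htab and allocator functions rather than syscall entry/exit, and why the test stays quick even on debug kernels.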