author    Jakub Kicinski <kuba@kernel.org>  2024-01-06 06:15:32 +0300
committer Jakub Kicinski <kuba@kernel.org>  2024-01-06 06:15:32 +0300
commit    8158a50f9058ce882a6a020c41cef6bae2c3eae3
tree      7b0eef570ee9a02971813e074afa674a54e8e403
parent    795fd9342c628839ac9c3f3133d063ce2e577ea7
parent    5fe4ee6ae187523f710f1b93024437a073d88b17
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2024-01-05

We've added 40 non-merge commits during the last 2 day(s) which contain
a total of 73 files changed, 1526 insertions(+), 951 deletions(-).

The main changes are:

1) Fix a memory leak when streaming AF_UNIX sockets were inserted
   into multiple sockmap slots/maps, from John Fastabend.

2) Fix gotol in s390 BPF JIT with large offsets, from Ilya Leoshkevich.

3) Fix reattachment branch in bpf_tracing_prog_attach() and reject
   the request if there is no valid attach_btf, from Jiri Olsa.

4) Remove deprecated bpfilter kernel leftovers given the project
   is developed in user space (https://github.com/facebook/bpfilter),
   from Quentin Deslandes.

5) Relax tracing BPF program recursive attach rules given right now
   it is not possible to create tracing program call cycles,
   from Dmitrii Dolgov.

6) Fix excessive memory consumption for the bpf_global_percpu_ma
   for systems with a large number of CPUs, from Yonghong Song.

7) Small x86 BPF JIT cleanup to reuse emit_nops instead of open-coding
   memcpy of x86_nops, from Leon Hwang.

8) Follow-up for libbpf to support __arg_ctx global function argument
   tag semantics to complement the merged kernel side, from Andrii Nakryiko.

9) Introduce "volatile compare" macros for BPF selftests in order to
   make the latter more robust against compiler optimization,
   from Alexei Starovoitov.

10) Small simplification in verifier's size checking of helper accesses
    along with additional selftests, from Andrei Matei.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (40 commits)
  selftests/bpf: Test re-attachment fix for bpf_tracing_prog_attach
  bpf: Fix re-attachment branch in bpf_tracing_prog_attach
  selftests/bpf: Add test for recursive attachment of tracing progs
  bpf: Relax tracing prog recursive attach rules
  bpf, x86: Use emit_nops to replace memcpy x86_nops
  selftests/bpf: Test gotol with large offsets
  selftests/bpf: Double the size of test_loader log
  s390/bpf: Fix gotol with large offsets
  bpfilter: remove bpfilter
  bpf: Remove unnecessary cpu == 0 check in memalloc
  selftests/bpf: add __arg_ctx BTF rewrite test
  selftests/bpf: add arg:ctx cases to test_global_funcs tests
  libbpf: implement __arg_ctx fallback logic
  libbpf: move BTF loading step after relocation step
  libbpf: move exception callbacks assignment logic into relocation step
  libbpf: use stable map placeholder FDs
  libbpf: don't rely on map->fd as an indicator of map being created
  libbpf: use explicit map reuse flag to skip map creation steps
  libbpf: make uniform use of btf__fd() accessor inside libbpf
  selftests/bpf: Add a selftest with > 512-byte percpu allocation size
  ...
====================

Link: https://lore.kernel.org/r/20240105170105.21070-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
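The profiler.inc.h changes below belong to item 9: open-coded barrier_var()
bounds checks are replaced with the bpf_cmp_likely()/bpf_cmp_unlikely()
"volatile compare" macros from bpf_experimental.h. A minimal before/after
sketch of the converted pattern (payload, MAX_PATH, and the helpers are as
in the diff; the read source `name` is illustrative):

    /* Old pattern: barrier_var() pins the length in a register so the
     * compiler cannot rewrite the comparison into a form the verifier
     * would not recognize as a bounds check.
     */
    size_t len = bpf_probe_read_kernel_str(payload, MAX_PATH, name);
    barrier_var(len);
    if (len <= MAX_PATH) {
            barrier_var(len);
            payload += len;
    }

    /* New pattern: bpf_cmp_likely() emits the conditional jump itself,
     * keeping the bounds check visible to the verifier without barriers.
     */
    size_t len = bpf_probe_read_kernel_str(payload, MAX_PATH, name);
    if (bpf_cmp_likely(len, <=, MAX_PATH))
            payload += len;

One hunk additionally pairs bpf_nop_mov(filepart_length) with
bpf_cmp_unlikely() in the loop of read_absolute_file_path_from_dentry(),
where the over-length exit branch is the unlikely case.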
Diffstat (limited to 'tools/testing/selftests/bpf/progs/profiler.inc.h')
-rw-r--r--  tools/testing/selftests/bpf/progs/profiler.inc.h | 68
1 file changed, 20 insertions(+), 48 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index 897061930cb7..de3b6e4e4d0a 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -7,6 +7,7 @@
#include "profiler.h"
#include "err.h"
+#include "bpf_experimental.h"
#ifndef NULL
#define NULL 0
@@ -132,7 +133,7 @@ struct {
} disallowed_exec_inodes SEC(".maps");
#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
+#define ARRAY_SIZE(arr) (int)(sizeof(arr) / sizeof(arr[0]))
#endif
static INLINE bool IS_ERR(const void* ptr)
@@ -221,8 +222,7 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
return payload;
if (cgroup_node == cgroup_root_node)
*root_pos = payload - payload_start;
- if (filepart_length <= MAX_PATH) {
- barrier_var(filepart_length);
+ if (bpf_cmp_likely(filepart_length, <=, MAX_PATH)) {
payload += filepart_length;
}
cgroup_node = BPF_CORE_READ(cgroup_node, parent);
@@ -305,9 +305,7 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
size_t cgroup_root_length =
bpf_probe_read_kernel_str(payload, MAX_PATH,
BPF_CORE_READ(root_kernfs, name));
- barrier_var(cgroup_root_length);
- if (cgroup_root_length <= MAX_PATH) {
- barrier_var(cgroup_root_length);
+ if (bpf_cmp_likely(cgroup_root_length, <=, MAX_PATH)) {
cgroup_data->cgroup_root_length = cgroup_root_length;
payload += cgroup_root_length;
}
@@ -315,9 +313,7 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
size_t cgroup_proc_length =
bpf_probe_read_kernel_str(payload, MAX_PATH,
BPF_CORE_READ(proc_kernfs, name));
- barrier_var(cgroup_proc_length);
- if (cgroup_proc_length <= MAX_PATH) {
- barrier_var(cgroup_proc_length);
+ if (bpf_cmp_likely(cgroup_proc_length, <=, MAX_PATH)) {
cgroup_data->cgroup_proc_length = cgroup_proc_length;
payload += cgroup_proc_length;
}
@@ -347,9 +343,7 @@ static INLINE void* populate_var_metadata(struct var_metadata_t* metadata,
metadata->comm_length = 0;
size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
- barrier_var(comm_length);
- if (comm_length <= TASK_COMM_LEN) {
- barrier_var(comm_length);
+ if (bpf_cmp_likely(comm_length, <=, TASK_COMM_LEN)) {
metadata->comm_length = comm_length;
payload += comm_length;
}
@@ -494,10 +488,9 @@ read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
filepart_length =
bpf_probe_read_kernel_str(payload, MAX_PATH,
BPF_CORE_READ(filp_dentry, d_name.name));
- barrier_var(filepart_length);
- if (filepart_length > MAX_PATH)
+ bpf_nop_mov(filepart_length);
+ if (bpf_cmp_unlikely(filepart_length, >, MAX_PATH))
break;
- barrier_var(filepart_length);
payload += filepart_length;
length += filepart_length;
@@ -579,9 +572,7 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
size_t sysctl_val_length = bpf_probe_read_kernel_str(payload,
CTL_MAXNAME, buf);
- barrier_var(sysctl_val_length);
- if (sysctl_val_length <= CTL_MAXNAME) {
- barrier_var(sysctl_val_length);
+ if (bpf_cmp_likely(sysctl_val_length, <=, CTL_MAXNAME)) {
sysctl_data->sysctl_val_length = sysctl_val_length;
payload += sysctl_val_length;
}
@@ -590,9 +581,7 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
bpf_probe_read_kernel_str(payload, MAX_PATH,
BPF_CORE_READ(filp, f_path.dentry,
d_name.name));
- barrier_var(sysctl_path_length);
- if (sysctl_path_length <= MAX_PATH) {
- barrier_var(sysctl_path_length);
+ if (bpf_cmp_likely(sysctl_path_length, <=, MAX_PATH)) {
sysctl_data->sysctl_path_length = sysctl_path_length;
payload += sysctl_path_length;
}
@@ -645,7 +634,7 @@ int raw_tracepoint__sched_process_exit(void* ctx)
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) {
struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
- if (past_kill_data != NULL && past_kill_data->kill_target_pid == tpid) {
+ if (past_kill_data != NULL && past_kill_data->kill_target_pid == (pid_t)tpid) {
bpf_probe_read_kernel(kill_data, sizeof(*past_kill_data),
past_kill_data);
void* payload = kill_data->payload;
@@ -658,9 +647,7 @@ int raw_tracepoint__sched_process_exit(void* ctx)
kill_data->kill_target_cgroup_proc_length = 0;
size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
- barrier_var(comm_length);
- if (comm_length <= TASK_COMM_LEN) {
- barrier_var(comm_length);
+ if (bpf_cmp_likely(comm_length, <=, TASK_COMM_LEN)) {
kill_data->kill_target_name_length = comm_length;
payload += comm_length;
}
@@ -669,9 +656,7 @@ int raw_tracepoint__sched_process_exit(void* ctx)
bpf_probe_read_kernel_str(payload,
KILL_TARGET_LEN,
BPF_CORE_READ(proc_kernfs, name));
- barrier_var(cgroup_proc_length);
- if (cgroup_proc_length <= KILL_TARGET_LEN) {
- barrier_var(cgroup_proc_length);
+ if (bpf_cmp_likely(cgroup_proc_length, <=, KILL_TARGET_LEN)) {
kill_data->kill_target_cgroup_proc_length = cgroup_proc_length;
payload += cgroup_proc_length;
}
@@ -731,9 +716,7 @@ int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
const char* filename = BPF_CORE_READ(bprm, filename);
size_t bin_path_length =
bpf_probe_read_kernel_str(payload, MAX_FILENAME_LEN, filename);
- barrier_var(bin_path_length);
- if (bin_path_length <= MAX_FILENAME_LEN) {
- barrier_var(bin_path_length);
+ if (bpf_cmp_likely(bin_path_length, <=, MAX_FILENAME_LEN)) {
proc_exec_data->bin_path_length = bin_path_length;
payload += bin_path_length;
}
@@ -743,8 +726,7 @@ int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
unsigned int cmdline_length = probe_read_lim(payload, arg_start,
arg_end - arg_start, MAX_ARGS_LEN);
- if (cmdline_length <= MAX_ARGS_LEN) {
- barrier_var(cmdline_length);
+ if (bpf_cmp_likely(cmdline_length, <=, MAX_ARGS_LEN)) {
proc_exec_data->cmdline_length = cmdline_length;
payload += cmdline_length;
}
@@ -821,9 +803,7 @@ int kprobe_ret__do_filp_open(struct pt_regs* ctx)
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
size_t len = read_absolute_file_path_from_dentry(filp_dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->dst_filepath_length = len;
}
@@ -876,17 +856,13 @@ int BPF_KPROBE(kprobe__vfs_link,
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
size_t len = read_absolute_file_path_from_dentry(old_dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->src_filepath_length = len;
}
len = read_absolute_file_path_from_dentry(new_dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->dst_filepath_length = len;
}
@@ -936,16 +912,12 @@ int BPF_KPROBE(kprobe__vfs_symlink, struct inode* dir, struct dentry* dentry,
size_t len = bpf_probe_read_kernel_str(payload, MAX_FILEPATH_LENGTH,
oldname);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->src_filepath_length = len;
}
len = read_absolute_file_path_from_dentry(dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->dst_filepath_length = len;
}