Diffstat (limited to 'tools/testing/selftests/bpf/progs')
194 files changed, 29144 insertions, 734 deletions
diff --git a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
new file mode 100644
index 000000000000..e4bfbba6c193
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+long create_errs = 0;
+long create_cnts = 0;
+long kmalloc_cnts = 0;
+__u32 bench_pid = 0;
+
+struct storage {
+	__u8 data[64];
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct storage);
+} sk_storage_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct storage);
+} task_storage_map SEC(".maps");
+
+SEC("raw_tp/kmalloc")
+int BPF_PROG(kmalloc, unsigned long call_site, const void *ptr,
+	     size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags,
+	     int node)
+{
+	__sync_fetch_and_add(&kmalloc_cnts, 1);
+
+	return 0;
+}
+
+SEC("tp_btf/sched_process_fork")
+int BPF_PROG(sched_process_fork, struct task_struct *parent, struct task_struct *child)
+{
+	struct storage *stg;
+
+	if (parent->tgid != bench_pid)
+		return 0;
+
+	stg = bpf_task_storage_get(&task_storage_map, child, NULL,
+				   BPF_LOCAL_STORAGE_GET_F_CREATE);
+	if (stg)
+		__sync_fetch_and_add(&create_cnts, 1);
+	else
+		__sync_fetch_and_add(&create_errs, 1);
+
+	return 0;
+}
+
+SEC("lsm.s/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+	     int protocol, int kern)
+{
+	struct storage *stg;
+	__u32 pid;
+
+	pid = bpf_get_current_pid_tgid() >> 32;
+	if (pid != bench_pid)
+		return 0;
+
+	stg = bpf_sk_storage_get(&sk_storage_map, sock->sk, NULL,
+				 BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+	if (stg)
+		__sync_fetch_and_add(&create_cnts, 1);
+	else
+		__sync_fetch_and_add(&create_errs, 1);
+
+	return 0;
+}
+
+char __license[] SEC("license") = "GPL";
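For context: a create-rate benchmark like this is driven from user space through its skeleton, which sets bench_pid before load and reads the counters back afterwards. A minimal sketch, assuming a bpftool-generated bench_local_storage_create.skel.h (the real harness under benchs/ in the selftests is more involved):

#include <stdio.h>
#include <unistd.h>
#include "bench_local_storage_create.skel.h"

int main(void)
{
	struct bench_local_storage_create *skel;

	skel = bench_local_storage_create__open();
	if (!skel)
		return 1;

	/* count only events triggered by this process */
	skel->bss->bench_pid = getpid();

	if (bench_local_storage_create__load(skel) ||
	    bench_local_storage_create__attach(skel))
		goto out;

	/* ... fork children / create sockets here to drive the progs ... */

	printf("creates=%ld errs=%ld kmallocs=%ld\n",
	       skel->bss->create_cnts, skel->bss->create_errs,
	       skel->bss->kmalloc_cnts);
out:
	bench_local_storage_create__destroy(skel);
	return 0;
}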
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index a20c5ed5e454..b04e092fac94 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -337,7 +337,7 @@ PROG(IPV6)(struct __sk_buff *skb)
 	keys->ip_proto = ip6h->nexthdr;
 	keys->flow_label = ip6_flowlabel(ip6h);
 
-	if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
+	if (keys->flow_label && keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
 		return export_flow_keys(keys, BPF_OK);
 
 	return parse_ipv6_proto(skb, ip6h->nexthdr);
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
index 9ba14c37bbcc..5ddcc46fd886 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
@@ -33,7 +33,6 @@ int dump_ksym(struct bpf_iter__ksym *ctx)
 	__u32 seq_num = ctx->meta->seq_num;
 	unsigned long value;
 	char type;
-	int ret;
 
 	if (!iter)
 		return 0;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
index b77adfd55d73..ec7f91850dec 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
@@ -42,7 +42,6 @@ int change_tcp_cc(struct bpf_iter__tcp *ctx)
 	char cur_cc[TCP_CA_NAME_MAX];
 	struct tcp_sock *tp;
 	struct sock *sk;
-	int ret;
 
 	if (!bpf_tcp_sk(ctx->sk_common))
 		return 0;
diff --git a/tools/testing/selftests/bpf/progs/bpf_loop.c b/tools/testing/selftests/bpf/progs/bpf_loop.c
index de1fc82d2710..1d194455b109 100644
--- a/tools/testing/selftests/bpf/progs/bpf_loop.c
+++ b/tools/testing/selftests/bpf/progs/bpf_loop.c
@@ -138,8 +138,6 @@ static int callback_set_0f(int i, void *ctx)
 SEC("fentry/" SYS_PREFIX "sys_nanosleep")
 int prog_non_constant_callback(void *ctx)
 {
-	struct callback_ctx data = {};
-
 	if (bpf_get_current_pid_tgid() >> 32 != pid)
 		return 0;
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index 14e28f991451..d3c1217ba79a 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -2,17 +2,89 @@
 #ifndef __BPF_MISC_H__
 #define __BPF_MISC_H__
 
+/* This set of attributes controls behavior of the
+ * test_loader.c:test_loader__run_subtests().
+ *
+ * The test_loader sequentially loads each program in a skeleton.
+ * Programs could be loaded in privileged and unprivileged modes.
+ * - __success, __failure, __msg imply privileged mode;
+ * - __success_unpriv, __failure_unpriv, __msg_unpriv imply
+ *   unprivileged mode.
+ * If combination of privileged and unprivileged attributes is present
+ * both modes are used. If none are present privileged mode is implied.
+ *
+ * See test_loader.c:drop_capabilities() for exact set of capabilities
+ * that differ between privileged and unprivileged modes.
+ *
+ * For test filtering purposes the name of the program loaded in
+ * unprivileged mode is derived from the usual program name by adding
+ * `@unpriv' suffix.
+ *
+ * __msg             Message expected to be found in the verifier log.
+ *                   Multiple __msg attributes could be specified.
+ * __msg_unpriv      Same as __msg but for unprivileged mode.
+ *
+ * __success         Expect program load success in privileged mode.
+ * __success_unpriv  Expect program load success in unprivileged mode.
+ *
+ * __failure         Expect program load failure in privileged mode.
+ * __failure_unpriv  Expect program load failure in unprivileged mode.
+ *
+ * __retval          Execute the program using BPF_PROG_TEST_RUN command,
+ *                   expect return value to match passed parameter:
+ *                   - a decimal number
+ *                   - a hexadecimal number, when starts from 0x
+ *                   - literal INT_MIN
+ *                   - literal POINTER_VALUE (see definition below)
+ *                   - literal TEST_DATA_LEN (see definition below)
+ * __retval_unpriv   Same, but load program in unprivileged mode.
+ *
+ * __description     Text to be used instead of a program name for display
+ *                   and filtering purposes.
+ *
+ * __log_level       Log level to use for the program, numeric value expected.
+ *
+ * __flag            Adds one flag use for the program, the following values are valid:
+ *                   - BPF_F_STRICT_ALIGNMENT;
+ *                   - BPF_F_TEST_RND_HI32;
+ *                   - BPF_F_TEST_STATE_FREQ;
+ *                   - BPF_F_SLEEPABLE;
+ *                   - BPF_F_XDP_HAS_FRAGS;
+ *                   - A numeric value.
+ *                   Multiple __flag attributes could be specified, the final flags
+ *                   value is derived by applying binary "or" to all specified values.
+ *
+ * __auxiliary         Annotated program is not a separate test, but used as auxiliary
+ *                     for some other test cases and should always be loaded.
+ * __auxiliary_unpriv  Same, but load program in unprivileged mode.
+ */
 #define __msg(msg)		__attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))
 #define __failure		__attribute__((btf_decl_tag("comment:test_expect_failure")))
 #define __success		__attribute__((btf_decl_tag("comment:test_expect_success")))
+#define __description(desc)	__attribute__((btf_decl_tag("comment:test_description=" desc)))
+#define __msg_unpriv(msg)	__attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg)))
+#define __failure_unpriv	__attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
+#define __success_unpriv	__attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
 #define __log_level(lvl)	__attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
+#define __flag(flag)		__attribute__((btf_decl_tag("comment:test_prog_flags="#flag)))
+#define __retval(val)		__attribute__((btf_decl_tag("comment:test_retval="#val)))
+#define __retval_unpriv(val)	__attribute__((btf_decl_tag("comment:test_retval_unpriv="#val)))
+#define __auxiliary		__attribute__((btf_decl_tag("comment:test_auxiliary")))
+#define __auxiliary_unpriv	__attribute__((btf_decl_tag("comment:test_auxiliary_unpriv")))
 
 /* Convenience macro for use with 'asm volatile' blocks */
 #define __naked __attribute__((naked))
 #define __clobber_all "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "memory"
 #define __clobber_common "r0", "r1", "r2", "r3", "r4", "r5", "memory"
 #define __imm(name) [name]"i"(name)
+#define __imm_const(name, expr) [name]"i"(expr)
 #define __imm_addr(name) [name]"i"(&name)
+#define __imm_ptr(name) [name]"p"(&name)
+#define __imm_insn(name, expr) [name]"i"(*(long *)&(expr))
+
+/* Magic constants used with __retval() */
+#define POINTER_VALUE	0xcafe4all
+#define TEST_DATA_LEN	64
 
 #if defined(__TARGET_ARCH_x86)
 #define SYSCALL_WRAPPER 1
@@ -52,5 +124,7 @@
 #define FUNC_REG_ARG_CNT 5
 #endif
 
+/* make it look to compiler like value is read and written */
+#define __sink(expr) asm volatile("" : "+g"(expr))
 
 #endif
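The comment block above documents the declarative test_loader interface; a hypothetical test program showing how the annotations compose (section, body, and expected value are illustrative only, not part of this patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("annotation demo: trivial return")
__success __retval(42)			/* privileged: must load and return 42 */
__success_unpriv __retval_unpriv(42)	/* unprivileged: same expectation */
__log_level(2)
int annotation_demo(void *ctx)
{
	return 42;
}

char _license[] SEC("license") = "GPL";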
diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c
index 7653df1bc787..50f95ec61165 100644
--- a/tools/testing/selftests/bpf/progs/cb_refs.c
+++ b/tools/testing/selftests/bpf/progs/cb_refs.c
@@ -4,7 +4,7 @@
 #include <bpf/bpf_helpers.h>
 
 struct map_value {
-	struct prog_test_ref_kfunc __kptr_ref *ptr;
+	struct prog_test_ref_kfunc __kptr *ptr;
 };
 
 struct {
@@ -52,7 +52,6 @@ int leak_prog(void *ctx)
 {
 	struct prog_test_ref_kfunc *p;
 	struct map_value *v;
-	unsigned long sl;
 
 	v = bpf_map_lookup_elem(&array_map, &(int){0});
 	if (!v)
diff --git a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c
index 88638315c582..ac86a8a61605 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c
@@ -66,7 +66,6 @@ static inline int is_allowed_peer_cg(struct __sk_buff *skb,
 
 SEC("cgroup_skb/ingress")
 int ingress_lookup(struct __sk_buff *skb)
 {
-	__u32 serv_port_key = 0;
 	struct ipv6hdr ip6h;
 	struct tcphdr tcph;
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
index 7d30855bfe78..22914a70db54 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
@@ -10,7 +10,7 @@
 #include <bpf/bpf_tracing.h>
 
 struct __cgrps_kfunc_map_value {
-	struct cgroup __kptr_ref * cgrp;
+	struct cgroup __kptr * cgrp;
 };
 
 struct hash_map {
@@ -21,9 +21,11 @@ struct hash_map {
 } __cgrps_kfunc_map SEC(".maps");
 
 struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
-struct cgroup *bpf_cgroup_kptr_get(struct cgroup **pp) __ksym;
 void bpf_cgroup_release(struct cgroup *p) __ksym;
 struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
 
 static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp)
 {
@@ -60,6 +62,11 @@ static inline int cgrps_kfunc_map_insert(struct cgroup *cgrp)
 	}
 
 	acquired = bpf_cgroup_acquire(cgrp);
+	if (!acquired) {
+		bpf_map_delete_elem(&__cgrps_kfunc_map, &id);
+		return -ENOENT;
+	}
+
 	old = bpf_kptr_xchg(&v->cgrp, acquired);
 	if (old) {
 		bpf_cgroup_release(old);
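With bpf_cgroup_kptr_get() gone, the read-side replacement for a cgroup kptr stored in a map is a plain dereference inside bpf_rcu_read_lock()/bpf_rcu_read_unlock(), declared above. A condensed sketch of that pattern (hypothetical program name; relies on the map and helpers from this header):

SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(read_stored_cgrp, struct cgroup *cgrp, const char *path)
{
	struct __cgrps_kfunc_map_value *v;
	u64 id = 0;

	v = cgrps_kfunc_map_value_lookup(cgrp);
	if (!v)
		return 0;

	bpf_rcu_read_lock();
	if (v->cgrp)			/* NULL check is still required */
		id = v->cgrp->kn->id;	/* read-only access is fine under RCU */
	bpf_rcu_read_unlock();

	bpf_printk("stored cgroup id: %llu", id);
	return 0;
}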
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
index 4ad7fe24966d..0fa564a5cc5b 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
@@ -41,6 +41,23 @@ int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path
 
 	/* Can't invoke bpf_cgroup_acquire() on an untrusted pointer. */
 	acquired = bpf_cgroup_acquire(v->cgrp);
+	if (acquired)
+		bpf_cgroup_release(acquired);
+
+	return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(cgrp_kfunc_acquire_no_null_check, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *acquired;
+
+	acquired = bpf_cgroup_acquire(cgrp);
+	/*
+	 * Can't invoke bpf_cgroup_release() without checking the return value
+	 * of bpf_cgroup_acquire().
+	 */
 	bpf_cgroup_release(acquired);
 
 	return 0;
@@ -54,7 +71,8 @@ int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
 
 	/* Can't invoke bpf_cgroup_acquire() on a random frame pointer. */
 	acquired = bpf_cgroup_acquire((struct cgroup *)&stack_cgrp);
-	bpf_cgroup_release(acquired);
+	if (acquired)
+		bpf_cgroup_release(acquired);
 
 	return 0;
 }
@@ -67,7 +85,8 @@ int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
 
 	/* Can't acquire an untrusted struct cgroup * pointer. */
 	acquired = bpf_cgroup_acquire(cgrp);
-	bpf_cgroup_release(acquired);
+	if (acquired)
+		bpf_cgroup_release(acquired);
 
 	return 0;
 }
@@ -80,7 +99,8 @@ int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char
 
 	/* Can't invoke bpf_cgroup_acquire() on a pointer obtained from walking a trusted cgroup. */
 	acquired = bpf_cgroup_acquire(cgrp->old_dom_cgrp);
-	bpf_cgroup_release(acquired);
+	if (acquired)
+		bpf_cgroup_release(acquired);
 
 	return 0;
 }
@@ -93,9 +113,8 @@ int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path)
 
 	/* Can't invoke bpf_cgroup_acquire() on a NULL pointer. */
 	acquired = bpf_cgroup_acquire(NULL);
-	if (!acquired)
-		return 0;
-	bpf_cgroup_release(acquired);
+	if (acquired)
+		bpf_cgroup_release(acquired);
 
 	return 0;
 }
@@ -109,57 +128,7 @@ int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *pat
 	acquired = bpf_cgroup_acquire(cgrp);
 
 	/* Acquired cgroup is never released. */
-
-	return 0;
-}
-
-SEC("tp_btf/cgroup_mkdir")
-__failure __msg("arg#0 expected pointer to map value")
-int BPF_PROG(cgrp_kfunc_get_non_kptr_param, struct cgroup *cgrp, const char *path)
-{
-	struct cgroup *kptr;
-
-	/* Cannot use bpf_cgroup_kptr_get() on a non-kptr, even on a valid cgroup. */
-	kptr = bpf_cgroup_kptr_get(&cgrp);
-	if (!kptr)
-		return 0;
-
-	bpf_cgroup_release(kptr);
-
-	return 0;
-}
-
-SEC("tp_btf/cgroup_mkdir")
-__failure __msg("arg#0 expected pointer to map value")
-int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char *path)
-{
-	struct cgroup *kptr, *acquired;
-
-	acquired = bpf_cgroup_acquire(cgrp);
-
-	/* Cannot use bpf_cgroup_kptr_get() on a non-map-value, even if the kptr was acquired. */
-	kptr = bpf_cgroup_kptr_get(&acquired);
-	bpf_cgroup_release(acquired);
-	if (!kptr)
-		return 0;
-
-	bpf_cgroup_release(kptr);
-
-	return 0;
-}
-
-SEC("tp_btf/cgroup_mkdir")
-__failure __msg("arg#0 expected pointer to map value")
-int BPF_PROG(cgrp_kfunc_get_null, struct cgroup *cgrp, const char *path)
-{
-	struct cgroup *kptr;
-
-	/* Cannot use bpf_cgroup_kptr_get() on a NULL pointer. */
-	kptr = bpf_cgroup_kptr_get(NULL);
-	if (!kptr)
-		return 0;
-
-	bpf_cgroup_release(kptr);
+	__sink(acquired);
 
 	return 0;
 }
@@ -185,8 +154,8 @@ int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
 }
 
 SEC("tp_btf/cgroup_mkdir")
-__failure __msg("Unreleased reference")
-int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path)
+__failure __msg("must be referenced or trusted")
+int BPF_PROG(cgrp_kfunc_rcu_get_release, struct cgroup *cgrp, const char *path)
 {
 	struct cgroup *kptr;
 	struct __cgrps_kfunc_map_value *v;
@@ -195,17 +164,18 @@ int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path)
 	if (!v)
 		return 0;
 
-	kptr = bpf_cgroup_kptr_get(&v->cgrp);
-	if (!kptr)
-		return 0;
-
-	/* Kptr acquired above is never released. */
+	bpf_rcu_read_lock();
+	kptr = v->cgrp;
+	if (kptr)
+		/* Can't release a cgroup kptr stored in a map. */
+		bpf_cgroup_release(kptr);
+	bpf_rcu_read_unlock();
 
 	return 0;
 }
 
 SEC("tp_btf/cgroup_mkdir")
-__failure __msg("arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
 int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path)
 {
 	struct __cgrps_kfunc_map_value *v;
@@ -233,7 +203,7 @@ int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
 }
 
 SEC("tp_btf/cgroup_mkdir")
-__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
 int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
 {
 	struct __cgrps_kfunc_map_value local, *v;
@@ -255,6 +225,8 @@ int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
 		return -ENOENT;
 
 	acquired = bpf_cgroup_acquire(cgrp);
+	if (!acquired)
+		return -ENOENT;
 
 	old = bpf_kptr_xchg(&v->cgrp, acquired);
 
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
index 0c23ea32df9f..5354455a01be 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
@@ -38,7 +38,10 @@ int BPF_PROG(test_cgrp_acquire_release_argument, struct cgroup *cgrp, const char
 		return 0;
 
 	acquired = bpf_cgroup_acquire(cgrp);
-	bpf_cgroup_release(acquired);
+	if (!acquired)
+		err = 1;
+	else
+		bpf_cgroup_release(acquired);
 
 	return 0;
 }
@@ -61,7 +64,7 @@ int BPF_PROG(test_cgrp_acquire_leave_in_map, struct cgroup *cgrp, const char *pa
 
 SEC("tp_btf/cgroup_mkdir")
 int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path)
 {
-	struct cgroup *kptr;
+	struct cgroup *kptr, *cg;
 	struct __cgrps_kfunc_map_value *v;
 	long status;
@@ -80,6 +83,16 @@ int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path) return 0; } + kptr = v->cgrp; + if (!kptr) { + err = 4; + return 0; + } + + cg = bpf_cgroup_ancestor(kptr, 1); + if (cg) /* verifier only check */ + bpf_cgroup_release(cg); + kptr = bpf_kptr_xchg(&v->cgrp, NULL); if (!kptr) { err = 3; @@ -113,13 +126,11 @@ int BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path) return 0; } - kptr = bpf_cgroup_kptr_get(&v->cgrp); - if (!kptr) { + bpf_rcu_read_lock(); + kptr = v->cgrp; + if (!kptr) err = 3; - return 0; - } - - bpf_cgroup_release(kptr); + bpf_rcu_read_unlock(); return 0; } @@ -168,3 +179,45 @@ int BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp, const char *path) return 0; } + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(test_cgrp_from_id, struct cgroup *cgrp, const char *path) +{ + struct cgroup *parent, *res; + u64 parent_cgid; + + if (!is_test_kfunc_task()) + return 0; + + /* @cgrp's ID is not visible yet, let's test with the parent */ + parent = bpf_cgroup_ancestor(cgrp, cgrp->level - 1); + if (!parent) { + err = 1; + return 0; + } + + parent_cgid = parent->kn->id; + bpf_cgroup_release(parent); + + res = bpf_cgroup_from_id(parent_cgid); + if (!res) { + err = 2; + return 0; + } + + bpf_cgroup_release(res); + + if (res != parent) { + err = 3; + return 0; + } + + res = bpf_cgroup_from_id((u64)-1); + if (res) { + bpf_cgroup_release(res); + err = 4; + return 0; + } + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c index 6652d18465b2..8aeba1b75c83 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c @@ -84,7 +84,6 @@ int BPF_PROG(update_cookie_tracing, struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct socket_cookie *p; - struct tcp_sock *tcp_sk; if (uaddr->sa_family != AF_INET6) return 0; diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c index 2d11ed528b6f..4c7844e1dbfa 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c @@ -24,7 +24,6 @@ void bpf_rcu_read_unlock(void) __ksym; SEC("?iter.s/cgroup") int cgroup_iter(struct bpf_iter__cgroup *ctx) { - struct seq_file *seq = ctx->meta->seq; struct cgroup *cgrp = ctx->cgroup; long *ptr; @@ -49,7 +48,7 @@ int no_rcu_lock(void *ctx) if (task->pid != target_pid) return 0; - /* ptr_to_btf_id semantics. should work. */ + /* task->cgroups is untrusted in sleepable prog outside of RCU CS */ cgrp = task->cgroups->dfl_cgrp; ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); @@ -71,7 +70,7 @@ int yes_rcu_lock(void *ctx) bpf_rcu_read_lock(); cgrp = task->cgroups->dfl_cgrp; - /* cgrp is untrusted and cannot pass to bpf_cgrp_storage_get() helper. 
*/ + /* cgrp is trusted under RCU CS */ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); if (ptr) cgroup_id = cgrp->kn->id; diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index ec25371de789..7ef49ec04838 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -32,7 +32,7 @@ #define IFNAMSIZ 16 #endif -__attribute__ ((noinline)) +__attribute__ ((noinline)) __weak int do_bind(struct bpf_sock_addr *ctx) { struct sockaddr_in sa = {}; diff --git a/tools/testing/selftests/bpf/progs/core_kern.c b/tools/testing/selftests/bpf/progs/core_kern.c index 2715fe27d4cf..004f2acef2eb 100644 --- a/tools/testing/selftests/bpf/progs/core_kern.c +++ b/tools/testing/selftests/bpf/progs/core_kern.c @@ -77,7 +77,7 @@ int balancer_ingress(struct __sk_buff *ctx) void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; void *ptr; - int ret = 0, nh_off, i = 0; + int nh_off, i = 0; nh_off = 14; diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h index ad34f3b602be..0c5b785a93e4 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_common.h +++ b/tools/testing/selftests/bpf/progs/cpumask_common.h @@ -9,8 +9,11 @@ int err; +#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8))) +private(MASK) static struct bpf_cpumask __kptr * global_mask; + struct __cpumask_map_value { - struct bpf_cpumask __kptr_ref * cpumask; + struct bpf_cpumask __kptr * cpumask; }; struct array_map { @@ -23,7 +26,6 @@ struct array_map { struct bpf_cpumask *bpf_cpumask_create(void) __ksym; void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym; struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym; -struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumask) __ksym; u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym; u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; @@ -51,6 +53,9 @@ void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym u32 bpf_cpumask_any(const struct cpumask *src) __ksym; u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) __ksym; +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; + static inline const struct cpumask *cast(struct bpf_cpumask *cpumask) { return (const struct cpumask *)cpumask; diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c index 33e8e86dd090..a9bf6ea336cf 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_failure.c +++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c @@ -23,6 +23,7 @@ int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags) struct bpf_cpumask *cpumask; cpumask = create_cpumask(); + __sink(cpumask); /* cpumask is never released. */ return 0; @@ -44,13 +45,14 @@ int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flag } SEC("tp_btf/task_newtask") -__failure __msg("bpf_cpumask_acquire args#0 expected pointer to STRUCT bpf_cpumask") +__failure __msg("must be referenced") int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags) { struct bpf_cpumask *cpumask; /* Can't acquire a non-struct bpf_cpumask. 
*/ cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr); + __sink(cpumask); return 0; } @@ -63,6 +65,7 @@ int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags) /* Can't set the CPU of a non-struct bpf_cpumask. */ bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr); + __sink(cpumask); return 0; } @@ -92,35 +95,98 @@ int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_ } SEC("tp_btf/task_newtask") -__failure __msg("Unreleased reference") -int BPF_PROG(test_kptr_get_no_release, struct task_struct *task, u64 clone_flags) +__failure __msg("NULL pointer passed to trusted arg0") +int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags) { - struct bpf_cpumask *cpumask; - struct __cpumask_map_value *v; + /* NULL passed to KF_TRUSTED_ARGS kfunc. */ + bpf_cpumask_empty(NULL); - cpumask = create_cpumask(); - if (!cpumask) + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("R2 must be a rcu pointer") +int BPF_PROG(test_global_mask_out_of_rcu, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *local, *prev; + + local = create_cpumask(); + if (!local) return 0; - if (cpumask_map_insert(cpumask)) + prev = bpf_kptr_xchg(&global_mask, local); + if (prev) { + bpf_cpumask_release(prev); + err = 3; return 0; + } - v = cpumask_map_value_lookup(); - if (!v) + bpf_rcu_read_lock(); + local = global_mask; + if (!local) { + err = 4; + bpf_rcu_read_unlock(); return 0; + } - cpumask = bpf_cpumask_kptr_get(&v->cpumask); + bpf_rcu_read_unlock(); + + /* RCU region is exited before calling KF_RCU kfunc. */ + + bpf_cpumask_test_cpu(0, (const struct cpumask *)local); - /* cpumask is never released. */ return 0; } SEC("tp_btf/task_newtask") -__failure __msg("NULL pointer passed to trusted arg0") -int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags) +__failure __msg("NULL pointer passed to trusted arg1") +int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone_flags) { - /* NULL passed to KF_TRUSTED_ARGS kfunc. */ - bpf_cpumask_empty(NULL); + struct bpf_cpumask *local, *prev; + + local = create_cpumask(); + if (!local) + return 0; + + prev = bpf_kptr_xchg(&global_mask, local); + if (prev) { + bpf_cpumask_release(prev); + err = 3; + return 0; + } + + bpf_rcu_read_lock(); + local = global_mask; + + /* No NULL check is performed on global cpumask kptr. 
*/ + bpf_cpumask_test_cpu(0, (const struct cpumask *)local); + + bpf_rcu_read_unlock(); + + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("Possibly NULL pointer passed to helper arg2") +int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *prev, *curr; + + curr = bpf_cpumask_create(); + if (!curr) + return 0; + + prev = bpf_kptr_xchg(&global_mask, curr); + if (prev) + bpf_cpumask_release(prev); + + bpf_rcu_read_lock(); + curr = global_mask; + /* PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU passed to bpf_kptr_xchg() */ + prev = bpf_kptr_xchg(&global_mask, curr); + bpf_rcu_read_unlock(); + if (prev) + bpf_cpumask_release(prev); return 0; } diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c index 1d38bc65d4b0..2fcdd7f68ac7 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_success.c +++ b/tools/testing/selftests/bpf/progs/cpumask_success.c @@ -353,7 +353,6 @@ SEC("tp_btf/task_newtask") int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags) { struct bpf_cpumask *cpumask; - struct __cpumask_map_value *v; cpumask = create_cpumask(); if (!cpumask) @@ -396,31 +395,34 @@ int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_fla } SEC("tp_btf/task_newtask") -int BPF_PROG(test_insert_kptr_get_release, struct task_struct *task, u64 clone_flags) +int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags) { - struct bpf_cpumask *cpumask; - struct __cpumask_map_value *v; + struct bpf_cpumask *local, *prev; - cpumask = create_cpumask(); - if (!cpumask) + if (!is_test_task()) return 0; - if (cpumask_map_insert(cpumask)) { + local = create_cpumask(); + if (!local) + return 0; + + prev = bpf_kptr_xchg(&global_mask, local); + if (prev) { + bpf_cpumask_release(prev); err = 3; return 0; } - v = cpumask_map_value_lookup(); - if (!v) { + bpf_rcu_read_lock(); + local = global_mask; + if (!local) { err = 4; + bpf_rcu_read_unlock(); return 0; } - cpumask = bpf_cpumask_kptr_get(&v->cpumask); - if (cpumask) - bpf_cpumask_release(cpumask); - else - err = 5; + bpf_cpumask_test_cpu(0, (const struct cpumask *)local); + bpf_rcu_read_unlock(); return 0; } diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index aa5b69354b91..759eb5c245cd 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -5,7 +5,9 @@ #include <string.h> #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include <linux/if_ether.h> #include "bpf_misc.h" +#include "bpf_kfuncs.h" char _license[] SEC("license") = "GPL"; @@ -244,11 +246,32 @@ done: return 0; } +/* A data slice can't be accessed out of bounds */ +SEC("?tc") +__failure __msg("value is outside of the allowed memory range") +int data_slice_out_of_bounds_skb(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + /* this should fail */ + *(__u8*)(hdr + 1) = 1; + + return SK_PASS; +} + SEC("?raw_tp") __failure __msg("value is outside of the allowed memory range") int data_slice_out_of_bounds_map_value(void *ctx) { - __u32 key = 0, map_val; + __u32 map_val; struct bpf_dynptr ptr; void *data; @@ -365,7 +388,6 @@ int data_slice_missing_null_check2(void *ctx) /* this should fail */ *data2 
= 3; -done: bpf_ringbuf_discard_dynptr(&ptr, 0); return 0; } @@ -399,7 +421,6 @@ int invalid_helper2(void *ctx) /* this should fail */ bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0, 0); - return 0; } @@ -418,6 +439,7 @@ int invalid_write1(void *ctx) /* this should fail */ data = bpf_dynptr_data(&ptr, 0, 1); + __sink(data); return 0; } @@ -1044,6 +1066,193 @@ int dynptr_read_into_slot(void *ctx) return 0; } +/* bpf_dynptr_slice()s are read-only and cannot be written to */ +SEC("?tc") +__failure __msg("R0 cannot write into rdonly_mem") +int skb_invalid_slice_write(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + /* this should fail */ + hdr->h_proto = 1; + + return SK_PASS; +} + +/* The read-only data slice is invalidated whenever a helper changes packet data */ +SEC("?tc") +__failure __msg("invalid mem access 'scalar'") +int skb_invalid_data_slice1(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + val = hdr->h_proto; + + if (bpf_skb_pull_data(skb, skb->len)) + return SK_DROP; + + /* this should fail */ + val = hdr->h_proto; + + return SK_PASS; +} + +/* The read-write data slice is invalidated whenever a helper changes packet data */ +SEC("?tc") +__failure __msg("invalid mem access 'scalar'") +int skb_invalid_data_slice2(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + hdr->h_proto = 123; + + if (bpf_skb_pull_data(skb, skb->len)) + return SK_DROP; + + /* this should fail */ + hdr->h_proto = 1; + + return SK_PASS; +} + +/* The read-only data slice is invalidated whenever bpf_dynptr_write() is called */ +SEC("?tc") +__failure __msg("invalid mem access 'scalar'") +int skb_invalid_data_slice3(struct __sk_buff *skb) +{ + char write_data[64] = "hello there, world!!"; + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + val = hdr->h_proto; + + bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0); + + /* this should fail */ + val = hdr->h_proto; + + return SK_PASS; +} + +/* The read-write data slice is invalidated whenever bpf_dynptr_write() is called */ +SEC("?tc") +__failure __msg("invalid mem access 'scalar'") +int skb_invalid_data_slice4(struct __sk_buff *skb) +{ + char write_data[64] = "hello there, world!!"; + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + hdr->h_proto = 123; + + bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0); + + /* this should fail */ + hdr->h_proto = 1; + + return SK_PASS; +} + +/* The read-only data slice is invalidated whenever a helper changes packet data */ +SEC("?xdp") +__failure __msg("invalid mem access 'scalar'") +int xdp_invalid_data_slice1(struct xdp_md *xdp) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char 
buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_xdp(xdp, 0, &ptr); + hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + val = hdr->h_proto; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr))) + return XDP_DROP; + + /* this should fail */ + val = hdr->h_proto; + + return XDP_PASS; +} + +/* The read-write data slice is invalidated whenever a helper changes packet data */ +SEC("?xdp") +__failure __msg("invalid mem access 'scalar'") +int xdp_invalid_data_slice2(struct xdp_md *xdp) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_xdp(xdp, 0, &ptr); + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + hdr->h_proto = 9; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr))) + return XDP_DROP; + + /* this should fail */ + hdr->h_proto = 1; + + return XDP_PASS; +} + +/* Only supported prog type can create skb-type dynptrs */ +SEC("?raw_tp") +__failure __msg("calling kernel function bpf_dynptr_from_skb is not allowed") +int skb_invalid_ctx(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_from_skb(ctx, 0, &ptr); + + return 0; +} + /* Reject writes to dynptr slot for uninit arg */ SEC("?raw_tp") __failure __msg("potential write to dynptr at off=-16") @@ -1061,6 +1270,61 @@ int uninit_write_into_slot(void *ctx) return 0; } +/* Only supported prog type can create xdp-type dynptrs */ +SEC("?raw_tp") +__failure __msg("calling kernel function bpf_dynptr_from_xdp is not allowed") +int xdp_invalid_ctx(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_from_xdp(ctx, 0, &ptr); + + return 0; +} + +__u32 hdr_size = sizeof(struct ethhdr); +/* Can't pass in variable-sized len to bpf_dynptr_slice */ +SEC("?tc") +__failure __msg("unbounded memory access") +int dynptr_slice_var_len1(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + /* this should fail */ + hdr = bpf_dynptr_slice(&ptr, 0, buffer, hdr_size); + if (!hdr) + return SK_DROP; + + return SK_PASS; +} + +/* Can't pass in variable-sized len to bpf_dynptr_slice */ +SEC("?tc") +__failure __msg("must be a known constant") +int dynptr_slice_var_len2(struct __sk_buff *skb) +{ + char buffer[sizeof(struct ethhdr)] = {}; + struct bpf_dynptr ptr; + struct ethhdr *hdr; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + if (hdr_size <= sizeof(buffer)) { + /* this should fail */ + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, hdr_size); + if (!hdr) + return SK_DROP; + hdr->h_proto = 12; + } + + return SK_PASS; +} + static int callback(__u32 index, void *data) { *(__u32 *)data = 123; @@ -1092,3 +1356,25 @@ int invalid_data_slices(void *ctx) return 0; } + +/* Program types that don't allow writes to packet data should fail if + * bpf_dynptr_slice_rdwr is called + */ +SEC("cgroup_skb/ingress") +__failure __msg("the prog does not allow writes to packet data") +int invalid_slice_rdwr_rdonly(struct __sk_buff *skb) +{ + char buffer[sizeof(struct ethhdr)] = {}; + struct bpf_dynptr ptr; + struct ethhdr *hdr; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + /* this should fail since cgroup_skb doesn't allow + * changing packet data + */ + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + __sink(hdr); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index 35db7c6c1fc7..b2fa6c47ecc0 100644 --- 
a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -5,6 +5,7 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" +#include "bpf_kfuncs.h" #include "errno.h" char _license[] SEC("license") = "GPL"; @@ -30,11 +31,11 @@ struct { __type(value, __u32); } array_map SEC(".maps"); -SEC("tp/syscalls/sys_enter_nanosleep") +SEC("?tp/syscalls/sys_enter_nanosleep") int test_read_write(void *ctx) { char write_data[64] = "hello there, world!!"; - char read_data[64] = {}, buf[64] = {}; + char read_data[64] = {}; struct bpf_dynptr ptr; int i; @@ -61,8 +62,8 @@ int test_read_write(void *ctx) return 0; } -SEC("tp/syscalls/sys_enter_nanosleep") -int test_data_slice(void *ctx) +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_data(void *ctx) { __u32 key = 0, val = 235, *map_val; struct bpf_dynptr ptr; @@ -131,7 +132,7 @@ static int ringbuf_callback(__u32 index, void *data) return 0; } -SEC("tp/syscalls/sys_enter_nanosleep") +SEC("?tp/syscalls/sys_enter_nanosleep") int test_ringbuf(void *ctx) { struct bpf_dynptr ptr; @@ -163,3 +164,46 @@ done: bpf_ringbuf_discard_dynptr(&ptr, 0); return 0; } + +SEC("?cgroup_skb/egress") +int test_skb_readonly(struct __sk_buff *skb) +{ + __u8 write_data[2] = {1, 2}; + struct bpf_dynptr ptr; + int ret; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* since cgroup skbs are read only, writes should fail */ + ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0); + if (ret != -EINVAL) { + err = 2; + return 1; + } + + return 1; +} + +SEC("?cgroup_skb/egress") +int test_dynptr_skb_data(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + __u64 *data; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* This should return NULL. 
Must use bpf_dynptr_slice API */ + data = bpf_dynptr_data(&ptr, 0, 1); + if (data) { + err = 2; + return 1; + } + + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/err.h b/tools/testing/selftests/bpf/progs/err.h new file mode 100644 index 000000000000..d66d283d9e59 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/err.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ERR_H__ +#define __ERR_H__ + +#define MAX_ERRNO 4095 +#define IS_ERR_VALUE(x) (unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO + +static inline int IS_ERR_OR_NULL(const void *ptr) +{ + return !ptr || IS_ERR_VALUE((unsigned long)ptr); +} + +static inline long PTR_ERR(const void *ptr) +{ + return (long) ptr; +} + +#endif /* __ERR_H__ */ diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c index 4547b059d487..983b7c233382 100644 --- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c @@ -120,8 +120,6 @@ int new_get_skb_ifindex(int val, struct __sk_buff *skb, int var) void *data = (void *)(long)skb->data; struct ipv6hdr ip6, *ip6p; int ifindex = skb->ifindex; - __u32 eth_proto; - __u32 nh_off; /* check that BPF extension can read packet via direct packet access */ if (data + 14 + sizeof(ip6) > data_end) diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail1.c b/tools/testing/selftests/bpf/progs/find_vma_fail1.c index 6dab9cffda13..7ba9a428f228 100644 --- a/tools/testing/selftests/bpf/progs/find_vma_fail1.c +++ b/tools/testing/selftests/bpf/progs/find_vma_fail1.c @@ -14,7 +14,7 @@ static long write_vma(struct task_struct *task, struct vm_area_struct *vma, struct callback_ctx *data) { /* writing to vma, which is illegal */ - vma->vm_flags |= 0x55; + vma->vm_start = 0xffffffffff600000; return 0; } diff --git a/tools/testing/selftests/bpf/progs/freplace_attach_probe.c b/tools/testing/selftests/bpf/progs/freplace_attach_probe.c index bb2a77c5b62b..370a0e1922e0 100644 --- a/tools/testing/selftests/bpf/progs/freplace_attach_probe.c +++ b/tools/testing/selftests/bpf/progs/freplace_attach_probe.c @@ -23,7 +23,7 @@ struct { SEC("freplace/handle_kprobe") int new_handle_kprobe(struct pt_regs *ctx) { - struct hmap_elem zero = {}, *val; + struct hmap_elem *val; int key = 0; val = bpf_map_lookup_elem(&hash_map, &key); diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c new file mode 100644 index 000000000000..be16143ae292 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -0,0 +1,723 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include <stdbool.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +static volatile int zero = 0; + +int my_pid; +int arr[256]; +int small_arr[16] SEC(".data.small_arr"); + +#ifdef REAL_TEST +#define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0 +#else +#define MY_PID_GUARD() ({ }) +#endif + +SEC("?raw_tp") +__failure __msg("math between map_value pointer and register with unbounded min value is not allowed") +int iter_err_unsafe_c_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i = zero; /* obscure initial value of i */ + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 1000); + while ((v = bpf_iter_num_next(&it))) { + i++; + } + bpf_iter_num_destroy(&it); + + small_arr[i] = 123; /* invalid */ + + return 0; +} + +SEC("?raw_tp") +__failure __msg("unbounded memory access") +int iter_err_unsafe_asm_loop(const void *ctx) +{ + struct bpf_iter_num it; + + MY_PID_GUARD(); + + asm volatile ( + "r6 = %[zero];" /* iteration counter */ + "r1 = %[it];" /* iterator state */ + "r2 = 0;" + "r3 = 1000;" + "r4 = 1;" + "call %[bpf_iter_num_new];" + "loop:" + "r1 = %[it];" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto out;" + "r6 += 1;" + "goto loop;" + "out:" + "r1 = %[it];" + "call %[bpf_iter_num_destroy];" + "r1 = %[small_arr];" + "r2 = r6;" + "r2 <<= 2;" + "r1 += r2;" + "*(u32 *)(r1 + 0) = r6;" /* invalid */ + : + : [it]"r"(&it), + [small_arr]"p"(small_arr), + [zero]"p"(zero), + __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy) + : __clobber_common, "r6" + ); + + return 0; +} + +SEC("raw_tp") +__success +int iter_while_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 3); + while ((v = bpf_iter_num_next(&it))) { + bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v); + } + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_while_loop_auto_cleanup(const void *ctx) +{ + __attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it; + int *v; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 3); + while ((v = bpf_iter_num_next(&it))) { + bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v); + } + /* (!) no explicit bpf_iter_num_destroy() */ + + return 0; +} + +SEC("raw_tp") +__success +int iter_for_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 5, 10); + for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) { + bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v); + } + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_bpf_for_each_macro(const void *ctx) +{ + int *v; + + MY_PID_GUARD(); + + bpf_for_each(num, v, 5, 10) { + bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v); + } + + return 0; +} + +SEC("raw_tp") +__success +int iter_bpf_for_macro(const void *ctx) +{ + int i; + + MY_PID_GUARD(); + + bpf_for(i, 5, 10) { + bpf_printk("ITER_BASIC: E2 VAL: v=%d", i); + } + + return 0; +} + +SEC("raw_tp") +__success +int iter_pragma_unroll_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 2); +#pragma nounroll + for (i = 0; i < 3; i++) { + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? 
*v : -1); + } + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_manual_unroll_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 100, 200); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1); + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_multiple_sequential_loops(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 3); + while ((v = bpf_iter_num_next(&it))) { + bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v); + } + bpf_iter_num_destroy(&it); + + bpf_iter_num_new(&it, 5, 10); + for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) { + bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v); + } + bpf_iter_num_destroy(&it); + + bpf_iter_num_new(&it, 0, 2); +#pragma nounroll + for (i = 0; i < 3; i++) { + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1); + } + bpf_iter_num_destroy(&it); + + bpf_iter_num_new(&it, 100, 200); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1); + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_limit_cond_break_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i = 0, sum = 0; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 10); + while ((v = bpf_iter_num_next(&it))) { + bpf_printk("ITER_SIMPLE: i=%d v=%d", i, *v); + sum += *v; + + i++; + if (i > 3) + break; + } + bpf_iter_num_destroy(&it); + + bpf_printk("ITER_SIMPLE: sum=%d\n", sum); + + return 0; +} + +SEC("raw_tp") +__success +int iter_obfuscate_counter(const void *ctx) +{ + struct bpf_iter_num it; + int *v, sum = 0; + /* Make i's initial value unknowable for verifier to prevent it from + * pruning if/else branch inside the loop body and marking i as precise. + */ + int i = zero; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 10); + while ((v = bpf_iter_num_next(&it))) { + int x; + + i += 1; + + /* If we initialized i as `int i = 0;` above, verifier would + * track that i becomes 1 on first iteration after increment + * above, and here verifier would eagerly prune else branch + * and mark i as precise, ruining open-coded iterator logic + * completely, as each next iteration would have a different + * *precise* value of i, and thus there would be no + * convergence of state. This would result in reaching maximum + * instruction limit, no matter what the limit is. 
+ */ + if (i == 1) + x = 123; + else + x = i * 3 + 1; + + bpf_printk("ITER_OBFUSCATE_COUNTER: i=%d v=%d x=%d", i, *v, x); + + sum += x; + } + bpf_iter_num_destroy(&it); + + bpf_printk("ITER_OBFUSCATE_COUNTER: sum=%d\n", sum); + + return 0; +} + +SEC("raw_tp") +__success +int iter_search_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, *elem = NULL; + bool found = false; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 10); + + while ((v = bpf_iter_num_next(&it))) { + bpf_printk("ITER_SEARCH_LOOP: v=%d", *v); + + if (*v == 2) { + found = true; + elem = v; + barrier_var(elem); + } + } + + /* should fail to verify if bpf_iter_num_destroy() is here */ + + if (found) + /* here found element will be wrong, we should have copied + * value to a variable, but here we want to make sure we can + * access memory after the loop anyways + */ + bpf_printk("ITER_SEARCH_LOOP: FOUND IT = %d!\n", *elem); + else + bpf_printk("ITER_SEARCH_LOOP: NOT FOUND IT!\n"); + + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_array_fill(const void *ctx) +{ + int sum, i; + + MY_PID_GUARD(); + + bpf_for(i, 0, ARRAY_SIZE(arr)) { + arr[i] = i * 2; + } + + sum = 0; + bpf_for(i, 0, ARRAY_SIZE(arr)) { + sum += arr[i]; + } + + bpf_printk("ITER_ARRAY_FILL: sum=%d (should be %d)\n", sum, 255 * 256); + + return 0; +} + +static int arr2d[4][5]; +static int arr2d_row_sums[4]; +static int arr2d_col_sums[5]; + +SEC("raw_tp") +__success +int iter_nested_iters(const void *ctx) +{ + int sum, row, col; + + MY_PID_GUARD(); + + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + bpf_for( col, 0, ARRAY_SIZE(arr2d[0])) { + arr2d[row][col] = row * col; + } + } + + /* zero-initialize sums */ + sum = 0; + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + arr2d_row_sums[row] = 0; + } + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + arr2d_col_sums[col] = 0; + } + + /* calculate sums */ + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + sum += arr2d[row][col]; + arr2d_row_sums[row] += arr2d[row][col]; + arr2d_col_sums[col] += arr2d[row][col]; + } + } + + bpf_printk("ITER_NESTED_ITERS: total sum=%d", sum); + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + bpf_printk("ITER_NESTED_ITERS: row #%d sum=%d", row, arr2d_row_sums[row]); + } + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + bpf_printk("ITER_NESTED_ITERS: col #%d sum=%d%s", + col, arr2d_col_sums[col], + col == ARRAY_SIZE(arr2d[0]) - 1 ? 
"\n" : ""); + } + + return 0; +} + +SEC("raw_tp") +__success +int iter_nested_deeply_iters(const void *ctx) +{ + int sum = 0; + + MY_PID_GUARD(); + + bpf_repeat(10) { + bpf_repeat(10) { + bpf_repeat(10) { + bpf_repeat(10) { + bpf_repeat(10) { + sum += 1; + } + } + } + } + /* validate that we can break from inside bpf_repeat() */ + break; + } + + return sum; +} + +static __noinline void fill_inner_dimension(int row) +{ + int col; + + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + arr2d[row][col] = row * col; + } +} + +static __noinline int sum_inner_dimension(int row) +{ + int sum = 0, col; + + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + sum += arr2d[row][col]; + arr2d_row_sums[row] += arr2d[row][col]; + arr2d_col_sums[col] += arr2d[row][col]; + } + + return sum; +} + +SEC("raw_tp") +__success +int iter_subprog_iters(const void *ctx) +{ + int sum, row, col; + + MY_PID_GUARD(); + + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + fill_inner_dimension(row); + } + + /* zero-initialize sums */ + sum = 0; + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + arr2d_row_sums[row] = 0; + } + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + arr2d_col_sums[col] = 0; + } + + /* calculate sums */ + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + sum += sum_inner_dimension(row); + } + + bpf_printk("ITER_SUBPROG_ITERS: total sum=%d", sum); + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + bpf_printk("ITER_SUBPROG_ITERS: row #%d sum=%d", + row, arr2d_row_sums[row]); + } + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + bpf_printk("ITER_SUBPROG_ITERS: col #%d sum=%d%s", + col, arr2d_col_sums[col], + col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : ""); + } + + return 0; +} + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, int); + __uint(max_entries, 1000); +} arr_map SEC(".maps"); + +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") +int iter_err_too_permissive1(const void *ctx) +{ + int *map_val = NULL; + int key = 0; + + MY_PID_GUARD(); + + map_val = bpf_map_lookup_elem(&arr_map, &key); + if (!map_val) + return 0; + + bpf_repeat(1000000) { + map_val = NULL; + } + + *map_val = 123; + + return 0; +} + +SEC("?raw_tp") +__failure __msg("invalid mem access 'map_value_or_null'") +int iter_err_too_permissive2(const void *ctx) +{ + int *map_val = NULL; + int key = 0; + + MY_PID_GUARD(); + + map_val = bpf_map_lookup_elem(&arr_map, &key); + if (!map_val) + return 0; + + bpf_repeat(1000000) { + map_val = bpf_map_lookup_elem(&arr_map, &key); + } + + *map_val = 123; + + return 0; +} + +SEC("?raw_tp") +__failure __msg("invalid mem access 'map_value_or_null'") +int iter_err_too_permissive3(const void *ctx) +{ + int *map_val = NULL; + int key = 0; + bool found = false; + + MY_PID_GUARD(); + + bpf_repeat(1000000) { + map_val = bpf_map_lookup_elem(&arr_map, &key); + found = true; + } + + if (found) + *map_val = 123; + + return 0; +} + +SEC("raw_tp") +__success +int iter_tricky_but_fine(const void *ctx) +{ + int *map_val = NULL; + int key = 0; + bool found = false; + + MY_PID_GUARD(); + + bpf_repeat(1000000) { + map_val = bpf_map_lookup_elem(&arr_map, &key); + if (map_val) { + found = true; + break; + } + } + + if (found) + *map_val = 123; + + return 0; +} + +#define __bpf_memzero(p, sz) bpf_probe_read_kernel((p), (sz), 0) + +SEC("raw_tp") +__success +int iter_stack_array_loop(const void *ctx) +{ + long arr1[16], arr2[16], sum = 0; + int i; + + MY_PID_GUARD(); + + /* zero-init arr1 and arr2 in such a way that verifier doesn't know + * it's all zeros; if we don't do that, we'll make BPF verifier track + * all combination of zero/non-zero stack 
slots for arr1/arr2, which + * will lead to O(2^(ARRAY_SIZE(arr1)+ARRAY_SIZE(arr2))) different + * states + */ + __bpf_memzero(arr1, sizeof(arr1)); + __bpf_memzero(arr2, sizeof(arr1)); + + /* validate that we can break and continue when using bpf_for() */ + bpf_for(i, 0, ARRAY_SIZE(arr1)) { + if (i & 1) { + arr1[i] = i; + continue; + } else { + arr2[i] = i; + break; + } + } + + bpf_for(i, 0, ARRAY_SIZE(arr1)) { + sum += arr1[i] + arr2[i]; + } + + return sum; +} + +#define ARR_SZ 16 + +static __noinline void fill(struct bpf_iter_num *it, int *arr, int mul) +{ + int *t; + __u64 i; + + while ((t = bpf_iter_num_next(it))) { + i = *t; + if (i >= ARR_SZ) + break; + arr[i] = i * mul; + } +} + +static __noinline int sum(struct bpf_iter_num *it, int *arr) +{ + int *t, sum = 0;; + __u64 i; + + while ((t = bpf_iter_num_next(it))) { + i = *t; + if (i >= ARR_SZ) + break; + sum += arr[i]; + } + + return sum; +} + +SEC("raw_tp") +__success +int iter_pass_iter_ptr_to_subprog(const void *ctx) +{ + int arr1[ARR_SZ], arr2[ARR_SZ]; + struct bpf_iter_num it; + int n, sum1, sum2; + + MY_PID_GUARD(); + + /* fill arr1 */ + n = ARRAY_SIZE(arr1); + bpf_iter_num_new(&it, 0, n); + fill(&it, arr1, 2); + bpf_iter_num_destroy(&it); + + /* fill arr2 */ + n = ARRAY_SIZE(arr2); + bpf_iter_num_new(&it, 0, n); + fill(&it, arr2, 10); + bpf_iter_num_destroy(&it); + + /* sum arr1 */ + n = ARRAY_SIZE(arr1); + bpf_iter_num_new(&it, 0, n); + sum1 = sum(&it, arr1); + bpf_iter_num_destroy(&it); + + /* sum arr2 */ + n = ARRAY_SIZE(arr2); + bpf_iter_num_new(&it, 0, n); + sum2 = sum(&it, arr2); + bpf_iter_num_destroy(&it); + + bpf_printk("sum1=%d, sum2=%d", sum1, sum2); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/iters_looping.c b/tools/testing/selftests/bpf/progs/iters_looping.c new file mode 100644 index 000000000000..05fa5ce7fc59 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_looping.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include <errno.h> +#include <string.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +#define ITER_HELPERS \ + __imm(bpf_iter_num_new), \ + __imm(bpf_iter_num_next), \ + __imm(bpf_iter_num_destroy) + +SEC("?raw_tp") +__success +int force_clang_to_emit_btf_for_externs(void *ctx) +{ + /* we need this as a workaround to enforce compiler emitting BTF + * information for bpf_iter_num_{new,next,destroy}() kfuncs, + * as, apparently, it doesn't emit it for symbols only referenced from + * assembly (or cleanup attribute, for that matter, as well) + */ + bpf_repeat(0); + + return 0; +} + +SEC("?raw_tp") +__success +int consume_first_item_only(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* consume first item */ + "r1 = %[iter];" + "call %[bpf_iter_num_next];" + + "if r0 == 0 goto +1;" + "r0 = *(u32 *)(r0 + 0);" + + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("R0 invalid mem access 'scalar'") +int missing_null_check_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* consume first element */ + "r1 = %[iter];" + "call %[bpf_iter_num_next];" + + /* FAIL: deref with no NULL check */ + "r1 = *(u32 *)(r0 + 0);" + + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure +__msg("invalid access to memory, mem_size=4 off=0 size=8") +__msg("R0 min value is outside of the allowed memory range") +int wrong_sized_read_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* consume first element */ + "r1 = %[iter];" + "call %[bpf_iter_num_next];" + + "if r0 == 0 goto +1;" + /* FAIL: deref more than available 4 bytes */ + "r0 = *(u64 *)(r0 + 0);" + + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__success __log_level(2) +__flag(BPF_F_TEST_STATE_FREQ) +int simplest_loop(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + "r6 = 0;" /* init sum */ + + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + + "1:" + /* consume next item */ + "r1 = %[iter];" + "call %[bpf_iter_num_next];" + + "if r0 == 0 goto 2f;" + "r0 = *(u32 *)(r0 + 0);" + "r6 += r0;" /* accumulate sum */ + "goto 1b;" + + "2:" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common, "r6" + ); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/iters_num.c b/tools/testing/selftests/bpf/progs/iters_num.c new file mode 100644 index 000000000000..7a77a8daee0d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_num.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include <limits.h> +#include <linux/errno.h> +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +const volatile __s64 exp_empty_zero = 0 + 1; +__s64 res_empty_zero; + +SEC("raw_tp/sys_enter") +int num_empty_zero(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, 0, 0) sum += i; + res_empty_zero = 1 + sum; + + return 0; +} + +const volatile __s64 exp_empty_int_min = 0 + 2; +__s64 res_empty_int_min; + +SEC("raw_tp/sys_enter") +int num_empty_int_min(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, INT_MIN, INT_MIN) sum += i; + res_empty_int_min = 2 + sum; + + return 0; +} + +const volatile __s64 exp_empty_int_max = 0 + 3; +__s64 res_empty_int_max; + +SEC("raw_tp/sys_enter") +int num_empty_int_max(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, INT_MAX, INT_MAX) sum += i; + res_empty_int_max = 3 + sum; + + return 0; +} + +const volatile __s64 exp_empty_minus_one = 0 + 4; +__s64 res_empty_minus_one; + +SEC("raw_tp/sys_enter") +int num_empty_minus_one(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, -1, -1) sum += i; + res_empty_minus_one = 4 + sum; + + return 0; +} + +const volatile __s64 exp_simple_sum = 9 * 10 / 2; +__s64 res_simple_sum; + +SEC("raw_tp/sys_enter") +int num_simple_sum(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, 0, 10) sum += i; + res_simple_sum = sum; + + return 0; +} + +const volatile __s64 exp_neg_sum = -11 * 10 / 2; +__s64 res_neg_sum; + +SEC("raw_tp/sys_enter") +int num_neg_sum(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, -10, 0) sum += i; + res_neg_sum = sum; + + return 0; +} + +const volatile __s64 exp_very_neg_sum = INT_MIN + (__s64)(INT_MIN + 1); +__s64 res_very_neg_sum; + +SEC("raw_tp/sys_enter") +int num_very_neg_sum(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, INT_MIN, INT_MIN + 2) sum += i; + res_very_neg_sum = sum; + + return 0; +} + +const volatile __s64 exp_very_big_sum = (__s64)(INT_MAX - 1) + (__s64)(INT_MAX - 2); +__s64 res_very_big_sum; + +SEC("raw_tp/sys_enter") +int num_very_big_sum(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, INT_MAX - 2, INT_MAX) sum += i; + res_very_big_sum = sum; + + return 0; +} + +const volatile __s64 exp_neg_pos_sum = -3; +__s64 res_neg_pos_sum; + +SEC("raw_tp/sys_enter") +int num_neg_pos_sum(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, -3, 3) sum += i; + res_neg_pos_sum = sum; + + return 0; +} + +const volatile __s64 exp_invalid_range = -EINVAL; +__s64 res_invalid_range; + +SEC("raw_tp/sys_enter") +int num_invalid_range(const void *ctx) +{ + struct bpf_iter_num it; + + res_invalid_range = bpf_iter_num_new(&it, 1, 0); + bpf_iter_num_destroy(&it); + + return 0; +} + +const volatile __s64 exp_max_range = 0 + 10; +__s64 res_max_range; + +SEC("raw_tp/sys_enter") +int num_max_range(const void *ctx) +{ + struct bpf_iter_num it; + + res_max_range = 10 + bpf_iter_num_new(&it, 0, BPF_MAX_LOOPS); + bpf_iter_num_destroy(&it); + + return 0; +} + +const volatile __s64 exp_e2big_range = -E2BIG; +__s64 res_e2big_range; + +SEC("raw_tp/sys_enter") +int num_e2big_range(const void *ctx) +{ + struct bpf_iter_num it; + + res_e2big_range = bpf_iter_num_new(&it, -1, BPF_MAX_LOOPS); + bpf_iter_num_destroy(&it); + + return 0; +} + +const volatile __s64 exp_succ_elem_cnt = 10; +__s64 res_succ_elem_cnt; + +SEC("raw_tp/sys_enter") +int num_succ_elem_cnt(const void *ctx) +{ + struct bpf_iter_num it; + int cnt = 0, *v; + + bpf_iter_num_new(&it, 0, 10); + while ((v = bpf_iter_num_next(&it))) { + cnt++; + } + bpf_iter_num_destroy(&it); + + res_succ_elem_cnt = 
cnt; + + return 0; +} + +const volatile __s64 exp_overfetched_elem_cnt = 5; +__s64 res_overfetched_elem_cnt; + +SEC("raw_tp/sys_enter") +int num_overfetched_elem_cnt(const void *ctx) +{ + struct bpf_iter_num it; + int cnt = 0, *v, i; + + bpf_iter_num_new(&it, 0, 5); + for (i = 0; i < 10; i++) { + v = bpf_iter_num_next(&it); + if (v) + cnt++; + } + bpf_iter_num_destroy(&it); + + res_overfetched_elem_cnt = cnt; + + return 0; +} + +const volatile __s64 exp_fail_elem_cnt = 20 + 0; +__s64 res_fail_elem_cnt; + +SEC("raw_tp/sys_enter") +int num_fail_elem_cnt(const void *ctx) +{ + struct bpf_iter_num it; + int cnt = 0, *v, i; + + bpf_iter_num_new(&it, 100, 10); + for (i = 0; i < 10; i++) { + v = bpf_iter_num_next(&it); + if (v) + cnt++; + } + bpf_iter_num_destroy(&it); + + res_fail_elem_cnt = 20 + cnt; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/iters_state_safety.c b/tools/testing/selftests/bpf/progs/iters_state_safety.c new file mode 100644 index 000000000000..d47e59aba6de --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_state_safety.c @@ -0,0 +1,426 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Facebook */ + +#include <errno.h> +#include <string.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +#define ITER_HELPERS \ + __imm(bpf_iter_num_new), \ + __imm(bpf_iter_num_next), \ + __imm(bpf_iter_num_destroy) + +SEC("?raw_tp") +__success +int force_clang_to_emit_btf_for_externs(void *ctx) +{ + /* we need this as a workaround to enforce compiler emitting BTF + * information for bpf_iter_num_{new,next,destroy}() kfuncs, + * as, apparently, it doesn't emit it for symbols only referenced from + * assembly (or cleanup attribute, for that matter, as well) + */ + bpf_repeat(0); + + return 0; +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)") +int create_and_destroy(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("Unreleased reference id=1") +int create_and_forget_to_destroy_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int destroy_without_creating_fail(void *ctx) +{ + /* iter state is left uninitialized on purpose; destroying it must fail */ + struct bpf_iter_num iter; + + asm volatile ( + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int compromise_iter_w_direct_write_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* directly write over first half of iter state */ + "*(u64 *)(%[iter] + 0) = r0;" + + /* (attempt to) destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + :
__clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("Unreleased reference id=1") +int compromise_iter_w_direct_write_and_skip_destroy_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* directly write over first half of iter state */ + "*(u64 *)(%[iter] + 0) = r0;" + + /* don't destroy iter, leaking ref, which should fail */ + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int compromise_iter_w_helper_write_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* overwrite 8th byte with bpf_probe_read_kernel() */ + "r1 = %[iter];" + "r1 += 7;" + "r2 = 1;" + "r3 = 0;" /* NULL */ + "call %[bpf_probe_read_kernel];" + + /* (attempt to) destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS, __imm(bpf_probe_read_kernel) + : __clobber_common + ); + + return 0; +} + +static __noinline void subprog_with_iter(void) +{ + struct bpf_iter_num iter; + + bpf_iter_num_new(&iter, 0, 1); + + return; +} + +SEC("?raw_tp") +__failure +/* ensure there was a call to subprog, which might happen without __noinline */ +__msg("returning from callee:") +__msg("Unreleased reference id=1") +int leak_iter_from_subprog_fail(void *ctx) +{ + subprog_with_iter(); + + return 0; +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)") +int valid_stack_reuse(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + + /* now reuse same stack slots */ + + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected uninitialized iter_num as arg #1") +int double_create_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* (attempt to) create iterator again */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int double_destroy_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + /* (attempt to) destroy iterator again */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int next_without_new_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* don't create iterator and try to iterate*/ + "r1 = %[iter];" + "call 
%[bpf_iter_num_next];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int next_after_destroy_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + /* don't create iterator and try to iterate*/ + "r1 = %[iter];" + "call %[bpf_iter_num_next];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("invalid read from stack") +int __naked read_from_iter_slot_fail(void) +{ + asm volatile ( + /* r6 points to struct bpf_iter_num on the stack */ + "r6 = r10;" + "r6 += -24;" + + /* create iterator */ + "r1 = r6;" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* attemp to leak bpf_iter_num state */ + "r7 = *(u64 *)(r6 + 0);" + "r8 = *(u64 *)(r6 + 8);" + + /* destroy iterator */ + "r1 = r6;" + "call %[bpf_iter_num_destroy];" + + /* leak bpf_iter_num state */ + "r0 = r7;" + "if r7 > r8 goto +1;" + "r0 = r8;" + "exit;" + : + : ITER_HELPERS + : __clobber_common, "r6", "r7", "r8" + ); +} + +int zero; + +SEC("?raw_tp") +__failure +__flag(BPF_F_TEST_STATE_FREQ) +__msg("Unreleased reference") +int stacksafe_should_not_conflate_stack_spill_and_iter(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* Create a fork in logic, with general setup as follows: + * - fallthrough (first) path is valid; + * - branch (second) path is invalid. + * Then depending on what we do in fallthrough vs branch path, + * we try to detect bugs in func_states_equal(), regsafe(), + * refsafe(), stack_safe(), and similar by tricking verifier + * into believing that branch state is a valid subset of + * a fallthrough state. Verifier should reject overall + * validation, unless there is a bug somewhere in verifier + * logic. + */ + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + + "if r6 > r7 goto bad;" /* fork */ + + /* spill r6 into stack slot of bpf_iter_num var */ + "*(u64 *)(%[iter] + 0) = r6;" + + "goto skip_bad;" + + "bad:" + /* create iterator in the same stack slot */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* but then forget about it and overwrite it back to r6 spill */ + "*(u64 *)(%[iter] + 0) = r6;" + + "skip_bad:" + "goto +0;" /* force checkpoint */ + + /* corrupt stack slots, if they are really dynptr */ + "*(u64 *)(%[iter] + 0) = r6;" + : + : __imm_ptr(iter), + __imm_addr(zero), + __imm(bpf_get_prandom_u32), + __imm(bpf_dynptr_from_mem), + ITER_HELPERS + : __clobber_common, "r6", "r7" + ); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c new file mode 100644 index 000000000000..3873fb6c292a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct bpf_iter_testmod_seq { + u64 :64; + u64 :64; +}; + +extern int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) __ksym; +extern s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it) __ksym; +extern void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) __ksym; + +const volatile __s64 exp_empty = 0 + 1; +__s64 res_empty; + +SEC("raw_tp/sys_enter") +__success __log_level(2) +__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)") +__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)") +__msg("call bpf_iter_testmod_seq_destroy") +int testmod_seq_empty(const void *ctx) +{ + __s64 sum = 0, *i; + + bpf_for_each(testmod_seq, i, 1000, 0) sum += *i; + res_empty = 1 + sum; + + return 0; +} + +const volatile __s64 exp_full = 1000000; +__s64 res_full; + +SEC("raw_tp/sys_enter") +__success __log_level(2) +__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)") +__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)") +__msg("call bpf_iter_testmod_seq_destroy") +int testmod_seq_full(const void *ctx) +{ + __s64 sum = 0, *i; + + bpf_for_each(testmod_seq, i, 1000, 1000) sum += *i; + res_full = sum; + + return 0; +} + +const volatile __s64 exp_truncated = 10 * 1000000; +__s64 res_truncated; + +static volatile int zero = 0; + +SEC("raw_tp/sys_enter") +__success __log_level(2) +__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)") +__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)") +__msg("call bpf_iter_testmod_seq_destroy") +int testmod_seq_truncated(const void *ctx) +{ + __s64 sum = 0, *i; + int cnt = zero; + + bpf_for_each(testmod_seq, i, 10, 2000000) { + sum += *i; + cnt++; + if (cnt >= 1000000) + break; + } + res_truncated = sum; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/jit_probe_mem.c b/tools/testing/selftests/bpf/progs/jit_probe_mem.c index 2d2e61470794..13f00ca2ed0a 100644 --- a/tools/testing/selftests/bpf/progs/jit_probe_mem.c +++ b/tools/testing/selftests/bpf/progs/jit_probe_mem.c @@ -4,7 +4,7 @@ #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> -static struct prog_test_ref_kfunc __kptr_ref *v; +static struct prog_test_ref_kfunc __kptr *v; long total_sum = -1; extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c index b05571bc67d5..c4b49ceea967 100644 --- a/tools/testing/selftests/bpf/progs/linked_funcs1.c +++ b/tools/testing/selftests/bpf/progs/linked_funcs1.c @@ -5,6 +5,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> +#include "bpf_misc.h" /* weak and shared between two files */ const volatile int my_tid __weak; @@ -51,6 +52,7 @@ __weak int set_output_weak(int x) * cause problems for BPF static linker */ whatever = bpf_core_type_size(struct task_struct); + __sink(whatever); output_weak1 = x; return x; @@ -71,6 +73,7 @@ int BPF_PROG(handler1, struct pt_regs *regs, long id) /* make sure we have CO-RE relocations in main program */ whatever = bpf_core_type_size(struct task_struct); + __sink(whatever); set_output_val2(1000); set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */ diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c index ee7e3848ee4f..013ff0645f0c 
100644 --- a/tools/testing/selftests/bpf/progs/linked_funcs2.c +++ b/tools/testing/selftests/bpf/progs/linked_funcs2.c @@ -5,6 +5,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> +#include "bpf_misc.h" /* weak and shared between both files */ const volatile int my_tid __weak; @@ -51,6 +52,7 @@ __weak int set_output_weak(int x) * cause problems for BPF static linker */ whatever = 2 * bpf_core_type_size(struct task_struct); + __sink(whatever); output_weak2 = x; return 2 * x; @@ -71,6 +73,7 @@ int BPF_PROG(handler2, struct pt_regs *regs, long id) /* make sure we have CO-RE relocations in main program */ whatever = bpf_core_type_size(struct task_struct); + __sink(whatever); set_output_val1(2000); set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */ diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c index 4fa4a9b01bde..57440a554304 100644 --- a/tools/testing/selftests/bpf/progs/linked_list.c +++ b/tools/testing/selftests/bpf/progs/linked_list.c @@ -25,7 +25,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); bpf_obj_drop(f); return 3; } @@ -34,7 +34,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_back(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); bpf_obj_drop(f); return 4; } @@ -42,7 +42,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l bpf_spin_lock(lock); f->data = 42; - bpf_list_push_front(head, &f->node); + bpf_list_push_front(head, &f->node2); bpf_spin_unlock(lock); if (leave_in_map) return 0; @@ -51,7 +51,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l bpf_spin_unlock(lock); if (!n) return 5; - f = container_of(n, struct foo, node); + f = container_of(n, struct foo, node2); if (f->data != 42) { bpf_obj_drop(f); return 6; @@ -59,14 +59,14 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l bpf_spin_lock(lock); f->data = 13; - bpf_list_push_front(head, &f->node); + bpf_list_push_front(head, &f->node2); bpf_spin_unlock(lock); bpf_spin_lock(lock); n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (!n) return 7; - f = container_of(n, struct foo, node); + f = container_of(n, struct foo, node2); if (f->data != 13) { bpf_obj_drop(f); return 8; @@ -77,7 +77,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 9; } @@ -85,7 +85,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_back(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 10; } return 0; @@ -119,8 +119,8 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea f[i + 1]->data = i + 1; bpf_spin_lock(lock); - bpf_list_push_front(head, &f[i]->node); - bpf_list_push_front(head, &f[i + 1]->node); + bpf_list_push_front(head, &f[i]->node2); + bpf_list_push_front(head, &f[i + 1]->node2); 
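	/* A note on the node -> node2 switch running through this file:
	 * struct foo in linked_list.h has two bpf_list_node fields, and the
	 * __contains(foo, node2) annotations now link lists through the
	 * second one, so every push/pop/container_of here exercises a
	 * bpf_list_node at a non-zero offset within the object rather than
	 * always at offset 0.
	 */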
bpf_spin_unlock(lock); } @@ -130,13 +130,13 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea bpf_spin_unlock(lock); if (!n) return 3; - pf = container_of(n, struct foo, node); + pf = container_of(n, struct foo, node2); if (pf->data != (ARRAY_SIZE(f) - i - 1)) { bpf_obj_drop(pf); return 4; } bpf_spin_lock(lock); - bpf_list_push_back(head, &pf->node); + bpf_list_push_back(head, &pf->node2); bpf_spin_unlock(lock); } @@ -149,7 +149,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea bpf_spin_unlock(lock); if (!n) return 5; - pf = container_of(n, struct foo, node); + pf = container_of(n, struct foo, node2); if (pf->data != i) { bpf_obj_drop(pf); return 6; @@ -160,7 +160,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea n = bpf_list_pop_back(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 7; } @@ -168,7 +168,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 8; } return 0; @@ -199,7 +199,7 @@ int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool le bpf_spin_lock(lock); f->data = 42; - bpf_list_push_front(head, &f->node); + bpf_list_push_front(head, &f->node2); bpf_spin_unlock(lock); if (leave_in_map) @@ -210,7 +210,7 @@ int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool le bpf_spin_unlock(lock); if (!n) return 4; - f = container_of(n, struct foo, node); + f = container_of(n, struct foo, node2); if (f->data != 42) { bpf_obj_drop(f); return 5; @@ -313,7 +313,6 @@ SEC("tc") int map_list_push_pop_multiple(void *ctx) { struct map_value *v; - int ret; v = bpf_map_lookup_elem(&array_map, &(int){0}); if (!v) @@ -326,7 +325,6 @@ int inner_map_list_push_pop_multiple(void *ctx) { struct map_value *v; void *map; - int ret; map = bpf_map_lookup_elem(&map_of_maps, &(int){0}); if (!map) @@ -352,7 +350,6 @@ SEC("tc") int map_list_in_list(void *ctx) { struct map_value *v; - int ret; v = bpf_map_lookup_elem(&array_map, &(int){0}); if (!v) @@ -365,7 +362,6 @@ int inner_map_list_in_list(void *ctx) { struct map_value *v; void *map; - int ret; map = bpf_map_lookup_elem(&map_of_maps, &(int){0}); if (!map) diff --git a/tools/testing/selftests/bpf/progs/linked_list.h b/tools/testing/selftests/bpf/progs/linked_list.h index 3fb2412552fc..c0f3609a7ffa 100644 --- a/tools/testing/selftests/bpf/progs/linked_list.h +++ b/tools/testing/selftests/bpf/progs/linked_list.h @@ -22,7 +22,7 @@ struct foo { struct map_value { struct bpf_spin_lock lock; int data; - struct bpf_list_head head __contains(foo, node); + struct bpf_list_head head __contains(foo, node2); }; struct array_map { @@ -50,7 +50,7 @@ struct { #define private(name) SEC(".bss." 
#name) __hidden __attribute__((aligned(8))) private(A) struct bpf_spin_lock glock; -private(A) struct bpf_list_head ghead __contains(foo, node); +private(A) struct bpf_list_head ghead __contains(foo, node2); private(B) struct bpf_spin_lock glock2; #endif diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c index 69cdc07cba13..f4c63daba229 100644 --- a/tools/testing/selftests/bpf/progs/linked_list_fail.c +++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c @@ -73,22 +73,21 @@ CHECK(inner_map, pop_back, &iv->head); int test##_missing_lock_##op(void *ctx) \ { \ INIT; \ - void (*p)(void *, void *) = (void *)&bpf_list_##op; \ - p(hexpr, nexpr); \ + bpf_list_##op(hexpr, nexpr); \ return 0; \ } -CHECK(kptr, push_front, &f->head, b); -CHECK(kptr, push_back, &f->head, b); +CHECK(kptr, push_front, &f->head, &b->node); +CHECK(kptr, push_back, &f->head, &b->node); -CHECK(global, push_front, &ghead, f); -CHECK(global, push_back, &ghead, f); +CHECK(global, push_front, &ghead, &f->node2); +CHECK(global, push_back, &ghead, &f->node2); -CHECK(map, push_front, &v->head, f); -CHECK(map, push_back, &v->head, f); +CHECK(map, push_front, &v->head, &f->node2); +CHECK(map, push_back, &v->head, &f->node2); -CHECK(inner_map, push_front, &iv->head, f); -CHECK(inner_map, push_back, &iv->head, f); +CHECK(inner_map, push_front, &iv->head, &f->node2); +CHECK(inner_map, push_back, &iv->head, &f->node2); #undef CHECK @@ -135,32 +134,31 @@ CHECK_OP(pop_back); int test##_incorrect_lock_##op(void *ctx) \ { \ INIT; \ - void (*p)(void *, void*) = (void *)&bpf_list_##op; \ bpf_spin_lock(lexpr); \ - p(hexpr, nexpr); \ + bpf_list_##op(hexpr, nexpr); \ return 0; \ } #define CHECK_OP(op) \ - CHECK(kptr_kptr, op, &f1->lock, &f2->head, b); \ - CHECK(kptr_global, op, &f1->lock, &ghead, f); \ - CHECK(kptr_map, op, &f1->lock, &v->head, f); \ - CHECK(kptr_inner_map, op, &f1->lock, &iv->head, f); \ + CHECK(kptr_kptr, op, &f1->lock, &f2->head, &b->node); \ + CHECK(kptr_global, op, &f1->lock, &ghead, &f->node2); \ + CHECK(kptr_map, op, &f1->lock, &v->head, &f->node2); \ + CHECK(kptr_inner_map, op, &f1->lock, &iv->head, &f->node2); \ \ - CHECK(global_global, op, &glock2, &ghead, f); \ - CHECK(global_kptr, op, &glock, &f1->head, b); \ - CHECK(global_map, op, &glock, &v->head, f); \ - CHECK(global_inner_map, op, &glock, &iv->head, f); \ + CHECK(global_global, op, &glock2, &ghead, &f->node2); \ + CHECK(global_kptr, op, &glock, &f1->head, &b->node); \ + CHECK(global_map, op, &glock, &v->head, &f->node2); \ + CHECK(global_inner_map, op, &glock, &iv->head, &f->node2); \ \ - CHECK(map_map, op, &v->lock, &v2->head, f); \ - CHECK(map_kptr, op, &v->lock, &f2->head, b); \ - CHECK(map_global, op, &v->lock, &ghead, f); \ - CHECK(map_inner_map, op, &v->lock, &iv->head, f); \ + CHECK(map_map, op, &v->lock, &v2->head, &f->node2); \ + CHECK(map_kptr, op, &v->lock, &f2->head, &b->node); \ + CHECK(map_global, op, &v->lock, &ghead, &f->node2); \ + CHECK(map_inner_map, op, &v->lock, &iv->head, &f->node2); \ \ - CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, f); \ - CHECK(inner_map_kptr, op, &iv->lock, &f2->head, b); \ - CHECK(inner_map_global, op, &iv->lock, &ghead, f); \ - CHECK(inner_map_map, op, &iv->lock, &v->head, f); + CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, &f->node2);\ + CHECK(inner_map_kptr, op, &iv->lock, &f2->head, &b->node); \ + CHECK(inner_map_global, op, &iv->lock, &ghead, &f->node2); \ + CHECK(inner_map_map, op, &iv->lock, &v->head, &f->node2); 
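/* For orientation, one instantiation from the block below, e.g.
 * CHECK(global_global, push_front, &glock2, &ghead, &f->node2), expands to
 * roughly:
 *
 *	SEC("?tc")
 *	int global_global_incorrect_lock_push_front(void *ctx)
 *	{
 *		INIT;
 *		bpf_spin_lock(&glock2);
 *		bpf_list_push_front(&ghead, &f->node2);
 *		return 0;
 *	}
 *
 * i.e. the list is mutated under the wrong lock, which the verifier must
 * reject. The old indirect call through a cast function pointer existed only
 * to smuggle wrongly-typed arguments (a bare 'f') past the C type checker;
 * with properly typed &f->node2 / &b->node arguments, plain direct
 * bpf_list_##op() calls suffice.
 */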
CHECK_OP(push_front); CHECK_OP(push_back); @@ -340,7 +338,7 @@ int direct_read_node(void *ctx) f = bpf_obj_new(typeof(*f)); if (!f) return 0; - return *(int *)&f->node; + return *(int *)&f->node2; } SEC("?tc") @@ -351,12 +349,12 @@ int direct_write_node(void *ctx) f = bpf_obj_new(typeof(*f)); if (!f) return 0; - *(int *)&f->node = 0; + *(int *)&f->node2 = 0; return 0; } static __always_inline -int use_after_unlock(void (*op)(void *head, void *node)) +int use_after_unlock(bool push_front) { struct foo *f; @@ -365,7 +363,10 @@ int use_after_unlock(void (*op)(void *head, void *node)) return 0; bpf_spin_lock(&glock); f->data = 42; - op(&ghead, &f->node); + if (push_front) + bpf_list_push_front(&ghead, &f->node2); + else + bpf_list_push_back(&ghead, &f->node2); bpf_spin_unlock(&glock); return f->data; @@ -374,17 +375,17 @@ int use_after_unlock(void (*op)(void *head, void *node)) SEC("?tc") int use_after_unlock_push_front(void *ctx) { - return use_after_unlock((void *)bpf_list_push_front); + return use_after_unlock(true); } SEC("?tc") int use_after_unlock_push_back(void *ctx) { - return use_after_unlock((void *)bpf_list_push_back); + return use_after_unlock(false); } static __always_inline -int list_double_add(void (*op)(void *head, void *node)) +int list_double_add(bool push_front) { struct foo *f; @@ -392,8 +393,13 @@ int list_double_add(void (*op)(void *head, void *node)) if (!f) return 0; bpf_spin_lock(&glock); - op(&ghead, &f->node); - op(&ghead, &f->node); + if (push_front) { + bpf_list_push_front(&ghead, &f->node2); + bpf_list_push_front(&ghead, &f->node2); + } else { + bpf_list_push_back(&ghead, &f->node2); + bpf_list_push_back(&ghead, &f->node2); + } bpf_spin_unlock(&glock); return 0; @@ -402,13 +408,13 @@ int list_double_add(void (*op)(void *head, void *node)) SEC("?tc") int double_push_front(void *ctx) { - return list_double_add((void *)bpf_list_push_front); + return list_double_add(true); } SEC("?tc") int double_push_back(void *ctx) { - return list_double_add((void *)bpf_list_push_back); + return list_double_add(false); } SEC("?tc") @@ -450,7 +456,7 @@ int incorrect_node_var_off(struct __sk_buff *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front(&ghead, (void *)&f->node + ctx->protocol); + bpf_list_push_front(&ghead, (void *)&f->node2 + ctx->protocol); bpf_spin_unlock(&glock); return 0; @@ -465,7 +471,7 @@ int incorrect_node_off1(void *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front(&ghead, (void *)&f->node + 1); + bpf_list_push_front(&ghead, (void *)&f->node2 + 1); bpf_spin_unlock(&glock); return 0; @@ -480,7 +486,7 @@ int incorrect_node_off2(void *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front(&ghead, &f->node2); + bpf_list_push_front(&ghead, &f->node); bpf_spin_unlock(&glock); return 0; @@ -510,7 +516,7 @@ int incorrect_head_var_off1(struct __sk_buff *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node); + bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node2); bpf_spin_unlock(&glock); return 0; @@ -525,7 +531,7 @@ int incorrect_head_var_off2(struct __sk_buff *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node); + bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node2); bpf_spin_unlock(&glock); return 0; @@ -557,14 +563,13 @@ SEC("?tc") int incorrect_head_off2(void *ctx) { struct foo *f; - struct bar *b; f = bpf_obj_new(typeof(*f)); if (!f) return 0; bpf_spin_lock(&glock); - 
bpf_list_push_front((void *)&ghead + 1, &f->node); + bpf_list_push_front((void *)&ghead + 1, &f->node2); bpf_spin_unlock(&glock); return 0; diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c new file mode 100644 index 000000000000..0ef286da092b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" + +struct node_data { + long key; + long data; + struct bpf_rb_node node; +}; + +struct map_value { + struct prog_test_ref_kfunc *not_kptr; + struct prog_test_ref_kfunc __kptr *val; + struct node_data __kptr *node; +}; + +/* This is necessary so that LLVM generates BTF for node_data struct + * If it's not included, a fwd reference for node_data will be generated but + * no struct. Example BTF of "node" field in map_value when not included: + * + * [10] PTR '(anon)' type_id=35 + * [34] FWD 'node_data' fwd_kind=struct + * [35] TYPE_TAG 'kptr_ref' type_id=34 + * + * (with no node_data struct defined) + * Had to do the same w/ bpf_kfunc_call_test_release below + */ +struct node_data *just_here_because_btf_bug; + +extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 2); +} some_nodes SEC(".maps"); + +static int create_and_stash(int idx, int val) +{ + struct map_value *mapval; + struct node_data *res; + + mapval = bpf_map_lookup_elem(&some_nodes, &idx); + if (!mapval) + return 1; + + res = bpf_obj_new(typeof(*res)); + if (!res) + return 1; + res->key = val; + + res = bpf_kptr_xchg(&mapval->node, res); + if (res) + bpf_obj_drop(res); + return 0; +} + +SEC("tc") +long stash_rb_nodes(void *ctx) +{ + return create_and_stash(0, 41) ?: create_and_stash(1, 42); +} + +SEC("tc") +long unstash_rb_node(void *ctx) +{ + struct map_value *mapval; + struct node_data *res; + long retval; + int key = 1; + + mapval = bpf_map_lookup_elem(&some_nodes, &key); + if (!mapval) + return 1; + + res = bpf_kptr_xchg(&mapval->node, NULL); + if (res) { + retval = res->key; + bpf_obj_drop(res); + return retval; + } + return 1; +} + +SEC("tc") +long stash_test_ref_kfunc(void *ctx) +{ + struct prog_test_ref_kfunc *res; + struct map_value *mapval; + int key = 0; + + mapval = bpf_map_lookup_elem(&some_nodes, &key); + if (!mapval) + return 1; + + res = bpf_kptr_xchg(&mapval->val, NULL); + if (res) + bpf_kfunc_call_test_release(res); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c index 19423ed862e3..bc8ea56671a1 100644 --- a/tools/testing/selftests/bpf/progs/local_storage.c +++ b/tools/testing/selftests/bpf/progs/local_storage.c @@ -16,6 +16,7 @@ char _license[] SEC("license") = "GPL"; int monitored_pid = 0; int inode_storage_result = -1; int sk_storage_result = -1; +int task_storage_result = -1; struct local_storage { struct inode *exec_inode; @@ -50,26 +51,57 @@ struct { __type(value, struct local_storage); } task_storage_map SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct local_storage); +} task_storage_map2 
SEC(".maps"); + SEC("lsm/inode_unlink") int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim) { __u32 pid = bpf_get_current_pid_tgid() >> 32; + struct bpf_local_storage *local_storage; struct local_storage *storage; + struct task_struct *task; bool is_self_unlink; if (pid != monitored_pid) return 0; - storage = bpf_task_storage_get(&task_storage_map, - bpf_get_current_task_btf(), 0, 0); - if (storage) { - /* Don't let an executable delete itself */ - is_self_unlink = storage->exec_inode == victim->d_inode; - if (is_self_unlink) - return -EPERM; - } + task = bpf_get_current_task_btf(); + if (!task) + return 0; - return 0; + task_storage_result = -1; + + storage = bpf_task_storage_get(&task_storage_map, task, 0, 0); + if (!storage) + return 0; + + /* Don't let an executable delete itself */ + is_self_unlink = storage->exec_inode == victim->d_inode; + + storage = bpf_task_storage_get(&task_storage_map2, task, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!storage || storage->value) + return 0; + + if (bpf_task_storage_delete(&task_storage_map, task)) + return 0; + + /* Ensure that the task_storage_map is disconnected from the storage. + * The storage memory should not be freed back to the + * bpf_mem_alloc. + */ + local_storage = task->bpf_storage; + if (!local_storage || local_storage->smap) + return 0; + + task_storage_result = 0; + + return is_self_unlink ? -EPERM : 0; } SEC("lsm.s/inode_rename") @@ -77,7 +109,6 @@ int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { - __u32 pid = bpf_get_current_pid_tgid() >> 32; struct local_storage *storage; int err; @@ -109,18 +140,17 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address, { __u32 pid = bpf_get_current_pid_tgid() >> 32; struct local_storage *storage; - int err; if (pid != monitored_pid) return 0; - storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, - BPF_LOCAL_STORAGE_GET_F_CREATE); + storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, 0); if (!storage) return 0; + sk_storage_result = -1; if (storage->value != DUMMY_STORAGE_VALUE) - sk_storage_result = -1; + return 0; /* This tests that we can associate multiple elements * with the local storage. @@ -130,14 +160,22 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address, if (!storage) return 0; - err = bpf_sk_storage_delete(&sk_storage_map, sock->sk); - if (err) + if (bpf_sk_storage_delete(&sk_storage_map2, sock->sk)) return 0; - err = bpf_sk_storage_delete(&sk_storage_map2, sock->sk); - if (!err) - sk_storage_result = err; + storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!storage) + return 0; + + if (bpf_sk_storage_delete(&sk_storage_map, sock->sk)) + return 0; + + /* Ensure that the sk_storage_map is disconnected from the storage. 
*/ + if (!sock->sk->sk_bpf_storage || sock->sk->sk_bpf_storage->smap) + return 0; + sk_storage_result = 0; return 0; } diff --git a/tools/testing/selftests/bpf/progs/loop6.c b/tools/testing/selftests/bpf/progs/loop6.c index 38de0331e6b4..e4ff97fbcce1 100644 --- a/tools/testing/selftests/bpf/progs/loop6.c +++ b/tools/testing/selftests/bpf/progs/loop6.c @@ -5,6 +5,7 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> +#include "bpf_misc.h" char _license[] SEC("license") = "GPL"; @@ -76,6 +77,7 @@ int BPF_KPROBE(trace_virtqueue_add_sgs, void *unused, struct scatterlist **sgs, return 0; for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++) { + __sink(out_sgs); for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX); sgp = __sg_next(sgp)) { bpf_probe_read_kernel(&len, sizeof(len), &sgp->length); @@ -85,6 +87,7 @@ int BPF_KPROBE(trace_virtqueue_add_sgs, void *unused, struct scatterlist **sgs, } for (i = 0; (i < VIRTIO_MAX_SGS) && (i < in_sgs); i++) { + __sink(in_sgs); for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX); sgp = __sg_next(sgp)) { bpf_probe_read_kernel(&len, sizeof(len), &sgp->length); diff --git a/tools/testing/selftests/bpf/progs/lru_bug.c b/tools/testing/selftests/bpf/progs/lru_bug.c index 687081a724b3..ad73029cb1e3 100644 --- a/tools/testing/selftests/bpf/progs/lru_bug.c +++ b/tools/testing/selftests/bpf/progs/lru_bug.c @@ -4,7 +4,7 @@ #include <bpf/bpf_helpers.h> struct map_value { - struct task_struct __kptr *ptr; + struct task_struct __kptr_untrusted *ptr; }; struct { diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c index dc93887ed34c..fadfdd98707c 100644 --- a/tools/testing/selftests/bpf/progs/lsm.c +++ b/tools/testing/selftests/bpf/progs/lsm.c @@ -4,12 +4,12 @@ * Copyright 2020 Google LLC. 
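The __kptr -> __kptr_untrusted change in lru_bug.c above (and the matching swaps in map_kptr.c and map_kptr_fail.c below) follow a kernel-side rename of the BTF type tags: what used to be written __kptr (no reference held) is now __kptr_untrusted, and the old __kptr_ref (reference-holding) spelling is now plain __kptr. A map value mixing both kinds now reads as in this sketch, using names from the tests:

	struct map_value {
		struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr; /* no ref held; loads are untrusted */
		struct prog_test_ref_kfunc __kptr *ref_ptr;		 /* ref held; swapped via bpf_kptr_xchg() */
	};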
*/ -#include "bpf_misc.h" #include "vmlinux.h" +#include <errno.h> #include <bpf/bpf_core_read.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include <errno.h> +#include "bpf_misc.h" struct { __uint(type, BPF_MAP_TYPE_ARRAY); diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index 228ec45365a8..d7150041e5d1 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -4,8 +4,8 @@ #include <bpf/bpf_helpers.h> struct map_value { - struct prog_test_ref_kfunc __kptr *unref_ptr; - struct prog_test_ref_kfunc __kptr_ref *ref_ptr; + struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr; + struct prog_test_ref_kfunc __kptr *ref_ptr; }; struct array_map { @@ -15,6 +15,13 @@ struct array_map { __uint(max_entries, 1); } array_map SEC(".maps"); +struct pcpu_array_map { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); +} pcpu_array_map SEC(".maps"); + struct hash_map { __uint(type, BPF_MAP_TYPE_HASH); __type(key, int); @@ -22,6 +29,13 @@ struct hash_map { __uint(max_entries, 1); } hash_map SEC(".maps"); +struct pcpu_hash_map { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); +} pcpu_hash_map SEC(".maps"); + struct hash_malloc_map { __uint(type, BPF_MAP_TYPE_HASH); __type(key, int); @@ -30,6 +44,14 @@ struct hash_malloc_map { __uint(map_flags, BPF_F_NO_PREALLOC); } hash_malloc_map SEC(".maps"); +struct pcpu_hash_malloc_map { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); + __uint(map_flags, BPF_F_NO_PREALLOC); +} pcpu_hash_malloc_map SEC(".maps"); + struct lru_hash_map { __uint(type, BPF_MAP_TYPE_LRU_HASH); __type(key, int); @@ -37,6 +59,41 @@ struct lru_hash_map { __uint(max_entries, 1); } lru_hash_map SEC(".maps"); +struct lru_pcpu_hash_map { + __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); +} lru_pcpu_hash_map SEC(".maps"); + +struct cgrp_ls_map { + __uint(type, BPF_MAP_TYPE_CGRP_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct map_value); +} cgrp_ls_map SEC(".maps"); + +struct task_ls_map { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct map_value); +} task_ls_map SEC(".maps"); + +struct inode_ls_map { + __uint(type, BPF_MAP_TYPE_INODE_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct map_value); +} inode_ls_map SEC(".maps"); + +struct sk_ls_map { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct map_value); +} sk_ls_map SEC(".maps"); + #define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name) \ struct { \ __uint(type, map_type); \ @@ -58,9 +115,8 @@ DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_mallo DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps); extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern struct prog_test_ref_kfunc * -bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym; extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; +void bpf_kfunc_call_test_ref(struct 
prog_test_ref_kfunc *p) __ksym; #define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val)) @@ -90,12 +146,23 @@ static void test_kptr_ref(struct map_value *v) WRITE_ONCE(v->unref_ptr, p); if (!p) return; + /* + * p is rcu_ptr_prog_test_ref_kfunc, + * because bpf prog is non-sleepable and runs in RCU CS. + * p can be passed to kfunc that requires KF_RCU. + */ + bpf_kfunc_call_test_ref(p); if (p->a + p->b > 100) return; /* store NULL */ p = bpf_kptr_xchg(&v->ref_ptr, NULL); if (!p) return; + /* + * p is trusted_ptr_prog_test_ref_kfunc. + * p can be passed to kfunc that requires KF_RCU. + */ + bpf_kfunc_call_test_ref(p); if (p->a + p->b > 100) { bpf_kfunc_call_test_release(p); return; @@ -118,25 +185,10 @@ static void test_kptr_ref(struct map_value *v) bpf_kfunc_call_test_release(p); } -static void test_kptr_get(struct map_value *v) -{ - struct prog_test_ref_kfunc *p; - - p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0); - if (!p) - return; - if (p->a + p->b > 100) { - bpf_kfunc_call_test_release(p); - return; - } - bpf_kfunc_call_test_release(p); -} - static void test_kptr(struct map_value *v) { test_kptr_unref(v); test_kptr_ref(v); - test_kptr_get(v); } SEC("tc") @@ -160,6 +212,58 @@ int test_map_kptr(struct __sk_buff *ctx) return 0; } +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(test_cgrp_map_kptr, struct cgroup *cgrp, const char *path) +{ + struct map_value *v; + + v = bpf_cgrp_storage_get(&cgrp_ls_map, cgrp, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (v) + test_kptr(v); + return 0; +} + +SEC("lsm/inode_unlink") +int BPF_PROG(test_task_map_kptr, struct inode *inode, struct dentry *victim) +{ + struct task_struct *task; + struct map_value *v; + + task = bpf_get_current_task_btf(); + if (!task) + return 0; + v = bpf_task_storage_get(&task_ls_map, task, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (v) + test_kptr(v); + return 0; +} + +SEC("lsm/inode_unlink") +int BPF_PROG(test_inode_map_kptr, struct inode *inode, struct dentry *victim) +{ + struct map_value *v; + + v = bpf_inode_storage_get(&inode_ls_map, inode, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (v) + test_kptr(v); + return 0; +} + +SEC("tc") +int test_sk_map_kptr(struct __sk_buff *ctx) +{ + struct map_value *v; + struct bpf_sock *sk; + + sk = ctx->sk; + if (!sk) + return 0; + v = bpf_sk_storage_get(&sk_ls_map, sk, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (v) + test_kptr(v); + return 0; +} + SEC("tc") int test_map_in_map_kptr(struct __sk_buff *ctx) { @@ -189,106 +293,241 @@ int test_map_in_map_kptr(struct __sk_buff *ctx) return 0; } -SEC("tc") -int test_map_kptr_ref(struct __sk_buff *ctx) +int ref = 1; + +static __always_inline +int test_map_kptr_ref_pre(struct map_value *v) { struct prog_test_ref_kfunc *p, *p_st; unsigned long arg = 0; - struct map_value *v; - int key = 0, ret; + int ret; p = bpf_kfunc_call_test_acquire(&arg); if (!p) return 1; + ref++; p_st = p->next; - if (p_st->cnt.refs.counter != 2) { + if (p_st->cnt.refs.counter != ref) { ret = 2; goto end; } - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) { - ret = 3; - goto end; - } - p = bpf_kptr_xchg(&v->ref_ptr, p); if (p) { - ret = 4; - goto end; - } - if (p_st->cnt.refs.counter != 2) - return 5; - - p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0); - if (!p) - return 6; - if (p_st->cnt.refs.counter != 3) { - ret = 7; + ret = 3; goto end; } - bpf_kfunc_call_test_release(p); - if (p_st->cnt.refs.counter != 2) - return 8; + if (p_st->cnt.refs.counter != ref) + return 4; p = bpf_kptr_xchg(&v->ref_ptr, NULL); if (!p) - return 9; + return 5; 
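	/* Bookkeeping pattern: the global 'ref' tracks the expected
	 * p_st->cnt.refs.counter value as this helper runs once per map type,
	 * each run leaving one extra reference stashed in a map, whereas the
	 * old single-map version could simply hard-code counts of 2 and 3.
	 */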
bpf_kfunc_call_test_release(p); - if (p_st->cnt.refs.counter != 1) - return 10; + ref--; + if (p_st->cnt.refs.counter != ref) + return 6; p = bpf_kfunc_call_test_acquire(&arg); if (!p) - return 11; + return 7; + ref++; p = bpf_kptr_xchg(&v->ref_ptr, p); if (p) { - ret = 12; + ret = 8; goto end; } - if (p_st->cnt.refs.counter != 2) - return 13; + if (p_st->cnt.refs.counter != ref) + return 9; /* Leave in map */ return 0; end: + ref--; bpf_kfunc_call_test_release(p); return ret; } -SEC("tc") -int test_map_kptr_ref2(struct __sk_buff *ctx) +static __always_inline +int test_map_kptr_ref_post(struct map_value *v) { struct prog_test_ref_kfunc *p, *p_st; - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 1; p_st = v->ref_ptr; - if (!p_st || p_st->cnt.refs.counter != 2) - return 2; + if (!p_st || p_st->cnt.refs.counter != ref) + return 1; p = bpf_kptr_xchg(&v->ref_ptr, NULL); if (!p) - return 3; - if (p_st->cnt.refs.counter != 2) { + return 2; + if (p_st->cnt.refs.counter != ref) { bpf_kfunc_call_test_release(p); - return 4; + return 3; } p = bpf_kptr_xchg(&v->ref_ptr, p); if (p) { bpf_kfunc_call_test_release(p); - return 5; + return 4; } - if (p_st->cnt.refs.counter != 2) - return 6; + if (p_st->cnt.refs.counter != ref) + return 5; + + return 0; +} + +#define TEST(map) \ + v = bpf_map_lookup_elem(&map, &key); \ + if (!v) \ + return -1; \ + ret = test_map_kptr_ref_pre(v); \ + if (ret) \ + return ret; + +#define TEST_PCPU(map) \ + v = bpf_map_lookup_percpu_elem(&map, &key, 0); \ + if (!v) \ + return -1; \ + ret = test_map_kptr_ref_pre(v); \ + if (ret) \ + return ret; + +SEC("tc") +int test_map_kptr_ref1(struct __sk_buff *ctx) +{ + struct map_value *v, val = {}; + int key = 0, ret; + + bpf_map_update_elem(&hash_map, &key, &val, 0); + bpf_map_update_elem(&hash_malloc_map, &key, &val, 0); + bpf_map_update_elem(&lru_hash_map, &key, &val, 0); + + bpf_map_update_elem(&pcpu_hash_map, &key, &val, 0); + bpf_map_update_elem(&pcpu_hash_malloc_map, &key, &val, 0); + bpf_map_update_elem(&lru_pcpu_hash_map, &key, &val, 0); + + TEST(array_map); + TEST(hash_map); + TEST(hash_malloc_map); + TEST(lru_hash_map); + + TEST_PCPU(pcpu_array_map); + TEST_PCPU(pcpu_hash_map); + TEST_PCPU(pcpu_hash_malloc_map); + TEST_PCPU(lru_pcpu_hash_map); + + return 0; +} + +#undef TEST +#undef TEST_PCPU + +#define TEST(map) \ + v = bpf_map_lookup_elem(&map, &key); \ + if (!v) \ + return -1; \ + ret = test_map_kptr_ref_post(v); \ + if (ret) \ + return ret; + +#define TEST_PCPU(map) \ + v = bpf_map_lookup_percpu_elem(&map, &key, 0); \ + if (!v) \ + return -1; \ + ret = test_map_kptr_ref_post(v); \ + if (ret) \ + return ret; + +SEC("tc") +int test_map_kptr_ref2(struct __sk_buff *ctx) +{ + struct map_value *v; + int key = 0, ret; + + TEST(array_map); + TEST(hash_map); + TEST(hash_malloc_map); + TEST(lru_hash_map); + + TEST_PCPU(pcpu_array_map); + TEST_PCPU(pcpu_hash_map); + TEST_PCPU(pcpu_hash_malloc_map); + TEST_PCPU(lru_pcpu_hash_map); return 0; } +#undef TEST +#undef TEST_PCPU + +SEC("tc") +int test_map_kptr_ref3(struct __sk_buff *ctx) +{ + struct prog_test_ref_kfunc *p; + unsigned long sp = 0; + + p = bpf_kfunc_call_test_acquire(&sp); + if (!p) + return 1; + ref++; + if (p->cnt.refs.counter != ref) { + bpf_kfunc_call_test_release(p); + return 2; + } + bpf_kfunc_call_test_release(p); + ref--; + return 0; +} + +SEC("syscall") +int test_ls_map_kptr_ref1(void *ctx) +{ + struct task_struct *current; + struct map_value *v; + + current = bpf_get_current_task_btf(); + if (!current) + return 
100; + v = bpf_task_storage_get(&task_ls_map, current, NULL, 0); + if (v) + return 150; + v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!v) + return 200; + return test_map_kptr_ref_pre(v); +} + +SEC("syscall") +int test_ls_map_kptr_ref2(void *ctx) +{ + struct task_struct *current; + struct map_value *v; + + current = bpf_get_current_task_btf(); + if (!current) + return 100; + v = bpf_task_storage_get(&task_ls_map, current, NULL, 0); + if (!v) + return 200; + return test_map_kptr_ref_post(v); +} + +SEC("syscall") +int test_ls_map_kptr_ref_del(void *ctx) +{ + struct task_struct *current; + struct map_value *v; + + current = bpf_get_current_task_btf(); + if (!current) + return 100; + v = bpf_task_storage_get(&task_ls_map, current, NULL, 0); + if (!v) + return 200; + if (!v->ref_ptr) + return 300; + return bpf_task_storage_delete(&task_ls_map, current); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c index 760e41e1a632..da8c724f839b 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c @@ -7,9 +7,9 @@ struct map_value { char buf[8]; - struct prog_test_ref_kfunc __kptr *unref_ptr; - struct prog_test_ref_kfunc __kptr_ref *ref_ptr; - struct prog_test_member __kptr_ref *ref_memb_ptr; + struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr; + struct prog_test_ref_kfunc __kptr *ref_ptr; + struct prog_test_member __kptr *ref_memb_ptr; }; struct array_map { @@ -20,8 +20,7 @@ struct array_map { } array_map SEC(".maps"); extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern struct prog_test_ref_kfunc * -bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym; +extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; SEC("?tc") __failure __msg("kptr access size must be BPF_DW") @@ -220,68 +219,7 @@ int reject_kptr_xchg_on_unref(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("arg#0 expected pointer to map value") -int reject_kptr_get_no_map_val(struct __sk_buff *ctx) -{ - bpf_kfunc_call_test_kptr_get((void *)&ctx, 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("arg#0 expected pointer to map value") -int reject_kptr_get_no_null_map_val(struct __sk_buff *ctx) -{ - bpf_kfunc_call_test_kptr_get(bpf_map_lookup_elem(&array_map, &(int){0}), 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("arg#0 no referenced kptr at map value offset=0") -int reject_kptr_get_no_kptr(struct __sk_buff *ctx) -{ - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 0; - - bpf_kfunc_call_test_kptr_get((void *)v, 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("arg#0 no referenced kptr at map value offset=8") -int reject_kptr_get_on_unref(struct __sk_buff *ctx) -{ - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 0; - - bpf_kfunc_call_test_kptr_get(&v->unref_ptr, 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("kernel function bpf_kfunc_call_test_kptr_get args#0") -int reject_kptr_get_bad_type_match(struct __sk_buff *ctx) -{ - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 0; - - bpf_kfunc_call_test_kptr_get((void *)&v->ref_memb_ptr, 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_") 
+__failure __msg("R1 type=rcu_ptr_or_null_ expected=percpu_ptr_") int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx) { struct map_value *v; @@ -316,7 +254,7 @@ int reject_untrusted_store_to_ref(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("R2 type=untrusted_ptr_ expected=ptr_") +__failure __msg("R2 must be referenced") int reject_untrusted_xchg(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *p; @@ -428,9 +366,10 @@ int kptr_xchg_ref_state(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=3 alloc_insn=") -int kptr_get_ref_state(struct __sk_buff *ctx) +__failure __msg("Possibly NULL pointer passed to helper arg2") +int kptr_xchg_possibly_null(struct __sk_buff *ctx) { + struct prog_test_ref_kfunc *p; struct map_value *v; int key = 0; @@ -438,7 +377,13 @@ int kptr_get_ref_state(struct __sk_buff *ctx) if (!v) return 0; - bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0); + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); + + /* PTR_TO_BTF_ID | PTR_MAYBE_NULL passed to bpf_kptr_xchg() */ + p = bpf_kptr_xchg(&v->ref_ptr, p); + if (p) + bpf_kfunc_call_test_release(p); + return 0; } diff --git a/tools/testing/selftests/bpf/progs/nested_trust_failure.c b/tools/testing/selftests/bpf/progs/nested_trust_failure.c index 14aff7676436..0d1aa6bbace4 100644 --- a/tools/testing/selftests/bpf/progs/nested_trust_failure.c +++ b/tools/testing/selftests/bpf/progs/nested_trust_failure.c @@ -17,7 +17,7 @@ char _license[] SEC("license") = "GPL"; */ SEC("tp_btf/task_newtask") -__failure __msg("R2 must be referenced or trusted") +__failure __msg("R2 must be") int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_flags) { bpf_cpumask_test_cpu(0, task->user_cpus_ptr); diff --git a/tools/testing/selftests/bpf/progs/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c index f718b2c212dc..f9ef8aee56f1 100644 --- a/tools/testing/selftests/bpf/progs/netcnt_prog.c +++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c @@ -26,7 +26,6 @@ SEC("cgroup/skb") int bpf_nextcnt(struct __sk_buff *skb) { union percpu_net_cnt *percpu_cnt; - char fmt[] = "%d %llu %llu\n"; union net_cnt *cnt; __u64 ts, dt; int ret; diff --git a/tools/testing/selftests/bpf/progs/netif_receive_skb.c b/tools/testing/selftests/bpf/progs/netif_receive_skb.c index 1d8918dfbd3f..c0062645fc68 100644 --- a/tools/testing/selftests/bpf/progs/netif_receive_skb.c +++ b/tools/testing/selftests/bpf/progs/netif_receive_skb.c @@ -53,7 +53,6 @@ static int __strncmp(const void *m1, const void *m2, size_t len) do { \ static const char _expectedval[EXPECTED_STRSIZE] = \ _expected; \ - static const char _ptrtype[64] = #_type; \ __u64 _hflags = _flags | BTF_F_COMPACT; \ static _type _ptrdata = __VA_ARGS__; \ static struct btf_ptr _ptr = { }; \ diff --git a/tools/testing/selftests/bpf/progs/perfbuf_bench.c b/tools/testing/selftests/bpf/progs/perfbuf_bench.c index 45204fe0c570..29c1639fc78a 100644 --- a/tools/testing/selftests/bpf/progs/perfbuf_bench.c +++ b/tools/testing/selftests/bpf/progs/perfbuf_bench.c @@ -22,7 +22,6 @@ long dropped __attribute__((aligned(128))) = 0; SEC("fentry/" SYS_PREFIX "sys_getpgid") int bench_perfbuf(void *ctx) { - __u64 *sample; int i; for (i = 0; i < batch_cnt; i++) { diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h index 875513866032..f799d87e8700 100644 --- a/tools/testing/selftests/bpf/progs/profiler.inc.h +++ b/tools/testing/selftests/bpf/progs/profiler.inc.h @@ -6,6 +6,7 @@ #include 
<bpf/bpf_tracing.h> #include "profiler.h" +#include "err.h" #ifndef NULL #define NULL 0 @@ -16,7 +17,6 @@ #define O_DIRECTORY 00200000 #define __O_TMPFILE 020000000 #define O_TMPFILE (__O_TMPFILE | O_DIRECTORY) -#define MAX_ERRNO 4095 #define S_IFMT 00170000 #define S_IFSOCK 0140000 #define S_IFLNK 0120000 @@ -34,7 +34,6 @@ #define S_ISBLK(m) (((m)&S_IFMT) == S_IFBLK) #define S_ISFIFO(m) (((m)&S_IFMT) == S_IFIFO) #define S_ISSOCK(m) (((m)&S_IFMT) == S_IFSOCK) -#define IS_ERR_VALUE(x) (unsigned long)(void*)(x) >= (unsigned long)-MAX_ERRNO #define KILL_DATA_ARRAY_SIZE 8 diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h index 6c7b1fb268d6..026d573ce179 100644 --- a/tools/testing/selftests/bpf/progs/pyperf.h +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -7,6 +7,7 @@ #include <stdbool.h> #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include "bpf_misc.h" #define FUNCTION_NAME_LEN 64 #define FILE_NAME_LEN 128 @@ -294,17 +295,22 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx) if (ctx.done) return 0; #else -#ifdef NO_UNROLL +#if defined(USE_ITER) +/* no for loop, no unrolling */ +#elif defined(NO_UNROLL) #pragma clang loop unroll(disable) -#else -#ifdef UNROLL_COUNT +#elif defined(UNROLL_COUNT) #pragma clang loop unroll_count(UNROLL_COUNT) #else #pragma clang loop unroll(full) -#endif #endif /* NO_UNROLL */ /* Unwind python stack */ +#ifdef USE_ITER + int i; + bpf_for(i, 0, STACK_MAX_LEN) { +#else /* !USE_ITER */ for (int i = 0; i < STACK_MAX_LEN; ++i) { +#endif if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) { int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu; int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, &sym); @@ -339,7 +345,7 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx) SEC("raw_tracepoint/kfree_skb") int on_event(struct bpf_raw_tracepoint_args* ctx) { - int i, ret = 0; + int ret = 0; ret |= __on_event(ctx); ret |= __on_event(ctx); ret |= __on_event(ctx); diff --git a/tools/testing/selftests/bpf/progs/pyperf600_iter.c b/tools/testing/selftests/bpf/progs/pyperf600_iter.c new file mode 100644 index 000000000000..d62e1b200c30 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf600_iter.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2023 Meta Platforms, Inc. and affiliates. +#define STACK_MAX_LEN 600 +#define SUBPROGS +#define NO_UNROLL +#define USE_ITER +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c index 6beff7502f4d..520b58c4f8db 100644 --- a/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c +++ b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c @@ -2,7 +2,4 @@ // Copyright (c) 2019 Facebook #define STACK_MAX_LEN 600 #define NO_UNROLL -/* clang will not unroll at all. 
- * Total program size is around 2k insns - */ #include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c index e5db1a4287e5..b09f4fffe57c 100644 --- a/tools/testing/selftests/bpf/progs/rbtree.c +++ b/tools/testing/selftests/bpf/progs/rbtree.c @@ -75,7 +75,7 @@ SEC("tc") long rbtree_add_and_remove(void *ctx) { struct bpf_rb_node *res = NULL; - struct node_data *n, *m; + struct node_data *n, *m = NULL; n = bpf_obj_new(typeof(*n)); if (!n) @@ -93,9 +93,11 @@ long rbtree_add_and_remove(void *ctx) res = bpf_rbtree_remove(&groot, &n->node); bpf_spin_unlock(&glock); + if (!res) + return 1; + n = container_of(res, struct node_data, node); removed_key = n->key; - bpf_obj_drop(n); return 0; @@ -148,9 +150,11 @@ long rbtree_first_and_remove(void *ctx) res = bpf_rbtree_remove(&groot, &o->node); bpf_spin_unlock(&glock); + if (!res) + return 5; + o = container_of(res, struct node_data, node); removed_key = o->key; - bpf_obj_drop(o); bpf_spin_lock(&glock); @@ -173,4 +177,70 @@ err_out: return 1; } +SEC("tc") +long rbtree_api_release_aliasing(void *ctx) +{ + struct node_data *n, *m, *o; + struct bpf_rb_node *res, *res2; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + n->key = 41; + n->data = 42; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_spin_unlock(&glock); + + bpf_spin_lock(&glock); + + /* m and o point to the same node, + * but verifier doesn't know this + */ + res = bpf_rbtree_first(&groot); + if (!res) + goto err_out; + o = container_of(res, struct node_data, node); + + res = bpf_rbtree_first(&groot); + if (!res) + goto err_out; + m = container_of(res, struct node_data, node); + + res = bpf_rbtree_remove(&groot, &m->node); + /* Retval of previous remove returns an owning reference to m, + * which is the same node non-owning ref o is pointing at. + * We can safely try to remove o as the second rbtree_remove will + * return NULL since the node isn't in a tree. + * + * Previously we relied on the verifier type system + rbtree_remove + * invalidating non-owning refs to ensure that rbtree_remove couldn't + * fail, but now rbtree_remove does runtime checking so we no longer + * invalidate non-owning refs after remove. + */ + res2 = bpf_rbtree_remove(&groot, &o->node); + + bpf_spin_unlock(&glock); + + if (res) { + o = container_of(res, struct node_data, node); + first_data[0] = o->data; + bpf_obj_drop(o); + } + if (res2) { + /* The second remove fails, so res2 is null and this doesn't + * execute + */ + m = container_of(res2, struct node_data, node); + first_data[1] = m->data; + bpf_obj_drop(m); + } + return 0; + +err_out: + bpf_spin_unlock(&glock); + return 1; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c index 340f97da1084..7651843f5a80 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c +++ b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c @@ -16,17 +16,6 @@ struct node_data { struct bpf_list_node node; }; -static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) -{ - struct node_data *node_a; - struct node_data *node_b; - - node_a = container_of(a, struct node_data, node); - node_b = container_of(b, struct node_data, node); - - return node_a->key < node_b->key; -} - #define private(name) SEC(".data." 
#name) __hidden __attribute__((aligned(8))) private(A) struct bpf_spin_lock glock; private(A) struct bpf_rb_root groot __contains(node_data, node); diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c index bf3cba115897..3fecf1c6dfe5 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_fail.c +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=2 alloc_insn=11") +__failure __msg("Unreleased reference id=3 alloc_insn=10") long rbtree_api_remove_no_drop(void *ctx) { struct bpf_rb_node *res; @@ -118,10 +118,13 @@ long rbtree_api_remove_no_drop(void *ctx) res = bpf_rbtree_remove(&groot, res); - n = container_of(res, struct node_data, node); + if (res) { + n = container_of(res, struct node_data, node); + __sink(n); + } bpf_spin_unlock(&glock); - /* bpf_obj_drop(n) is missing here */ + /* if (res) { bpf_obj_drop(n); } is missing here */ return 0; unlock_err: @@ -149,35 +152,36 @@ long rbtree_api_add_to_multiple_trees(void *ctx) } SEC("?tc") -__failure __msg("rbtree_remove node input must be non-owning ref") -long rbtree_api_add_release_unlock_escape(void *ctx) +__failure __msg("dereference of modified ptr_or_null_ ptr R2 off=16 disallowed") +long rbtree_api_use_unchecked_remove_retval(void *ctx) { - struct node_data *n; - - n = bpf_obj_new(typeof(*n)); - if (!n) - return 1; + struct bpf_rb_node *res; bpf_spin_lock(&glock); - bpf_rbtree_add(&groot, &n->node, less); + + res = bpf_rbtree_first(&groot); + if (!res) + goto err_out; + res = bpf_rbtree_remove(&groot, res); + bpf_spin_unlock(&glock); bpf_spin_lock(&glock); - /* After add() in previous critical section, n should be - * release_on_unlock and released after previous spin_unlock, - * so should not be possible to use it here - */ - bpf_rbtree_remove(&groot, &n->node); + /* Must check res for NULL before using in rbtree_add below */ + bpf_rbtree_add(&groot, res, less); bpf_spin_unlock(&glock); return 0; + +err_out: + bpf_spin_unlock(&glock); + return 1; } SEC("?tc") __failure __msg("rbtree_remove node input must be non-owning ref") -long rbtree_api_release_aliasing(void *ctx) +long rbtree_api_add_release_unlock_escape(void *ctx) { - struct node_data *n, *m, *o; - struct bpf_rb_node *res; + struct node_data *n; n = bpf_obj_new(typeof(*n)); if (!n) @@ -188,37 +192,11 @@ long rbtree_api_release_aliasing(void *ctx) bpf_spin_unlock(&glock); bpf_spin_lock(&glock); - - /* m and o point to the same node, - * but verifier doesn't know this - */ - res = bpf_rbtree_first(&groot); - if (!res) - return 1; - o = container_of(res, struct node_data, node); - - res = bpf_rbtree_first(&groot); - if (!res) - return 1; - m = container_of(res, struct node_data, node); - - bpf_rbtree_remove(&groot, &m->node); - /* This second remove shouldn't be possible. Retval of previous - * remove returns owning reference to m, which is the same - * node o's non-owning ref is pointing at - * - * In order to preserve property - * * owning ref must not be in rbtree - * * non-owning ref must be in rbtree - * - * o's ref must be invalidated after previous remove. Otherwise - * we'd have non-owning ref to node that isn't in rbtree, and - * verifier wouldn't be able to use type system to prevent remove - * of ref that already isn't in any tree. Would have to do runtime - * checks in that case. 
+ /* After add() in previous critical section, n should be + * release_on_unlock and released after previous spin_unlock, + * so should not be possible to use it here */ - bpf_rbtree_remove(&groot, &o->node); - + bpf_rbtree_remove(&groot, &n->node); bpf_spin_unlock(&glock); return 0; } @@ -232,8 +210,11 @@ long rbtree_api_first_release_unlock_escape(void *ctx) bpf_spin_lock(&glock); res = bpf_rbtree_first(&groot); - if (res) - n = container_of(res, struct node_data, node); + if (!res) { + bpf_spin_unlock(&glock); + return 1; + } + n = container_of(res, struct node_data, node); bpf_spin_unlock(&glock); bpf_spin_lock(&glock); diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c index 5cecbdbbb16e..14fb01437fb8 100644 --- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c @@ -23,7 +23,7 @@ struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; void bpf_key_put(struct bpf_key *key) __ksym; void bpf_rcu_read_lock(void) __ksym; void bpf_rcu_read_unlock(void) __ksym; -struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p) __ksym; +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; void bpf_task_release(struct task_struct *p) __ksym; SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") @@ -81,7 +81,7 @@ int no_lock(void *ctx) { struct task_struct *task, *real_parent; - /* no bpf_rcu_read_lock(), old code still works */ + /* old style ptr_to_btf_id is not allowed in sleepable */ task = bpf_get_current_task_btf(); real_parent = task->real_parent; (void)bpf_task_storage_get(&map_a, real_parent, 0, 0); @@ -159,13 +159,8 @@ int task_acquire(void *ctx) goto out; /* acquire a reference which can be used outside rcu read lock region */ - gparent = bpf_task_acquire_not_zero(gparent); + gparent = bpf_task_acquire(gparent); if (!gparent) - /* Until we resolve the issues with using task->rcu_users, we - * expect bpf_task_acquire_not_zero() to return a NULL task. - * See the comment at the definition of - * bpf_task_acquire_not_zero() for more details. 
- */ goto out; (void)bpf_task_storage_get(&map_a, gparent, 0, 0); @@ -179,8 +174,6 @@ SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") int miss_lock(void *ctx) { struct task_struct *task; - struct css_set *cgroups; - struct cgroup *dfl_cgrp; /* missing bpf_rcu_read_lock() */ task = bpf_get_current_task_btf(); @@ -195,8 +188,6 @@ SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") int miss_unlock(void *ctx) { struct task_struct *task; - struct css_set *cgroups; - struct cgroup *dfl_cgrp; /* missing bpf_rcu_read_unlock() */ task = bpf_get_current_task_btf(); @@ -286,13 +277,13 @@ out: } SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") -int task_untrusted_non_rcuptr(void *ctx) +int task_trusted_non_rcuptr(void *ctx) { struct task_struct *task, *group_leader; task = bpf_get_current_task_btf(); bpf_rcu_read_lock(); - /* the pointer group_leader marked as untrusted */ + /* the pointer group_leader is explicitly marked as trusted */ group_leader = task->real_parent->group_leader; (void)bpf_task_storage_get(&map_a, group_leader, 0, 0); bpf_rcu_read_unlock(); diff --git a/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c b/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c new file mode 100644 index 000000000000..df4873558634 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +struct task_ls_map { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, int); +} task_ls_map SEC(".maps"); + +long gp_seq; + +SEC("syscall") +int do_call_rcu_tasks_trace(void *ctx) +{ + struct task_struct *current; + int *v; + + current = bpf_get_current_task_btf(); + v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!v) + return 1; + /* Invoke call_rcu_tasks_trace */ + return bpf_task_storage_delete(&task_ls_map, current); +} + +SEC("kprobe/rcu_tasks_trace_postgp") +int rcu_tasks_trace_postgp(void *ctx) +{ + __sync_add_and_fetch(&gp_seq, 1); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c index a47bb0120719..76556e0b42b2 100644 --- a/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c +++ b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c @@ -23,7 +23,6 @@ SEC("raw_tp/sys_enter") int BPF_PROG(read_bpf_task_storage_busy) { int *value; - int key; if (!CONFIG_PREEMPT) return 0; diff --git a/tools/testing/selftests/bpf/progs/recvmsg4_prog.c b/tools/testing/selftests/bpf/progs/recvmsg4_prog.c index 3d1ae8b3402f..59748c95471a 100644 --- a/tools/testing/selftests/bpf/progs/recvmsg4_prog.c +++ b/tools/testing/selftests/bpf/progs/recvmsg4_prog.c @@ -17,8 +17,6 @@ SEC("cgroup/recvmsg4") int recvmsg4_prog(struct bpf_sock_addr *ctx) { struct bpf_sock *sk; - __u32 user_ip4; - __u16 user_port; sk = ctx->sk; if (!sk) diff --git a/tools/testing/selftests/bpf/progs/recvmsg6_prog.c b/tools/testing/selftests/bpf/progs/recvmsg6_prog.c index 27dfb21b21b4..d9a4016596d5 100644 --- a/tools/testing/selftests/bpf/progs/recvmsg6_prog.c +++ b/tools/testing/selftests/bpf/progs/recvmsg6_prog.c @@ -20,8 +20,6 @@ SEC("cgroup/recvmsg6") int recvmsg6_prog(struct bpf_sock_addr *ctx) { struct bpf_sock *sk; - __u32 user_ip4; - __u16 user_port; sk = ctx->sk; if (!sk) diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c 
b/tools/testing/selftests/bpf/progs/refcounted_kptr.c new file mode 100644 index 000000000000..1d348a225140 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct node_data { + long key; + long list_data; + struct bpf_rb_node r; + struct bpf_list_node l; + struct bpf_refcount ref; +}; + +struct map_value { + struct node_data __kptr *node; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); +} stashed_nodes SEC(".maps"); + +struct node_acquire { + long key; + long data; + struct bpf_rb_node node; + struct bpf_refcount refcount; +}; + +#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock lock; +private(A) struct bpf_rb_root root __contains(node_data, r); +private(A) struct bpf_list_head head __contains(node_data, l); + +private(B) struct bpf_spin_lock alock; +private(B) struct bpf_rb_root aroot __contains(node_acquire, node); + +static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b) +{ + struct node_data *a; + struct node_data *b; + + a = container_of(node_a, struct node_data, r); + b = container_of(node_b, struct node_data, r); + + return a->key < b->key; +} + +static bool less_a(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_acquire *node_a; + struct node_acquire *node_b; + + node_a = container_of(a, struct node_acquire, node); + node_b = container_of(b, struct node_acquire, node); + + return node_a->key < node_b->key; +} + +static long __insert_in_tree_and_list(struct bpf_list_head *head, + struct bpf_rb_root *root, + struct bpf_spin_lock *lock) +{ + struct node_data *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return -1; + + m = bpf_refcount_acquire(n); + m->key = 123; + m->list_data = 456; + + bpf_spin_lock(lock); + if (bpf_rbtree_add(root, &n->r, less)) { + /* Failure to insert - unexpected */ + bpf_spin_unlock(lock); + bpf_obj_drop(m); + return -2; + } + bpf_spin_unlock(lock); + + bpf_spin_lock(lock); + if (bpf_list_push_front(head, &m->l)) { + /* Failure to insert - unexpected */ + bpf_spin_unlock(lock); + return -3; + } + bpf_spin_unlock(lock); + return 0; +} + +static long __stash_map_insert_tree(int idx, int val, struct bpf_rb_root *root, + struct bpf_spin_lock *lock) +{ + struct map_value *mapval; + struct node_data *n, *m; + + mapval = bpf_map_lookup_elem(&stashed_nodes, &idx); + if (!mapval) + return -1; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return -2; + + n->key = val; + m = bpf_refcount_acquire(n); + + n = bpf_kptr_xchg(&mapval->node, n); + if (n) { + bpf_obj_drop(n); + bpf_obj_drop(m); + return -3; + } + + bpf_spin_lock(lock); + if (bpf_rbtree_add(root, &m->r, less)) { + /* Failure to insert - unexpected */ + bpf_spin_unlock(lock); + return -4; + } + bpf_spin_unlock(lock); + return 0; +} + +static long __read_from_tree(struct bpf_rb_root *root, + struct bpf_spin_lock *lock, + bool remove_from_tree) +{ + struct bpf_rb_node *rb; + struct node_data *n; + long res = -99; + + bpf_spin_lock(lock); + + rb = bpf_rbtree_first(root); + if (!rb) { + bpf_spin_unlock(lock); + return -1; + } + + n = container_of(rb, struct node_data, r); + res = n->key; + + if (!remove_from_tree) { + 
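/* Read-only path: the key was already copied out under the lock, so
+ * the node can stay in the tree and nothing needs to be dropped.
+ */
+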
bpf_spin_unlock(lock); + return res; + } + + rb = bpf_rbtree_remove(root, rb); + bpf_spin_unlock(lock); + if (!rb) + return -2; + n = container_of(rb, struct node_data, r); + bpf_obj_drop(n); + return res; +} + +static long __read_from_list(struct bpf_list_head *head, + struct bpf_spin_lock *lock, + bool remove_from_list) +{ + struct bpf_list_node *l; + struct node_data *n; + long res = -99; + + bpf_spin_lock(lock); + + l = bpf_list_pop_front(head); + if (!l) { + bpf_spin_unlock(lock); + return -1; + } + + n = container_of(l, struct node_data, l); + res = n->list_data; + + if (!remove_from_list) { + if (bpf_list_push_back(head, &n->l)) { + bpf_spin_unlock(lock); + return -2; + } + } + + bpf_spin_unlock(lock); + + if (remove_from_list) + bpf_obj_drop(n); + return res; +} + +static long __read_from_unstash(int idx) +{ + struct node_data *n = NULL; + struct map_value *mapval; + long val = -99; + + mapval = bpf_map_lookup_elem(&stashed_nodes, &idx); + if (!mapval) + return -1; + + n = bpf_kptr_xchg(&mapval->node, n); + if (!n) + return -2; + + val = n->key; + bpf_obj_drop(n); + return val; +} + +#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(579) \ +long insert_and_remove_tree_##rem_tree##_list_##rem_list(void *ctx) \ +{ \ + long err, tree_data, list_data; \ + \ + err = __insert_in_tree_and_list(&head, &root, &lock); \ + if (err) \ + return err; \ + \ + err = __read_from_tree(&root, &lock, rem_tree); \ + if (err < 0) \ + return err; \ + else \ + tree_data = err; \ + \ + err = __read_from_list(&head, &lock, rem_list); \ + if (err < 0) \ + return err; \ + else \ + list_data = err; \ + \ + return tree_data + list_data; \ +} + +/* After successful insert of struct node_data into both collections: + * - it should have refcount = 2 + * - removing / not removing the node_data from a collection after + * reading should have no effect on ability to read / remove from + * the other collection + */ +INSERT_READ_BOTH(true, true, "insert_read_both: remove from tree + list"); +INSERT_READ_BOTH(false, false, "insert_read_both: remove from neither"); +INSERT_READ_BOTH(true, false, "insert_read_both: remove from tree"); +INSERT_READ_BOTH(false, true, "insert_read_both: remove from list"); + +#undef INSERT_READ_BOTH +#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(579) \ +long insert_and_remove_lf_tree_##rem_tree##_list_##rem_list(void *ctx) \ +{ \ + long err, tree_data, list_data; \ + \ + err = __insert_in_tree_and_list(&head, &root, &lock); \ + if (err) \ + return err; \ + \ + err = __read_from_list(&head, &lock, rem_list); \ + if (err < 0) \ + return err; \ + else \ + list_data = err; \ + \ + err = __read_from_tree(&root, &lock, rem_tree); \ + if (err < 0) \ + return err; \ + else \ + tree_data = err; \ + \ + return tree_data + list_data; \ +} + +/* Similar to insert_read_both, but list data is read and possibly removed + * first + * + * Results should be no different than reading and possibly removing rbtree + * node first + */ +INSERT_READ_BOTH(true, true, "insert_read_both_list_first: remove from tree + list"); +INSERT_READ_BOTH(false, false, "insert_read_both_list_first: remove from neither"); +INSERT_READ_BOTH(true, false, "insert_read_both_list_first: remove from tree"); +INSERT_READ_BOTH(false, true, "insert_read_both_list_first: remove from list"); + +#define INSERT_DOUBLE_READ_AND_DEL(read_fn, read_root, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(-1) \ +long 
insert_double_##read_fn##_and_del_##read_root(void *ctx) \ +{ \ + long err, list_data; \ + \ + err = __insert_in_tree_and_list(&head, &root, &lock); \ + if (err) \ + return err; \ + \ + err = read_fn(&read_root, &lock, true); \ + if (err < 0) \ + return err; \ + else \ + list_data = err; \ + \ + err = read_fn(&read_root, &lock, true); \ + if (err < 0) \ + return err; \ + \ + return err + list_data; \ +} + +/* Insert into both tree and list, then try reading-and-removing from either twice + * + * The second read-and-remove should fail on read step since the node has + * already been removed + */ +INSERT_DOUBLE_READ_AND_DEL(__read_from_tree, root, "insert_double_del: 2x read-and-del from tree"); +INSERT_DOUBLE_READ_AND_DEL(__read_from_list, head, "insert_double_del: 2x read-and-del from list"); + +#define INSERT_STASH_READ(rem_tree, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(84) \ +long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \ +{ \ + long err, tree_data, map_data; \ + \ + err = __stash_map_insert_tree(0, 42, &root, &lock); \ + if (err) \ + return err; \ + \ + err = __read_from_tree(&root, &lock, rem_tree); \ + if (err < 0) \ + return err; \ + else \ + tree_data = err; \ + \ + err = __read_from_unstash(0); \ + if (err < 0) \ + return err; \ + else \ + map_data = err; \ + \ + return tree_data + map_data; \ +} + +/* Stash a refcounted node in map_val, insert same node into tree, then try + * reading data from tree then unstashed map_val, possibly removing from tree + * + * Removing from tree should have no effect on map_val kptr validity + */ +INSERT_STASH_READ(true, "insert_stash_read: remove from tree"); +INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree"); + +SEC("tc") +__success +long rbtree_refcounted_node_ref_escapes(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&alock); + bpf_rbtree_add(&aroot, &n->node, less_a); + m = bpf_refcount_acquire(n); + bpf_spin_unlock(&alock); + + m->key = 2; + bpf_obj_drop(m); + return 0; +} + +SEC("tc") +__success +long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + m = bpf_refcount_acquire(n); + m->key = 2; + + bpf_spin_lock(&alock); + bpf_rbtree_add(&aroot, &n->node, less_a); + bpf_spin_unlock(&alock); + + bpf_obj_drop(m); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c new file mode 100644 index 000000000000..efcb308f80ad --- /dev/null +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" +#include "bpf_misc.h" + +struct node_acquire { + long key; + long data; + struct bpf_rb_node node; + struct bpf_refcount refcount; +}; + +#define private(name) SEC(".data." 
#name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock; +private(A) struct bpf_rb_root groot __contains(node_acquire, node); + +static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_acquire *node_a; + struct node_acquire *node_b; + + node_a = container_of(a, struct node_acquire, node); + node_b = container_of(b, struct node_acquire, node); + + return node_a->key < node_b->key; +} + +SEC("?tc") +__failure __msg("Unreleased reference id=3 alloc_insn=21") +long rbtree_refcounted_node_ref_escapes(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + /* m becomes an owning ref but is never drop'd or added to a tree */ + m = bpf_refcount_acquire(n); + bpf_spin_unlock(&glock); + + m->key = 2; + return 0; +} + +SEC("?tc") +__failure __msg("Unreleased reference id=3 alloc_insn=9") +long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + /* m becomes an owning ref but is never drop'd or added to a tree */ + m = bpf_refcount_acquire(n); + m->key = 2; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_spin_unlock(&glock); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c index ea75a44cb7fc..351e79aef2fa 100644 --- a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c +++ b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c @@ -21,8 +21,6 @@ SEC("cgroup/sendmsg4") int sendmsg_v4_prog(struct bpf_sock_addr *ctx) { - int prio; - if (ctx->type != SOCK_DGRAM) return 0; diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c index e2468a6d01a5..0660f29dca95 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c @@ -1,6 +1,7 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_misc.h" struct { __uint(type, BPF_MAP_TYPE_SOCKMAP); @@ -40,6 +41,9 @@ int bpf_prog2(struct __sk_buff *skb) __u8 *d = data; __u8 sk, map; + __sink(lport); + __sink(rport); + if (data + 8 > data_end) return SK_DROP; diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c index c8d810010a94..fe1df4cd206e 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_sk.c +++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c @@ -32,6 +32,12 @@ int _getsockopt(struct bpf_sockopt *ctx) __u8 *optval_end = ctx->optval_end; __u8 *optval = ctx->optval; struct sockopt_sk *storage; + struct bpf_sock *sk; + + /* Bypass AF_NETLINK. */ + sk = ctx->sk; + if (sk && sk->family == AF_NETLINK) + return 1; /* Make sure bpf_get_netns_cookie is callable. */ @@ -131,6 +137,12 @@ int _setsockopt(struct bpf_sockopt *ctx) __u8 *optval_end = ctx->optval_end; __u8 *optval = ctx->optval; struct sockopt_sk *storage; + struct bpf_sock *sk; + + /* Bypass AF_NETLINK. */ + sk = ctx->sk; + if (sk && sk->family == AF_NETLINK) + return 1; /* Make sure bpf_get_netns_cookie is callable. 
*/ diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index e562be6356f3..e02cfd380746 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -391,7 +391,6 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, struct strobe_map_raw map; void *location; uint64_t len; - int i; descr->tag_len = 0; /* presume no tag is set */ descr->cnt = -1; /* presume no value is set */ diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c index 7fab39a3bb12..99c8d1d8a187 100644 --- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c @@ -2,6 +2,7 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include "bpf_legacy.h" +#include "bpf_misc.h" struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); @@ -20,6 +21,8 @@ int subprog_tail2(struct __sk_buff *skb) else bpf_tail_call_static(skb, &jmp_table, 1); + __sink(arr[sizeof(arr) - 1]); + return skb->len; } @@ -30,6 +33,8 @@ int subprog_tail(struct __sk_buff *skb) bpf_tail_call_static(skb, &jmp_table, 0); + __sink(arr[sizeof(arr) - 1]); + return skb->len * 2; } @@ -38,6 +43,8 @@ int classifier_0(struct __sk_buff *skb) { volatile char arr[128] = {}; + __sink(arr[sizeof(arr) - 1]); + return subprog_tail2(skb); } @@ -46,6 +53,8 @@ int classifier_1(struct __sk_buff *skb) { volatile char arr[128] = {}; + __sink(arr[sizeof(arr) - 1]); + return skb->len * 3; } @@ -54,6 +63,8 @@ int entry(struct __sk_buff *skb) { volatile char arr[128] = {}; + __sink(arr[sizeof(arr) - 1]); + return subprog_tail(skb); } diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c index 41ce83da78e8..4a9f63bea66c 100644 --- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include "bpf_misc.h" #define __unused __attribute__((unused)) @@ -36,6 +37,8 @@ int entry(struct __sk_buff *skb) /* Have data on stack which size is not a multiple of 8 */ volatile char arr[1] = {}; + __sink(arr[0]); + return subprog_tail(skb); } diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_common.h b/tools/testing/selftests/bpf/progs/task_kfunc_common.h index c0ffd171743e..41f2d44f49cb 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_common.h +++ b/tools/testing/selftests/bpf/progs/task_kfunc_common.h @@ -10,7 +10,7 @@ #include <bpf/bpf_tracing.h> struct __tasks_kfunc_map_value { - struct task_struct __kptr_ref * task; + struct task_struct __kptr * task; }; struct hash_map { @@ -21,9 +21,10 @@ struct hash_map { } __tasks_kfunc_map SEC(".maps"); struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; -struct task_struct *bpf_task_kptr_get(struct task_struct **pp) __ksym; void bpf_task_release(struct task_struct *p) __ksym; struct task_struct *bpf_task_from_pid(s32 pid) __ksym; +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; static inline struct __tasks_kfunc_map_value *tasks_kfunc_map_value_lookup(struct task_struct *p) { @@ -60,6 +61,9 @@ static inline int tasks_kfunc_map_insert(struct task_struct *p) } acquired = bpf_task_acquire(p); + if (!acquired) + return -ENOENT; + old = bpf_kptr_xchg(&v->task, acquired); if (old) { bpf_task_release(old); diff --git 
a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c index f19d54eda4f1..dcdea3127086 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c @@ -40,6 +40,9 @@ int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_f /* Can't invoke bpf_task_acquire() on an untrusted pointer. */ acquired = bpf_task_acquire(v->task); + if (!acquired) + return 0; + bpf_task_release(acquired); return 0; @@ -53,38 +56,49 @@ int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags) /* Can't invoke bpf_task_acquire() on a random frame pointer. */ acquired = bpf_task_acquire((struct task_struct *)&stack_task); + if (!acquired) + return 0; + bpf_task_release(acquired); return 0; } SEC("kretprobe/free_task") -__failure __msg("reg type unsupported for arg#0 function") +__failure __msg("calling kernel function bpf_task_acquire is not allowed") int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; + /* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */ acquired = bpf_task_acquire(task); - /* Can't release a bpf_task_acquire()'d task without a NULL check. */ + if (!acquired) + return 0; bpf_task_release(acquired); return 0; } -SEC("tp_btf/task_newtask") -__failure __msg("R1 must be referenced or trusted") -int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags) +SEC("kretprobe/free_task") +__failure __msg("calling kernel function bpf_task_acquire is not allowed") +int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe_rcu, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; - /* Can't invoke bpf_task_acquire() on a trusted pointer obtained from walking a struct. */ - acquired = bpf_task_acquire(task->group_leader); - bpf_task_release(acquired); + bpf_rcu_read_lock(); + if (!task) { + bpf_rcu_read_unlock(); + return 0; + } + /* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */ + acquired = bpf_task_acquire(task); + if (acquired) + bpf_task_release(acquired); + bpf_rcu_read_unlock(); return 0; } - SEC("tp_btf/task_newtask") __failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags) @@ -109,57 +123,7 @@ int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_ acquired = bpf_task_acquire(task); /* Acquired task is never released. */ - - return 0; -} - -SEC("tp_btf/task_newtask") -__failure __msg("arg#0 expected pointer to map value") -int BPF_PROG(task_kfunc_get_non_kptr_param, struct task_struct *task, u64 clone_flags) -{ - struct task_struct *kptr; - - /* Cannot use bpf_task_kptr_get() on a non-kptr, even on a valid task. */ - kptr = bpf_task_kptr_get(&task); - if (!kptr) - return 0; - - bpf_task_release(kptr); - - return 0; -} - -SEC("tp_btf/task_newtask") -__failure __msg("arg#0 expected pointer to map value") -int BPF_PROG(task_kfunc_get_non_kptr_acquired, struct task_struct *task, u64 clone_flags) -{ - struct task_struct *kptr, *acquired; - - acquired = bpf_task_acquire(task); - - /* Cannot use bpf_task_kptr_get() on a non-kptr, even if it was acquired. 
*/ - kptr = bpf_task_kptr_get(&acquired); - bpf_task_release(acquired); - if (!kptr) - return 0; - - bpf_task_release(kptr); - - return 0; -} - -SEC("tp_btf/task_newtask") -__failure __msg("arg#0 expected pointer to map value") -int BPF_PROG(task_kfunc_get_null, struct task_struct *task, u64 clone_flags) -{ - struct task_struct *kptr; - - /* Cannot use bpf_task_kptr_get() on a NULL pointer. */ - kptr = bpf_task_kptr_get(NULL); - if (!kptr) - return 0; - - bpf_task_release(kptr); + __sink(acquired); return 0; } @@ -185,27 +149,20 @@ int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_fla } SEC("tp_btf/task_newtask") -__failure __msg("Unreleased reference") -int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flags) +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int BPF_PROG(task_kfunc_acquire_release_no_null_check, struct task_struct *task, u64 clone_flags) { - struct task_struct *kptr; - struct __tasks_kfunc_map_value *v; - - v = insert_lookup_task(task); - if (!v) - return 0; - - kptr = bpf_task_kptr_get(&v->task); - if (!kptr) - return 0; + struct task_struct *acquired; - /* Kptr acquired above is never released. */ + acquired = bpf_task_acquire(task); + /* Can't invoke bpf_task_release() on an acquired task without a NULL check. */ + bpf_task_release(acquired); return 0; } SEC("tp_btf/task_newtask") -__failure __msg("arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags) { struct __tasks_kfunc_map_value *v; @@ -233,7 +190,7 @@ int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags) } SEC("tp_btf/task_newtask") -__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags) { struct __tasks_kfunc_map_value local, *v; @@ -255,12 +212,13 @@ int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags) return -ENOENT; acquired = bpf_task_acquire(task); + if (!acquired) + return -EEXIST; old = bpf_kptr_xchg(&v->task, acquired); /* old cannot be passed to bpf_task_release() without a NULL check. */ bpf_task_release(old); - bpf_task_release(old); return 0; } @@ -276,7 +234,7 @@ int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_ } SEC("tp_btf/task_newtask") -__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; @@ -297,6 +255,72 @@ int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task) /* the argument of lsm task_free hook is untrusted. 
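Acquiring it is therefore expected to be rejected at load time, which is what this failure test exercises.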
*/ acquired = bpf_task_acquire(task); + if (!acquired) + return 0; + bpf_task_release(acquired); return 0; } + +SEC("tp_btf/task_newtask") +__failure __msg("access beyond the end of member comm") +int BPF_PROG(task_access_comm1, struct task_struct *task, u64 clone_flags) +{ + bpf_strncmp(task->comm, 17, "foo"); + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("access beyond the end of member comm") +int BPF_PROG(task_access_comm2, struct task_struct *task, u64 clone_flags) +{ + bpf_strncmp(task->comm + 1, 16, "foo"); + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("write into memory") +int BPF_PROG(task_access_comm3, struct task_struct *task, u64 clone_flags) +{ + bpf_probe_read_kernel(task->comm, 16, task->comm); + return 0; +} + +SEC("fentry/__set_task_comm") +__failure __msg("R1 type=ptr_ expected") +int BPF_PROG(task_access_comm4, struct task_struct *task, const char *buf, bool exec) +{ + /* + * task->comm is a legacy ptr_to_btf_id. The verifier cannot guarantee + * its safety. Hence it cannot be accessed with normal load insns. + */ + bpf_strncmp(task->comm, 16, "foo"); + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("R1 must be referenced or trusted") +int BPF_PROG(task_kfunc_release_in_map, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *local; + struct __tasks_kfunc_map_value *v; + + if (tasks_kfunc_map_insert(task)) + return 0; + + v = tasks_kfunc_map_value_lookup(task); + if (!v) + return 0; + + bpf_rcu_read_lock(); + local = v->task; + if (!local) { + bpf_rcu_read_unlock(); + return 0; + } + /* Can't release a kptr that's still stored in a map. */ + bpf_task_release(local); + bpf_rcu_read_unlock(); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c index 9f359cfd29e7..b09371bba204 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c @@ -17,6 +17,10 @@ int err, pid; * TP_PROTO(struct task_struct *p, u64 clone_flags) */ +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak; +void invalid_kfunc(void) __ksym __weak; +void bpf_testmod_test_mod_kfunc(int i) __ksym __weak; + static bool is_test_kfunc_task(void) { int cur_pid = bpf_get_current_pid_tgid() >> 32; @@ -26,10 +30,27 @@ static bool is_test_kfunc_task(void) static int test_acquire_release(struct task_struct *task) { - struct task_struct *acquired; + struct task_struct *acquired = NULL; + + if (!bpf_ksym_exists(bpf_task_acquire)) { + err = 3; + return 0; + } + if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) { + err = 4; + return 0; + } + if (bpf_ksym_exists(invalid_kfunc)) { + /* the verifier's dead code elimination should remove this */ + err = 5; + asm volatile ("goto -1"); /* for (;;); */ + } acquired = bpf_task_acquire(task); - bpf_task_release(acquired); + if (acquired) + bpf_task_release(acquired); + else + err = 6; return 0; } @@ -101,7 +122,7 @@ int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags) } SEC("tp_btf/task_newtask") -int BPF_PROG(test_task_get_release, struct task_struct *task, u64 clone_flags) +int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags) { struct task_struct *kptr; struct __tasks_kfunc_map_value *v; @@ -122,18 +143,18 @@ int BPF_PROG(test_task_get_release, struct task_struct *task, u64 clone_flags) return 0; } - kptr = bpf_task_kptr_get(&v->task); - if (kptr) { - /* Until we resolve the issues 
with using task->rcu_users, we - * expect bpf_task_kptr_get() to return a NULL task. See the - * comment at the definition of bpf_task_acquire_not_zero() for - * more details. - */ - bpf_task_release(kptr); + bpf_rcu_read_lock(); + kptr = v->task; + if (!kptr) { err = 3; - return 0; + } else { + kptr = bpf_task_acquire(kptr); + if (!kptr) + err = 4; + else + bpf_task_release(kptr); } - + bpf_rcu_read_unlock(); return 0; } @@ -148,7 +169,10 @@ int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 cl current = bpf_get_current_task_btf(); acquired = bpf_task_acquire(current); - bpf_task_release(acquired); + if (acquired) + bpf_task_release(acquired); + else + err = 1; return 0; } @@ -171,8 +195,6 @@ static void lookup_compare_pid(const struct task_struct *p) SEC("tp_btf/task_newtask") int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags) { - struct task_struct *acquired; - if (!is_test_kfunc_task()) return 0; @@ -183,8 +205,6 @@ int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags) SEC("tp_btf/task_newtask") int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags) { - struct task_struct *current, *acquired; - if (!is_test_kfunc_task()) return 0; @@ -208,11 +228,13 @@ static int is_pid_lookup_valid(s32 pid) SEC("tp_btf/task_newtask") int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags) { - struct task_struct *acquired; - if (!is_test_kfunc_task()) return 0; + bpf_strncmp(task->comm, 12, "foo"); + bpf_strncmp(task->comm, 16, "foo"); + bpf_strncmp(&task->comm[8], 4, "foo"); + if (is_pid_lookup_valid(-1)) { err = 1; return 0; @@ -225,3 +247,19 @@ int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_fla return 0; } + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + + /* task->group_leader is listed as a trusted, non-NULL field of task struct. 
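Walking it from the trusted tp_btf argument therefore yields another trusted pointer, so bpf_task_acquire() accepts it without an enclosing bpf_rcu_read_lock() section.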
*/ + acquired = bpf_task_acquire(task->group_leader); + if (acquired) + bpf_task_release(acquired); + else + err = 1; + + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_update.c b/tools/testing/selftests/bpf/progs/tcp_ca_update.c new file mode 100644 index 000000000000..b93a0ed33057 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tcp_ca_update.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" + +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +int ca1_cnt = 0; +int ca2_cnt = 0; + +static inline struct tcp_sock *tcp_sk(const struct sock *sk) +{ + return (struct tcp_sock *)sk; +} + +SEC("struct_ops/ca_update_1_init") +void BPF_PROG(ca_update_1_init, struct sock *sk) +{ + ca1_cnt++; +} + +SEC("struct_ops/ca_update_2_init") +void BPF_PROG(ca_update_2_init, struct sock *sk) +{ + ca2_cnt++; +} + +SEC("struct_ops/ca_update_cong_control") +void BPF_PROG(ca_update_cong_control, struct sock *sk, + const struct rate_sample *rs) +{ +} + +SEC("struct_ops/ca_update_ssthresh") +__u32 BPF_PROG(ca_update_ssthresh, struct sock *sk) +{ + return tcp_sk(sk)->snd_ssthresh; +} + +SEC("struct_ops/ca_update_undo_cwnd") +__u32 BPF_PROG(ca_update_undo_cwnd, struct sock *sk) +{ + return tcp_sk(sk)->snd_cwnd; +} + +SEC(".struct_ops.link") +struct tcp_congestion_ops ca_update_1 = { + .init = (void *)ca_update_1_init, + .cong_control = (void *)ca_update_cong_control, + .ssthresh = (void *)ca_update_ssthresh, + .undo_cwnd = (void *)ca_update_undo_cwnd, + .name = "tcp_ca_update", +}; + +SEC(".struct_ops.link") +struct tcp_congestion_ops ca_update_2 = { + .init = (void *)ca_update_2_init, + .cong_control = (void *)ca_update_cong_control, + .ssthresh = (void *)ca_update_ssthresh, + .undo_cwnd = (void *)ca_update_undo_cwnd, + .name = "tcp_ca_update", +}; + +SEC(".struct_ops.link") +struct tcp_congestion_ops ca_wrong = { + .cong_control = (void *)ca_update_cong_control, + .ssthresh = (void *)ca_update_ssthresh, + .undo_cwnd = (void *)ca_update_undo_cwnd, + .name = "tcp_ca_wrong", +}; + +SEC(".struct_ops") +struct tcp_congestion_ops ca_no_link = { + .cong_control = (void *)ca_update_cong_control, + .ssthresh = (void *)ca_update_ssthresh, + .undo_cwnd = (void *)ca_update_undo_cwnd, + .name = "tcp_ca_no_link", +}; diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c index 43447704cf0e..0724a79cec78 100644 --- a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c +++ b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c @@ -16,6 +16,16 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk) return (struct tcp_sock *)sk; } +static inline unsigned int tcp_left_out(const struct tcp_sock *tp) +{ + return tp->sacked_out + tp->lost_out; +} + +static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp) +{ + return tp->packets_out - tcp_left_out(tp) + tp->retrans_out; +} + SEC("struct_ops/write_sk_pacing_init") void BPF_PROG(write_sk_pacing_init, struct sock *sk) { @@ -31,11 +41,12 @@ SEC("struct_ops/write_sk_pacing_cong_control") void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk, const struct rate_sample *rs) { - const struct tcp_sock *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); unsigned long rate = ((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) / (tp->srtt_us ?: 1U << 3); sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate); + tp->app_limited = (tp->delivered + 
tcp_packets_in_flight(tp)) ?: 1; } SEC("struct_ops/write_sk_pacing_ssthresh") diff --git a/tools/testing/selftests/bpf/progs/test_access_variable_array.c b/tools/testing/selftests/bpf/progs/test_access_variable_array.c new file mode 100644 index 000000000000..808c49b79889 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_access_variable_array.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Bytedance */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +unsigned long span = 0; + +SEC("fentry/load_balance") +int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq, + struct sched_domain *sd) +{ + span = sd->span[0]; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c b/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c new file mode 100644 index 000000000000..f548b7446218 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Facebook + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" + +int kprobe_res = 0; + +/** + * This program will be manually made sleepable on the userspace side + * and should thus be unattachable. + */ +SEC("kprobe/" SYS_PREFIX "sys_nanosleep") +int handle_kprobe_sleepable(struct pt_regs *ctx) +{ + kprobe_res = 1; + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c index 3b5dc34d23e9..68466a6ad18c 100644 --- a/tools/testing/selftests/bpf/progs/test_attach_probe.c +++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c @@ -7,12 +7,8 @@ #include <bpf/bpf_core_read.h> #include "bpf_misc.h" -int kprobe_res = 0; int kprobe2_res = 0; -int kretprobe_res = 0; int kretprobe2_res = 0; -int uprobe_res = 0; -int uretprobe_res = 0; int uprobe_byname_res = 0; int uretprobe_byname_res = 0; int uprobe_byname2_res = 0; @@ -23,13 +19,6 @@ int uretprobe_byname3_sleepable_res = 0; int uretprobe_byname3_res = 0; void *user_ptr = 0; -SEC("kprobe") -int handle_kprobe(struct pt_regs *ctx) -{ - kprobe_res = 1; - return 0; -} - SEC("ksyscall/nanosleep") int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __kernel_timespec *rem) { @@ -37,24 +26,6 @@ int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __ker return 0; } -/** - * This program will be manually made sleepable on the userspace side - * and should thus be unattachable. 
- */ -SEC("kprobe/" SYS_PREFIX "sys_nanosleep") -int handle_kprobe_sleepable(struct pt_regs *ctx) -{ - kprobe_res = 2; - return 0; -} - -SEC("kretprobe") -int handle_kretprobe(struct pt_regs *ctx) -{ - kretprobe_res = 2; - return 0; -} - SEC("kretsyscall/nanosleep") int BPF_KRETPROBE(handle_kretprobe_auto, int ret) { @@ -63,16 +34,14 @@ int BPF_KRETPROBE(handle_kretprobe_auto, int ret) } SEC("uprobe") -int handle_uprobe(struct pt_regs *ctx) +int handle_uprobe_ref_ctr(struct pt_regs *ctx) { - uprobe_res = 3; return 0; } SEC("uretprobe") -int handle_uretprobe(struct pt_regs *ctx) +int handle_uretprobe_ref_ctr(struct pt_regs *ctx) { - uretprobe_res = 4; return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c b/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c new file mode 100644 index 000000000000..7f08bce94596 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Facebook + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" + +int kprobe_res = 0; +int kretprobe_res = 0; +int uprobe_res = 0; +int uretprobe_res = 0; +int uprobe_byname_res = 0; +void *user_ptr = 0; + +SEC("kprobe") +int handle_kprobe(struct pt_regs *ctx) +{ + kprobe_res = 1; + return 0; +} + +SEC("kretprobe") +int handle_kretprobe(struct pt_regs *ctx) +{ + kretprobe_res = 2; + return 0; +} + +SEC("uprobe") +int handle_uprobe(struct pt_regs *ctx) +{ + uprobe_res = 3; + return 0; +} + +SEC("uretprobe") +int handle_uretprobe(struct pt_regs *ctx) +{ + uretprobe_res = 4; + return 0; +} + +SEC("uprobe") +int handle_uprobe_byname(struct pt_regs *ctx) +{ + uprobe_byname_res = 5; + return 0; +} + + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c index 9fc603c9d673..77ad8adf68da 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c @@ -75,7 +75,6 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 }; struct bpf_sock_tuple bpf_tuple; struct nf_conn *ct; - int err; __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4)); diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c new file mode 100644 index 000000000000..f41c81212ee9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c @@ -0,0 +1,979 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// Copyright (c) 2019, 2020 Cloudflare + +#include <stdbool.h> +#include <stddef.h> +#include <stdint.h> +#include <string.h> + +#include <linux/bpf.h> +#include <linux/icmp.h> +#include <linux/icmpv6.h> +#include <linux/if_ether.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/pkt_cls.h> +#include <linux/tcp.h> +#include <linux/udp.h> + +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +#include "test_cls_redirect.h" +#include "bpf_kfuncs.h" + +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER))) + +#define IP_OFFSET_MASK (0x1FFF) +#define IP_MF (0x2000) + +char _license[] SEC("license") = "Dual BSD/GPL"; + +/** + * Destination port and IP used for UDP encapsulation. 
+ */ +volatile const __be16 ENCAPSULATION_PORT; +volatile const __be32 ENCAPSULATION_IP; + +typedef struct { + uint64_t processed_packets_total; + uint64_t l3_protocol_packets_total_ipv4; + uint64_t l3_protocol_packets_total_ipv6; + uint64_t l4_protocol_packets_total_tcp; + uint64_t l4_protocol_packets_total_udp; + uint64_t accepted_packets_total_syn; + uint64_t accepted_packets_total_syn_cookies; + uint64_t accepted_packets_total_last_hop; + uint64_t accepted_packets_total_icmp_echo_request; + uint64_t accepted_packets_total_established; + uint64_t forwarded_packets_total_gue; + uint64_t forwarded_packets_total_gre; + + uint64_t errors_total_unknown_l3_proto; + uint64_t errors_total_unknown_l4_proto; + uint64_t errors_total_malformed_ip; + uint64_t errors_total_fragmented_ip; + uint64_t errors_total_malformed_icmp; + uint64_t errors_total_unwanted_icmp; + uint64_t errors_total_malformed_icmp_pkt_too_big; + uint64_t errors_total_malformed_tcp; + uint64_t errors_total_malformed_udp; + uint64_t errors_total_icmp_echo_replies; + uint64_t errors_total_malformed_encapsulation; + uint64_t errors_total_encap_adjust_failed; + uint64_t errors_total_encap_buffer_too_small; + uint64_t errors_total_redirect_loop; + uint64_t errors_total_encap_mtu_violate; +} metrics_t; + +typedef enum { + INVALID = 0, + UNKNOWN, + ECHO_REQUEST, + SYN, + SYN_COOKIE, + ESTABLISHED, +} verdict_t; + +typedef struct { + uint16_t src, dst; +} flow_ports_t; + +_Static_assert( + sizeof(flow_ports_t) != + offsetofend(struct bpf_sock_tuple, ipv4.dport) - + offsetof(struct bpf_sock_tuple, ipv4.sport) - 1, + "flow_ports_t must match sport and dport in struct bpf_sock_tuple"); +_Static_assert( + sizeof(flow_ports_t) != + offsetofend(struct bpf_sock_tuple, ipv6.dport) - + offsetof(struct bpf_sock_tuple, ipv6.sport) - 1, + "flow_ports_t must match sport and dport in struct bpf_sock_tuple"); + +struct iphdr_info { + void *hdr; + __u64 len; +}; + +typedef int ret_t; + +/* This is a bit of a hack. We need a return value which allows us to + * indicate that the regular flow of the program should continue, + * while allowing functions to use XDP_PASS and XDP_DROP, etc. + */ +static const ret_t CONTINUE_PROCESSING = -1; + +/* Convenience macro to call functions which return ret_t. + */ +#define MAYBE_RETURN(x) \ + do { \ + ret_t __ret = x; \ + if (__ret != CONTINUE_PROCESSING) \ + return __ret; \ + } while (0) + +static bool ipv4_is_fragment(const struct iphdr *ip) +{ + uint16_t frag_off = ip->frag_off & bpf_htons(IP_OFFSET_MASK); + return (ip->frag_off & bpf_htons(IP_MF)) != 0 || frag_off > 0; +} + +static int pkt_parse_ipv4(struct bpf_dynptr *dynptr, __u64 *offset, struct iphdr *iphdr) +{ + if (bpf_dynptr_read(iphdr, sizeof(*iphdr), dynptr, *offset, 0)) + return -1; + + *offset += sizeof(*iphdr); + + if (iphdr->ihl < 5) + return -1; + + /* skip ipv4 options */ + *offset += (iphdr->ihl - 5) * 4; + + return 0; +} + +/* Parse the L4 ports from a packet, assuming a layout like TCP or UDP. */ +static bool pkt_parse_icmp_l4_ports(struct bpf_dynptr *dynptr, __u64 *offset, flow_ports_t *ports) +{ + if (bpf_dynptr_read(ports, sizeof(*ports), dynptr, *offset, 0)) + return false; + + *offset += sizeof(*ports); + + /* Ports in the L4 headers are reversed, since we are parsing an ICMP + * payload which is going towards the eyeball. 
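+ *
+ * Worked example with an assumed flow: the ICMP error quotes the header
+ * of the egress packet, so a quoted src=443, dst=50000 comes out of the
+ * swap as src=50000, dst=443, i.e. the tuple of the original ingress
+ * direction.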
+ */ + uint16_t dst = ports->src; + ports->src = ports->dst; + ports->dst = dst; + return true; +} + +static uint16_t pkt_checksum_fold(uint32_t csum) +{ + /* The highest reasonable value for an IPv4 header + * checksum requires two folds, so we just do that always. + */ + csum = (csum & 0xffff) + (csum >> 16); + csum = (csum & 0xffff) + (csum >> 16); + return (uint16_t)~csum; +} + +static void pkt_ipv4_checksum(struct iphdr *iph) +{ + iph->check = 0; + + /* An IP header without options is 20 bytes. Two of those + * are the checksum, which we always set to zero. Hence, + * the maximum accumulated value is 18 / 2 * 0xffff = 0x8fff7, + * which fits in 32 bits. + */ + _Static_assert(sizeof(struct iphdr) == 20, "iphdr must be 20 bytes"); + uint32_t acc = 0; + uint16_t *ipw = (uint16_t *)iph; + + for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++) + acc += ipw[i]; + + iph->check = pkt_checksum_fold(acc); +} + +static bool pkt_skip_ipv6_extension_headers(struct bpf_dynptr *dynptr, __u64 *offset, + const struct ipv6hdr *ipv6, uint8_t *upper_proto, + bool *is_fragment) +{ + /* We understand five extension headers. + * https://tools.ietf.org/html/rfc8200#section-4.1 states that all + * headers should occur once, except Destination Options, which may + * occur twice. Hence we give up after 6 headers. + */ + struct { + uint8_t next; + uint8_t len; + } exthdr = { + .next = ipv6->nexthdr, + }; + *is_fragment = false; + + for (int i = 0; i < 6; i++) { + switch (exthdr.next) { + case IPPROTO_FRAGMENT: + *is_fragment = true; + /* NB: We don't check that hdrlen == 0 as per spec. */ + /* fallthrough; */ + + case IPPROTO_HOPOPTS: + case IPPROTO_ROUTING: + case IPPROTO_DSTOPTS: + case IPPROTO_MH: + if (bpf_dynptr_read(&exthdr, sizeof(exthdr), dynptr, *offset, 0)) + return false; + + /* hdrlen is in 8-octet units, and excludes the first 8 octets. */ + *offset += (exthdr.len + 1) * 8; + + /* Decode next header */ + break; + + default: + /* The next header is not one of the known extension + * headers; treat it as the upper layer header. + * + * This handles IPPROTO_NONE. + * + * Encapsulating Security Payload (50) and Authentication + * Header (51) also end up here (and will trigger an + * unknown proto error later). They have a custom header + * format and seem too esoteric to care about. + */ + *upper_proto = exthdr.next; + return true; + } + } + + /* We never found an upper layer header. 
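+ * Reaching this point means the chain was longer than the six + * extension headers the loop is willing to walk (a failed read bails + * out earlier), so the packet is reported as unparseable rather than + * walked any further.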
*/ + return false; +} + +static int pkt_parse_ipv6(struct bpf_dynptr *dynptr, __u64 *offset, struct ipv6hdr *ipv6, + uint8_t *proto, bool *is_fragment) +{ + if (bpf_dynptr_read(ipv6, sizeof(*ipv6), dynptr, *offset, 0)) + return -1; + + *offset += sizeof(*ipv6); + + if (!pkt_skip_ipv6_extension_headers(dynptr, offset, ipv6, proto, is_fragment)) + return -1; + + return 0; +} + +/* Global metrics, per CPU + */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 1); + __type(key, unsigned int); + __type(value, metrics_t); +} metrics_map SEC(".maps"); + +static metrics_t *get_global_metrics(void) +{ + uint64_t key = 0; + return bpf_map_lookup_elem(&metrics_map, &key); +} + +static ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap) +{ + const int payload_off = + sizeof(*encap) + + sizeof(struct in_addr) * encap->unigue.hop_count; + int32_t encap_overhead = payload_off - sizeof(struct ethhdr); + + /* Changing the ethertype if the encapsulated packet is ipv6 */ + if (encap->gue.proto_ctype == IPPROTO_IPV6) + encap->eth.h_proto = bpf_htons(ETH_P_IPV6); + + if (bpf_skb_adjust_room(skb, -encap_overhead, BPF_ADJ_ROOM_MAC, + BPF_F_ADJ_ROOM_FIXED_GSO | + BPF_F_ADJ_ROOM_NO_CSUM_RESET) || + bpf_csum_level(skb, BPF_CSUM_LEVEL_DEC)) + return TC_ACT_SHOT; + + return bpf_redirect(skb->ifindex, BPF_F_INGRESS); +} + +static ret_t forward_with_gre(struct __sk_buff *skb, struct bpf_dynptr *dynptr, + encap_headers_t *encap, struct in_addr *next_hop, + metrics_t *metrics) +{ + const int payload_off = + sizeof(*encap) + + sizeof(struct in_addr) * encap->unigue.hop_count; + int32_t encap_overhead = + payload_off - sizeof(struct ethhdr) - sizeof(struct iphdr); + int32_t delta = sizeof(struct gre_base_hdr) - encap_overhead; + __u8 encap_buffer[sizeof(encap_gre_t)] = {}; + uint16_t proto = ETH_P_IP; + uint32_t mtu_len = 0; + encap_gre_t *encap_gre; + + metrics->forwarded_packets_total_gre++; + + /* Loop protection: the inner packet's TTL is decremented as a safeguard + * against any forwarding loop. As the only interesting field is the TTL + * hop limit for IPv6, it is easier to use bpf_skb_load_bytes/bpf_skb_store_bytes + * as they handle the split packets if needed (no need for the data to be + * in the linear section). + */ + if (encap->gue.proto_ctype == IPPROTO_IPV6) { + proto = ETH_P_IPV6; + uint8_t ttl; + int rc; + + rc = bpf_skb_load_bytes( + skb, payload_off + offsetof(struct ipv6hdr, hop_limit), + &ttl, 1); + if (rc != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (ttl == 0) { + metrics->errors_total_redirect_loop++; + return TC_ACT_SHOT; + } + + ttl--; + rc = bpf_skb_store_bytes( + skb, payload_off + offsetof(struct ipv6hdr, hop_limit), + &ttl, 1, 0); + if (rc != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + } else { + uint8_t ttl; + int rc; + + rc = bpf_skb_load_bytes( + skb, payload_off + offsetof(struct iphdr, ttl), &ttl, + 1); + if (rc != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (ttl == 0) { + metrics->errors_total_redirect_loop++; + return TC_ACT_SHOT; + } + + /* IPv4 also has a checksum to patch. While the TTL is only one byte, + * this function only works for 2 and 4 bytes arguments (the result is + * the same). 
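+ * + * Editor's note: the helper rejects single-byte widths, and passing + * the 8-bit TTL as a 2-byte from/to pair keeps the neighbouring byte + * identical on both sides, so the ones'-complement delta folded into + * the checksum works out the same; the non-dynptr test_cls_redirect.c + * relies on the identical trick.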
+ */ + rc = bpf_l3_csum_replace( + skb, payload_off + offsetof(struct iphdr, check), ttl, + ttl - 1, 2); + if (rc != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + ttl--; + rc = bpf_skb_store_bytes( + skb, payload_off + offsetof(struct iphdr, ttl), &ttl, 1, + 0); + if (rc != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + } + + if (bpf_check_mtu(skb, skb->ifindex, &mtu_len, delta, 0)) { + metrics->errors_total_encap_mtu_violate++; + return TC_ACT_SHOT; + } + + if (bpf_skb_adjust_room(skb, delta, BPF_ADJ_ROOM_NET, + BPF_F_ADJ_ROOM_FIXED_GSO | + BPF_F_ADJ_ROOM_NO_CSUM_RESET) || + bpf_csum_level(skb, BPF_CSUM_LEVEL_INC)) { + metrics->errors_total_encap_adjust_failed++; + return TC_ACT_SHOT; + } + + if (bpf_skb_pull_data(skb, sizeof(encap_gre_t))) { + metrics->errors_total_encap_buffer_too_small++; + return TC_ACT_SHOT; + } + + encap_gre = bpf_dynptr_slice_rdwr(dynptr, 0, encap_buffer, sizeof(encap_buffer)); + if (!encap_gre) { + metrics->errors_total_encap_buffer_too_small++; + return TC_ACT_SHOT; + } + + encap_gre->ip.protocol = IPPROTO_GRE; + encap_gre->ip.daddr = next_hop->s_addr; + encap_gre->ip.saddr = ENCAPSULATION_IP; + encap_gre->ip.tot_len = + bpf_htons(bpf_ntohs(encap_gre->ip.tot_len) + delta); + encap_gre->gre.flags = 0; + encap_gre->gre.protocol = bpf_htons(proto); + pkt_ipv4_checksum((void *)&encap_gre->ip); + + if (encap_gre == encap_buffer) + bpf_dynptr_write(dynptr, 0, encap_buffer, sizeof(encap_buffer), 0); + + return bpf_redirect(skb->ifindex, 0); +} + +static ret_t forward_to_next_hop(struct __sk_buff *skb, struct bpf_dynptr *dynptr, + encap_headers_t *encap, struct in_addr *next_hop, + metrics_t *metrics) +{ + /* swap L2 addresses */ + /* This assumes that packets are received from a router. + * So just swapping the MAC addresses here will make the packet go back to + * the router, which will send it to the appropriate machine. + */ + unsigned char temp[ETH_ALEN]; + memcpy(temp, encap->eth.h_dest, sizeof(temp)); + memcpy(encap->eth.h_dest, encap->eth.h_source, + sizeof(encap->eth.h_dest)); + memcpy(encap->eth.h_source, temp, sizeof(encap->eth.h_source)); + + if (encap->unigue.next_hop == encap->unigue.hop_count - 1 && + encap->unigue.last_hop_gre) { + return forward_with_gre(skb, dynptr, encap, next_hop, metrics); + } + + metrics->forwarded_packets_total_gue++; + uint32_t old_saddr = encap->ip.saddr; + encap->ip.saddr = encap->ip.daddr; + encap->ip.daddr = next_hop->s_addr; + if (encap->unigue.next_hop < encap->unigue.hop_count) { + encap->unigue.next_hop++; + } + + /* Remove ip->saddr, add next_hop->s_addr */ + const uint64_t off = offsetof(typeof(*encap), ip.check); + int ret = bpf_l3_csum_replace(skb, off, old_saddr, next_hop->s_addr, 4); + if (ret < 0) { + return TC_ACT_SHOT; + } + + return bpf_redirect(skb->ifindex, 0); +} + +static ret_t skip_next_hops(__u64 *offset, int n) +{ + switch (n) { + case 1: + *offset += sizeof(struct in_addr); + case 0: + return CONTINUE_PROCESSING; + + default: + return TC_ACT_SHOT; + } +} + +/* Get the next hop from the GLB header. + * + * Sets next_hop->s_addr to 0 if there are no more hops left. + * pkt is positioned just after the variable length GLB header + * iff the call is successful. + */ +static ret_t get_next_hop(struct bpf_dynptr *dynptr, __u64 *offset, encap_headers_t *encap, + struct in_addr *next_hop) +{ + if (encap->unigue.next_hop > encap->unigue.hop_count) + return TC_ACT_SHOT; + + /* Skip "used" next hops. 
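+ * unigue.next_hop indexes the hop this instance must process, so the + * earlier entries only advance the read offset. Note that + * skip_next_hops() understands n in {0, 1} and returns TC_ACT_SHOT + * for anything else, which implicitly bounds the hop lists this test + * can route.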
*/ + MAYBE_RETURN(skip_next_hops(offset, encap->unigue.next_hop)); + + if (encap->unigue.next_hop == encap->unigue.hop_count) { + /* No more next hops, we are at the end of the GLB header. */ + next_hop->s_addr = 0; + return CONTINUE_PROCESSING; + } + + if (bpf_dynptr_read(next_hop, sizeof(*next_hop), dynptr, *offset, 0)) + return TC_ACT_SHOT; + + *offset += sizeof(*next_hop); + + /* Skip the remaining next hops (may be zero). */ + return skip_next_hops(offset, encap->unigue.hop_count - encap->unigue.next_hop - 1); +} + +/* Fill a bpf_sock_tuple to be used with the socket lookup functions. + * This is a kludge that lets us work around verifier limitations: + * + * fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321) + * + * clang will substitute a constant for sizeof, which allows the verifier + * to track its value. Based on this, it can figure out the constant + * return value, and calling code works while still being "generic" to + * IPv4 and IPv6. + */ +static uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph, + uint64_t iphlen, uint16_t sport, uint16_t dport) +{ + switch (iphlen) { + case sizeof(struct iphdr): { + struct iphdr *ipv4 = (struct iphdr *)iph; + tuple->ipv4.daddr = ipv4->daddr; + tuple->ipv4.saddr = ipv4->saddr; + tuple->ipv4.sport = sport; + tuple->ipv4.dport = dport; + return sizeof(tuple->ipv4); + } + + case sizeof(struct ipv6hdr): { + struct ipv6hdr *ipv6 = (struct ipv6hdr *)iph; + memcpy(&tuple->ipv6.daddr, &ipv6->daddr, + sizeof(tuple->ipv6.daddr)); + memcpy(&tuple->ipv6.saddr, &ipv6->saddr, + sizeof(tuple->ipv6.saddr)); + tuple->ipv6.sport = sport; + tuple->ipv6.dport = dport; + return sizeof(tuple->ipv6); + } + + default: + return 0; + } +} + +static verdict_t classify_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, + uint64_t tuplen, void *iph, struct tcphdr *tcp) +{ + struct bpf_sock *sk = + bpf_skc_lookup_tcp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0); + + if (sk == NULL) + return UNKNOWN; + + if (sk->state != BPF_TCP_LISTEN) { + bpf_sk_release(sk); + return ESTABLISHED; + } + + if (iph != NULL && tcp != NULL) { + /* Kludge: we've run out of arguments, but need the length of the ip header. 
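+ * Instead of threading another parameter through, recover it from + * tuplen: an IPv6-sized tuple implies an IPv6 header, anything else + * is treated as IPv4.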
*/ + uint64_t iphlen = sizeof(struct iphdr); + + if (tuplen == sizeof(tuple->ipv6)) + iphlen = sizeof(struct ipv6hdr); + + if (bpf_tcp_check_syncookie(sk, iph, iphlen, tcp, + sizeof(*tcp)) == 0) { + bpf_sk_release(sk); + return SYN_COOKIE; + } + } + + bpf_sk_release(sk); + return UNKNOWN; +} + +static verdict_t classify_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, uint64_t tuplen) +{ + struct bpf_sock *sk = + bpf_sk_lookup_udp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0); + + if (sk == NULL) + return UNKNOWN; + + if (sk->state == BPF_TCP_ESTABLISHED) { + bpf_sk_release(sk); + return ESTABLISHED; + } + + bpf_sk_release(sk); + return UNKNOWN; +} + +static verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto, struct bpf_sock_tuple *tuple, + uint64_t tuplen, metrics_t *metrics) +{ + switch (proto) { + case IPPROTO_TCP: + return classify_tcp(skb, tuple, tuplen, NULL, NULL); + + case IPPROTO_UDP: + return classify_udp(skb, tuple, tuplen); + + default: + metrics->errors_total_malformed_icmp++; + return INVALID; + } +} + +static verdict_t process_icmpv4(struct __sk_buff *skb, struct bpf_dynptr *dynptr, __u64 *offset, + metrics_t *metrics) +{ + struct icmphdr icmp; + struct iphdr ipv4; + + if (bpf_dynptr_read(&icmp, sizeof(icmp), dynptr, *offset, 0)) { + metrics->errors_total_malformed_icmp++; + return INVALID; + } + + *offset += sizeof(icmp); + + /* We should never receive encapsulated echo replies. */ + if (icmp.type == ICMP_ECHOREPLY) { + metrics->errors_total_icmp_echo_replies++; + return INVALID; + } + + if (icmp.type == ICMP_ECHO) + return ECHO_REQUEST; + + if (icmp.type != ICMP_DEST_UNREACH || icmp.code != ICMP_FRAG_NEEDED) { + metrics->errors_total_unwanted_icmp++; + return INVALID; + } + + if (pkt_parse_ipv4(dynptr, offset, &ipv4)) { + metrics->errors_total_malformed_icmp_pkt_too_big++; + return INVALID; + } + + /* The source address in the outer IP header is from the entity that + * originated the ICMP message. Use the original IP header to restore + * the correct flow tuple. + */ + struct bpf_sock_tuple tuple; + tuple.ipv4.saddr = ipv4.daddr; + tuple.ipv4.daddr = ipv4.saddr; + + if (!pkt_parse_icmp_l4_ports(dynptr, offset, (flow_ports_t *)&tuple.ipv4.sport)) { + metrics->errors_total_malformed_icmp_pkt_too_big++; + return INVALID; + } + + return classify_icmp(skb, ipv4.protocol, &tuple, + sizeof(tuple.ipv4), metrics); +} + +static verdict_t process_icmpv6(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb, + metrics_t *metrics) +{ + struct bpf_sock_tuple tuple; + struct ipv6hdr ipv6; + struct icmp6hdr icmp6; + bool is_fragment; + uint8_t l4_proto; + + if (bpf_dynptr_read(&icmp6, sizeof(icmp6), dynptr, *offset, 0)) { + metrics->errors_total_malformed_icmp++; + return INVALID; + } + + /* We should never receive encapsulated echo replies. */ + if (icmp6.icmp6_type == ICMPV6_ECHO_REPLY) { + metrics->errors_total_icmp_echo_replies++; + return INVALID; + } + + if (icmp6.icmp6_type == ICMPV6_ECHO_REQUEST) { + return ECHO_REQUEST; + } + + if (icmp6.icmp6_type != ICMPV6_PKT_TOOBIG) { + metrics->errors_total_unwanted_icmp++; + return INVALID; + } + + if (pkt_parse_ipv6(dynptr, offset, &ipv6, &l4_proto, &is_fragment)) { + metrics->errors_total_malformed_icmp_pkt_too_big++; + return INVALID; + } + + if (is_fragment) { + metrics->errors_total_fragmented_ip++; + return INVALID; + } + + /* Swap source and dest addresses. 
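+ * Same eyeball-direction reasoning as the IPv4 path above: the ICMPv6 + * error quotes a packet we sent, so the embedded addresses are + * mirrored relative to the ingress flow being looked up.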
*/ + memcpy(&tuple.ipv6.saddr, &ipv6.daddr, sizeof(tuple.ipv6.saddr)); + memcpy(&tuple.ipv6.daddr, &ipv6.saddr, sizeof(tuple.ipv6.daddr)); + + if (!pkt_parse_icmp_l4_ports(dynptr, offset, (flow_ports_t *)&tuple.ipv6.sport)) { + metrics->errors_total_malformed_icmp_pkt_too_big++; + return INVALID; + } + + return classify_icmp(skb, l4_proto, &tuple, sizeof(tuple.ipv6), + metrics); +} + +static verdict_t process_tcp(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb, + struct iphdr_info *info, metrics_t *metrics) +{ + struct bpf_sock_tuple tuple; + struct tcphdr tcp; + uint64_t tuplen; + + metrics->l4_protocol_packets_total_tcp++; + + if (bpf_dynptr_read(&tcp, sizeof(tcp), dynptr, *offset, 0)) { + metrics->errors_total_malformed_tcp++; + return INVALID; + } + + *offset += sizeof(tcp); + + if (tcp.syn) + return SYN; + + tuplen = fill_tuple(&tuple, info->hdr, info->len, tcp.source, tcp.dest); + return classify_tcp(skb, &tuple, tuplen, info->hdr, &tcp); +} + +static verdict_t process_udp(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb, + struct iphdr_info *info, metrics_t *metrics) +{ + struct bpf_sock_tuple tuple; + struct udphdr udph; + uint64_t tuplen; + + metrics->l4_protocol_packets_total_udp++; + + if (bpf_dynptr_read(&udph, sizeof(udph), dynptr, *offset, 0)) { + metrics->errors_total_malformed_udp++; + return INVALID; + } + *offset += sizeof(udph); + + tuplen = fill_tuple(&tuple, info->hdr, info->len, udph.source, udph.dest); + return classify_udp(skb, &tuple, tuplen); +} + +static verdict_t process_ipv4(struct __sk_buff *skb, struct bpf_dynptr *dynptr, + __u64 *offset, metrics_t *metrics) +{ + struct iphdr ipv4; + struct iphdr_info info = { + .hdr = &ipv4, + .len = sizeof(ipv4), + }; + + metrics->l3_protocol_packets_total_ipv4++; + + if (pkt_parse_ipv4(dynptr, offset, &ipv4)) { + metrics->errors_total_malformed_ip++; + return INVALID; + } + + if (ipv4.version != 4) { + metrics->errors_total_malformed_ip++; + return INVALID; + } + + if (ipv4_is_fragment(&ipv4)) { + metrics->errors_total_fragmented_ip++; + return INVALID; + } + + switch (ipv4.protocol) { + case IPPROTO_ICMP: + return process_icmpv4(skb, dynptr, offset, metrics); + + case IPPROTO_TCP: + return process_tcp(dynptr, offset, skb, &info, metrics); + + case IPPROTO_UDP: + return process_udp(dynptr, offset, skb, &info, metrics); + + default: + metrics->errors_total_unknown_l4_proto++; + return INVALID; + } +} + +static verdict_t process_ipv6(struct __sk_buff *skb, struct bpf_dynptr *dynptr, + __u64 *offset, metrics_t *metrics) +{ + struct ipv6hdr ipv6; + struct iphdr_info info = { + .hdr = &ipv6, + .len = sizeof(ipv6), + }; + uint8_t l4_proto; + bool is_fragment; + + metrics->l3_protocol_packets_total_ipv6++; + + if (pkt_parse_ipv6(dynptr, offset, &ipv6, &l4_proto, &is_fragment)) { + metrics->errors_total_malformed_ip++; + return INVALID; + } + + if (ipv6.version != 6) { + metrics->errors_total_malformed_ip++; + return INVALID; + } + + if (is_fragment) { + metrics->errors_total_fragmented_ip++; + return INVALID; + } + + switch (l4_proto) { + case IPPROTO_ICMPV6: + return process_icmpv6(dynptr, offset, skb, metrics); + + case IPPROTO_TCP: + return process_tcp(dynptr, offset, skb, &info, metrics); + + case IPPROTO_UDP: + return process_udp(dynptr, offset, skb, &info, metrics); + + default: + metrics->errors_total_unknown_l4_proto++; + return INVALID; + } +} + +SEC("tc") +int cls_redirect(struct __sk_buff *skb) +{ + __u8 encap_buffer[sizeof(encap_headers_t)] = {}; + struct bpf_dynptr dynptr; + struct 
in_addr next_hop; + /* Tracks offset of the dynptr. This will be unnecessary once + * bpf_dynptr_advance() is available. + */ + __u64 off = 0; + ret_t ret; + + bpf_dynptr_from_skb(skb, 0, &dynptr); + + metrics_t *metrics = get_global_metrics(); + if (metrics == NULL) + return TC_ACT_SHOT; + + metrics->processed_packets_total++; + + /* Pass bogus packets as long as we're not sure they're + * destined for us. + */ + if (skb->protocol != bpf_htons(ETH_P_IP)) + return TC_ACT_OK; + + encap_headers_t *encap; + + /* Make sure that all encapsulation headers are available in + * the linear portion of the skb. This makes it easy to manipulate them. + */ + if (bpf_skb_pull_data(skb, sizeof(*encap))) + return TC_ACT_OK; + + encap = bpf_dynptr_slice_rdwr(&dynptr, 0, encap_buffer, sizeof(encap_buffer)); + if (!encap) + return TC_ACT_OK; + + off += sizeof(*encap); + + if (encap->ip.ihl != 5) + /* We never have any options. */ + return TC_ACT_OK; + + if (encap->ip.daddr != ENCAPSULATION_IP || + encap->ip.protocol != IPPROTO_UDP) + return TC_ACT_OK; + + /* TODO Check UDP length? */ + if (encap->udp.dest != ENCAPSULATION_PORT) + return TC_ACT_OK; + + /* We now know that the packet is destined to us, we can + * drop bogus ones. + */ + if (ipv4_is_fragment((void *)&encap->ip)) { + metrics->errors_total_fragmented_ip++; + return TC_ACT_SHOT; + } + + if (encap->gue.variant != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (encap->gue.control != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (encap->gue.flags != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (encap->gue.hlen != + sizeof(encap->unigue) / 4 + encap->unigue.hop_count) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (encap->unigue.version != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (encap->unigue.reserved != 0) + return TC_ACT_SHOT; + + MAYBE_RETURN(get_next_hop(&dynptr, &off, encap, &next_hop)); + + if (next_hop.s_addr == 0) { + metrics->accepted_packets_total_last_hop++; + return accept_locally(skb, encap); + } + + verdict_t verdict; + switch (encap->gue.proto_ctype) { + case IPPROTO_IPIP: + verdict = process_ipv4(skb, &dynptr, &off, metrics); + break; + + case IPPROTO_IPV6: + verdict = process_ipv6(skb, &dynptr, &off, metrics); + break; + + default: + metrics->errors_total_unknown_l3_proto++; + return TC_ACT_SHOT; + } + + switch (verdict) { + case INVALID: + /* metrics have already been bumped */ + return TC_ACT_SHOT; + + case UNKNOWN: + return forward_to_next_hop(skb, &dynptr, encap, &next_hop, metrics); + + case ECHO_REQUEST: + metrics->accepted_packets_total_icmp_echo_request++; + break; + + case SYN: + if (encap->unigue.forward_syn) { + return forward_to_next_hop(skb, &dynptr, encap, &next_hop, + metrics); + } + + metrics->accepted_packets_total_syn++; + break; + + case SYN_COOKIE: + metrics->accepted_packets_total_syn_cookies++; + break; + + case ESTABLISHED: + metrics->accepted_packets_total_established++; + break; + } + + ret = accept_locally(skb, encap); + + if (encap == encap_buffer) + bpf_dynptr_write(&dynptr, 0, encap_buffer, sizeof(encap_buffer), 0); + + return ret; +} diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c index ab1e647aeb31..b86fdda2a6ea 100644 --- 
a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c @@ -42,7 +42,6 @@ int test_core_bitfields(void *ctx) { struct core_reloc_bitfields *in = (void *)&data.in; struct core_reloc_bitfields_output *out = (void *)&data.out; - uint64_t res; out->ub1 = BPF_CORE_READ_BITFIELD_PROBED(in, ub1); out->ub2 = BPF_CORE_READ_BITFIELD_PROBED(in, ub2); diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c index 23970a20b324..b85fc8c423ba 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func1.c +++ b/tools/testing/selftests/bpf/progs/test_global_func1.c @@ -18,6 +18,8 @@ int f1(struct __sk_buff *skb) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return f0(0, skb) + skb->len; } @@ -34,6 +36,8 @@ int f3(int val, struct __sk_buff *skb, int var) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return skb->ifindex * val * var; } diff --git a/tools/testing/selftests/bpf/progs/test_global_func2.c b/tools/testing/selftests/bpf/progs/test_global_func2.c index 3dce97fb52a4..2beab9c3b68a 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func2.c +++ b/tools/testing/selftests/bpf/progs/test_global_func2.c @@ -18,6 +18,8 @@ int f1(struct __sk_buff *skb) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return f0(0, skb) + skb->len; } @@ -34,6 +36,8 @@ int f3(int val, struct __sk_buff *skb, int var) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return skb->ifindex * val * var; } diff --git a/tools/testing/selftests/bpf/progs/test_hash_large_key.c b/tools/testing/selftests/bpf/progs/test_hash_large_key.c index 473a22794a62..8b438128f46b 100644 --- a/tools/testing/selftests/bpf/progs/test_hash_large_key.c +++ b/tools/testing/selftests/bpf/progs/test_hash_large_key.c @@ -28,7 +28,7 @@ struct bigelement { SEC("raw_tracepoint/sys_enter") int bpf_hash_large_key_test(void *ctx) { - int zero = 0, err = 1, value = 42; + int zero = 0, value = 42; struct bigelement *key; key = bpf_map_lookup_elem(&key_map, &zero); diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c index 2fbef3cc7ad8..2dde8e3fe4c9 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c @@ -48,7 +48,7 @@ SEC("?lsm.s/bpf") __failure __msg("arg#0 expected pointer to stack or dynptr_ptr") int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size) { - unsigned long val; + unsigned long val = 0; return bpf_verify_pkcs7_signature((struct bpf_dynptr *)val, (struct bpf_dynptr *)val, NULL); diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c index a72a5bf3812a..27109b877714 100644 --- a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c +++ b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c @@ -35,7 +35,6 @@ SEC("raw_tp/sys_enter") int handler2(const void *ctx) { int *active; - __u32 cpu; active = bpf_this_cpu_ptr(&bpf_prog_active); write_active(active); diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c index 5f8379aadb29..d00268c91e19 100644 --- a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c +++ 
b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c @@ -20,6 +20,8 @@ __u64 out__non_existent_typed = -1; /* test existing weak symbols can be resolved. */ extern const struct rq runqueues __ksym __weak; /* typed */ extern const void bpf_prog_active __ksym __weak; /* typeless */ +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak; +void bpf_testmod_test_mod_kfunc(int i) __ksym __weak; /* non-existent weak symbols. */ @@ -29,6 +31,7 @@ extern const void bpf_link_fops1 __ksym __weak; /* typed symbols, default to zero. */ extern const int bpf_link_fops2 __ksym __weak; +void invalid_kfunc(void) __ksym __weak; SEC("raw_tp/sys_enter") int pass_handler(const void *ctx) @@ -37,7 +40,7 @@ int pass_handler(const void *ctx) /* tests existing symbols. */ rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0); - if (rq) + if (rq && bpf_ksym_exists(&runqueues)) out__existing_typed = rq->cpu; out__existing_typeless = (__u64)&bpf_prog_active; @@ -50,6 +53,18 @@ int pass_handler(const void *ctx) if (&bpf_link_fops2) /* can't happen */ out__non_existent_typed = (__u64)bpf_per_cpu_ptr(&bpf_link_fops2, 0); + if (!bpf_ksym_exists(bpf_task_acquire)) + /* dead code won't be seen by the verifier */ + bpf_task_acquire(0); + + if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) + /* dead code won't be seen by the verifier */ + bpf_testmod_test_mod_kfunc(0); + + if (bpf_ksym_exists(invalid_kfunc)) + /* dead code won't be seen by the verifier */ + invalid_kfunc(); + return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c new file mode 100644 index 000000000000..f997f5080748 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Facebook +#include <stddef.h> +#include <stdbool.h> +#include <string.h> +#include <linux/pkt_cls.h> +#include <linux/bpf.h> +#include <linux/in.h> +#include <linux/if_ether.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/icmp.h> +#include <linux/icmpv6.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <bpf/bpf_helpers.h> +#include "test_iptunnel_common.h" +#include <bpf/bpf_endian.h> + +#include "bpf_kfuncs.h" + +static __always_inline __u32 rol32(__u32 word, unsigned int shift) +{ + return (word << shift) | (word >> ((-shift) & 31)); +} + +/* copy paste of jhash from kernel sources to make sure llvm + * can compile it into valid sequence of bpf instructions + */ +#define __jhash_mix(a, b, c) \ +{ \ + a -= c; a ^= rol32(c, 4); c += b; \ + b -= a; b ^= rol32(a, 6); a += c; \ + c -= b; c ^= rol32(b, 8); b += a; \ + a -= c; a ^= rol32(c, 16); c += b; \ + b -= a; b ^= rol32(a, 19); a += c; \ + c -= b; c ^= rol32(b, 4); b += a; \ +} + +#define __jhash_final(a, b, c) \ +{ \ + c ^= b; c -= rol32(b, 14); \ + a ^= c; a -= rol32(c, 11); \ + b ^= a; b -= rol32(a, 25); \ + c ^= b; c -= rol32(b, 16); \ + a ^= c; a -= rol32(c, 4); \ + b ^= a; b -= rol32(a, 14); \ + c ^= b; c -= rol32(b, 24); \ +} + +#define JHASH_INITVAL 0xdeadbeef + +typedef unsigned int u32; + +static __noinline u32 jhash(const void *key, u32 length, u32 initval) +{ + u32 a, b, c; + const unsigned char *k = key; + + a = b = c = JHASH_INITVAL + length + initval; + + while (length > 12) { + a += *(u32 *)(k); + b += *(u32 *)(k + 4); + c += *(u32 *)(k + 8); + __jhash_mix(a, b, c); + length -= 12; + k += 12; + } + switch (length) { + case 12: c += (u32)k[11]<<24; + case 11: c += (u32)k[10]<<16; + 
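/* editor's note: every case in this switch intentionally falls + * through, folding one more trailing byte of the key into a, b or c + * before the final mix. + */ + 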
case 10: c += (u32)k[9]<<8; + case 9: c += k[8]; + case 8: b += (u32)k[7]<<24; + case 7: b += (u32)k[6]<<16; + case 6: b += (u32)k[5]<<8; + case 5: b += k[4]; + case 4: a += (u32)k[3]<<24; + case 3: a += (u32)k[2]<<16; + case 2: a += (u32)k[1]<<8; + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} + +static __noinline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) +{ + a += initval; + b += initval; + c += initval; + __jhash_final(a, b, c); + return c; +} + +static __noinline u32 jhash_2words(u32 a, u32 b, u32 initval) +{ + return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); +} + +#define PCKT_FRAGMENTED 65343 +#define IPV4_HDR_LEN_NO_OPT 20 +#define IPV4_PLUS_ICMP_HDR 28 +#define IPV6_PLUS_ICMP_HDR 48 +#define RING_SIZE 2 +#define MAX_VIPS 12 +#define MAX_REALS 5 +#define CTL_MAP_SIZE 16 +#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE) +#define F_IPV6 (1 << 0) +#define F_HASH_NO_SRC_PORT (1 << 0) +#define F_ICMP (1 << 0) +#define F_SYN_SET (1 << 1) + +struct packet_description { + union { + __be32 src; + __be32 srcv6[4]; + }; + union { + __be32 dst; + __be32 dstv6[4]; + }; + union { + __u32 ports; + __u16 port16[2]; + }; + __u8 proto; + __u8 flags; +}; + +struct ctl_value { + union { + __u64 value; + __u32 ifindex; + __u8 mac[6]; + }; +}; + +struct vip_meta { + __u32 flags; + __u32 vip_num; +}; + +struct real_definition { + union { + __be32 dst; + __be32 dstv6[4]; + }; + __u8 flags; +}; + +struct vip_stats { + __u64 bytes; + __u64 pkts; +}; + +struct eth_hdr { + unsigned char eth_dest[ETH_ALEN]; + unsigned char eth_source[ETH_ALEN]; + unsigned short eth_proto; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_VIPS); + __type(key, struct vip); + __type(value, struct vip_meta); +} vip_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, CH_RINGS_SIZE); + __type(key, __u32); + __type(value, __u32); +} ch_rings SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, MAX_REALS); + __type(key, __u32); + __type(value, struct real_definition); +} reals SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, MAX_VIPS); + __type(key, __u32); + __type(value, struct vip_stats); +} stats SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, CTL_MAP_SIZE); + __type(key, __u32); + __type(value, struct ctl_value); +} ctl_array SEC(".maps"); + +static __noinline __u32 get_packet_hash(struct packet_description *pckt, bool ipv6) +{ + if (ipv6) + return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS), + pckt->ports, CH_RINGS_SIZE); + else + return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE); +} + +static __noinline bool get_packet_dst(struct real_definition **real, + struct packet_description *pckt, + struct vip_meta *vip_info, + bool is_ipv6) +{ + __u32 hash = get_packet_hash(pckt, is_ipv6); + __u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE; + __u32 *real_pos; + + if (hash != 0x358459b7 /* jhash of ipv4 packet */ && + hash != 0x2f4bc6bb /* jhash of ipv6 packet */) + return false; + + real_pos = bpf_map_lookup_elem(&ch_rings, &key); + if (!real_pos) + return false; + key = *real_pos; + *real = bpf_map_lookup_elem(&reals, &key); + if (!(*real)) + return false; + return true; +} + +static __noinline int parse_icmpv6(struct bpf_dynptr *skb_ptr, __u64 off, + struct packet_description *pckt) +{ + __u8 buffer[sizeof(struct ipv6hdr)] = {}; + struct icmp6hdr 
*icmp_hdr; + struct ipv6hdr *ip6h; + + icmp_hdr = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!icmp_hdr) + return TC_ACT_SHOT; + + if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG) + return TC_ACT_OK; + off += sizeof(struct icmp6hdr); + ip6h = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!ip6h) + return TC_ACT_SHOT; + pckt->proto = ip6h->nexthdr; + pckt->flags |= F_ICMP; + memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16); + memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16); + return TC_ACT_UNSPEC; +} + +static __noinline int parse_icmp(struct bpf_dynptr *skb_ptr, __u64 off, + struct packet_description *pckt) +{ + __u8 buffer_icmp[sizeof(struct iphdr)] = {}; + __u8 buffer_ip[sizeof(struct iphdr)] = {}; + struct icmphdr *icmp_hdr; + struct iphdr *iph; + + icmp_hdr = bpf_dynptr_slice(skb_ptr, off, buffer_icmp, sizeof(buffer_icmp)); + if (!icmp_hdr) + return TC_ACT_SHOT; + if (icmp_hdr->type != ICMP_DEST_UNREACH || + icmp_hdr->code != ICMP_FRAG_NEEDED) + return TC_ACT_OK; + off += sizeof(struct icmphdr); + iph = bpf_dynptr_slice(skb_ptr, off, buffer_ip, sizeof(buffer_ip)); + if (!iph || iph->ihl != 5) + return TC_ACT_SHOT; + pckt->proto = iph->protocol; + pckt->flags |= F_ICMP; + pckt->src = iph->daddr; + pckt->dst = iph->saddr; + return TC_ACT_UNSPEC; +} + +static __noinline bool parse_udp(struct bpf_dynptr *skb_ptr, __u64 off, + struct packet_description *pckt) +{ + __u8 buffer[sizeof(struct udphdr)] = {}; + struct udphdr *udp; + + udp = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!udp) + return false; + + if (!(pckt->flags & F_ICMP)) { + pckt->port16[0] = udp->source; + pckt->port16[1] = udp->dest; + } else { + pckt->port16[0] = udp->dest; + pckt->port16[1] = udp->source; + } + return true; +} + +static __noinline bool parse_tcp(struct bpf_dynptr *skb_ptr, __u64 off, + struct packet_description *pckt) +{ + __u8 buffer[sizeof(struct tcphdr)] = {}; + struct tcphdr *tcp; + + tcp = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!tcp) + return false; + + if (tcp->syn) + pckt->flags |= F_SYN_SET; + + if (!(pckt->flags & F_ICMP)) { + pckt->port16[0] = tcp->source; + pckt->port16[1] = tcp->dest; + } else { + pckt->port16[0] = tcp->dest; + pckt->port16[1] = tcp->source; + } + return true; +} + +static __noinline int process_packet(struct bpf_dynptr *skb_ptr, + struct eth_hdr *eth, __u64 off, + bool is_ipv6, struct __sk_buff *skb) +{ + struct packet_description pckt = {}; + struct bpf_tunnel_key tkey = {}; + struct vip_stats *data_stats; + struct real_definition *dst; + struct vip_meta *vip_info; + struct ctl_value *cval; + __u32 v4_intf_pos = 1; + __u32 v6_intf_pos = 2; + struct ipv6hdr *ip6h; + struct vip vip = {}; + struct iphdr *iph; + int tun_flag = 0; + __u16 pkt_bytes; + __u64 iph_len; + __u32 ifindex; + __u8 protocol; + __u32 vip_num; + int action; + + tkey.tunnel_ttl = 64; + if (is_ipv6) { + __u8 buffer[sizeof(struct ipv6hdr)] = {}; + + ip6h = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!ip6h) + return TC_ACT_SHOT; + + iph_len = sizeof(struct ipv6hdr); + protocol = ip6h->nexthdr; + pckt.proto = protocol; + pkt_bytes = bpf_ntohs(ip6h->payload_len); + off += iph_len; + if (protocol == IPPROTO_FRAGMENT) { + return TC_ACT_SHOT; + } else if (protocol == IPPROTO_ICMPV6) { + action = parse_icmpv6(skb_ptr, off, &pckt); + if (action >= 0) + return action; + off += IPV6_PLUS_ICMP_HDR; + } else { + memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16); + memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16); + } + } else { + __u8 
buffer[sizeof(struct iphdr)] = {}; + + iph = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!iph || iph->ihl != 5) + return TC_ACT_SHOT; + + protocol = iph->protocol; + pckt.proto = protocol; + pkt_bytes = bpf_ntohs(iph->tot_len); + off += IPV4_HDR_LEN_NO_OPT; + + if (iph->frag_off & PCKT_FRAGMENTED) + return TC_ACT_SHOT; + if (protocol == IPPROTO_ICMP) { + action = parse_icmp(skb_ptr, off, &pckt); + if (action >= 0) + return action; + off += IPV4_PLUS_ICMP_HDR; + } else { + pckt.src = iph->saddr; + pckt.dst = iph->daddr; + } + } + protocol = pckt.proto; + + if (protocol == IPPROTO_TCP) { + if (!parse_tcp(skb_ptr, off, &pckt)) + return TC_ACT_SHOT; + } else if (protocol == IPPROTO_UDP) { + if (!parse_udp(skb_ptr, off, &pckt)) + return TC_ACT_SHOT; + } else { + return TC_ACT_SHOT; + } + + if (is_ipv6) + memcpy(vip.daddr.v6, pckt.dstv6, 16); + else + vip.daddr.v4 = pckt.dst; + + vip.dport = pckt.port16[1]; + vip.protocol = pckt.proto; + vip_info = bpf_map_lookup_elem(&vip_map, &vip); + if (!vip_info) { + vip.dport = 0; + vip_info = bpf_map_lookup_elem(&vip_map, &vip); + if (!vip_info) + return TC_ACT_SHOT; + pckt.port16[1] = 0; + } + + if (vip_info->flags & F_HASH_NO_SRC_PORT) + pckt.port16[0] = 0; + + if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6)) + return TC_ACT_SHOT; + + if (dst->flags & F_IPV6) { + cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos); + if (!cval) + return TC_ACT_SHOT; + ifindex = cval->ifindex; + memcpy(tkey.remote_ipv6, dst->dstv6, 16); + tun_flag = BPF_F_TUNINFO_IPV6; + } else { + cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos); + if (!cval) + return TC_ACT_SHOT; + ifindex = cval->ifindex; + tkey.remote_ipv4 = dst->dst; + } + vip_num = vip_info->vip_num; + data_stats = bpf_map_lookup_elem(&stats, &vip_num); + if (!data_stats) + return TC_ACT_SHOT; + data_stats->pkts++; + data_stats->bytes += pkt_bytes; + bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag); + *(u32 *)eth->eth_dest = tkey.remote_ipv4; + return bpf_redirect(ifindex, 0); +} + +SEC("tc") +int balancer_ingress(struct __sk_buff *ctx) +{ + __u8 buffer[sizeof(struct eth_hdr)] = {}; + struct bpf_dynptr ptr; + struct eth_hdr *eth; + __u32 eth_proto; + __u32 nh_off; + int err; + + nh_off = sizeof(struct eth_hdr); + + bpf_dynptr_from_skb(ctx, 0, &ptr); + eth = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + if (!eth) + return TC_ACT_SHOT; + eth_proto = eth->eth_proto; + if (eth_proto == bpf_htons(ETH_P_IP)) + err = process_packet(&ptr, eth, nh_off, false, ctx); + else if (eth_proto == bpf_htons(ETH_P_IPV6)) + err = process_packet(&ptr, eth, nh_off, true, ctx); + else + return TC_ACT_SHOT; + + if (eth == buffer) + bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0); + + return err; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_legacy_printk.c b/tools/testing/selftests/bpf/progs/test_legacy_printk.c index 64c2d9ced529..42718cd8e6a4 100644 --- a/tools/testing/selftests/bpf/progs/test_legacy_printk.c +++ b/tools/testing/selftests/bpf/progs/test_legacy_printk.c @@ -56,7 +56,7 @@ int handle_legacy(void *ctx) SEC("tp/raw_syscalls/sys_enter") int handle_modern(void *ctx) { - int zero = 0, cur_pid; + int cur_pid; cur_pid = bpf_get_current_pid_tgid() >> 32; if (cur_pid != my_pid_var) diff --git a/tools/testing/selftests/bpf/progs/test_log_fixup.c b/tools/testing/selftests/bpf/progs/test_log_fixup.c index 60450cb0e72e..1bd48feaaa42 100644 --- a/tools/testing/selftests/bpf/progs/test_log_fixup.c +++ 
b/tools/testing/selftests/bpf/progs/test_log_fixup.c @@ -61,4 +61,14 @@ int use_missing_map(const void *ctx) return value != NULL; } +extern int bpf_nonexistent_kfunc(void) __ksym __weak; + +SEC("?raw_tp/sys_enter") +int use_missing_kfunc(const void *ctx) +{ + bpf_nonexistent_kfunc(); + + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c index acf073db9e8b..1c02511b73cd 100644 --- a/tools/testing/selftests/bpf/progs/test_map_lock.c +++ b/tools/testing/selftests/bpf/progs/test_map_lock.c @@ -33,7 +33,7 @@ struct { SEC("cgroup/skb") int bpf_map_lock_test(struct __sk_buff *skb) { - struct hmap_elem zero = {}, *val; + struct hmap_elem *val; int rnd = bpf_get_prandom_u32(); int key = 0, err = 1, i; struct array_elem *q; diff --git a/tools/testing/selftests/bpf/progs/test_map_ops.c b/tools/testing/selftests/bpf/progs/test_map_ops.c new file mode 100644 index 000000000000..b53b46a090c8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_map_ops.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} hash_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_STACK); + __uint(max_entries, 1); + __type(value, int); +} stack_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} array_map SEC(".maps"); + +const volatile pid_t pid; +long err = 0; + +static u64 callback(u64 map, u64 key, u64 val, u64 ctx, u64 flags) +{ + return 0; +} + +SEC("tp/syscalls/sys_enter_getpid") +int map_update(void *ctx) +{ + const int key = 0; + const int val = 1; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + err = bpf_map_update_elem(&hash_map, &key, &val, BPF_NOEXIST); + + return 0; +} + +SEC("tp/syscalls/sys_enter_getppid") +int map_delete(void *ctx) +{ + const int key = 0; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + err = bpf_map_delete_elem(&hash_map, &key); + + return 0; +} + +SEC("tp/syscalls/sys_enter_getuid") +int map_push(void *ctx) +{ + const int val = 1; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + err = bpf_map_push_elem(&stack_map, &val, 0); + + return 0; +} + +SEC("tp/syscalls/sys_enter_geteuid") +int map_pop(void *ctx) +{ + int val; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + err = bpf_map_pop_elem(&stack_map, &val); + + return 0; +} + +SEC("tp/syscalls/sys_enter_getgid") +int map_peek(void *ctx) +{ + int val; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + err = bpf_map_peek_elem(&stack_map, &val); + + return 0; +} + +SEC("tp/syscalls/sys_enter_gettid") +int map_for_each_pass(void *ctx) +{ + const int key = 0; + const int val = 1; + const u64 flags = 0; + int callback_ctx; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + bpf_map_update_elem(&array_map, &key, &val, flags); + + err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags); + + return 0; +} + +SEC("tp/syscalls/sys_enter_getpgid") +int map_for_each_fail(void *ctx) +{ + const int key = 0; + const int val = 1; + const u64 flags = BPF_NOEXIST; + int callback_ctx; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + 
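/* editor's note: with flags == BPF_NOEXIST this update always fails + * on an array map, since array slots always exist; its return value + * is deliberately ignored and the call under test is the for_each + * below. + */ + 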
bpf_map_update_elem(&array_map, &key, &val, flags); + + /* calling for_each with non-zero flags will return error */ + err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_obj_id.c b/tools/testing/selftests/bpf/progs/test_obj_id.c index ded71b3ff6b4..2850ae788a91 100644 --- a/tools/testing/selftests/bpf/progs/test_obj_id.c +++ b/tools/testing/selftests/bpf/progs/test_obj_id.c @@ -4,6 +4,7 @@ #include <stddef.h> #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include "bpf_misc.h" struct { __uint(type, BPF_MAP_TYPE_ARRAY); @@ -19,6 +20,7 @@ int test_obj_id(void *ctx) __u64 *value; value = bpf_map_lookup_elem(&test_map_id, &key); + __sink(value); return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c new file mode 100644 index 000000000000..d9b2ba7ac340 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* This parsing logic is taken from the open source library katran, a layer 4 + * load balancer. + * + * This code logic using dynptrs can be found in test_parse_tcp_hdr_opt_dynptr.c + * + * https://github.com/facebookincubator/katran/blob/main/katran/lib/bpf/pckt_parsing.h + */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <linux/tcp.h> +#include <stdbool.h> +#include <linux/ipv6.h> +#include <linux/if_ether.h> +#include "test_tcp_hdr_options.h" + +char _license[] SEC("license") = "GPL"; + +/* Kind number used for experiments */ +const __u32 tcp_hdr_opt_kind_tpr = 0xFD; +/* Length of the tcp header option */ +const __u32 tcp_hdr_opt_len_tpr = 6; +/* maximum number of header options to check to lookup server_id */ +const __u32 tcp_hdr_opt_max_opt_checks = 15; + +__u32 server_id; + +struct hdr_opt_state { + __u32 server_id; + __u8 byte_offset; + __u8 hdr_bytes_remaining; +}; + +static int parse_hdr_opt(const struct xdp_md *xdp, struct hdr_opt_state *state) +{ + const void *data = (void *)(long)xdp->data; + const void *data_end = (void *)(long)xdp->data_end; + __u8 *tcp_opt, kind, hdr_len; + + tcp_opt = (__u8 *)(data + state->byte_offset); + if (tcp_opt + 1 > data_end) + return -1; + + kind = tcp_opt[0]; + + if (kind == TCPOPT_EOL) + return -1; + + if (kind == TCPOPT_NOP) { + state->hdr_bytes_remaining--; + state->byte_offset++; + return 0; + } + + if (state->hdr_bytes_remaining < 2 || + tcp_opt + sizeof(__u8) + sizeof(__u8) > data_end) + return -1; + + hdr_len = tcp_opt[1]; + if (hdr_len > state->hdr_bytes_remaining) + return -1; + + if (kind == tcp_hdr_opt_kind_tpr) { + if (hdr_len != tcp_hdr_opt_len_tpr) + return -1; + + if (tcp_opt + tcp_hdr_opt_len_tpr > data_end) + return -1; + + state->server_id = *(__u32 *)&tcp_opt[2]; + return 1; + } + + state->hdr_bytes_remaining -= hdr_len; + state->byte_offset += hdr_len; + return 0; +} + +SEC("xdp") +int xdp_ingress_v6(struct xdp_md *xdp) +{ + const void *data = (void *)(long)xdp->data; + const void *data_end = (void *)(long)xdp->data_end; + struct hdr_opt_state opt_state = {}; + __u8 tcp_hdr_opt_len = 0; + struct tcphdr *tcp_hdr; + __u64 tcp_offset = 0; + int err; + + tcp_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr); + tcp_hdr = (struct tcphdr *)(data + tcp_offset); + if (tcp_hdr + 1 > data_end) + return XDP_DROP; + + tcp_hdr_opt_len = (tcp_hdr->doff * 4) - sizeof(struct tcphdr); + if (tcp_hdr_opt_len < tcp_hdr_opt_len_tpr) + return XDP_DROP; + + 
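/* doff counts the whole TCP header in 32-bit words, so doff * 4 minus + * the fixed 20-byte struct tcphdr leaves exactly the options bytes, + * e.g. doff == 8 means a 32-byte header and 12 bytes of options. + */ + 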
opt_state.hdr_bytes_remaining = tcp_hdr_opt_len; + opt_state.byte_offset = sizeof(struct tcphdr) + tcp_offset; + + /* max number of bytes of options in tcp header is 40 bytes */ + for (int i = 0; i < tcp_hdr_opt_max_opt_checks; i++) { + err = parse_hdr_opt(xdp, &opt_state); + + if (err || !opt_state.hdr_bytes_remaining) + break; + } + + if (!opt_state.server_id) + return XDP_DROP; + + server_id = opt_state.server_id; + + return XDP_PASS; +} diff --git a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c new file mode 100644 index 000000000000..dc6e43bc6a62 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* This logic is lifted from a real-world use case of packet parsing, used in + * the open source library katran, a layer 4 load balancer. + * + * This test demonstrates how to parse packet contents using dynptrs. The + * original code (parsing without dynptrs) can be found in test_parse_tcp_hdr_opt.c + */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <linux/tcp.h> +#include <stdbool.h> +#include <linux/ipv6.h> +#include <linux/if_ether.h> +#include "test_tcp_hdr_options.h" +#include "bpf_kfuncs.h" + +char _license[] SEC("license") = "GPL"; + +/* Kind number used for experiments */ +const __u32 tcp_hdr_opt_kind_tpr = 0xFD; +/* Length of the tcp header option */ +const __u32 tcp_hdr_opt_len_tpr = 6; +/* maximum number of header options to check to lookup server_id */ +const __u32 tcp_hdr_opt_max_opt_checks = 15; + +__u32 server_id; + +static int parse_hdr_opt(struct bpf_dynptr *ptr, __u32 *off, __u8 *hdr_bytes_remaining, + __u32 *server_id) +{ + __u8 kind, hdr_len; + __u8 buffer[sizeof(kind) + sizeof(hdr_len) + sizeof(*server_id)]; + __u8 *data; + + __builtin_memset(buffer, 0, sizeof(buffer)); + + data = bpf_dynptr_slice(ptr, *off, buffer, sizeof(buffer)); + if (!data) + return -1; + + kind = data[0]; + + if (kind == TCPOPT_EOL) + return -1; + + if (kind == TCPOPT_NOP) { + *off += 1; + *hdr_bytes_remaining -= 1; + return 0; + } + + if (*hdr_bytes_remaining < 2) + return -1; + + hdr_len = data[1]; + if (hdr_len > *hdr_bytes_remaining) + return -1; + + if (kind == tcp_hdr_opt_kind_tpr) { + if (hdr_len != tcp_hdr_opt_len_tpr) + return -1; + + __builtin_memcpy(server_id, (__u32 *)(data + 2), sizeof(*server_id)); + return 1; + } + + *off += hdr_len; + *hdr_bytes_remaining -= hdr_len; + return 0; +} + +SEC("xdp") +int xdp_ingress_v6(struct xdp_md *xdp) +{ + __u8 buffer[sizeof(struct tcphdr)] = {}; + __u8 hdr_bytes_remaining; + struct tcphdr *tcp_hdr; + __u8 tcp_hdr_opt_len; + int err = 0; + __u32 off; + + struct bpf_dynptr ptr; + + bpf_dynptr_from_xdp(xdp, 0, &ptr); + + off = sizeof(struct ethhdr) + sizeof(struct ipv6hdr); + + tcp_hdr = bpf_dynptr_slice(&ptr, off, buffer, sizeof(buffer)); + if (!tcp_hdr) + return XDP_DROP; + + tcp_hdr_opt_len = (tcp_hdr->doff * 4) - sizeof(struct tcphdr); + if (tcp_hdr_opt_len < tcp_hdr_opt_len_tpr) + return XDP_DROP; + + hdr_bytes_remaining = tcp_hdr_opt_len; + + off += sizeof(struct tcphdr); + + /* max number of bytes of options in tcp header is 40 bytes */ + for (int i = 0; i < tcp_hdr_opt_max_opt_checks; i++) { + err = parse_hdr_opt(&ptr, &off, &hdr_bytes_remaining, &server_id); + + if (err || !hdr_bytes_remaining) + break; + } + + if (!server_id) + return XDP_DROP; + + return XDP_PASS; +} diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c 
b/tools/testing/selftests/bpf/progs/test_pkt_access.c index 5cd7c096f62d..bce7173152c6 100644 --- a/tools/testing/selftests/bpf/progs/test_pkt_access.c +++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c @@ -13,6 +13,7 @@ #include <linux/pkt_cls.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_misc.h" /* llvm will optimize both subprograms into exactly the same BPF assembly * @@ -51,6 +52,8 @@ int get_skb_len(struct __sk_buff *skb) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return skb->len; } @@ -73,6 +76,8 @@ int get_skb_ifindex(int val, struct __sk_buff *skb, int var) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return skb->ifindex * val * var; } diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf.c b/tools/testing/selftests/bpf/progs/test_ringbuf.c index 5bdc0d38efc0..501cefa97633 100644 --- a/tools/testing/selftests/bpf/progs/test_ringbuf.c +++ b/tools/testing/selftests/bpf/progs/test_ringbuf.c @@ -41,7 +41,6 @@ int test_ringbuf(void *ctx) { int cur_pid = bpf_get_current_pid_tgid() >> 32; struct sample *sample; - int zero = 0; if (cur_pid != pid) return 0; diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c index 2760bf60d05a..21bb7da90ea5 100644 --- a/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c +++ b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c @@ -53,6 +53,7 @@ int test_ringbuf_mem_map_key(void *ctx) /* test using 'sample' (PTR_TO_MEM | MEM_ALLOC) as map key arg */ lookup_val = (int *)bpf_map_lookup_elem(&hash_map, sample); + __sink(lookup_val); /* workaround - memcpy is necessary so that verifier doesn't * complain with: diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c index e416e0ce12b7..9626baa6779c 100644 --- a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c +++ b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c @@ -59,7 +59,6 @@ int test_ringbuf(void *ctx) int cur_pid = bpf_get_current_pid_tgid() >> 32; struct sample *sample; void *rb; - int zero = 0; if (cur_pid != pid) return 0; diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c index 7d56ed47cd4d..5eb25c6ad75b 100644 --- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c +++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c @@ -64,7 +64,7 @@ SEC("sk_reuseport") int _select_by_skb_data(struct sk_reuseport_md *reuse_md) { __u32 linum, index = 0, flags = 0, index_zero = 0; - __u32 *result_cnt, *linum_value; + __u32 *result_cnt; struct data_check data_check = {}; struct cmd *cmd, cmd_copy; void *data, *data_end; diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c index 21b19b758c4e..3079244c7f96 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_assign.c +++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c @@ -15,6 +15,7 @@ #include <sys/socket.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_misc.h" #if defined(IPROUTE2_HAVE_LIBBPF) /* Use a new-style map definition. 
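* That is, a BTF-defined map in SEC(".maps") that an iproute2 built * against libbpf can load, as opposed to iproute2's legacy * struct bpf_elf_map layout. 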
*/ @@ -57,7 +58,6 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp) void *data = (void *)(long)skb->data; struct bpf_sock_tuple *result; struct ethhdr *eth; - __u64 tuple_len; __u8 proto = 0; __u64 ihl_len; @@ -94,6 +94,7 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp) return NULL; *tcp = (proto == IPPROTO_TCP); + __sink(ihl_len); return result; } @@ -173,7 +174,6 @@ int bpf_sk_assign_test(struct __sk_buff *skb) struct bpf_sock_tuple *tuple; bool ipv4 = false; bool tcp = false; - int tuple_len; int ret = 0; tuple = get_tuple(skb, &ipv4, &tcp); diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c index 6058dcb11b36..71f844b9b902 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c @@ -391,7 +391,6 @@ SEC("sk_lookup") int ctx_narrow_access(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk; - int err, family; __u32 val_u32; bool v4; @@ -645,9 +644,7 @@ static __always_inline int select_server_a(struct bpf_sk_lookup *ctx) SEC("sk_lookup") int multi_prog_redir1(struct bpf_sk_lookup *ctx) { - int ret; - - ret = select_server_a(ctx); + (void)select_server_a(ctx); bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); return SK_PASS; } @@ -655,9 +652,7 @@ int multi_prog_redir1(struct bpf_sk_lookup *ctx) SEC("sk_lookup") int multi_prog_redir2(struct bpf_sk_lookup *ctx) { - int ret; - - ret = select_server_a(ctx); + (void)select_server_a(ctx); bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); return SK_PASS; } diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c index b502e5c92e33..e9efc3263022 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c @@ -23,8 +23,8 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off, bool *ipv4) { struct bpf_sock_tuple *result; + __u64 ihl_len = 0; __u8 proto = 0; - __u64 ihl_len; if (eth_proto == bpf_htons(ETH_P_IP)) { struct iphdr *iph = (struct iphdr *)(data + nh_off); @@ -110,7 +110,6 @@ int err_modify_sk_pointer(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; - __u32 family; sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); if (sk) { @@ -125,7 +124,6 @@ int err_modify_sk_or_null_pointer(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; - __u32 family; sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); sk += 1; diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c index 6dc1f28fc4b6..02e718f06e0f 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c +++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c @@ -92,4 +92,20 @@ int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern, return 0; } +SEC("tp_btf/tcp_retransmit_synack") +int BPF_PROG(tcp_retransmit_synack, struct sock* sk, struct request_sock* req) +{ + /* load only test */ + bpf_sk_storage_get(&sk_stg_map, sk, 0, 0); + bpf_sk_storage_get(&sk_stg_map, req->sk, 0, 0); + return 0; +} + +SEC("tp_btf/tcp_bad_csum") +int BPF_PROG(tcp_bad_csum, struct sk_buff* skb) +{ + bpf_sk_storage_get(&sk_stg_map, skb->sk, 0, 0); + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c 
b/tools/testing/selftests/bpf/progs/test_sock_fields.c index 9f4b8f9f1181..bbad3c2d9aa5 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c @@ -121,7 +121,7 @@ static void tpcpy(struct bpf_tcp_sock *dst, SEC("cgroup_skb/egress") int egress_read_sock_fields(struct __sk_buff *skb) { - struct bpf_spinlock_cnt cli_cnt_init = { .lock = 0, .cnt = 0xeB9F }; + struct bpf_spinlock_cnt cli_cnt_init = { .lock = {}, .cnt = 0xeB9F }; struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10; struct bpf_tcp_sock *tp, *tp_ret; struct bpf_sock *sk, *sk_ret; diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h index 6c85b00f27b2..baf9ebc6d903 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h @@ -14,6 +14,7 @@ #include <sys/socket.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_misc.h" /* Sockmap sample program connects a client and a backend together * using cgroups. @@ -111,12 +112,15 @@ int bpf_prog2(struct __sk_buff *skb) int len, *f, ret, zero = 0; __u64 flags = 0; + __sink(rport); if (lport == 10000) ret = 10; else ret = 1; len = (__u32)skb->data_end - (__u32)skb->data; + __sink(len); + f = bpf_map_lookup_elem(&sock_skb_opts, &zero); if (f && *f) { ret = 3; @@ -180,7 +184,6 @@ int bpf_prog3(struct __sk_buff *skb) if (err) return SK_DROP; bpf_write_pass(skb, 13); -tls_out: return ret; } @@ -188,8 +191,7 @@ SEC("sockops") int bpf_sockmap(struct bpf_sock_ops *skops) { __u32 lport, rport; - int op, err = 0, index, key, ret; - + int op, err, ret; op = (int) skops->op; @@ -228,6 +230,8 @@ int bpf_sockmap(struct bpf_sock_ops *skops) break; } + __sink(err); + return 0; } @@ -321,6 +325,10 @@ int bpf_prog8(struct sk_msg_md *msg) } else { return SK_DROP; } + + __sink(data_end); + __sink(data); + return SK_PASS; } SEC("sk_msg4") diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c index 5bd10409285b..b2440a0ff422 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c @@ -3,6 +3,7 @@ #include <linux/bpf.h> #include <linux/version.h> #include <bpf/bpf_helpers.h> +#include "bpf_misc.h" struct hmap_elem { volatile int cnt; @@ -89,6 +90,8 @@ int bpf_spin_lock_test(struct __sk_buff *skb) credit = q->credit; bpf_spin_unlock(&q->lock); + __sink(credit); + /* spin_lock in cgroup local storage */ cls = bpf_get_local_storage(&cls_map, 0); bpf_spin_lock(&cls->lock); diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c index 728dbd39eff0..47568007b668 100644 --- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c +++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c @@ -38,7 +38,7 @@ struct { __type(value, stack_trace_t); } stack_amap SEC(".maps"); -/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */ +/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */ struct sched_switch_args { unsigned long long pad; char prev_comm[TASK_COMM_LEN]; diff --git a/tools/testing/selftests/bpf/progs/test_tc_dtime.c b/tools/testing/selftests/bpf/progs/test_tc_dtime.c index 125beec31834..74ec09f040b7 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_dtime.c +++ b/tools/testing/selftests/bpf/progs/test_tc_dtime.c @@ -163,9 +163,9 @@ static int 
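The initializer change in egress_read_sock_fields() is a type fix rather than a behavior change: struct bpf_spin_lock is a struct (a single __u32 in the UAPI header), so giving it a bare scalar relied on brace elision that newer compilers complain about. A minimal sketch of the two forms, assuming the bpf_spinlock_cnt layout declared earlier in that file:

	struct bpf_spinlock_cnt {
		struct bpf_spin_lock lock;	/* struct { __u32 val; } per UAPI */
		__u32 cnt;
	};

	/* old: scalar initializer for a struct member, legal only via brace elision */
	struct bpf_spinlock_cnt a = { .lock = 0, .cnt = 0xeB9F };

	/* new: an empty initializer list zero-initializes the struct unambiguously */
	struct bpf_spinlock_cnt b = { .lock = {}, .cnt = 0xeB9F };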
skb_get_type(struct __sk_buff *skb) ip6h = data + sizeof(struct ethhdr); if (ip6h + 1 > data_end) return -1; - if (v6_equal(ip6h->saddr, (struct in6_addr)ip6_src)) + if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_src}})) ns = SRC_NS; - else if (v6_equal(ip6h->saddr, (struct in6_addr)ip6_dst)) + else if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_dst}})) ns = DST_NS; inet_proto = ip6h->nexthdr; trans = ip6h + 1; diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c index 3e32ea375ab4..de15155f2609 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_neigh.c +++ b/tools/testing/selftests/bpf/progs/test_tc_neigh.c @@ -94,7 +94,7 @@ int tc_dst(struct __sk_buff *skb) redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_src)); break; case __bpf_constant_htons(ETH_P_IPV6): - redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_src); + redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_src}}); break; } @@ -119,7 +119,7 @@ int tc_src(struct __sk_buff *skb) redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_dst)); break; case __bpf_constant_htons(ETH_P_IPV6): - redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_dst); + redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_dst}}); break; } diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index 3ded05280757..cf7ed8cbb1fe 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -46,8 +46,6 @@ int bpf_testcb(struct bpf_sock_ops *skops) struct bpf_sock_ops *reuse = skops; struct tcphdr *thdr; int window_clamp = 9216; - int good_call_rv = 0; - int bad_call_rv = 0; int save_syn = 1; int rv = -1; int v = 0; diff --git a/tools/testing/selftests/bpf/progs/test_tracepoint.c b/tools/testing/selftests/bpf/progs/test_tracepoint.c index 43bd7a20cc50..4cb8bbb6a320 100644 --- a/tools/testing/selftests/bpf/progs/test_tracepoint.c +++ b/tools/testing/selftests/bpf/progs/test_tracepoint.c @@ -4,7 +4,7 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> -/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */ +/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */ struct sched_switch_args { unsigned long long pad; char prev_comm[TASK_COMM_LEN]; diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c index 508da4a23c4f..f66af753bbbb 100644 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c @@ -52,6 +52,21 @@ struct vxlan_metadata { __u32 gbp; }; +struct bpf_fou_encap { + __be16 sport; + __be16 dport; +}; + +enum bpf_fou_encap_type { + FOU_BPF_ENCAP_FOU, + FOU_BPF_ENCAP_GUE, +}; + +int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx, + struct bpf_fou_encap *encap, int type) __ksym; +int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx, + struct bpf_fou_encap *encap) __ksym; + struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 1); @@ -209,7 +224,6 @@ int erspan_get_tunnel(struct __sk_buff *skb) { struct bpf_tunnel_key key; struct erspan_metadata md; - __u32 index; int ret; ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); @@ -289,7 +303,6 @@ int ip4ip6erspan_get_tunnel(struct __sk_buff *skb) { struct bpf_tunnel_key key; struct erspan_metadata md; - __u32 index; int ret; ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), @@ -324,11 +337,11 @@ int 
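The doubled braces in the v6_equal() calls here (and in test_tc_neigh.c below) spell out initializer levels the old form left to brace elision: struct in6_addr wraps a union whose first member is an array, so a compound literal wants struct, union and array braces. A minimal sketch, with a hypothetical value standing in for the ip6_src macro defined elsewhere in the file:

	#define ip6_src { 0xfaceb002, 0, 0, 1 }	/* hypothetical address words */

	/* (struct in6_addr){{ip6_src}} expands to
	 *	(struct in6_addr){ { { 0xfaceb002, 0, 0, 1 } } }
	 * outer braces: the compound literal itself (a plain cast to a struct
	 * type, as in the old code, is not valid C); middle braces: the in6_u
	 * union member; inner braces (from the macro body): u6_addr32[4].
	 */
	struct in6_addr a = (struct in6_addr){{ip6_src}};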
ip4ip6erspan_get_tunnel(struct __sk_buff *skb) SEC("tc") int vxlan_set_tunnel_dst(struct __sk_buff *skb) { - int ret; struct bpf_tunnel_key key; struct vxlan_metadata md; __u32 index = 0; __u32 *local_ip = NULL; + int ret = 0; local_ip = bpf_map_lookup_elem(&local_ip_map, &index); if (!local_ip) { @@ -363,11 +376,11 @@ int vxlan_set_tunnel_dst(struct __sk_buff *skb) SEC("tc") int vxlan_set_tunnel_src(struct __sk_buff *skb) { - int ret; struct bpf_tunnel_key key; struct vxlan_metadata md; __u32 index = 0; __u32 *local_ip = NULL; + int ret = 0; local_ip = bpf_map_lookup_elem(&local_ip_map, &index); if (!local_ip) { @@ -405,8 +418,6 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb) int ret; struct bpf_tunnel_key key; struct vxlan_metadata md; - __u32 orig_daddr; - __u32 index = 0; ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_FLAGS); @@ -443,9 +454,7 @@ int veth_set_outer_dst(struct __sk_buff *skb) void *data_end = (void *)(long)skb->data_end; struct udphdr *udph; struct iphdr *iph; - __u32 index = 0; int ret = 0; - int shrink; __s64 csum; if ((void *)eth + sizeof(*eth) > data_end) { @@ -494,9 +503,9 @@ SEC("tc") int ip6vxlan_set_tunnel_dst(struct __sk_buff *skb) { struct bpf_tunnel_key key; - int ret; __u32 index = 0; __u32 *local_ip; + int ret = 0; local_ip = bpf_map_lookup_elem(&local_ip_map, &index); if (!local_ip) { @@ -525,9 +534,9 @@ SEC("tc") int ip6vxlan_set_tunnel_src(struct __sk_buff *skb) { struct bpf_tunnel_key key; - int ret; __u32 index = 0; __u32 *local_ip; + int ret = 0; local_ip = bpf_map_lookup_elem(&local_ip_map, &index); if (!local_ip) { @@ -556,9 +565,9 @@ SEC("tc") int ip6vxlan_get_tunnel_src(struct __sk_buff *skb) { struct bpf_tunnel_key key; - int ret; __u32 index = 0; __u32 *local_ip; + int ret = 0; local_ip = bpf_map_lookup_elem(&local_ip_map, &index); if (!local_ip) { @@ -756,6 +765,108 @@ int ipip_get_tunnel(struct __sk_buff *skb) } SEC("tc") +int ipip_gue_set_tunnel(struct __sk_buff *skb) +{ + struct bpf_tunnel_key key = {}; + struct bpf_fou_encap encap = {}; + void *data = (void *)(long)skb->data; + struct iphdr *iph = data; + void *data_end = (void *)(long)skb->data_end; + int ret; + + if (data + sizeof(*iph) > data_end) { + log_err(1); + return TC_ACT_SHOT; + } + + key.tunnel_ttl = 64; + if (iph->protocol == IPPROTO_ICMP) + key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ + + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + encap.sport = 0; + encap.dport = bpf_htons(5555); + + ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_GUE); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + return TC_ACT_OK; +} + +SEC("tc") +int ipip_fou_set_tunnel(struct __sk_buff *skb) +{ + struct bpf_tunnel_key key = {}; + struct bpf_fou_encap encap = {}; + void *data = (void *)(long)skb->data; + struct iphdr *iph = data; + void *data_end = (void *)(long)skb->data_end; + int ret; + + if (data + sizeof(*iph) > data_end) { + log_err(1); + return TC_ACT_SHOT; + } + + key.tunnel_ttl = 64; + if (iph->protocol == IPPROTO_ICMP) + key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ + + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + encap.sport = 0; + encap.dport = bpf_htons(5555); + + ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_FOU); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + return TC_ACT_OK; +} + +SEC("tc") +int ipip_encap_get_tunnel(struct __sk_buff *skb) +{ + int ret; + struct 
bpf_tunnel_key key = {}; + struct bpf_fou_encap encap = {}; + + ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + ret = bpf_skb_get_fou_encap(skb, &encap); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + if (bpf_ntohs(encap.dport) != 5555) + return TC_ACT_SHOT; + + bpf_printk("%d remote ip 0x%x, sport %d, dport %d\n", ret, + key.remote_ipv4, bpf_ntohs(encap.sport), + bpf_ntohs(encap.dport)); + return TC_ACT_OK; +} + +SEC("tc") int ipip6_set_tunnel(struct __sk_buff *skb) { struct bpf_tunnel_key key = {}; diff --git a/tools/testing/selftests/bpf/progs/test_usdt_multispec.c b/tools/testing/selftests/bpf/progs/test_usdt_multispec.c index aa6de32b50d1..962f3462066a 100644 --- a/tools/testing/selftests/bpf/progs/test_usdt_multispec.c +++ b/tools/testing/selftests/bpf/progs/test_usdt_multispec.c @@ -18,8 +18,6 @@ int usdt_100_sum; SEC("usdt//proc/self/exe:test:usdt_100") int BPF_USDT(usdt_100, int x) { - long tmp; - if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0; diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale1.c b/tools/testing/selftests/bpf/progs/test_verif_scale1.c index ac6135d9374c..323a73fb2e8c 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale1.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale1.c @@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx) void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; void *ptr; - int ret = 0, nh_off, i = 0; + int nh_off, i = 0; nh_off = 14; diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale2.c b/tools/testing/selftests/bpf/progs/test_verif_scale2.c index f90ffcafd1e8..f5318f757084 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale2.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale2.c @@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx) void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; void *ptr; - int ret = 0, nh_off, i = 0; + int nh_off, i = 0; nh_off = 14; diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale3.c b/tools/testing/selftests/bpf/progs/test_verif_scale3.c index ca33a9b711c4..2e06dbb1ad5c 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale3.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale3.c @@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx) void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; void *ptr; - int ret = 0, nh_off, i = 0; + int nh_off, i = 0; nh_off = 32; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c index 297c260fc364..81bb38d72ced 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c @@ -5,8 +5,6 @@ SEC("xdp") int _xdp_adjust_tail_grow(struct xdp_md *xdp) { - void *data_end = (void *)(long)xdp->data_end; - void *data = (void *)(long)xdp->data; int data_len = bpf_xdp_get_buff_len(xdp); int offset = 0; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */ diff --git a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c index 3379d303f41a..ee48c4963971 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c @@ -45,8 +45,6 @@ SEC("fentry/FUNC") int BPF_PROG(trace_on_entry, struct xdp_buff *xdp) { struct meta 
meta; - void *data_end = (void *)(long)xdp->data_end; - void *data = (void *)(long)xdp->data; meta.ifindex = xdp->rxq->dev->ifindex; meta.pkt_len = bpf_xdp_get_buff_len((struct xdp_md *)xdp); diff --git a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c index 77a123071940..5baaafed0d2d 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c @@ -4,6 +4,19 @@ #define ETH_ALEN 6 #define HDR_SZ (sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + sizeof(struct udphdr)) + +/** + * enum frame_mark - magics to distinguish page/packet paths + * @MARK_XMIT: page was recycled due to the frame being "xmitted" by the NIC. + * @MARK_IN: frame is being processed by the input XDP prog. + * @MARK_SKB: frame did hit the TC ingress hook as an skb. + */ +enum frame_mark { + MARK_XMIT = 0U, + MARK_IN = 0x42, + MARK_SKB = 0x45, +}; + const volatile int ifindex_out; const volatile int ifindex_in; const volatile __u8 expect_dst[ETH_ALEN]; @@ -34,12 +47,12 @@ int xdp_redirect(struct xdp_md *xdp) if (*metadata != 0x42) return XDP_ABORTED; - if (*payload == 0) { - *payload = 0x42; + if (*payload == MARK_XMIT) pkts_seen_zero++; - } - if (bpf_xdp_adjust_meta(xdp, 4)) + *payload = MARK_IN; + + if (bpf_xdp_adjust_meta(xdp, sizeof(__u64))) return XDP_ABORTED; if (retcode > XDP_PASS) @@ -51,7 +64,7 @@ int xdp_redirect(struct xdp_md *xdp) return ret; } -static bool check_pkt(void *data, void *data_end) +static bool check_pkt(void *data, void *data_end, const __u32 mark) { struct ipv6hdr *iph = data + sizeof(struct ethhdr); __u8 *payload = data + HDR_SZ; @@ -59,13 +72,13 @@ static bool check_pkt(void *data, void *data_end) if (payload + 1 > data_end) return false; - if (iph->nexthdr != IPPROTO_UDP || *payload != 0x42) + if (iph->nexthdr != IPPROTO_UDP || *payload != MARK_IN) return false; /* reset the payload so the same packet doesn't get counted twice when * it cycles back through the kernel path and out the dst veth */ - *payload = 0; + *payload = mark; return true; } @@ -75,11 +88,11 @@ int xdp_count_pkts(struct xdp_md *xdp) void *data = (void *)(long)xdp->data; void *data_end = (void *)(long)xdp->data_end; - if (check_pkt(data, data_end)) + if (check_pkt(data, data_end, MARK_XMIT)) pkts_seen_xdp++; - /* Return XDP_DROP to make sure the data page is recycled, like when it - * exits a physical NIC. Recycled pages will be counted in the + /* Return %XDP_DROP to recycle the data page with %MARK_XMIT, like + * it exited a physical NIC. Those pages will be counted in the * pkts_seen_zero counter above. */ return XDP_DROP; @@ -91,9 +104,12 @@ int tc_count_pkts(struct __sk_buff *skb) void *data = (void *)(long)skb->data; void *data_end = (void *)(long)skb->data_end; - if (check_pkt(data, data_end)) + if (check_pkt(data, data_end, MARK_SKB)) pkts_seen_tc++; + /* Will be either recycled or freed, %MARK_SKB makes sure it won't + * hit any of the counters above. 
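+	 * (%MARK_SKB is distinct from %MARK_XMIT, so when this page is recycled
+	 * back into the source veth, the xdp_redirect prog above will not bump
+	 * pkts_seen_zero for a frame that actually took the skb path.)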
+ */ return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c new file mode 100644 index 000000000000..25ee4a22e48d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta */ +#include <stddef.h> +#include <string.h> +#include <linux/bpf.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/in.h> +#include <linux/udp.h> +#include <linux/tcp.h> +#include <linux/pkt_cls.h> +#include <sys/socket.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> +#include "test_iptunnel_common.h" +#include "bpf_kfuncs.h" + +const size_t tcphdr_sz = sizeof(struct tcphdr); +const size_t udphdr_sz = sizeof(struct udphdr); +const size_t ethhdr_sz = sizeof(struct ethhdr); +const size_t iphdr_sz = sizeof(struct iphdr); +const size_t ipv6hdr_sz = sizeof(struct ipv6hdr); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 256); + __type(key, __u32); + __type(value, __u64); +} rxcnt SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_IPTNL_ENTRIES); + __type(key, struct vip); + __type(value, struct iptnl_info); +} vip2tnl SEC(".maps"); + +static __always_inline void count_tx(__u32 protocol) +{ + __u64 *rxcnt_count; + + rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol); + if (rxcnt_count) + *rxcnt_count += 1; +} + +static __always_inline int get_dport(void *trans_data, __u8 protocol) +{ + struct tcphdr *th; + struct udphdr *uh; + + switch (protocol) { + case IPPROTO_TCP: + th = (struct tcphdr *)trans_data; + return th->dest; + case IPPROTO_UDP: + uh = (struct udphdr *)trans_data; + return uh->dest; + default: + return 0; + } +} + +static __always_inline void set_ethhdr(struct ethhdr *new_eth, + const struct ethhdr *old_eth, + const struct iptnl_info *tnl, + __be16 h_proto) +{ + memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source)); + memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest)); + new_eth->h_proto = h_proto; +} + +static __always_inline int handle_ipv4(struct xdp_md *xdp, struct bpf_dynptr *xdp_ptr) +{ + __u8 eth_buffer[ethhdr_sz + iphdr_sz + ethhdr_sz]; + __u8 iph_buffer_tcp[iphdr_sz + tcphdr_sz]; + __u8 iph_buffer_udp[iphdr_sz + udphdr_sz]; + struct bpf_dynptr new_xdp_ptr; + struct iptnl_info *tnl; + struct ethhdr *new_eth; + struct ethhdr *old_eth; + struct iphdr *iph; + __u16 *next_iph; + __u16 payload_len; + struct vip vip = {}; + int dport; + __u32 csum = 0; + int i; + + __builtin_memset(eth_buffer, 0, sizeof(eth_buffer)); + __builtin_memset(iph_buffer_tcp, 0, sizeof(iph_buffer_tcp)); + __builtin_memset(iph_buffer_udp, 0, sizeof(iph_buffer_udp)); + + if (ethhdr_sz + iphdr_sz + tcphdr_sz > xdp->data_end - xdp->data) + iph = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, iph_buffer_udp, sizeof(iph_buffer_udp)); + else + iph = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, iph_buffer_tcp, sizeof(iph_buffer_tcp)); + + if (!iph) + return XDP_DROP; + + dport = get_dport(iph + 1, iph->protocol); + if (dport == -1) + return XDP_DROP; + + vip.protocol = iph->protocol; + vip.family = AF_INET; + vip.daddr.v4 = iph->daddr; + vip.dport = dport; + payload_len = bpf_ntohs(iph->tot_len); + + tnl = bpf_map_lookup_elem(&vip2tnl, &vip); + /* It only does v4-in-v4 */ + if (!tnl || tnl->family != AF_INET) + return XDP_PASS; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)iphdr_sz)) + return XDP_DROP; + + 
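+	/* bpf_xdp_adjust_head() grew the packet and invalidated any earlier
+	 * slices, so build a fresh dynptr before taking a writable slice.
+	 * Note that bpf_dynptr_slice_rdwr() may return a pointer into
+	 * eth_buffer (a local copy) rather than into the packet when the
+	 * requested area is not contiguous; the write-back below handles that.
+	 */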
bpf_dynptr_from_xdp(xdp, 0, &new_xdp_ptr); + new_eth = bpf_dynptr_slice_rdwr(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer)); + if (!new_eth) + return XDP_DROP; + + iph = (struct iphdr *)(new_eth + 1); + old_eth = (struct ethhdr *)(iph + 1); + + set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IP)); + + if (new_eth == eth_buffer) + bpf_dynptr_write(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer), 0); + + iph->version = 4; + iph->ihl = iphdr_sz >> 2; + iph->frag_off = 0; + iph->protocol = IPPROTO_IPIP; + iph->check = 0; + iph->tos = 0; + iph->tot_len = bpf_htons(payload_len + iphdr_sz); + iph->daddr = tnl->daddr.v4; + iph->saddr = tnl->saddr.v4; + iph->ttl = 8; + + next_iph = (__u16 *)iph; + for (i = 0; i < iphdr_sz >> 1; i++) + csum += *next_iph++; + + iph->check = ~((csum & 0xffff) + (csum >> 16)); + + count_tx(vip.protocol); + + return XDP_TX; +} + +static __always_inline int handle_ipv6(struct xdp_md *xdp, struct bpf_dynptr *xdp_ptr) +{ + __u8 eth_buffer[ethhdr_sz + ipv6hdr_sz + ethhdr_sz]; + __u8 ip6h_buffer_tcp[ipv6hdr_sz + tcphdr_sz]; + __u8 ip6h_buffer_udp[ipv6hdr_sz + udphdr_sz]; + struct bpf_dynptr new_xdp_ptr; + struct iptnl_info *tnl; + struct ethhdr *new_eth; + struct ethhdr *old_eth; + struct ipv6hdr *ip6h; + __u16 payload_len; + struct vip vip = {}; + int dport; + + __builtin_memset(eth_buffer, 0, sizeof(eth_buffer)); + __builtin_memset(ip6h_buffer_tcp, 0, sizeof(ip6h_buffer_tcp)); + __builtin_memset(ip6h_buffer_udp, 0, sizeof(ip6h_buffer_udp)); + + if (ethhdr_sz + iphdr_sz + tcphdr_sz > xdp->data_end - xdp->data) + ip6h = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, ip6h_buffer_udp, sizeof(ip6h_buffer_udp)); + else + ip6h = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, ip6h_buffer_tcp, sizeof(ip6h_buffer_tcp)); + + if (!ip6h) + return XDP_DROP; + + dport = get_dport(ip6h + 1, ip6h->nexthdr); + if (dport == -1) + return XDP_DROP; + + vip.protocol = ip6h->nexthdr; + vip.family = AF_INET6; + memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr)); + vip.dport = dport; + payload_len = ip6h->payload_len; + + tnl = bpf_map_lookup_elem(&vip2tnl, &vip); + /* It only does v6-in-v6 */ + if (!tnl || tnl->family != AF_INET6) + return XDP_PASS; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)ipv6hdr_sz)) + return XDP_DROP; + + bpf_dynptr_from_xdp(xdp, 0, &new_xdp_ptr); + new_eth = bpf_dynptr_slice_rdwr(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer)); + if (!new_eth) + return XDP_DROP; + + ip6h = (struct ipv6hdr *)(new_eth + 1); + old_eth = (struct ethhdr *)(ip6h + 1); + + set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IPV6)); + + if (new_eth == eth_buffer) + bpf_dynptr_write(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer), 0); + + ip6h->version = 6; + ip6h->priority = 0; + memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl)); + ip6h->payload_len = bpf_htons(bpf_ntohs(payload_len) + ipv6hdr_sz); + ip6h->nexthdr = IPPROTO_IPV6; + ip6h->hop_limit = 8; + memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6)); + memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6)); + + count_tx(vip.protocol); + + return XDP_TX; +} + +SEC("xdp") +int _xdp_tx_iptunnel(struct xdp_md *xdp) +{ + __u8 buffer[ethhdr_sz]; + struct bpf_dynptr ptr; + struct ethhdr *eth; + __u16 h_proto; + + __builtin_memset(buffer, 0, sizeof(buffer)); + + bpf_dynptr_from_xdp(xdp, 0, &ptr); + eth = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer)); + if (!eth) + return XDP_DROP; + + h_proto = eth->h_proto; + + if (h_proto == bpf_htons(ETH_P_IP)) + return handle_ipv4(xdp, &ptr); + else if (h_proto == bpf_htons(ETH_P_IPV6)) 
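+	/* handle_ipv6() mirrors handle_ipv4() above, including the write-back:
+	 * when the writable slice landed in the caller-supplied buffer instead
+	 * of the packet, the rewritten headers are flushed back out with
+	 * bpf_dynptr_write().
+	 */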
+ + return handle_ipv6(xdp, &ptr); + else + return XDP_DROP; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c index ba48fcb98ab2..42c8f6ded0e4 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c @@ -372,45 +372,6 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval, } static __attribute__ ((noinline)) -bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4) -{ - struct eth_hdr *new_eth; - struct eth_hdr *old_eth; - - old_eth = *data; - new_eth = *data + sizeof(struct ipv6hdr); - memcpy(new_eth->eth_source, old_eth->eth_source, 6); - memcpy(new_eth->eth_dest, old_eth->eth_dest, 6); - if (inner_v4) - new_eth->eth_proto = 8; - else - new_eth->eth_proto = 56710; - if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr))) - return false; - *data = (void *)(long)xdp->data; - *data_end = (void *)(long)xdp->data_end; - return true; -} - -static __attribute__ ((noinline)) -bool decap_v4(struct xdp_md *xdp, void **data, void **data_end) -{ - struct eth_hdr *new_eth; - struct eth_hdr *old_eth; - - old_eth = *data; - new_eth = *data + sizeof(struct iphdr); - memcpy(new_eth->eth_source, old_eth->eth_source, 6); - memcpy(new_eth->eth_dest, old_eth->eth_dest, 6); - new_eth->eth_proto = 8; - if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr))) - return false; - *data = (void *)(long)xdp->data; - *data_end = (void *)(long)xdp->data_end; - return true; -} - -static __attribute__ ((noinline)) int swap_mac_and_send(void *data, void *data_end) { unsigned char tmp_mac[6]; @@ -430,7 +391,6 @@ int send_icmp_reply(void *data, void *data_end) __u16 *next_iph_u16; __u32 tmp_addr = 0; struct iphdr *iph; - __u32 csum1 = 0; __u32 csum = 0; __u64 off = 0; @@ -662,7 +622,6 @@ static int process_l3_headers_v4(struct packet_description *pckt, void *data_end) { struct iphdr *iph; - __u64 iph_len; int action; iph = data + off; @@ -696,7 +655,6 @@ static int process_packet(void *data, __u64 off, void *data_end, struct packet_description pckt = { }; struct vip_definition vip = { }; struct lb_stats *data_stats; - struct eth_hdr *eth = data; void *lru_map = &lru_cache; struct vip_meta *vip_info; __u32 lru_stats_key = 513; @@ -704,7 +662,6 @@ static int process_packet(void *data, __u64 off, void *data_end, __u32 stats_key = 512; struct ctl_value *cval; __u16 pkt_bytes; - __u64 iph_len; __u8 protocol; __u32 vip_num; int action; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c index 4ddcb6dfe500..f3ec8086482d 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c @@ -210,19 +210,6 @@ int xdp_prognum2(struct xdp_md *ctx) } static __always_inline -void shift_mac_4bytes_16bit(void *data) -{ - __u16 *p = data; - - p[7] = p[5]; /* delete p[7] was vlan_hdr->h_vlan_TCI */ - p[6] = p[4]; /* delete p[6] was ethhdr->h_proto */ - p[5] = p[3]; - p[4] = p[2]; - p[3] = p[1]; - p[2] = p[0]; -} - -static __always_inline void shift_mac_4bytes_32bit(void *data) { __u32 *p = data; diff --git a/tools/testing/selftests/bpf/progs/timer.c b/tools/testing/selftests/bpf/progs/timer.c index acda5c9cea93..9a16d95213e1 100644 --- a/tools/testing/selftests/bpf/progs/timer.c +++ b/tools/testing/selftests/bpf/progs/timer.c @@ -46,7 +46,15 @@ struct { __type(value, struct elem); } lru SEC(".maps"); +struct { + 
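+	/* one-slot array whose struct elem embeds the bpf_timer that test3
+	 * below arms with BPF_F_TIMER_ABS
+	 */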
__uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct elem); +} abs_timer SEC(".maps"); + __u64 bss_data; +__u64 abs_data; __u64 err; __u64 ok; __u64 callback_check = 52; @@ -284,3 +292,40 @@ int BPF_PROG2(test2, int, a, int, b) return bpf_timer_test(); } + +/* callback for absolute timer */ +static int timer_cb3(void *map, int *key, struct bpf_timer *timer) +{ + abs_data += 6; + + if (abs_data < 12) { + bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000, + BPF_F_TIMER_ABS); + } else { + /* Re-arm timer ~35 seconds in future */ + bpf_timer_start(timer, bpf_ktime_get_boot_ns() + (1ull << 35), + BPF_F_TIMER_ABS); + } + + return 0; +} + +SEC("fentry/bpf_fentry_test3") +int BPF_PROG2(test3, int, a) +{ + int key = 0; + struct bpf_timer *timer; + + bpf_printk("test3"); + + timer = bpf_map_lookup_elem(&abs_timer, &key); + if (timer) { + if (bpf_timer_init(timer, &abs_timer, CLOCK_BOOTTIME) != 0) + err |= 2048; + bpf_timer_set_callback(timer, timer_cb3); + bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000, + BPF_F_TIMER_ABS); + } + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/tracing_struct.c b/tools/testing/selftests/bpf/progs/tracing_struct.c index e718f0ebee7d..c435a3a8328a 100644 --- a/tools/testing/selftests/bpf/progs/tracing_struct.c +++ b/tools/testing/selftests/bpf/progs/tracing_struct.c @@ -13,12 +13,18 @@ struct bpf_testmod_struct_arg_2 { long b; }; +struct bpf_testmod_struct_arg_3 { + int a; + int b[]; +}; + long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs; __u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3; long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret; long t3_a, t3_b, t3_c_a, t3_c_b, t3_ret; long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret; long t5_ret; +int t6; SEC("fentry/bpf_testmod_test_struct_arg_1") int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, b, int, c) @@ -117,4 +123,11 @@ int BPF_PROG2(test_struct_arg_10, int, ret) return 0; } +SEC("fentry/bpf_testmod_test_struct_arg_6") +int BPF_PROG2(test_struct_arg_11, struct bpf_testmod_struct_arg_3 *, a) +{ + t6 = a->b[0]; + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/type_cast.c b/tools/testing/selftests/bpf/progs/type_cast.c index eb78e6f03129..a9629ac230fd 100644 --- a/tools/testing/selftests/bpf/progs/type_cast.c +++ b/tools/testing/selftests/bpf/progs/type_cast.c @@ -63,7 +63,6 @@ SEC("?tp_btf/sys_enter") int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id) { struct task_struct *task, *task_dup; - long *ptr; task = bpf_get_current_task_btf(); task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct)); diff --git a/tools/testing/selftests/bpf/progs/udp_limit.c b/tools/testing/selftests/bpf/progs/udp_limit.c index 165e3c2dd9a3..4767451b59ac 100644 --- a/tools/testing/selftests/bpf/progs/udp_limit.c +++ b/tools/testing/selftests/bpf/progs/udp_limit.c @@ -17,7 +17,6 @@ SEC("cgroup/sock_create") int sock(struct bpf_sock *ctx) { int *sk_storage; - __u32 key; if (ctx->type != SOCK_DGRAM) return 1; @@ -46,7 +45,6 @@ SEC("cgroup/sock_release") int sock_release(struct bpf_sock *ctx) { int *sk_storage; - __u32 key; if (ctx->type != SOCK_DGRAM) return 1; diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_success.c b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c index b39093dd5715..dd3bdf672633 100644 --- a/tools/testing/selftests/bpf/progs/user_ringbuf_success.c +++ b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c @@ -162,8 +162,6 @@ 
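The timer_cb3()/test3() pair added above exercises the BPF_F_TIMER_ABS flag. A minimal sketch of the semantics, assuming a struct bpf_timer already initialized against CLOCK_BOOTTIME:

	/* default: 'nsecs' is relative, fires about 1us from now */
	bpf_timer_start(timer, 1000, 0);

	/* BPF_F_TIMER_ABS: 'nsecs' is an absolute CLOCK_BOOTTIME value, so
	 * this fires when the boot clock reaches now + 1us
	 */
	bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000, BPF_F_TIMER_ABS);

The second re-arm in timer_cb3() parks the timer roughly 35 seconds out (1ull << 35 ns), far enough in the future that the test can verify the callback does not keep firing.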
SEC("fentry/" SYS_PREFIX "sys_prctl") int test_user_ringbuf_protocol(void *ctx) { long status = 0; - struct sample *sample = NULL; - struct bpf_dynptr ptr; if (!is_test_process()) return 0; @@ -183,10 +181,6 @@ int test_user_ringbuf_protocol(void *ctx) SEC("fentry/" SYS_PREFIX "sys_getpgid") int test_user_ringbuf(void *ctx) { - int status = 0; - struct sample *sample = NULL; - struct bpf_dynptr ptr; - if (!is_test_process()) return 0; @@ -202,7 +196,7 @@ do_nothing_cb(struct bpf_dynptr *dynptr, void *context) return 0; } -SEC("fentry/" SYS_PREFIX "sys_getrlimit") +SEC("fentry/" SYS_PREFIX "sys_prlimit64") int test_user_ringbuf_epoll(void *ctx) { long num_samples; diff --git a/tools/testing/selftests/bpf/progs/verifier_and.c b/tools/testing/selftests/bpf/progs/verifier_and.c new file mode 100644 index 000000000000..e97e518516b6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_and.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/and.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("invalid and of negative number") +__failure __msg("R0 max value is outside of the allowed memory range") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void invalid_and_of_negative_number(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= -4; \ + r1 <<= 2; \ + r0 += r1; \ +l0_%=: r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid range check") +__failure __msg("R0 max value is outside of the allowed memory range") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void invalid_range_check(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + r9 = 1; \ + w1 %%= 2; \ + w1 += 1; \ + w9 &= w1; \ + w9 += 1; \ + w9 >>= 1; \ + w3 = 1; \ + w3 -= w9; \ + w3 *= 0x10000000; \ + r0 += r3; \ + *(u32*)(r0 + 0) = r3; \ +l0_%=: r0 = r0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("check known subreg with unknown reg") +__success __failure_unpriv __msg_unpriv("R1 !read_ok") +__retval(0) +__naked void known_subreg_with_unknown_reg(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r0 <<= 32; \ + r0 += 1; \ + r0 &= 0xFFFF1234; \ + /* Upper bits are unknown but AND above masks out 1 zero'ing lower bits */\ + if w0 < 1 goto l0_%=; \ + r1 = *(u32*)(r1 + 512); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_array_access.c b/tools/testing/selftests/bpf/progs/verifier_array_access.c new file mode 100644 index 000000000000..95d7ecc12963 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/verifier_array_access.c @@ -0,0 +1,529 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/array_access.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); + __uint(map_flags, BPF_F_RDONLY_PROG); +} map_array_ro SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); + __uint(map_flags, BPF_F_WRONLY_PROG); +} map_array_wo SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("valid map access into an array with a constant") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void an_array_with_a_constant_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("valid map access into an array with a register") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void an_array_with_a_register_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("valid map access into an array with a variable") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void an_array_with_a_variable_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + if r1 >= %[max_entries] goto l0_%=; \ + r1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(max_entries, MAX_ENTRIES), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("valid map access into an array with a signed variable") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void array_with_a_signed_variable(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + if w1 s> 0xffffffff goto l1_%=; \ + w1 = 0; \ +l1_%=: w2 = %[max_entries]; \ + if r2 s> r1 goto l2_%=; \ + w1 = 0; \ +l2_%=: w1 <<= 2; \ + r0 += r1; \ + r1 = 
%[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(max_entries, MAX_ENTRIES), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with a constant") +__failure __msg("invalid access to map value, value_size=48 off=48 size=8") +__failure_unpriv +__naked void an_array_with_a_constant_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + %[__imm_0]) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, (MAX_ENTRIES + 1) << 2), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with a register") +__failure __msg("R0 min value is outside of the allowed memory range") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void an_array_with_a_register_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = %[__imm_0]; \ + r1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, MAX_ENTRIES + 1), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with a variable") +__failure +__msg("R0 unbounded memory access, make sure to bounds check any such access") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void an_array_with_a_variable_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + r1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with no floor check") +__failure __msg("R0 unbounded memory access") +__failure_unpriv __msg_unpriv("R0 leaks addr") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void array_with_no_floor_check(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r0 + 0); \ + w2 = %[max_entries]; \ + if r2 s> r1 goto l1_%=; \ + w1 = 0; \ +l1_%=: w1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(max_entries, MAX_ENTRIES), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with a invalid max check") +__failure __msg("invalid access to map value, value_size=48 off=44 size=8") +__failure_unpriv __msg_unpriv("R0 leaks addr") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void with_a_invalid_max_check_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; 
\ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + w2 = %[__imm_0]; \ + if r2 > r1 goto l1_%=; \ + w1 = 0; \ +l1_%=: w1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, MAX_ENTRIES + 1), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with a invalid max check") +__failure __msg("R0 pointer += pointer") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void with_a_invalid_max_check_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r8 = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 += r8; \ + r0 = *(u32*)(r0 + %[test_val_foo]); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("valid read map access into a read-only array 1") +__success __success_unpriv __retval(28) +__naked void a_read_only_array_1_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_ro] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u32*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_ro) + : __clobber_all); +} + +SEC("tc") +__description("valid read map access into a read-only array 2") +__success __retval(65507) +__naked void a_read_only_array_2_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_ro] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 &= 0xffff; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm(bpf_map_lookup_elem), + __imm_addr(map_array_ro) + : __clobber_all); +} + +SEC("socket") +__description("invalid write map access into a read-only array 1") +__failure __msg("write into map forbidden") +__failure_unpriv +__naked void a_read_only_array_1_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_ro] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 42; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_ro) + : __clobber_all); +} + +SEC("tc") +__description("invalid write map access into a read-only array 2") +__failure __msg("write into map forbidden") +__naked void a_read_only_array_2_2(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_ro] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r6; \ + r2 = 0; \ + r3 = r0; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_skb_load_bytes), + __imm_addr(map_array_ro) + : __clobber_all); +} + +SEC("socket") +__description("valid write map access into a write-only array 1") +__success __success_unpriv __retval(1) +__naked void 
a_write_only_array_1_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_wo] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 42; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_wo) + : __clobber_all); +} + +SEC("tc") +__description("valid write map access into a write-only array 2") +__success __retval(0) +__naked void a_write_only_array_2_1(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_wo] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r6; \ + r2 = 0; \ + r3 = r0; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_skb_load_bytes), + __imm_addr(map_array_wo) + : __clobber_all); +} + +SEC("socket") +__description("invalid read map access into a write-only array 1") +__failure __msg("read from map forbidden") +__failure_unpriv +__naked void a_write_only_array_1_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_wo] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u64*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_wo) + : __clobber_all); +} + +SEC("tc") +__description("invalid read map access into a write-only array 2") +__failure __msg("read from map forbidden") +__naked void a_write_only_array_2_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_wo] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: exit; \ +" : + : __imm(bpf_csum_diff), + __imm(bpf_map_lookup_elem), + __imm_addr(map_array_wo) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c new file mode 100644 index 000000000000..359df865a8f3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/basic_stack.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("stack out of bounds") +__failure __msg("invalid write to stack") +__failure_unpriv +__naked void stack_out_of_bounds(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 + 8) = r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("uninitialized stack1") +__failure __msg("invalid indirect read from stack") +__failure_unpriv +__naked void uninitialized_stack1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("uninitialized stack2") +__failure __msg("invalid read from stack") +__failure_unpriv +__naked void uninitialized_stack2(void) +{ + asm volatile (" \ + r2 = r10; \ + r0 = *(u64*)(r2 - 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") 
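+/* The verifier tracks the frame pointer only through additive constant
+ * offsets: forming fp-8 with an explicit subtract (r1 = r10; r1 -= 8) is
+ * rejected as "subtraction from stack pointer", even though it names the
+ * same slot the "non-invalid fp arithmetic" test below writes as r10 - 8.
+ */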
+__description("invalid fp arithmetic") +__failure __msg("R1 subtraction from stack pointer") +__failure_unpriv +__naked void invalid_fp_arithmetic(void) +{ + /* If this gets ever changed, make sure JITs can deal with it. */ + asm volatile (" \ + r0 = 0; \ + r1 = r10; \ + r1 -= 8; \ + *(u64*)(r1 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("non-invalid fp arithmetic") +__success __success_unpriv __retval(0) +__naked void non_invalid_fp_arithmetic(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("misaligned read from stack") +__failure __msg("misaligned stack access") +__failure_unpriv +__naked void misaligned_read_from_stack(void) +{ + asm volatile (" \ + r2 = r10; \ + r0 = *(u64*)(r2 - 4); \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c new file mode 100644 index 000000000000..c5588a14fe2e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c @@ -0,0 +1,1076 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/bounds.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("subtraction bounds (map value) variant 1") +__failure __msg("R0 max value is outside of the allowed memory range") +__failure_unpriv +__naked void bounds_map_value_variant_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + if r1 > 0xff goto l0_%=; \ + r3 = *(u8*)(r0 + 1); \ + if r3 > 0xff goto l0_%=; \ + r1 -= r3; \ + r1 >>= 56; \ + r0 += r1; \ + r0 = *(u8*)(r0 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("subtraction bounds (map value) variant 2") +__failure +__msg("R0 min value is negative, either use unsigned index or do a if (index >=0) check.") +__msg_unpriv("R1 has unknown scalar with mixed signed bounds") +__naked void bounds_map_value_variant_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + if r1 > 0xff goto l0_%=; \ + r3 = *(u8*)(r0 + 1); \ + if r3 > 0xff goto l0_%=; \ + r1 -= r3; \ + r0 += r1; \ + r0 = *(u8*)(r0 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("check subtraction on pointers for unpriv") +__success __failure_unpriv __msg_unpriv("R9 pointer -= pointer prohibited") +__retval(0) +__naked void subtraction_on_pointers_for_unpriv(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = %[map_hash_8b] ll; \ + r2 = r10; \ + r2 += -8; \ + r6 = 9; \ + *(u64*)(r2 + 0) = r6; \ + call %[bpf_map_lookup_elem]; \ + r9 = r10; \ + r9 -= r0; \ + r1 = %[map_hash_8b] ll; \ + r2 = r10; \ + r2 += -8; \ + r6 = 0; \ + *(u64*)(r2 + 0) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: *(u64*)(r0 + 0) = 
r9; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check based on zero-extended MOV") +__success __success_unpriv __retval(0) +__naked void based_on_zero_extended_mov(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r2 = 0x0000'0000'ffff'ffff */ \ + w2 = 0xffffffff; \ + /* r2 = 0 */ \ + r2 >>= 32; \ + /* no-op */ \ + r0 += r2; \ + /* access at offset 0 */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check based on sign-extended MOV. test1") +__failure __msg("map_value pointer and 4294967295") +__failure_unpriv +__naked void on_sign_extended_mov_test1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r2 = 0xffff'ffff'ffff'ffff */ \ + r2 = 0xffffffff; \ + /* r2 = 0xffff'ffff */ \ + r2 >>= 32; \ + /* r0 = <oob pointer> */ \ + r0 += r2; \ + /* access to OOB pointer */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check based on sign-extended MOV. test2") +__failure __msg("R0 min value is outside of the allowed memory range") +__failure_unpriv +__naked void on_sign_extended_mov_test2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r2 = 0xffff'ffff'ffff'ffff */ \ + r2 = 0xffffffff; \ + /* r2 = 0xfff'ffff */ \ + r2 >>= 36; \ + /* r0 = <oob pointer> */ \ + r0 += r2; \ + /* access to OOB pointer */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("bounds check based on reg_off + var_off + insn_off. test1") +__failure __msg("value_size=8 off=1073741825") +__naked void var_off_insn_off_test1(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r6 &= 1; \ + r6 += %[__imm_0]; \ + r0 += r6; \ + r0 += %[__imm_0]; \ +l0_%=: r0 = *(u8*)(r0 + 3); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, (1 << 29) - 1), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("bounds check based on reg_off + var_off + insn_off. 
test2") +__failure __msg("value 1073741823") +__naked void var_off_insn_off_test2(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r6 &= 1; \ + r6 += %[__imm_0]; \ + r0 += r6; \ + r0 += %[__imm_1]; \ +l0_%=: r0 = *(u8*)(r0 + 3); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, (1 << 30) - 1), + __imm_const(__imm_1, (1 << 29) - 1), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after truncation of non-boundary-crossing range") +__success __success_unpriv __retval(0) +__naked void of_non_boundary_crossing_range(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + r2 = 1; \ + /* r2 = 0x10'0000'0000 */ \ + r2 <<= 36; \ + /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ \ + r1 += r2; \ + /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ \ + r1 += 0x7fffffff; \ + /* r1 = [0x00, 0xff] */ \ + w1 -= 0x7fffffff; \ + /* r1 = 0 */ \ + r1 >>= 8; \ + /* no-op */ \ + r0 += r1; \ + /* access at offset 0 */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after truncation of boundary-crossing range (1)") +__failure +/* not actually fully unbounded, but the bound is very high */ +__msg("value -4294967168 makes map_value pointer be out of bounds") +__failure_unpriv +__naked void of_boundary_crossing_range_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + r1 += %[__imm_0]; \ + /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \ + r1 += %[__imm_0]; \ + /* r1 = [0xffff'ff80, 0xffff'ffff] or \ + * [0x0000'0000, 0x0000'007f] \ + */ \ + w1 += 0; \ + r1 -= %[__imm_0]; \ + /* r1 = [0x00, 0xff] or \ + * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\ + */ \ + r1 -= %[__imm_0]; \ + /* error on OOB pointer computation */ \ + r0 += r1; \ + /* exit */ \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, 0xffffff80 >> 1) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after truncation of boundary-crossing range (2)") +__failure __msg("value -4294967168 makes map_value pointer be out of bounds") +__failure_unpriv +__naked void of_boundary_crossing_range_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + r1 += %[__imm_0]; \ + /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \ + r1 += %[__imm_0]; \ + /* r1 = [0xffff'ff80, 0xffff'ffff] or \ + * [0x0000'0000, 0x0000'007f] \ + * difference to previous test: truncation via MOV32\ + * instead of ALU32. 
\ + */ \ + w1 = w1; \ + r1 -= %[__imm_0]; \ + /* r1 = [0x00, 0xff] or \ + * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\ + */ \ + r1 -= %[__imm_0]; \ + /* error on OOB pointer computation */ \ + r0 += r1; \ + /* exit */ \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, 0xffffff80 >> 1) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after wrapping 32-bit addition") +__success __success_unpriv __retval(0) +__naked void after_wrapping_32_bit_addition(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = 0x7fff'ffff */ \ + r1 = 0x7fffffff; \ + /* r1 = 0xffff'fffe */ \ + r1 += 0x7fffffff; \ + /* r1 = 0 */ \ + w1 += 2; \ + /* no-op */ \ + r0 += r1; \ + /* access at offset 0 */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after shift with oversized count operand") +__failure __msg("R0 max value is outside of the allowed memory range") +__failure_unpriv +__naked void shift_with_oversized_count_operand(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = 32; \ + r1 = 1; \ + /* r1 = (u32)1 << (u32)32 = ? */ \ + w1 <<= w2; \ + /* r1 = [0x0000, 0xffff] */ \ + r1 &= 0xffff; \ + /* computes unknown pointer, potentially OOB */ \ + r0 += r1; \ + /* potentially OOB access */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after right shift of maybe-negative number") +__failure __msg("R0 unbounded memory access") +__failure_unpriv +__naked void shift_of_maybe_negative_number(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + /* r1 = [-0x01, 0xfe] */ \ + r1 -= 1; \ + /* r1 = 0 or 0xff'ffff'ffff'ffff */ \ + r1 >>= 8; \ + /* r1 = 0 or 0xffff'ffff'ffff */ \ + r1 >>= 8; \ + /* computes unknown pointer, potentially OOB */ \ + r0 += r1; \ + /* potentially OOB access */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after 32-bit right shift with 64-bit input") +__failure __msg("math between map_value pointer and 4294967294 is not allowed") +__failure_unpriv +__naked void shift_with_64_bit_input(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 2; \ + /* r1 = 1<<32 */ \ + r1 <<= 31; \ + /* r1 = 0 (NOT 2!) */ \ + w1 >>= 31; \ + /* r1 = 0xffff'fffe (NOT 0!) */ \ + w1 -= 2; \ + /* error on computing OOB pointer */ \ + r0 += r1; \ + /* exit */ \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. 
test1") +__failure __msg("map_value pointer and 2147483646") +__failure_unpriv +__naked void size_signed_32bit_overflow_test1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 += 0x7ffffffe; \ + r0 = *(u64*)(r0 + 0); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. test2") +__failure __msg("pointer offset 1073741822") +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__naked void size_signed_32bit_overflow_test2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 += 0x1fffffff; \ + r0 += 0x1fffffff; \ + r0 += 0x1fffffff; \ + r0 = *(u64*)(r0 + 0); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. test3") +__failure __msg("pointer offset -1073741822") +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__naked void size_signed_32bit_overflow_test3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 -= 0x1fffffff; \ + r0 -= 0x1fffffff; \ + r0 = *(u64*)(r0 + 2); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. test4") +__failure __msg("map_value pointer and 1000000000000") +__failure_unpriv +__naked void size_signed_32bit_overflow_test4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = 1000000; \ + r1 *= 1000000; \ + r0 += r1; \ + r0 = *(u64*)(r0 + 2); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check mixed 32bit and 64bit arithmetic. test1") +__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__retval(0) +__naked void _32bit_and_64bit_arithmetic_test1(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = -1; \ + r1 <<= 32; \ + r1 += 1; \ + /* r1 = 0xffffFFFF00000001 */ \ + if w1 > 1 goto l0_%=; \ + /* check ALU64 op keeps 32bit bounds */ \ + r1 += 1; \ + if w1 > 2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: /* invalid ldx if bounds are lost above */ \ + r0 = *(u64*)(r0 - 1); \ +l1_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("bounds check mixed 32bit and 64bit arithmetic. 
test2") +__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__retval(0) +__naked void _32bit_and_64bit_arithmetic_test2(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = -1; \ + r1 <<= 32; \ + r1 += 1; \ + /* r1 = 0xffffFFFF00000001 */ \ + r2 = 3; \ + /* r1 = 0x2 */ \ + w1 += 1; \ + /* check ALU32 op zero extends 64bit bounds */ \ + if r1 > r2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: /* invalid ldx if bounds are lost above */ \ + r0 = *(u64*)(r0 - 1); \ +l1_%=: exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("assigning 32bit bounds to 64bit for wA = 0, wB = wA") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void for_wa_0_wb_wa(void) +{ + asm volatile (" \ + r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data]); \ + w9 = 0; \ + w2 = w9; \ + r6 = r7; \ + r6 += r2; \ + r3 = r6; \ + r3 += 8; \ + if r3 > r8 goto l0_%=; \ + r5 = *(u32*)(r6 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg = 0, reg xor 1") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg_0_reg_xor_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = 0; \ + r1 ^= 1; \ + if r1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg32 = 0, reg32 xor 1") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg32_0_reg32_xor_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: w1 = 0; \ + w1 ^= 1; \ + if w1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg = 2, reg xor 3") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg_2_reg_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = 2; \ + r1 ^= 3; \ + if r1 > 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg = any, reg xor 3") +__failure __msg("invalid access to map value") +__msg_unpriv("invalid access to map value") +__naked void reg_any_reg_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + r1 ^= 3; \ + if r1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + 
+SEC("socket") +__description("bounds check for reg32 = any, reg32 xor 3") +__failure __msg("invalid access to map value") +__msg_unpriv("invalid access to map value") +__naked void reg32_any_reg32_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + w1 ^= 3; \ + if w1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg > 0, reg xor 3") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg_0_reg_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + if r1 <= 0 goto l1_%=; \ + r1 ^= 3; \ + if r1 >= 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg32 > 0, reg32 xor 3") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg32_0_reg32_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + if w1 <= 0 goto l1_%=; \ + w1 ^= 3; \ + if w1 >= 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks after 32-bit truncation. test 1") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void _32_bit_truncation_test_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + /* This used to reduce the max bound to 0x7fffffff */\ + if r1 == 0 goto l1_%=; \ + if r1 > 0x7fffffff goto l0_%=; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks after 32-bit truncation. 
test 2") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void _32_bit_truncation_test_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + if r1 s< 1 goto l1_%=; \ + if w1 s< 0 goto l0_%=; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP_JLT for crossing 64-bit signed boundary") +__success __retval(0) +__naked void crossing_64_bit_signed_boundary_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + r0 = 0x7fffffffffffff10 ll; \ + r1 += r0; \ + r0 = 0x8000000000000000 ll; \ +l1_%=: r0 += 1; \ + /* r1 unsigned range is [0x7fffffffffffff10, 0x800000000000000f] */\ + if r0 < r1 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP_JSLT for crossing 64-bit signed boundary") +__success __retval(0) +__naked void crossing_64_bit_signed_boundary_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + r0 = 0x7fffffffffffff10 ll; \ + r1 += r0; \ + r2 = 0x8000000000000fff ll; \ + r0 = 0x8000000000000000 ll; \ +l1_%=: r0 += 1; \ + if r0 s> r2 goto l0_%=; \ + /* r1 signed range is [S64_MIN, S64_MAX] */ \ + if r0 s< r1 goto l1_%=; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check for loop upper bound greater than U32_MAX") +__success __retval(0) +__naked void bound_greater_than_u32_max(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + r0 = 0x100000000 ll; \ + r1 += r0; \ + r0 = 0x100000000 ll; \ +l1_%=: r0 += 1; \ + if r0 < r1 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP32_JLT for crossing 32-bit signed boundary") +__success __retval(0) +__naked void crossing_32_bit_signed_boundary_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + w0 = 0x7fffff10; \ + w1 += w0; \ + w0 = 0x80000000; \ +l1_%=: w0 += 1; \ + /* r1 unsigned range is [0, 0x8000000f] */ \ + if w0 < w1 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP32_JSLT for crossing 32-bit signed boundary") +__success __retval(0) +__naked void crossing_32_bit_signed_boundary_2(void) +{ + asm volatile (" \ + r2 = 
*(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + w0 = 0x7fffff10; \ + w1 += w0; \ + w2 = 0x80000fff; \ + w0 = 0x80000000; \ +l1_%=: w0 += 1; \ + if w0 s> w2 goto l0_%=; \ + /* r1 signed range is [S32_MIN, S32_MAX] */ \ + if w0 s< w1 goto l1_%=; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c new file mode 100644 index 000000000000..c506afbdd936 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/bounds_deduction.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("socket") +__description("check deducing bounds from const, 1") +__failure __msg("R0 tried to subtract pointer from scalar") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__naked void deducing_bounds_from_const_1(void) +{ + asm volatile (" \ + r0 = 1; \ + if r0 s>= 1 goto l0_%=; \ +l0_%=: r0 -= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 2") +__success __failure_unpriv +__msg_unpriv("R1 has pointer with unsupported alu operation") +__retval(1) +__naked void deducing_bounds_from_const_2(void) +{ + asm volatile (" \ + r0 = 1; \ + if r0 s>= 1 goto l0_%=; \ + exit; \ +l0_%=: if r0 s<= 1 goto l1_%=; \ + exit; \ +l1_%=: r1 -= r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 3") +__failure __msg("R0 tried to subtract pointer from scalar") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__naked void deducing_bounds_from_const_3(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 s<= 0 goto l0_%=; \ +l0_%=: r0 -= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 4") +__success __failure_unpriv +__msg_unpriv("R6 has pointer with unsupported alu operation") +__retval(0) +__naked void deducing_bounds_from_const_4(void) +{ + asm volatile (" \ + r6 = r1; \ + r0 = 0; \ + if r0 s<= 0 goto l0_%=; \ + exit; \ +l0_%=: if r0 s>= 0 goto l1_%=; \ + exit; \ +l1_%=: r6 -= r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 5") +__failure __msg("R0 tried to subtract pointer from scalar") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__naked void deducing_bounds_from_const_5(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 s>= 1 goto l0_%=; \ + r0 -= r1; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 6") +__failure __msg("R0 tried to subtract pointer from scalar") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__naked void deducing_bounds_from_const_6(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 s>= 0 goto l0_%=; \ + exit; \ +l0_%=: r0 -= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 7") +__failure __msg("dereference of modified ctx ptr") +__msg_unpriv("R1 has pointer with unsupported alu operation") 
+__flag(BPF_F_ANY_ALIGNMENT) +__naked void deducing_bounds_from_const_7(void) +{ + asm volatile (" \ + r0 = %[__imm_0]; \ + if r0 s>= 0 goto l0_%=; \ +l0_%=: r1 -= r0; \ + r0 = *(u32*)(r1 + %[__sk_buff_mark]); \ + exit; \ +" : + : __imm_const(__imm_0, ~0), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 8") +__failure __msg("negative offset ctx ptr R1 off=-1 disallowed") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void deducing_bounds_from_const_8(void) +{ + asm volatile (" \ + r0 = %[__imm_0]; \ + if r0 s>= 0 goto l0_%=; \ + r1 += r0; \ +l0_%=: r0 = *(u32*)(r1 + %[__sk_buff_mark]); \ + exit; \ +" : + : __imm_const(__imm_0, ~0), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 9") +__failure __msg("R0 tried to subtract pointer from scalar") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__naked void deducing_bounds_from_const_9(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 s>= 0 goto l0_%=; \ +l0_%=: r0 -= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 10") +__failure +__msg("math between ctx pointer and register with unbounded min value is not allowed") +__failure_unpriv +__naked void deducing_bounds_from_const_10(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 s<= 0 goto l0_%=; \ +l0_%=: /* Marks reg as unknown. */ \ + r0 = -r0; \ + r0 -= r1; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c new file mode 100644 index 000000000000..823f727cf210 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c @@ -0,0 +1,639 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <non_const> == <const>, 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_1(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 3 goto l0_%=; \ + r2 = 2; \ + if r0 == r2 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <non_const> == <const>, 2") +__success __retval(0) +__naked void deducing_bounds_from_non_const_2(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 > 3 goto l0_%=; \ + r2 = 4; \ + if r0 == r2 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <non_const> != <const>, 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_3(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 3 goto l0_%=; \ + r2 = 2; \ + if r0 != r2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <non_const> != <const>, 2") +__success __retval(0) 
+__naked void deducing_bounds_from_non_const_4(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 > 3 goto l0_%=; \ + r2 = 4; \ + if r0 != r2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <non_const> == <const>, 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_5(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 4 goto l0_%=; \ + w2 = 3; \ + if w0 == w2 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <non_const> == <const>, 2") +__success __retval(0) +__naked void deducing_bounds_from_non_const_6(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 > 4 goto l0_%=; \ + w2 = 5; \ + if w0 == w2 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <non_const> != <const>, 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_7(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 3 goto l0_%=; \ + w2 = 2; \ + if w0 != w2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <non_const> != <const>, 2") +__success __retval(0) +__naked void deducing_bounds_from_non_const_8(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 > 3 goto l0_%=; \ + w2 = 4; \ + if w0 != w2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <const> > <non_const>, 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_9(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + r2 = 0; \ + if r2 > r0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <const> > <non_const>, 2") +__success __retval(0) +__naked void deducing_bounds_from_non_const_10(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 4 goto l0_%=; \ + r2 = 4; \ + if r2 > r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <const> >= <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_11(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 4 goto l0_%=; \ + r2 = 3; \ + if r2 >= r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <const> < <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_12(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 > 4 goto l0_%=; \ + r2 = 4; \ + if r2 < r0 goto 
l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <const> <= <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_13(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 >= 4 goto l0_%=; \ + r2 = 4; \ + if r2 <= r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <const> == <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_14(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 3 goto l0_%=; \ + r2 = 2; \ + if r2 == r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <const> s> <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_15(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 s< 4 goto l0_%=; \ + r2 = 4; \ + if r2 s> r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <const> s>= <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_16(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 s< 4 goto l0_%=; \ + r2 = 3; \ + if r2 s>= r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <const> s< <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_17(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 s> 4 goto l0_%=; \ + r2 = 4; \ + if r2 s< r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <const> s<= <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_18(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 s> 4 goto l0_%=; \ + r2 = 5; \ + if r2 s<= r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <const> != <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_19(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 3 goto l0_%=; \ + r2 = 2; \ + if r2 != r0 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <const> > <non_const>, 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_20(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + w2 = 0; \ + if w2 > w0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from 
non-const, jmp32, <const> > <non_const>, 2") +__success __retval(0) +__naked void deducing_bounds_from_non_const_21(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 4 goto l0_%=; \ + w2 = 4; \ + if w2 > w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <const> >= <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_22(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 4 goto l0_%=; \ + w2 = 3; \ + if w2 >= w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <const> < <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_23(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 > 4 goto l0_%=; \ + w2 = 4; \ + if w2 < w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <const> <= <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_24(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 >= 4 goto l0_%=; \ + w2 = 4; \ + if w2 <= w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <const> == <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_25(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 4 goto l0_%=; \ + w2 = 3; \ + if w2 == w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <const> s> <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_26(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 s< 4 goto l0_%=; \ + w2 = 4; \ + if w2 s> w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <const> s>= <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_27(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 s< 4 goto l0_%=; \ + w2 = 3; \ + if w2 s>= w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <const> s< <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_28(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 s> 4 goto l0_%=; \ + w2 = 5; \ + if w2 s< w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <const> s<= <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_29(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 
s>= 4 goto l0_%=; \ + w2 = 4; \ + if w2 s<= w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <const> != <non_const>") +__success __retval(0) +__naked void deducing_bounds_from_non_const_30(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 3 goto l0_%=; \ + w2 = 2; \ + if w2 != w0 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c new file mode 100644 index 000000000000..4f40144748a5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("bounds checks mixing signed and unsigned, positive bounds") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_positive_bounds(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = 2; \ + if r2 >= r1 goto l0_%=; \ + if r1 s> 4 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void checks_mixing_signed_and_unsigned(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r1 > r2 goto l0_%=; \ + if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 2") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_2(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r1 > r2 goto l0_%=; \ + r8 = 0; \ + r8 += r1; \ + if r8 s> 1 goto l0_%=; \ + r0 += r8; \ + r0 = 0; \ + *(u8*)(r8 + 0) = r0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds 
checks mixing signed and unsigned, variant 3") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_3(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r1 > r2 goto l0_%=; \ + r8 = r1; \ + if r8 s> 1 goto l0_%=; \ + r0 += r8; \ + r0 = 0; \ + *(u8*)(r8 + 0) = r0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 4") +__success __success_unpriv __retval(0) +__naked void signed_and_unsigned_variant_4(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = 1; \ + r1 &= r2; \ + if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 5") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_5(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r1 > r2 goto l0_%=; \ + if r1 s> 1 goto l0_%=; \ + r0 += 4; \ + r0 -= r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 6") +__failure __msg("R4 min value is negative, either use unsigned") +__failure_unpriv +__naked void signed_and_unsigned_variant_6(void) +{ + asm volatile (" \ + r9 = r1; \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = r9; \ + r2 = 0; \ + r3 = r10; \ + r3 += -512; \ + r4 = *(u64*)(r10 - 16); \ + r6 = -1; \ + if r4 > r6 goto l0_%=; \ + if r4 s> 1 goto l0_%=; \ + r4 += 1; \ + r5 = 0; \ + r6 = 0; \ + *(u16*)(r10 - 512) = r6; \ + call %[bpf_skb_load_bytes]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 7") +__success __success_unpriv __retval(0) +__naked void signed_and_unsigned_variant_7(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = %[__imm_0]; \ + if r1 > r2 goto l0_%=; \ + if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, 1024 * 1024 * 1024) + : __clobber_all); +} + +SEC("socket") 
+__description("bounds checks mixing signed and unsigned, variant 8") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_8(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r2 > r1 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 9") +__success __success_unpriv __retval(0) +__naked void signed_and_unsigned_variant_9(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -9223372036854775808ULL ll; \ + if r2 > r1 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 10") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_10(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r2 > r1 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 11") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_11(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r2 >= r1 goto l1_%=; \ + /* Dead branch. 
*/ \ + r0 = 0; \ + exit; \ +l1_%=: if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 12") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_12(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -6; \ + if r2 >= r1 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 13") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_13(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = 2; \ + if r2 >= r1 goto l0_%=; \ + r7 = 1; \ + if r7 s> 0 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +l1_%=: r7 += r1; \ + if r7 s> 4 goto l2_%=; \ + r0 += r7; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 14") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_14(void) +{ + asm volatile (" \ + r9 = *(u32*)(r1 + %[__sk_buff_mark]); \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + r8 = 2; \ + if r9 == 42 goto l1_%=; \ + if r8 s> r1 goto l2_%=; \ +l3_%=: if r1 s> 1 goto l2_%=; \ + r0 += r1; \ +l0_%=: r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l2_%=: r0 = 0; \ + exit; \ +l1_%=: if r1 > r2 goto l2_%=; \ + goto l3_%=; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 15") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_15(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -6; \ + if r2 >= r1 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +l1_%=: r0 += r1; \ + if r0 > 1 goto l2_%=; \ + r0 = 0; \ + exit; \ +l2_%=: r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = 
"GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c b/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c new file mode 100644 index 000000000000..325a2bab4a71 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/bpf_get_stack.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("tracepoint") +__description("bpf_get_stack return R0 within range") +__success +__naked void stack_return_r0_within_range(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + r9 = %[__imm_0]; \ + r1 = r6; \ + r2 = r7; \ + r3 = %[__imm_0]; \ + r4 = 256; \ + call %[bpf_get_stack]; \ + r1 = 0; \ + r8 = r0; \ + r8 <<= 32; \ + r8 s>>= 32; \ + if r1 s> r8 goto l0_%=; \ + r9 -= r8; \ + r2 = r7; \ + r2 += r8; \ + r1 = r9; \ + r1 <<= 32; \ + r1 s>>= 32; \ + r3 = r2; \ + r3 += r1; \ + r1 = r7; \ + r5 = %[__imm_0]; \ + r1 += r5; \ + if r3 >= r1 goto l0_%=; \ + r1 = r6; \ + r3 = r9; \ + r4 = 0; \ + call %[bpf_get_stack]; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_stack), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) / 2) + : __clobber_all); +} + +SEC("iter/task") +__description("bpf_get_task_stack return R0 range is refined") +__success +__naked void return_r0_range_is_refined(void) +{ + asm volatile (" \ + r6 = *(u64*)(r1 + 0); \ + r6 = *(u64*)(r6 + 0); /* ctx->meta->seq */\ + r7 = *(u64*)(r1 + 8); /* ctx->task */\ + r1 = %[map_array_48b] ll; /* fixup_map_array_48b */\ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r2 = r10; \ + r2 += -8; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: if r7 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r1 = r7; \ + r2 = r0; \ + r9 = r0; /* keep buf for seq_write */\ + r3 = 48; \ + r4 = 0; \ + call %[bpf_get_task_stack]; \ + if r0 s> 0 goto l2_%=; \ + r0 = 0; \ + exit; \ +l2_%=: r1 = r6; \ + r2 = r9; \ + r3 = r0; \ + call %[bpf_seq_write]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_task_stack), + __imm(bpf_map_lookup_elem), + __imm(bpf_seq_write), + __imm_addr(map_array_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c new file mode 100644 index 000000000000..a570e48b917a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/btf_ctx_access.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("fentry/bpf_modify_return_test") +__description("btf_ctx_access accept") +__success __retval(0) +__naked void btf_ctx_access_accept(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + 8); /* load 2nd argument 
value (int pointer) */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("fentry/bpf_fentry_test9") +__description("btf_ctx_access u32 pointer accept") +__success __retval(0) +__naked void ctx_access_u32_pointer_accept(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + 0); /* load 1st argument value (u32 pointer) */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_cfg.c b/tools/testing/selftests/bpf/progs/verifier_cfg.c new file mode 100644 index 000000000000..df7697b94007 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_cfg.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/cfg.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("socket") +__description("unreachable") +__failure __msg("unreachable") +__failure_unpriv +__naked void unreachable(void) +{ + asm volatile (" \ + exit; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unreachable2") +__failure __msg("unreachable") +__failure_unpriv +__naked void unreachable2(void) +{ + asm volatile (" \ + goto l0_%=; \ + goto l0_%=; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("out of range jump") +__failure __msg("jump out of range") +__failure_unpriv +__naked void out_of_range_jump(void) +{ + asm volatile (" \ + goto l0_%=; \ + exit; \ +l0_%=: \ +" ::: __clobber_all); +} + +SEC("socket") +__description("out of range jump2") +__failure __msg("jump out of range") +__failure_unpriv +__naked void out_of_range_jump2(void) +{ + asm volatile (" \ + goto -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("loop (back-edge)") +__failure __msg("unreachable insn 1") +__msg_unpriv("back-edge") +__naked void loop_back_edge(void) +{ + asm volatile (" \ +l0_%=: goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("loop2 (back-edge)") +__failure __msg("unreachable insn 4") +__msg_unpriv("back-edge") +__naked void loop2_back_edge(void) +{ + asm volatile (" \ +l0_%=: r1 = r0; \ + r2 = r0; \ + r3 = r0; \ + goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("conditional loop") +__failure __msg("infinite loop detected") +__msg_unpriv("back-edge") +__naked void conditional_loop(void) +{ + asm volatile (" \ + r0 = r1; \ +l0_%=: r2 = r0; \ + r3 = r0; \ + if r1 == 0 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c b/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c new file mode 100644 index 000000000000..d6c4a7f3f790 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test1") +__failure __msg("R0 has value (0x0; 0xffffffff)") +__naked void with_invalid_return_code_test1(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. 
test2") +__success +__naked void with_invalid_return_code_test2(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + 0); \ + r0 &= 1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test3") +__failure __msg("R0 has value (0x0; 0x3)") +__naked void with_invalid_return_code_test3(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + 0); \ + r0 &= 3; \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test4") +__success +__naked void with_invalid_return_code_test4(void) +{ + asm volatile (" \ + r0 = 1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test5") +__failure __msg("R0 has value (0x2; 0x0)") +__naked void with_invalid_return_code_test5(void) +{ + asm volatile (" \ + r0 = 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test6") +__failure __msg("R0 is not a known value (ctx)") +__naked void with_invalid_return_code_test6(void) +{ + asm volatile (" \ + r0 = r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test7") +__failure __msg("R0 has unknown scalar value") +__naked void with_invalid_return_code_test7(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + 0); \ + r2 = *(u32*)(r1 + 4); \ + r0 *= r2; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c b/tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c new file mode 100644 index 000000000000..5ee3d349d6d0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/cgroup_skb.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("cgroup/skb") +__description("direct packet read test#1 for CGROUP_SKB") +__success __failure_unpriv +__msg_unpriv("invalid bpf_context access off=76 size=4") +__retval(0) +__naked void test_1_for_cgroup_skb(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r4 = *(u32*)(r1 + %[__sk_buff_len]); \ + r5 = *(u32*)(r1 + %[__sk_buff_pkt_type]); \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + *(u32*)(r1 + %[__sk_buff_mark]) = r6; \ + r7 = *(u32*)(r1 + %[__sk_buff_queue_mapping]); \ + r8 = *(u32*)(r1 + %[__sk_buff_protocol]); \ + r9 = *(u32*)(r1 + %[__sk_buff_vlan_present]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)), + __imm_const(__sk_buff_protocol, offsetof(struct __sk_buff, protocol)), + __imm_const(__sk_buff_queue_mapping, offsetof(struct __sk_buff, queue_mapping)), + __imm_const(__sk_buff_vlan_present, offsetof(struct __sk_buff, vlan_present)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("direct packet read test#2 for CGROUP_SKB") +__success __success_unpriv __retval(0) +__naked void test_2_for_cgroup_skb(void) +{ + asm volatile (" \ + r4 = *(u32*)(r1 + 
%[__sk_buff_vlan_tci]); \ + r5 = *(u32*)(r1 + %[__sk_buff_vlan_proto]); \ + r6 = *(u32*)(r1 + %[__sk_buff_priority]); \ + *(u32*)(r1 + %[__sk_buff_priority]) = r6; \ + r7 = *(u32*)(r1 + %[__sk_buff_ingress_ifindex]);\ + r8 = *(u32*)(r1 + %[__sk_buff_tc_index]); \ + r9 = *(u32*)(r1 + %[__sk_buff_hash]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_hash, offsetof(struct __sk_buff, hash)), + __imm_const(__sk_buff_ingress_ifindex, offsetof(struct __sk_buff, ingress_ifindex)), + __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)), + __imm_const(__sk_buff_tc_index, offsetof(struct __sk_buff, tc_index)), + __imm_const(__sk_buff_vlan_proto, offsetof(struct __sk_buff, vlan_proto)), + __imm_const(__sk_buff_vlan_tci, offsetof(struct __sk_buff, vlan_tci)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("direct packet read test#3 for CGROUP_SKB") +__success __success_unpriv __retval(0) +__naked void test_3_for_cgroup_skb(void) +{ + asm volatile (" \ + r4 = *(u32*)(r1 + %[__sk_buff_cb_0]); \ + r5 = *(u32*)(r1 + %[__sk_buff_cb_1]); \ + r6 = *(u32*)(r1 + %[__sk_buff_cb_2]); \ + r7 = *(u32*)(r1 + %[__sk_buff_cb_3]); \ + r8 = *(u32*)(r1 + %[__sk_buff_cb_4]); \ + r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \ + *(u32*)(r1 + %[__sk_buff_cb_0]) = r4; \ + *(u32*)(r1 + %[__sk_buff_cb_1]) = r5; \ + *(u32*)(r1 + %[__sk_buff_cb_2]) = r6; \ + *(u32*)(r1 + %[__sk_buff_cb_3]) = r7; \ + *(u32*)(r1 + %[__sk_buff_cb_4]) = r8; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])), + __imm_const(__sk_buff_cb_1, offsetof(struct __sk_buff, cb[1])), + __imm_const(__sk_buff_cb_2, offsetof(struct __sk_buff, cb[2])), + __imm_const(__sk_buff_cb_3, offsetof(struct __sk_buff, cb[3])), + __imm_const(__sk_buff_cb_4, offsetof(struct __sk_buff, cb[4])), + __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("direct packet read test#4 for CGROUP_SKB") +__success __success_unpriv __retval(0) +__naked void test_4_for_cgroup_skb(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_family]); \ + r3 = *(u32*)(r1 + %[__sk_buff_remote_ip4]); \ + r4 = *(u32*)(r1 + %[__sk_buff_local_ip4]); \ + r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_0]); \ + r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_1]); \ + r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_2]); \ + r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_3]); \ + r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_0]); \ + r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_1]); \ + r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_2]); \ + r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_3]); \ + r7 = *(u32*)(r1 + %[__sk_buff_remote_port]); \ + r8 = *(u32*)(r1 + %[__sk_buff_local_port]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_family, offsetof(struct __sk_buff, family)), + __imm_const(__sk_buff_local_ip4, offsetof(struct __sk_buff, local_ip4)), + __imm_const(__sk_buff_local_ip6_0, offsetof(struct __sk_buff, local_ip6[0])), + __imm_const(__sk_buff_local_ip6_1, offsetof(struct __sk_buff, local_ip6[1])), + __imm_const(__sk_buff_local_ip6_2, offsetof(struct __sk_buff, local_ip6[2])), + __imm_const(__sk_buff_local_ip6_3, offsetof(struct __sk_buff, local_ip6[3])), + __imm_const(__sk_buff_local_port, offsetof(struct __sk_buff, local_port)), + __imm_const(__sk_buff_remote_ip4, offsetof(struct __sk_buff, remote_ip4)), + __imm_const(__sk_buff_remote_ip6_0, offsetof(struct __sk_buff, remote_ip6[0])), + __imm_const(__sk_buff_remote_ip6_1, offsetof(struct __sk_buff, remote_ip6[1])), + 
__imm_const(__sk_buff_remote_ip6_2, offsetof(struct __sk_buff, remote_ip6[2])), + __imm_const(__sk_buff_remote_ip6_3, offsetof(struct __sk_buff, remote_ip6[3])), + __imm_const(__sk_buff_remote_port, offsetof(struct __sk_buff, remote_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid access of tc_classid for CGROUP_SKB") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void tc_classid_for_cgroup_skb(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid access of data_meta for CGROUP_SKB") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void data_meta_for_cgroup_skb(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_data_meta]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data_meta, offsetof(struct __sk_buff, data_meta)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid access of flow_keys for CGROUP_SKB") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void flow_keys_for_cgroup_skb(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_flow_keys]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_flow_keys, offsetof(struct __sk_buff, flow_keys)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid write access to napi_id for CGROUP_SKB") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void napi_id_for_cgroup_skb(void) +{ + asm volatile (" \ + r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \ + *(u32*)(r1 + %[__sk_buff_napi_id]) = r9; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("write tstamp from CGROUP_SKB") +__success __failure_unpriv +__msg_unpriv("invalid bpf_context access off=152 size=8") +__retval(0) +__naked void write_tstamp_from_cgroup_skb(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r1 + %[__sk_buff_tstamp]) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("read tstamp from CGROUP_SKB") +__success __success_unpriv __retval(0) +__naked void read_tstamp_from_cgroup_skb(void) +{ + asm volatile (" \ + r0 = *(u64*)(r1 + %[__sk_buff_tstamp]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c b/tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c new file mode 100644 index 000000000000..9a13f5c11ac7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/cgroup_storage.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __uint(max_entries, 0); + __type(key, struct bpf_cgroup_storage_key); + __type(value, char[TEST_DATA_LEN]); +} cgroup_storage SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +struct { + 
__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); + __uint(max_entries, 0); + __type(key, struct bpf_cgroup_storage_key); + __type(value, char[64]); +} percpu_cgroup_storage SEC(".maps"); + +SEC("cgroup/skb") +__description("valid cgroup storage access") +__success __success_unpriv __retval(0) +__naked void valid_cgroup_storage_access(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 1") +__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage") +__failure_unpriv +__naked void invalid_cgroup_storage_access_1(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 2") +__failure __msg("fd 1 is not pointing to valid bpf_map") +__failure_unpriv +__naked void invalid_cgroup_storage_access_2(void) +{ + asm volatile (" \ + r2 = 0; \ + .8byte %[ld_map_fd]; \ + .8byte 0; \ + call %[bpf_get_local_storage]; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 3") +__failure __msg("invalid access to map value, value_size=64 off=256 size=4") +__failure_unpriv +__naked void invalid_cgroup_storage_access_3(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 256); \ + r1 += 1; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 4") +__failure __msg("invalid access to map value, value_size=64 off=-2 size=4") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void invalid_cgroup_storage_access_4(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 - 2); \ + r0 = r1; \ + r1 += 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 5") +__failure __msg("get_local_storage() doesn't support non-zero flags") +__failure_unpriv +__naked void invalid_cgroup_storage_access_5(void) +{ + asm volatile (" \ + r2 = 7; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 6") +__failure __msg("get_local_storage() doesn't support non-zero flags") +__msg_unpriv("R2 leaks addr into helper function") +__naked void invalid_cgroup_storage_access_6(void) +{ + asm volatile (" \ + r2 = r1; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("valid per-cpu cgroup 
storage access") +__success __success_unpriv __retval(0) +__naked void per_cpu_cgroup_storage_access(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[percpu_cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(percpu_cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 1") +__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage") +__failure_unpriv +__naked void cpu_cgroup_storage_access_1(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 2") +__failure __msg("fd 1 is not pointing to valid bpf_map") +__failure_unpriv +__naked void cpu_cgroup_storage_access_2(void) +{ + asm volatile (" \ + r2 = 0; \ + .8byte %[ld_map_fd]; \ + .8byte 0; \ + call %[bpf_get_local_storage]; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 3") +__failure __msg("invalid access to map value, value_size=64 off=256 size=4") +__failure_unpriv +__naked void cpu_cgroup_storage_access_3(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[percpu_cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 256); \ + r1 += 1; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(percpu_cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 4") +__failure __msg("invalid access to map value, value_size=64 off=-2 size=4") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void cpu_cgroup_storage_access_4(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 - 2); \ + r0 = r1; \ + r1 += 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 5") +__failure __msg("get_local_storage() doesn't support non-zero flags") +__failure_unpriv +__naked void cpu_cgroup_storage_access_5(void) +{ + asm volatile (" \ + r2 = 7; \ + r1 = %[percpu_cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(percpu_cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 6") +__failure __msg("get_local_storage() doesn't support non-zero flags") +__msg_unpriv("R2 leaks addr into helper function") +__naked void cpu_cgroup_storage_access_6(void) +{ + asm volatile (" \ + r2 = r1; \ + r1 = %[percpu_cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(percpu_cgroup_storage) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_const_or.c b/tools/testing/selftests/bpf/progs/verifier_const_or.c new file mode 100644 index 
000000000000..ba8922b2eebd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_const_or.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/const_or.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("tracepoint") +__description("constant register |= constant should keep constant type") +__success +__naked void constant_should_keep_constant_type(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -48; \ + r2 = 34; \ + r2 |= 13; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("constant register |= constant should not bypass stack boundary checks") +__failure __msg("invalid indirect access to stack R1 off=-48 size=58") +__naked void not_bypass_stack_boundary_checks_1(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -48; \ + r2 = 34; \ + r2 |= 24; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("constant register |= constant register should keep constant type") +__success +__naked void register_should_keep_constant_type(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -48; \ + r2 = 34; \ + r4 = 13; \ + r2 |= r4; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("constant register |= constant register should not bypass stack boundary checks") +__failure __msg("invalid indirect access to stack R1 off=-48 size=58") +__naked void not_bypass_stack_boundary_checks_2(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -48; \ + r2 = 34; \ + r4 = 24; \ + r2 |= r4; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c new file mode 100644 index 000000000000..a83809a1dbbf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ctx.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("tc") +__description("context stores via BPF_ATOMIC") +__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed") +__naked void context_stores_via_bpf_atomic(void) +{ + asm volatile (" \ + r0 = 0; \ + lock *(u32 *)(r1 + %[__sk_buff_mark]) += w0; \ + exit; \ +" : + : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("arithmetic ops make PTR_TO_CTX unusable") +__failure __msg("dereference of modified ctx ptr") +__naked void make_ptr_to_ctx_unusable(void) +{ + asm volatile (" \ + r1 += %[__imm_0]; \ + r0 = *(u32*)(r1 + %[__sk_buff_mark]); \ + exit; \ +" : + : __imm_const(__imm_0, + offsetof(struct __sk_buff, data) - offsetof(struct __sk_buff, mark)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("pass unmodified ctx pointer to helper") +__success __retval(0) +__naked void unmodified_ctx_pointer_to_helper(void) +{ + asm volatile (" \ + r2 = 0; \ + call %[bpf_csum_update]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_update) + : __clobber_all); +} + +SEC("tc") +__description("pass modified 
ctx pointer to helper, 1") +__failure __msg("negative offset ctx ptr R1 off=-612 disallowed") +__naked void ctx_pointer_to_helper_1(void) +{ + asm volatile (" \ + r1 += -612; \ + r2 = 0; \ + call %[bpf_csum_update]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_update) + : __clobber_all); +} + +SEC("socket") +__description("pass modified ctx pointer to helper, 2") +__failure __msg("negative offset ctx ptr R1 off=-612 disallowed") +__failure_unpriv __msg_unpriv("negative offset ctx ptr R1 off=-612 disallowed") +__naked void ctx_pointer_to_helper_2(void) +{ + asm volatile (" \ + r1 += -612; \ + call %[bpf_get_socket_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_socket_cookie) + : __clobber_all); +} + +SEC("tc") +__description("pass modified ctx pointer to helper, 3") +__failure __msg("variable ctx access var_off=(0x0; 0x4)") +__naked void ctx_pointer_to_helper_3(void) +{ + asm volatile (" \ + r3 = *(u32*)(r1 + 0); \ + r3 &= 4; \ + r1 += r3; \ + r2 = 0; \ + call %[bpf_csum_update]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_update) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 1: ctx") +__success +__naked void or_null_check_1_ctx(void) +{ + asm volatile (" \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 2: null") +__success +__naked void or_null_check_2_null(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 3: 1") +__failure __msg("R1 type=scalar expected=ctx") +__naked void or_null_check_3_1(void) +{ + asm volatile (" \ + r1 = 1; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 4: ctx - const") +__failure __msg("negative offset ctx ptr R1 off=-612 disallowed") +__naked void null_check_4_ctx_const(void) +{ + asm volatile (" \ + r1 += -612; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/connect4") +__description("pass ctx or null check, 5: null (connect)") +__success +__naked void null_check_5_null_connect(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/post_bind4") +__description("pass ctx or null check, 6: null (bind)") +__success +__naked void null_check_6_null_bind(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/post_bind4") +__description("pass ctx or null check, 7: ctx (bind)") +__success +__naked void null_check_7_ctx_bind(void) +{ + asm volatile (" \ + call %[bpf_get_socket_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_socket_cookie) + : __clobber_all); +} + +SEC("cgroup/post_bind4") +__description("pass ctx or null check, 8: null (bind)") +__failure __msg("R1 type=scalar expected=ctx") +__naked void null_check_8_null_bind(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_socket_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_socket_cookie) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git 
a/tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c b/tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c new file mode 100644 index 000000000000..65edc89799f9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ctx_sk_msg.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("sk_msg") +__description("valid access family in SK_MSG") +__success +__naked void access_family_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_family]); \ + exit; \ +" : + : __imm_const(sk_msg_md_family, offsetof(struct sk_msg_md, family)) + : __clobber_all); +} + +SEC("sk_msg") +__description("valid access remote_ip4 in SK_MSG") +__success +__naked void remote_ip4_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip4]); \ + exit; \ +" : + : __imm_const(sk_msg_md_remote_ip4, offsetof(struct sk_msg_md, remote_ip4)) + : __clobber_all); +} + +SEC("sk_msg") +__description("valid access local_ip4 in SK_MSG") +__success +__naked void local_ip4_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_ip4]); \ + exit; \ +" : + : __imm_const(sk_msg_md_local_ip4, offsetof(struct sk_msg_md, local_ip4)) + : __clobber_all); +} + +SEC("sk_msg") +__description("valid access remote_port in SK_MSG") +__success +__naked void remote_port_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_port]); \ + exit; \ +" : + : __imm_const(sk_msg_md_remote_port, offsetof(struct sk_msg_md, remote_port)) + : __clobber_all); +} + +SEC("sk_msg") +__description("valid access local_port in SK_MSG") +__success +__naked void local_port_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_port]); \ + exit; \ +" : + : __imm_const(sk_msg_md_local_port, offsetof(struct sk_msg_md, local_port)) + : __clobber_all); +} + +SEC("sk_skb") +__description("valid access remote_ip6 in SK_MSG") +__success +__naked void remote_ip6_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_0]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_1]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_2]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_3]); \ + exit; \ +" : + : __imm_const(sk_msg_md_remote_ip6_0, offsetof(struct sk_msg_md, remote_ip6[0])), + __imm_const(sk_msg_md_remote_ip6_1, offsetof(struct sk_msg_md, remote_ip6[1])), + __imm_const(sk_msg_md_remote_ip6_2, offsetof(struct sk_msg_md, remote_ip6[2])), + __imm_const(sk_msg_md_remote_ip6_3, offsetof(struct sk_msg_md, remote_ip6[3])) + : __clobber_all); +} + +SEC("sk_skb") +__description("valid access local_ip6 in SK_MSG") +__success +__naked void local_ip6_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_0]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_1]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_2]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_3]); \ + exit; \ +" : + : __imm_const(sk_msg_md_local_ip6_0, offsetof(struct sk_msg_md, local_ip6[0])), + __imm_const(sk_msg_md_local_ip6_1, offsetof(struct sk_msg_md, local_ip6[1])), + __imm_const(sk_msg_md_local_ip6_2, offsetof(struct sk_msg_md, local_ip6[2])), + __imm_const(sk_msg_md_local_ip6_3, offsetof(struct sk_msg_md, local_ip6[3])) + : __clobber_all); +} + +SEC("sk_msg") +__description("valid access size in SK_MSG") +__success +__naked void access_size_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + 
%[sk_msg_md_size]); \ + exit; \ +" : + : __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size)) + : __clobber_all); +} + +SEC("sk_msg") +__description("invalid 64B read of size in SK_MSG") +__failure __msg("invalid bpf_context access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void of_size_in_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + %[sk_msg_md_size]); \ + exit; \ +" : + : __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size)) + : __clobber_all); +} + +SEC("sk_msg") +__description("invalid read past end of SK_MSG") +__failure __msg("invalid bpf_context access") +__naked void past_end_of_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__imm_0]); \ + exit; \ +" : + : __imm_const(__imm_0, offsetof(struct sk_msg_md, size) + 4) + : __clobber_all); +} + +SEC("sk_msg") +__description("invalid read offset in SK_MSG") +__failure __msg("invalid bpf_context access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void read_offset_in_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__imm_0]); \ + exit; \ +" : + : __imm_const(__imm_0, offsetof(struct sk_msg_md, family) + 1) + : __clobber_all); +} + +SEC("sk_msg") +__description("direct packet read for SK_MSG") +__success +__naked void packet_read_for_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + %[sk_msg_md_data]); \ + r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)), + __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end)) + : __clobber_all); +} + +SEC("sk_msg") +__description("direct packet write for SK_MSG") +__success +__naked void packet_write_for_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + %[sk_msg_md_data]); \ + r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)), + __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end)) + : __clobber_all); +} + +SEC("sk_msg") +__description("overlapping checks for direct packet access SK_MSG") +__success +__naked void direct_packet_access_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + %[sk_msg_md_data]); \ + r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r1 = r2; \ + r1 += 6; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u16*)(r2 + 6); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)), + __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_d_path.c b/tools/testing/selftests/bpf/progs/verifier_d_path.c new file mode 100644 index 000000000000..ec79cbcfde91 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_d_path.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/d_path.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("fentry/dentry_open") +__description("d_path accept") +__success __retval(0) +__naked void d_path_accept(void) +{ + asm volatile (" \ + r1 = *(u32*)(r1 + 0); \ + r2 = r10; \ + r2 += -8; \ + r6 = 0; \ + *(u64*)(r2 + 0) = r6; \ + r3 = 8 ll; \ + call %[bpf_d_path]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_d_path) + : 
__clobber_all); +} + +SEC("fentry/d_path") +__description("d_path reject") +__failure __msg("helper call is not allowed in probe") +__naked void d_path_reject(void) +{ + asm volatile (" \ + r1 = *(u32*)(r1 + 0); \ + r2 = r10; \ + r2 += -8; \ + r6 = 0; \ + *(u64*)(r2 + 0) = r6; \ + r3 = 8 ll; \ + call %[bpf_d_path]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_d_path) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c new file mode 100644 index 000000000000..99a23dea8233 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c @@ -0,0 +1,803 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/direct_packet_access.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("tc") +__description("pkt_end - pkt_start is allowed") +__success __retval(TEST_DATA_LEN) +__naked void end_pkt_start_is_allowed(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r0 -= r2; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test1") +__success __retval(0) +__naked void direct_packet_access_test1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test2") +__success __retval(0) +__naked void direct_packet_access_test2(void) +{ + asm volatile (" \ + r0 = 1; \ + r4 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data]); \ + r5 = r3; \ + r5 += 14; \ + if r5 > r4 goto l0_%=; \ + r0 = *(u8*)(r3 + 7); \ + r4 = *(u8*)(r3 + 12); \ + r4 *= 14; \ + r3 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 += r4; \ + r2 = *(u32*)(r1 + %[__sk_buff_len]); \ + r2 <<= 49; \ + r2 >>= 49; \ + r3 += r2; \ + r2 = r3; \ + r2 += 8; \ + r1 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + if r2 > r1 goto l1_%=; \ + r1 = *(u8*)(r3 + 4); \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("direct packet access: test3") +__failure __msg("invalid bpf_context access off=76") +__failure_unpriv +__naked void direct_packet_access_test3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test4 (write)") +__success __retval(0) +__naked void direct_packet_access_test4_write(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, 
offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test5 (pkt_end >= reg, good access)") +__success __retval(0) +__naked void pkt_end_reg_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test6 (pkt_end >= reg, bad access)") +__failure __msg("invalid access to packet") +__naked void pkt_end_reg_bad_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test7 (pkt_end >= reg, both accesses)") +__failure __msg("invalid access to packet") +__naked void pkt_end_reg_both_accesses(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ + r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test8 (double test, variant 1)") +__success __retval(0) +__naked void test8_double_test_variant_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + if r0 > r3 goto l1_%=; \ + r0 = *(u8*)(r2 + 0); \ +l1_%=: r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test9 (double test, variant 2)") +__success __retval(0) +__naked void test9_double_test_variant_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = 1; \ + exit; \ +l0_%=: if r0 > r3 goto l1_%=; \ + r0 = *(u8*)(r2 + 0); \ +l1_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test10 (write invalid)") +__failure __msg("invalid access to packet") +__naked void packet_access_test10_write_invalid(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: *(u8*)(r2 + 0) = r2; \ + r0 = 0; \ + exit; \ +" : + : 
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test11 (shift, good access)") +__success __retval(1) +__naked void access_test11_shift_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r3 = 144; \ + r5 = r3; \ + r5 += 23; \ + r5 >>= 3; \ + r6 = r2; \ + r6 += r5; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test12 (and, good access)") +__success __retval(1) +__naked void access_test12_and_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r3 = 144; \ + r5 = r3; \ + r5 += 23; \ + r5 &= 15; \ + r6 = r2; \ + r6 += r5; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test13 (branches, good access)") +__success __retval(1) +__naked void access_test13_branches_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r3 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r4 = 1; \ + if r3 > r4 goto l1_%=; \ + r3 = 14; \ + goto l2_%=; \ +l1_%=: r3 = 24; \ +l2_%=: r5 = r3; \ + r5 += 23; \ + r5 &= 15; \ + r6 = r2; \ + r6 += r5; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)") +__success __retval(1) +__naked void _0_const_imm_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r5 = 12; \ + r5 >>= 4; \ + r6 = r2; \ + r6 += r5; \ + r0 = *(u8*)(r6 + 0); \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test15 (spill with xadd)") +__failure __msg("R2 invalid mem access 'scalar'") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void access_test15_spill_with_xadd(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r5 = 4096; \ + r4 = r10; \ + r4 += -8; \ + *(u64*)(r4 + 0) = r2; \ + lock *(u64 *)(r4 + 0) += r5; \ + r2 = *(u64*)(r4 + 0); \ + *(u32*)(r2 + 0) = r5; \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} 
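+
+/* A rough C analogue of test15 above -- a sketch for illustration only,
+ * not part of the converted suite: the atomic add (xadd) clobbers the
+ * spill slot's tracked pointer type, so the reloaded value is an unknown
+ * scalar and the store through it is rejected with "R2 invalid mem access
+ * 'scalar'". Here fp and ctx are stand-ins for the frame and context
+ * pointers.
+ *
+ *	void *pkt = ctx->data;				// r2: packet pointer
+ *	*(void **)(fp - 8) = pkt;			// spill pointer to stack
+ *	__sync_fetch_and_add((__u64 *)(fp - 8), 4096);	// xadd hits the spill slot
+ *	pkt = *(void **)(fp - 8);			// reload: unknown scalar now
+ *	*(__u32 *)pkt = 4096;				// verifier rejects this store
+ */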
+ +SEC("tc") +__description("direct packet access: test16 (arith on data_end)") +__failure __msg("R3 pointer arithmetic on pkt_end") +__naked void test16_arith_on_data_end(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + r3 += 16; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test17 (pruning, alignment)") +__failure __msg("misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4") +__flag(BPF_F_STRICT_ALIGNMENT) +__naked void packet_access_test17_pruning_alignment(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r0 = r2; \ + r0 += 14; \ + if r7 > 1 goto l0_%=; \ +l2_%=: if r0 > r3 goto l1_%=; \ + *(u32*)(r0 - 4) = r0; \ +l1_%=: r0 = 0; \ + exit; \ +l0_%=: r0 += 1; \ + goto l2_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test18 (imm += pkt_ptr, 1)") +__success __retval(0) +__naked void test18_imm_pkt_ptr_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = 8; \ + r0 += r2; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test19 (imm += pkt_ptr, 2)") +__success __retval(0) +__naked void test19_imm_pkt_ptr_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r4 = 4; \ + r4 += r2; \ + *(u8*)(r4 + 0) = r4; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test20 (x += pkt_ptr, 1)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test20_x_pkt_ptr_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = 0xffffffff; \ + *(u64*)(r10 - 8) = r0; \ + r0 = *(u64*)(r10 - 8); \ + r0 &= 0x7fff; \ + r4 = r0; \ + r4 += r2; \ + r5 = r4; \ + r4 += %[__imm_0]; \ + if r4 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r4; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, 0x7fff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test21 (x += pkt_ptr, 2)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test21_x_pkt_ptr_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r4 = 0xffffffff; \ + *(u64*)(r10 - 8) 
= r4; \ + r4 = *(u64*)(r10 - 8); \ + r4 &= 0x7fff; \ + r4 += r2; \ + r5 = r4; \ + r4 += %[__imm_0]; \ + if r4 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r4; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, 0x7fff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test22 (x += pkt_ptr, 3)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test22_x_pkt_ptr_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + *(u64*)(r10 - 8) = r2; \ + *(u64*)(r10 - 16) = r3; \ + r3 = *(u64*)(r10 - 16); \ + if r0 > r3 goto l0_%=; \ + r2 = *(u64*)(r10 - 8); \ + r4 = 0xffffffff; \ + lock *(u64 *)(r10 - 8) += r4; \ + r4 = *(u64*)(r10 - 8); \ + r4 >>= 49; \ + r4 += r2; \ + r0 = r4; \ + r0 += 2; \ + if r0 > r3 goto l0_%=; \ + r2 = 1; \ + *(u16*)(r4 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test23 (x += pkt_ptr, 4)") +__failure __msg("invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void test23_x_pkt_ptr_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = *(u32*)(r1 + %[__sk_buff_mark]); \ + *(u64*)(r10 - 8) = r0; \ + r0 = *(u64*)(r10 - 8); \ + r0 &= 0xffff; \ + r4 = r0; \ + r0 = 31; \ + r0 += r4; \ + r0 += r2; \ + r5 = r0; \ + r0 += %[__imm_0]; \ + if r0 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test24 (x += pkt_ptr, 5)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test24_x_pkt_ptr_5(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = 0xffffffff; \ + *(u64*)(r10 - 8) = r0; \ + r0 = *(u64*)(r10 - 8); \ + r0 &= 0xff; \ + r4 = r0; \ + r0 = 64; \ + r0 += r4; \ + r0 += r2; \ + r5 = r0; \ + r0 += %[__imm_0]; \ + if r0 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, 0x7fff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test25 (marking on <, good access)") +__success __retval(0) +__naked void test25_marking_on_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 < r3 goto l0_%=; \ +l1_%=: r0 = 0; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + goto l1_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test26 (marking on <, bad access)") +__failure __msg("invalid access to packet") 
+__naked void test26_marking_on_bad_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 < r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l1_%=: r0 = 0; \ + exit; \ +l0_%=: goto l1_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test27 (marking on <=, good access)") +__success __retval(1) +__naked void test27_marking_on_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 <= r0 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test28 (marking on <=, bad access)") +__failure __msg("invalid access to packet") +__naked void test28_marking_on_bad_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 <= r0 goto l0_%=; \ +l1_%=: r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + goto l1_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test29 (reg > pkt_end in subprog)") +__success __retval(0) +__naked void reg_pkt_end_in_subprog(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r2 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r3 = r6; \ + r3 += 8; \ + call reg_pkt_end_in_subprog__1; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u8*)(r6 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void reg_pkt_end_in_subprog__1(void) +{ + asm volatile (" \ + r0 = 0; \ + if r3 > r2 goto l0_%=; \ + r0 = 1; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("direct packet access: test30 (check_id() in regsafe(), bad access)") +__failure __msg("invalid access to packet, off=0 size=1, R2") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void id_in_regsafe_bad_access(void) +{ + asm volatile (" \ + /* r9 = ctx */ \ + r9 = r1; \ + /* r7 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r7 = r0; \ + /* r6 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r6 = r0; \ + /* r2 = ctx->data \ + * r3 = ctx->data \ + * r4 = ctx->data_end \ + */ \ + r2 = *(u32*)(r9 + %[__sk_buff_data]); \ + r3 = *(u32*)(r9 + %[__sk_buff_data]); \ + r4 = *(u32*)(r9 + %[__sk_buff_data_end]); \ + /* if r6 > 100 goto exit \ + * if r7 > 100 goto exit \ + */ \ + if r6 > 100 goto l0_%=; \ + if r7 > 100 goto l0_%=; \ + /* r2 += r6 ; this forces assignment of ID to r2\ + * r2 += 1 ; get some fixed off for r2\ + * r3 += r7 ; this forces assignment of ID to r3\ + * r3 += 1 ; get some fixed off for r3\ + */ \ + r2 += r6; \ + r2 += 1; \ + r3 += r7; \ + r3 += 1; \ + /* if r6 > r7 goto +1 ; no new information about the state is derived from\ + * ; this check, thus produced verifier states differ\ + * ; only in 'insn_idx' \ + * r2 = r3 ; 
optionally share ID between r2 and r3\ + */ \ + if r6 != r7 goto l1_%=; \ + r2 = r3; \ +l1_%=: /* if r3 > ctx->data_end goto exit */ \ + if r3 > r4 goto l0_%=; \ + /* r5 = *(u8 *) (r2 - 1) ; access packet memory using r2,\ + * ; this is not always safe\ + */ \ + r5 = *(u8*)(r2 - 1); \ +l0_%=: /* exit(0) */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c b/tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c new file mode 100644 index 000000000000..c538c6893552 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("socket") +__description("direct stack access with 32-bit wraparound. test1") +__failure __msg("fp pointer and 2147483647") +__failure_unpriv +__naked void with_32_bit_wraparound_test1(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += 0x7fffffff; \ + r1 += 0x7fffffff; \ + w0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("direct stack access with 32-bit wraparound. test2") +__failure __msg("fp pointer and 1073741823") +__failure_unpriv +__naked void with_32_bit_wraparound_test2(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += 0x3fffffff; \ + r1 += 0x3fffffff; \ + w0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("direct stack access with 32-bit wraparound. 
test3") +__failure __msg("fp pointer offset 1073741822") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void with_32_bit_wraparound_test3(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += 0x1fffffff; \ + r1 += 0x1fffffff; \ + w0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_div0.c b/tools/testing/selftests/bpf/progs/verifier_div0.c new file mode 100644 index 000000000000..cca5ea18fc28 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_div0.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/div0.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("socket") +__description("DIV32 by 0, zero check 1") +__success __success_unpriv __retval(42) +__naked void by_0_zero_check_1_1(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 0; \ + w2 = 1; \ + w2 /= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("DIV32 by 0, zero check 2") +__success __success_unpriv __retval(42) +__naked void by_0_zero_check_2_1(void) +{ + asm volatile (" \ + w0 = 42; \ + r1 = 0xffffffff00000000LL ll; \ + w2 = 1; \ + w2 /= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("DIV64 by 0, zero check") +__success __success_unpriv __retval(42) +__naked void div64_by_0_zero_check(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 0; \ + w2 = 1; \ + r2 /= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("MOD32 by 0, zero check 1") +__success __success_unpriv __retval(42) +__naked void by_0_zero_check_1_2(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 0; \ + w2 = 1; \ + w2 %%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("MOD32 by 0, zero check 2") +__success __success_unpriv __retval(42) +__naked void by_0_zero_check_2_2(void) +{ + asm volatile (" \ + w0 = 42; \ + r1 = 0xffffffff00000000LL ll; \ + w2 = 1; \ + w2 %%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("MOD64 by 0, zero check") +__success __success_unpriv __retval(42) +__naked void mod64_by_0_zero_check(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 0; \ + w2 = 1; \ + r2 %%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("DIV32 by 0, zero check ok, cls") +__success __retval(8) +__naked void _0_zero_check_ok_cls_1(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 2; \ + w2 = 16; \ + w2 /= w1; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("DIV32 by 0, zero check 1, cls") +__success __retval(0) +__naked void _0_zero_check_1_cls_1(void) +{ + asm volatile (" \ + w1 = 0; \ + w0 = 1; \ + w0 /= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("DIV32 by 0, zero check 2, cls") +__success __retval(0) +__naked void _0_zero_check_2_cls_1(void) +{ + asm volatile (" \ + r1 = 0xffffffff00000000LL ll; \ + w0 = 1; \ + w0 /= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("DIV64 by 0, zero check, cls") +__success __retval(0) +__naked void by_0_zero_check_cls(void) +{ + asm volatile (" \ + w1 = 0; \ + w0 = 1; \ + r0 /= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("MOD32 by 0, zero check ok, cls") +__success __retval(2) +__naked void _0_zero_check_ok_cls_2(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 3; \ + w2 = 5; \ + w2 %%= w1; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + 
+SEC("tc") +__description("MOD32 by 0, zero check 1, cls") +__success __retval(1) +__naked void _0_zero_check_1_cls_2(void) +{ + asm volatile (" \ + w1 = 0; \ + w0 = 1; \ + w0 %%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("MOD32 by 0, zero check 2, cls") +__success __retval(1) +__naked void _0_zero_check_2_cls_2(void) +{ + asm volatile (" \ + r1 = 0xffffffff00000000LL ll; \ + w0 = 1; \ + w0 %%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("MOD64 by 0, zero check 1, cls") +__success __retval(2) +__naked void _0_zero_check_1_cls_3(void) +{ + asm volatile (" \ + w1 = 0; \ + w0 = 2; \ + r0 %%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("MOD64 by 0, zero check 2, cls") +__success __retval(-1) +__naked void _0_zero_check_2_cls_3(void) +{ + asm volatile (" \ + w1 = 0; \ + w0 = -1; \ + r0 %%= r1; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c new file mode 100644 index 000000000000..458984da804c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/div_overflow.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <limits.h> +#include "bpf_misc.h" + +/* Just make sure that JITs used udiv/umod as otherwise we get + * an exception from INT_MIN/-1 overflow similarly as with div + * by zero. + */ + +SEC("tc") +__description("DIV32 overflow, check 1") +__success __retval(0) +__naked void div32_overflow_check_1(void) +{ + asm volatile (" \ + w1 = -1; \ + w0 = %[int_min]; \ + w0 /= w1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("tc") +__description("DIV32 overflow, check 2") +__success __retval(0) +__naked void div32_overflow_check_2(void) +{ + asm volatile (" \ + w0 = %[int_min]; \ + w0 /= -1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("tc") +__description("DIV64 overflow, check 1") +__success __retval(0) +__naked void div64_overflow_check_1(void) +{ + asm volatile (" \ + r1 = -1; \ + r2 = %[llong_min] ll; \ + r2 /= r1; \ + w0 = 0; \ + if r0 == r2 goto l0_%=; \ + w0 = 1; \ +l0_%=: exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("tc") +__description("DIV64 overflow, check 2") +__success __retval(0) +__naked void div64_overflow_check_2(void) +{ + asm volatile (" \ + r1 = %[llong_min] ll; \ + r1 /= -1; \ + w0 = 0; \ + if r0 == r1 goto l0_%=; \ + w0 = 1; \ +l0_%=: exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("tc") +__description("MOD32 overflow, check 1") +__success __retval(INT_MIN) +__naked void mod32_overflow_check_1(void) +{ + asm volatile (" \ + w1 = -1; \ + w0 = %[int_min]; \ + w0 %%= w1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("tc") +__description("MOD32 overflow, check 2") +__success __retval(INT_MIN) +__naked void mod32_overflow_check_2(void) +{ + asm volatile (" \ + w0 = %[int_min]; \ + w0 %%= -1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("tc") +__description("MOD64 overflow, check 1") +__success __retval(1) +__naked void mod64_overflow_check_1(void) +{ + asm volatile (" \ + r1 = -1; \ + r2 = %[llong_min] ll; \ + r3 = r2; \ + r2 %%= r1; \ + w0 = 0; \ + if r3 != r2 goto l0_%=; \ + w0 = 1; 
\ +l0_%=: exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("tc") +__description("MOD64 overflow, check 2") +__success __retval(1) +__naked void mod64_overflow_check_2(void) +{ + asm volatile (" \ + r2 = %[llong_min] ll; \ + r3 = r2; \ + r2 %%= -1; \ + w0 = 0; \ + if r3 != r2 goto l0_%=; \ + w0 = 1; \ +l0_%=: exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c new file mode 100644 index 000000000000..50c6b22606f6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c @@ -0,0 +1,825 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/helper_access_var_len.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); +} map_ringbuf SEC(".maps"); + +SEC("tracepoint") +__description("helper access to variable memory: stack, bitwise AND + JMP, correct bounds") +__success +__naked void bitwise_and_jmp_correct_bounds(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -64; \ + r0 = 0; \ + *(u64*)(r10 - 64) = r0; \ + *(u64*)(r10 - 56) = r0; \ + *(u64*)(r10 - 48) = r0; \ + *(u64*)(r10 - 40) = r0; \ + *(u64*)(r10 - 32) = r0; \ + *(u64*)(r10 - 24) = r0; \ + *(u64*)(r10 - 16) = r0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = 16; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + r2 &= 64; \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("socket") +__description("helper access to variable memory: stack, bitwise AND, zero included") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64") +__retval(0) +__naked void stack_bitwise_and_zero_included(void) +{ + asm volatile (" \ + /* set max stack size */ \ + r6 = 0; \ + *(u64*)(r10 - 128) = r6; \ + /* set r3 to a random value */ \ + call %[bpf_get_prandom_u32]; \ + r3 = r0; \ + /* use bitwise AND to limit r3 range to [0, 64] */\ + r3 &= 64; \ + r1 = %[map_ringbuf] ll; \ + r2 = r10; \ + r2 += -64; \ + r4 = 0; \ + /* Call bpf_ringbuf_output(), it is one of a few helper functions with\ + * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\ + * For unpriv this should signal an error, because memory at &fp[-64] is\ + * not initialized. 
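\ +	 * r3 AND 64 can only be 0 or 64, so a zero size is possible but the access is checked as a full 64-byte read.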
\ + */ \ + call %[bpf_ringbuf_output]; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_ringbuf_output), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, bitwise AND + JMP, wrong max") +__failure __msg("invalid indirect access to stack R1 off=-64 size=65") +__naked void bitwise_and_jmp_wrong_max(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + 8); \ + r1 = r10; \ + r1 += -64; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + r2 &= 65; \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP, correct bounds") +__success +__naked void memory_stack_jmp_correct_bounds(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -64; \ + r0 = 0; \ + *(u64*)(r10 - 64) = r0; \ + *(u64*)(r10 - 56) = r0; \ + *(u64*)(r10 - 48) = r0; \ + *(u64*)(r10 - 40) = r0; \ + *(u64*)(r10 - 32) = r0; \ + *(u64*)(r10 - 24) = r0; \ + *(u64*)(r10 - 16) = r0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = 16; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + if r2 > 64 goto l0_%=; \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP (signed), correct bounds") +__success +__naked void stack_jmp_signed_correct_bounds(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -64; \ + r0 = 0; \ + *(u64*)(r10 - 64) = r0; \ + *(u64*)(r10 - 56) = r0; \ + *(u64*)(r10 - 48) = r0; \ + *(u64*)(r10 - 40) = r0; \ + *(u64*)(r10 - 32) = r0; \ + *(u64*)(r10 - 24) = r0; \ + *(u64*)(r10 - 16) = r0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = 16; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + if r2 s> 64 goto l0_%=; \ + r4 = 0; \ + if r4 s>= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP, bounds + offset") +__failure __msg("invalid indirect access to stack R1 off=-64 size=65") +__naked void memory_stack_jmp_bounds_offset(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + 8); \ + r1 = r10; \ + r1 += -64; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + if r2 > 64 goto l0_%=; \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r2 += 1; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP, wrong max") +__failure __msg("invalid indirect access to stack R1 off=-64 size=65") +__naked void memory_stack_jmp_wrong_max(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + 8); \ + r1 = r10; \ + r1 += -64; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + if r2 > 65 goto l0_%=; \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP, no max check") +__failure +/* because max wasn't checked, signed min is negative */ +__msg("R2 min value is negative, either use unsigned or 'var &= const'") +__naked void 
stack_jmp_no_max_check(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + 8); \ + r1 = r10; \ + r1 += -64; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("socket") +__description("helper access to variable memory: stack, JMP, no min check") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64") +__retval(0) +__naked void stack_jmp_no_min_check(void) +{ + asm volatile (" \ + /* set max stack size */ \ + r6 = 0; \ + *(u64*)(r10 - 128) = r6; \ + /* set r3 to a random value */ \ + call %[bpf_get_prandom_u32]; \ + r3 = r0; \ + /* use JMP to limit r3 range to [0, 64] */ \ + if r3 > 64 goto l0_%=; \ + r1 = %[map_ringbuf] ll; \ + r2 = r10; \ + r2 += -64; \ + r4 = 0; \ + /* Call bpf_ringbuf_output(), it is one of a few helper functions with\ + * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\ + * For unpriv this should signal an error, because memory at &fp[-64] is\ + * not initialized. \ + */ \ + call %[bpf_ringbuf_output]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_ringbuf_output), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP (signed), no min check") +__failure __msg("R2 min value is negative") +__naked void jmp_signed_no_min_check(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + 8); \ + r1 = r10; \ + r1 += -64; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + if r2 s> 64 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: map, JMP, correct bounds") +__success +__naked void memory_map_jmp_correct_bounds(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[sizeof_test_val]; \ + *(u64*)(r10 - 128) = r2; \ + r2 = *(u64*)(r10 - 128); \ + if r2 s> %[sizeof_test_val] goto l1_%=; \ + r4 = 0; \ + if r4 s>= r2 goto l1_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(sizeof_test_val, sizeof(struct test_val)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: map, JMP, wrong max") +__failure __msg("invalid access to map value, value_size=48 off=0 size=49") +__naked void memory_map_jmp_wrong_max(void) +{ + asm volatile (" \ + r6 = *(u64*)(r1 + 8); \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = r6; \ + *(u64*)(r10 - 128) = r2; \ + r2 = *(u64*)(r10 - 128); \ + if r2 s> %[__imm_0] goto l1_%=; \ + r4 = 0; \ + if r4 s>= r2 goto l1_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) + 1) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper 
access to variable memory: map adjusted, JMP, correct bounds") +__success +__naked void map_adjusted_jmp_correct_bounds(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += 20; \ + r2 = %[sizeof_test_val]; \ + *(u64*)(r10 - 128) = r2; \ + r2 = *(u64*)(r10 - 128); \ + if r2 s> %[__imm_0] goto l1_%=; \ + r4 = 0; \ + if r4 s>= r2 goto l1_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - 20), + __imm_const(sizeof_test_val, sizeof(struct test_val)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: map adjusted, JMP, wrong max") +__failure __msg("R1 min value is outside of the allowed memory range") +__naked void map_adjusted_jmp_wrong_max(void) +{ + asm volatile (" \ + r6 = *(u64*)(r1 + 8); \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += 20; \ + r2 = r6; \ + *(u64*)(r10 - 128) = r2; \ + r2 = *(u64*)(r10 - 128); \ + if r2 s> %[__imm_0] goto l1_%=; \ + r4 = 0; \ + if r4 s>= r2 goto l1_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - 19) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +__naked void ptr_to_mem_or_null_1(void) +{ + asm volatile (" \ + r1 = 0; \ + r2 = 0; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ + exit; \ +" : + : __imm(bpf_csum_diff) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)") +__failure __msg("R1 type=scalar expected=fp") +__naked void ptr_to_mem_or_null_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + 0); \ + r1 = 0; \ + *(u64*)(r10 - 128) = r2; \ + r2 = *(u64*)(r10 - 128); \ + r2 &= 64; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ + exit; \ +" : + : __imm(bpf_csum_diff) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +__naked void ptr_to_mem_or_null_3(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -8; \ + r2 = 0; \ + *(u64*)(r1 + 0) = r2; \ + r2 &= 8; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ + exit; \ +" : + : __imm(bpf_csum_diff) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +__naked void ptr_to_mem_or_null_4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 0; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: exit; \ +" : + : __imm(bpf_csum_diff), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: 
size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +__naked void ptr_to_mem_or_null_5(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u64*)(r0 + 0); \ + if r2 > 8 goto l0_%=; \ + r1 = r10; \ + r1 += -8; \ + *(u64*)(r1 + 0) = r2; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: exit; \ +" : + : __imm(bpf_csum_diff), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +__naked void ptr_to_mem_or_null_6(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = *(u64*)(r0 + 0); \ + if r2 > 8 goto l0_%=; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: exit; \ +" : + : __imm(bpf_csum_diff), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +/* csum_diff of 64-byte packet */ +__flag(BPF_F_ANY_ALIGNMENT) +__naked void ptr_to_mem_or_null_7(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r6; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r1 = r6; \ + r2 = *(u64*)(r6 + 0); \ + if r2 > 8 goto l0_%=; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)") +__failure __msg("R1 type=scalar expected=fp") +__naked void ptr_to_mem_or_null_8(void) +{ + asm volatile (" \ + r1 = 0; \ + r2 = 0; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)") +__failure __msg("R1 type=scalar expected=fp") +__naked void ptr_to_mem_or_null_9(void) +{ + asm volatile (" \ + r1 = 0; \ + r2 = 1; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)") +__success +__naked void ptr_to_mem_or_null_10(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -8; \ + r2 = 0; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)") +__success +__naked void ptr_to_mem_or_null_11(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 0; \ + 
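/* a NULL r1 with the same r2 = 0 would be rejected for this helper, see ptr_to_mem_or_null_8 */ \ +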
r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)") +__success +__naked void ptr_to_mem_or_null_12(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u64*)(r0 + 0); \ + if r2 > 8 goto l0_%=; \ + r1 = r10; \ + r1 += -8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)") +__success +__naked void ptr_to_mem_or_null_13(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = *(u64*)(r0 + 0); \ + if r2 > 8 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("helper access to variable memory: 8 bytes leak") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid indirect read from stack R2 off -64+32 size 64") +__retval(0) +__naked void variable_memory_8_bytes_leak(void) +{ + asm volatile (" \ + /* set max stack size */ \ + r6 = 0; \ + *(u64*)(r10 - 128) = r6; \ + /* set r3 to a random value */ \ + call %[bpf_get_prandom_u32]; \ + r3 = r0; \ + r1 = %[map_ringbuf] ll; \ + r2 = r10; \ + r2 += -64; \ + r0 = 0; \ + *(u64*)(r10 - 64) = r0; \ + *(u64*)(r10 - 56) = r0; \ + *(u64*)(r10 - 48) = r0; \ + *(u64*)(r10 - 40) = r0; \ + /* Note: fp[-32] left uninitialized */ \ + *(u64*)(r10 - 24) = r0; \ + *(u64*)(r10 - 16) = r0; \ + *(u64*)(r10 - 8) = r0; \ + /* Limit r3 range to [1, 64] */ \ + r3 &= 63; \ + r3 += 1; \ + r4 = 0; \ + /* Call bpf_ringbuf_output(), it is one of a few helper functions with\ + * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\ + * For unpriv this should signal an error, because memory region [1, 64]\ + * at &fp[-64] is not fully initialized. 
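\ +	 * Any size of 33 or more reaches the uninitialized hole at fp[-32], hence off -64+32 in the message.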
\ + */ \ + call %[bpf_ringbuf_output]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_ringbuf_output), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: 8 bytes no leak (init memory)") +__success +__naked void bytes_no_leak_init_memory(void) +{ + asm volatile (" \ + r1 = r10; \ + r0 = 0; \ + r0 = 0; \ + *(u64*)(r10 - 64) = r0; \ + *(u64*)(r10 - 56) = r0; \ + *(u64*)(r10 - 48) = r0; \ + *(u64*)(r10 - 40) = r0; \ + *(u64*)(r10 - 32) = r0; \ + *(u64*)(r10 - 24) = r0; \ + *(u64*)(r10 - 16) = r0; \ + *(u64*)(r10 - 8) = r0; \ + r1 += -64; \ + r2 = 0; \ + r2 &= 32; \ + r2 += 32; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + r1 = *(u64*)(r10 - 16); \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c new file mode 100644 index 000000000000..74f5f9cd153d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/helper_packet_access.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("xdp") +__description("helper access to packet: test1, valid packet_ptr range") +__success __retval(0) +__naked void test1_valid_packet_ptr_range(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + r3 = r2; \ + r4 = 0; \ + call %[bpf_map_update_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_update_elem), + __imm_addr(map_hash_8b), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("helper access to packet: test2, unchecked packet_ptr") +__failure __msg("invalid access to packet") +__naked void packet_test2_unchecked_packet_ptr(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)) + : __clobber_all); +} + +SEC("xdp") +__description("helper access to packet: test3, variable add") +__success __retval(0) +__naked void to_packet_test3_variable_add(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r4 = r2; \ + r4 += 8; \ + if r4 > r3 goto l0_%=; \ + r5 = *(u8*)(r2 + 0); \ + r4 = r2; \ + r4 += r5; \ + r5 = r4; \ + r5 += 8; \ + if r5 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + r2 = r4; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("helper access to packet: test4, packet_ptr with bad range") +__failure __msg("invalid access to packet") +__naked void packet_ptr_with_bad_range_1(void) +{ 
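+	/* the map lookup below is reached only when the r4 > r3 bounds check fails, so r2 carries no verified packet range */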
+ asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r4 = r2; \ + r4 += 4; \ + if r4 > r3 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("helper access to packet: test5, packet_ptr with too short range") +__failure __msg("invalid access to packet") +__naked void ptr_with_too_short_range_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r2 += 1; \ + r4 = r2; \ + r4 += 7; \ + if r4 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test6, cls valid packet_ptr range") +__success __retval(0) +__naked void cls_valid_packet_ptr_range(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + r3 = r2; \ + r4 = 0; \ + call %[bpf_map_update_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_update_elem), + __imm_addr(map_hash_8b), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test7, cls unchecked packet_ptr") +__failure __msg("invalid access to packet") +__naked void test7_cls_unchecked_packet_ptr(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test8, cls variable add") +__success __retval(0) +__naked void packet_test8_cls_variable_add(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r4 = r2; \ + r4 += 8; \ + if r4 > r3 goto l0_%=; \ + r5 = *(u8*)(r2 + 0); \ + r4 = r2; \ + r4 += r5; \ + r5 = r4; \ + r5 += 8; \ + if r5 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + r2 = r4; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test9, cls packet_ptr with bad range") +__failure __msg("invalid access to packet") +__naked void packet_ptr_with_bad_range_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r4 = r2; \ + r4 += 4; \ + if r4 > r3 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + 
__imm_addr(map_hash_8b), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test10, cls packet_ptr with too short range") +__failure __msg("invalid access to packet") +__naked void ptr_with_too_short_range_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r2 += 1; \ + r4 = r2; \ + r4 += 7; \ + if r4 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test11, cls unsuitable helper 1") +__failure __msg("helper access to the packet") +__naked void test11_cls_unsuitable_helper_1(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r3 = r6; \ + r3 += 7; \ + if r3 > r7 goto l0_%=; \ + r2 = 0; \ + r4 = 42; \ + r5 = 0; \ + call %[bpf_skb_store_bytes]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_skb_store_bytes), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test12, cls unsuitable helper 2") +__failure __msg("helper access to the packet") +__naked void test12_cls_unsuitable_helper_2(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r3 = r6; \ + r6 += 8; \ + if r6 > r7 goto l0_%=; \ + r2 = 0; \ + r4 = 4; \ + call %[bpf_skb_load_bytes]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test13, cls helper ok") +__success __retval(0) +__naked void packet_test13_cls_helper_ok(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = r6; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test14, cls helper ok sub") +__success __retval(0) +__naked void test14_cls_helper_ok_sub(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 -= 4; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test15, cls helper fail sub") +__failure __msg("invalid access to packet") 
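+/* unlike the r1 -= 4 in test14, the r1 -= 12 below moves r1 in front of the verified packet start */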
+__naked void test15_cls_helper_fail_sub(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 -= 12; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test16, cls helper fail range 1") +__failure __msg("invalid access to packet") +__naked void cls_helper_fail_range_1(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = r6; \ + r2 = 8; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test17, cls helper fail range 2") +__failure __msg("R2 min value is negative") +__naked void cls_helper_fail_range_2(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = r6; \ + r2 = -9; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test18, cls helper fail range 3") +__failure __msg("R2 min value is negative") +__naked void cls_helper_fail_range_3(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = r6; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__imm_0, ~0), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test19, cls helper range zero") +__success __retval(0) +__naked void test19_cls_helper_range_zero(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = r6; \ + r2 = 0; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test20, pkt end as input") +__failure __msg("R1 type=pkt_end expected=fp") +__naked void test20_pkt_end_as_input(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = 
r7; \ +	r2 = 4; \ +	r3 = 0; \ +	r4 = 0; \ +	r5 = 0; \ +	call %[bpf_csum_diff]; \ +l0_%=:	r0 = 0; \ +	exit; \ +"	: + : __imm(bpf_csum_diff), +	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), +	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) +	: __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test21, wrong reg") +__failure __msg("invalid access to packet") +__naked void to_packet_test21_wrong_reg(void) +{ +	asm volatile ("					\ +	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\ +	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\ +	r6 += 1;					\ +	r1 = r6;					\ +	r1 += 7;					\ +	if r1 > r7 goto l0_%=;				\ +	r2 = 4;						\ +	r3 = 0;						\ +	r4 = 0;						\ +	r5 = 0;						\ +	call %[bpf_csum_diff];				\ +	r0 = 0;						\ +l0_%=:	exit;						\ +"	: + : __imm(bpf_csum_diff), +	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), +	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) +	: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_restricted.c b/tools/testing/selftests/bpf/progs/verifier_helper_restricted.c new file mode 100644 index 000000000000..0ede0ccd090c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_helper_restricted.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/helper_restricted.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct val { +	int cnt; +	struct bpf_spin_lock l; +}; + +struct { +	__uint(type, BPF_MAP_TYPE_ARRAY); +	__uint(max_entries, 1); +	__type(key, int); +	__type(value, struct val); +} map_spin_lock SEC(".maps"); + +struct timer { +	struct bpf_timer t; +}; + +struct { +	__uint(type, BPF_MAP_TYPE_ARRAY); +	__uint(max_entries, 1); +	__type(key, int); +	__type(value, struct timer); +} map_timer SEC(".maps"); + +SEC("kprobe") +__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE") +__failure __msg("unknown func bpf_ktime_get_coarse_ns") +__naked void in_bpf_prog_type_kprobe_1(void) +{ +	asm volatile ("					\ +	call %[bpf_ktime_get_coarse_ns];		\ +	r0 = 0;						\ +	exit;						\ +"	: + : __imm(bpf_ktime_get_coarse_ns) +	: __clobber_all); +} + +SEC("tracepoint") +__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT") +__failure __msg("unknown func bpf_ktime_get_coarse_ns") +__naked void in_bpf_prog_type_tracepoint_1(void) +{ +	asm volatile ("					\ +	call %[bpf_ktime_get_coarse_ns];		\ +	r0 = 0;						\ +	exit;						\ +"	: + : __imm(bpf_ktime_get_coarse_ns) +	: __clobber_all); +} + +SEC("perf_event") +__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT") +__failure __msg("unknown func bpf_ktime_get_coarse_ns") +__naked void bpf_prog_type_perf_event_1(void) +{ +	asm volatile ("					\ +	call %[bpf_ktime_get_coarse_ns];		\ +	r0 = 0;						\ +	exit;						\ +"	: + : __imm(bpf_ktime_get_coarse_ns) +	: __clobber_all); +} + +SEC("raw_tracepoint") +__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT") +__failure __msg("unknown func bpf_ktime_get_coarse_ns") +__naked void bpf_prog_type_raw_tracepoint_1(void) +{ +	asm volatile ("					\ +	call %[bpf_ktime_get_coarse_ns];		\ +	r0 = 0;						\ +	exit;						\ +"	: + : __imm(bpf_ktime_get_coarse_ns) +	: __clobber_all); +} + +SEC("kprobe") +__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_KPROBE") +__failure __msg("tracing progs cannot use bpf_timer yet") +__naked void in_bpf_prog_type_kprobe_2(void) +{ +	asm volatile ("					\ +	r2 = r10;					\ +	r2 += -8;					\ +	r1 = 0;						\ +	
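/* key 0 for the lookup; the bpf_timer_init call below is rejected for tracing programs on either path */ \ +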
*(u64*)(r2 + 0) = r1; \ + r1 = %[map_timer] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[map_timer] ll; \ + r3 = 1; \ +l0_%=: call %[bpf_timer_init]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_timer_init), + __imm_addr(map_timer) + : __clobber_all); +} + +SEC("perf_event") +__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT") +__failure __msg("tracing progs cannot use bpf_timer yet") +__naked void bpf_prog_type_perf_event_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_timer] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[map_timer] ll; \ + r3 = 1; \ +l0_%=: call %[bpf_timer_init]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_timer_init), + __imm_addr(map_timer) + : __clobber_all); +} + +SEC("tracepoint") +__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT") +__failure __msg("tracing progs cannot use bpf_timer yet") +__naked void in_bpf_prog_type_tracepoint_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_timer] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[map_timer] ll; \ + r3 = 1; \ +l0_%=: call %[bpf_timer_init]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_timer_init), + __imm_addr(map_timer) + : __clobber_all); +} + +SEC("raw_tracepoint") +__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT") +__failure __msg("tracing progs cannot use bpf_timer yet") +__naked void bpf_prog_type_raw_tracepoint_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_timer] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[map_timer] ll; \ + r3 = 1; \ +l0_%=: call %[bpf_timer_init]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_timer_init), + __imm_addr(map_timer) + : __clobber_all); +} + +SEC("kprobe") +__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE") +__failure __msg("tracing progs cannot use bpf_spin_lock yet") +__naked void in_bpf_prog_type_kprobe_3(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_spin_lock]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("tracepoint") +__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT") +__failure __msg("tracing progs cannot use bpf_spin_lock yet") +__naked void in_bpf_prog_type_tracepoint_3(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_spin_lock]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("perf_event") +__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT") +__failure __msg("tracing progs cannot use bpf_spin_lock yet") +__naked void bpf_prog_type_perf_event_3(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 
r0; \ + call %[bpf_spin_lock]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("raw_tracepoint") +__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT") +__failure __msg("tracing progs cannot use bpf_spin_lock yet") +__naked void bpf_prog_type_raw_tracepoint_3(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_spin_lock]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c b/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c new file mode 100644 index 000000000000..692216c0ad3d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c @@ -0,0 +1,1245 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/helper_value_access.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct other_val { + long long foo; + long long bar; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct other_val); +} map_hash_16b SEC(".maps"); + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("tracepoint") +__description("helper access to map: full range") +__success +__naked void access_to_map_full_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[sizeof_test_val]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(sizeof_test_val, sizeof(struct test_val)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: partial range") +__success +__naked void access_to_map_partial_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: empty range") +__failure __msg("invalid access to map value, value_size=48 off=0 size=0") +__naked void access_to_map_empty_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 0; \ + call %[bpf_trace_printk]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_trace_printk), + __imm_addr(map_hash_48b) + : 
__clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: out-of-bound range") +__failure __msg("invalid access to map value, value_size=48 off=0 size=56") +__naked void map_out_of_bound_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) + 8) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: negative range") +__failure __msg("R2 min value is negative") +__naked void access_to_map_negative_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = -8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): full range") +__success +__naked void via_const_imm_full_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): partial range") +__success +__naked void via_const_imm_partial_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): empty range") +__failure __msg("invalid access to map value, value_size=48 off=4 size=0") +__naked void via_const_imm_empty_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = 0; \ + call %[bpf_trace_printk]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_trace_printk), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): out-of-bound range") +__failure __msg("invalid access to map value, value_size=48 off=4 size=52") +__naked void imm_out_of_bound_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; 
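\ +	/* r2 below is 52: starting at foo (off 4) it runs 8 bytes past the 48-byte value, as the expected message says */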
\ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): negative range (> adjustment)") +__failure __msg("R2 min value is negative") +__naked void const_imm_negative_range_adjustment_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = -8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): negative range (< adjustment)") +__failure __msg("R2 min value is negative") +__naked void const_imm_negative_range_adjustment_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = -1; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): full range") +__success +__naked void via_const_reg_full_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = %[test_val_foo]; \ + r1 += r3; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): partial range") +__success +__naked void via_const_reg_partial_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = %[test_val_foo]; \ + r1 += r3; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): empty range") +__failure __msg("R1 min value is outside of the allowed memory range") +__naked void via_const_reg_empty_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = 
%[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = 0; \ + r1 += r3; \ + r2 = 0; \ + call %[bpf_trace_printk]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_trace_printk), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): out-of-bound range") +__failure __msg("invalid access to map value, value_size=48 off=4 size=52") +__naked void reg_out_of_bound_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = %[test_val_foo]; \ + r1 += r3; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): negative range (> adjustment)") +__failure __msg("R2 min value is negative") +__naked void const_reg_negative_range_adjustment_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = %[test_val_foo]; \ + r1 += r3; \ + r2 = -8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): negative range (< adjustment)") +__failure __msg("R2 min value is negative") +__naked void const_reg_negative_range_adjustment_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = %[test_val_foo]; \ + r1 += r3; \ + r2 = -1; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via variable): full range") +__success +__naked void map_via_variable_full_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[test_val_foo] goto l0_%=; \ + r1 += r3; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via variable): partial range") +__success +__naked void map_via_variable_partial_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = 
%[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[test_val_foo] goto l0_%=; \ + r1 += r3; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via variable): empty range") +__failure __msg("R1 min value is outside of the allowed memory range") +__naked void map_via_variable_empty_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[test_val_foo] goto l0_%=; \ + r1 += r3; \ + r2 = 0; \ + call %[bpf_trace_printk]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_trace_printk), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via variable): no max check") +__failure __msg("R1 unbounded memory access") +__naked void via_variable_no_max_check_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + r1 += r3; \ + r2 = 1; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via variable): wrong max check") +__failure __msg("invalid access to map value, value_size=48 off=4 size=45") +__naked void via_variable_wrong_max_check_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[test_val_foo] goto l0_%=; \ + r1 += r3; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 1), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using <, good access") +__success +__naked void bounds_check_using_good_access_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 < 32 goto l1_%=; \ + r0 = 0; \ +l0_%=: exit; \ +l1_%=: r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using <, bad access") +__failure __msg("R1 unbounded memory access") +__naked void bounds_check_using_bad_access_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call 
%[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 < 32 goto l1_%=; \ + r1 += r3; \ +l0_%=: r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using <=, good access") +__success +__naked void bounds_check_using_good_access_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 <= 32 goto l1_%=; \ + r0 = 0; \ +l0_%=: exit; \ +l1_%=: r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using <=, bad access") +__failure __msg("R1 unbounded memory access") +__naked void bounds_check_using_bad_access_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 <= 32 goto l1_%=; \ + r1 += r3; \ +l0_%=: r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<, good access") +__success +__naked void check_using_s_good_access_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 s< 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s< 0 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<, good access 2") +__success +__naked void using_s_good_access_2_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 s< 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s< -3 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<, bad access") +__failure __msg("R1 min value is negative") +__naked void check_using_s_bad_access_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u64*)(r0 + 0); \ + if r3 s< 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s< -3 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<=, good access") 
+__success +__naked void check_using_s_good_access_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 s<= 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s<= 0 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<=, good access 2") +__success +__naked void using_s_good_access_2_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 s<= 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s<= -3 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<=, bad access") +__failure __msg("R1 min value is negative") +__naked void check_using_s_bad_access_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u64*)(r0 + 0); \ + if r3 s<= 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s<= -3 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map lookup helper access to map") +__success +__naked void lookup_helper_access_to_map(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map update helper access to map") +__success +__naked void update_helper_access_to_map(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r4 = 0; \ + r3 = r0; \ + r2 = r0; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_update_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_map_update_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map update helper access to map: wrong size") +__failure __msg("invalid access to map value, value_size=8 off=0 size=16") +__naked void access_to_map_wrong_size(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r4 = 0; \ + r3 = r0; \ + r2 = r0; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_update_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_map_update_elem), + __imm_addr(map_hash_16b), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to 
adjusted map (via const imm)") +__success +__naked void adjusted_map_via_const_imm(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r2 += %[other_val_bar]; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(other_val_bar, offsetof(struct other_val, bar)) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via const imm): out-of-bound 1") +__failure __msg("invalid access to map value, value_size=16 off=12 size=8") +__naked void imm_out_of_bound_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r2 += %[__imm_0]; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(__imm_0, sizeof(struct other_val) - 4) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via const imm): out-of-bound 2") +__failure __msg("invalid access to map value, value_size=16 off=-4 size=8") +__naked void imm_out_of_bound_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r2 += -4; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via const reg)") +__success +__naked void adjusted_map_via_const_reg(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = %[other_val_bar]; \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(other_val_bar, offsetof(struct other_val, bar)) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via const reg): out-of-bound 1") +__failure __msg("invalid access to map value, value_size=16 off=12 size=8") +__naked void reg_out_of_bound_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = %[__imm_0]; \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(__imm_0, sizeof(struct other_val) - 4) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via const reg): out-of-bound 2") +__failure __msg("invalid access to map value, value_size=16 off=-4 size=8") +__naked void reg_out_of_bound_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = -4; \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ 
+l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via variable)") +__success +__naked void to_adjusted_map_via_variable(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[other_val_bar] goto l0_%=; \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(other_val_bar, offsetof(struct other_val, bar)) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via variable): no max check") +__failure +__msg("R2 unbounded memory access, make sure to bounds check any such access") +__naked void via_variable_no_max_check_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = *(u32*)(r0 + 0); \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via variable): wrong max check") +__failure __msg("invalid access to map value, value_size=16 off=9 size=8") +__naked void via_variable_wrong_max_check_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[__imm_0] goto l0_%=; \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(__imm_0, offsetof(struct other_val, bar) + 1) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c new file mode 100644 index 000000000000..b054f9c48143 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/int_ptr.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("cgroup/sysctl") +__description("ARG_PTR_TO_LONG uninitialized") +__failure __msg("invalid indirect read from stack R4 off -16+0 size 8") +__naked void arg_ptr_to_long_uninitialized(void) +{ + asm volatile (" \ + /* bpf_strtoul arg1 (buf) */ \ + r7 = r10; \ + r7 += -8; \ + r0 = 0x00303036; \ + *(u64*)(r7 + 0) = r0; \ + r1 = r7; \ + /* bpf_strtoul arg2 (buf_len) */ \ + r2 = 4; \ + /* bpf_strtoul arg3 (flags) */ \ + r3 = 0; \ + /* bpf_strtoul arg4 (res) */ \ + r7 += -8; \ + r4 = r7; \ + /* bpf_strtoul() */ \ + call %[bpf_strtoul]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_strtoul) + : __clobber_all); +} + +SEC("socket") +__description("ARG_PTR_TO_LONG half-uninitialized") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid indirect read from stack R4 off -16+4 size 8") +__retval(0) +__naked void ptr_to_long_half_uninitialized(void) +{ + asm volatile 
(" \ + /* bpf_strtoul arg1 (buf) */ \ + r7 = r10; \ + r7 += -8; \ + r0 = 0x00303036; \ + *(u64*)(r7 + 0) = r0; \ + r1 = r7; \ + /* bpf_strtoul arg2 (buf_len) */ \ + r2 = 4; \ + /* bpf_strtoul arg3 (flags) */ \ + r3 = 0; \ + /* bpf_strtoul arg4 (res) */ \ + r7 += -8; \ + *(u32*)(r7 + 0) = r0; \ + r4 = r7; \ + /* bpf_strtoul() */ \ + call %[bpf_strtoul]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_strtoul) + : __clobber_all); +} + +SEC("cgroup/sysctl") +__description("ARG_PTR_TO_LONG misaligned") +__failure __msg("misaligned stack access off (0x0; 0x0)+-20+0 size 8") +__naked void arg_ptr_to_long_misaligned(void) +{ + asm volatile (" \ + /* bpf_strtoul arg1 (buf) */ \ + r7 = r10; \ + r7 += -8; \ + r0 = 0x00303036; \ + *(u64*)(r7 + 0) = r0; \ + r1 = r7; \ + /* bpf_strtoul arg2 (buf_len) */ \ + r2 = 4; \ + /* bpf_strtoul arg3 (flags) */ \ + r3 = 0; \ + /* bpf_strtoul arg4 (res) */ \ + r7 += -12; \ + r0 = 0; \ + *(u32*)(r7 + 0) = r0; \ + *(u64*)(r7 + 4) = r0; \ + r4 = r7; \ + /* bpf_strtoul() */ \ + call %[bpf_strtoul]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_strtoul) + : __clobber_all); +} + +SEC("cgroup/sysctl") +__description("ARG_PTR_TO_LONG size < sizeof(long)") +__failure __msg("invalid indirect access to stack R4 off=-4 size=8") +__naked void to_long_size_sizeof_long(void) +{ + asm volatile (" \ + /* bpf_strtoul arg1 (buf) */ \ + r7 = r10; \ + r7 += -16; \ + r0 = 0x00303036; \ + *(u64*)(r7 + 0) = r0; \ + r1 = r7; \ + /* bpf_strtoul arg2 (buf_len) */ \ + r2 = 4; \ + /* bpf_strtoul arg3 (flags) */ \ + r3 = 0; \ + /* bpf_strtoul arg4 (res) */ \ + r7 += 12; \ + *(u32*)(r7 + 0) = r0; \ + r4 = r7; \ + /* bpf_strtoul() */ \ + call %[bpf_strtoul]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_strtoul) + : __clobber_all); +} + +SEC("cgroup/sysctl") +__description("ARG_PTR_TO_LONG initialized") +__success +__naked void arg_ptr_to_long_initialized(void) +{ + asm volatile (" \ + /* bpf_strtoul arg1 (buf) */ \ + r7 = r10; \ + r7 += -8; \ + r0 = 0x00303036; \ + *(u64*)(r7 + 0) = r0; \ + r1 = r7; \ + /* bpf_strtoul arg2 (buf_len) */ \ + r2 = 4; \ + /* bpf_strtoul arg3 (flags) */ \ + r3 = 0; \ + /* bpf_strtoul arg4 (res) */ \ + r7 += -8; \ + *(u64*)(r7 + 0) = r0; \ + r4 = r7; \ + /* bpf_strtoul() */ \ + call %[bpf_strtoul]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_strtoul) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c new file mode 100644 index 000000000000..bf16b00502f2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_xskmap SEC(".maps"); + +/* This is equivalent to the following program: + * + * r6 = skb->sk; + * r7 = sk_fullsock(r6); + * r0 = sk_fullsock(r6); + * if (r0 == 0) return 0; (a) + * if (r0 != r7) return 0; (b) + * *r7->type; (c) + * return 0; + * + * It is safe to dereference r7 at point (c), because of (a) and (b). + * The test verifies that relation r0 == r7 is propagated from (b) to (c). 
+ */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch") +__success __failure_unpriv __msg_unpriv("R7 pointer comparison") +__retval(0) +__naked void socket_for_jne_false_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk; */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == 0) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 == 0 goto l0_%=; \ + /* if (r0 == r7) r0 = *(r7->type); */ \ + if r0 != r7 goto l0_%=; /* Use ! JNE ! */\ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0 */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Same as above, but verify that another branch of JNE still + * prohibits access to PTR_MAYBE_NULL. + */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch") +__failure __msg("R7 invalid mem access 'sock_or_null'") +__failure_unpriv __msg_unpriv("R7 pointer comparison") +__naked void unchanged_for_jne_true_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == 0) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 != 0 goto l0_%=; \ + /* if (r0 == r7) return 0; */ \ + if r0 != r7 goto l1_%=; /* Use ! JNE ! */\ + goto l0_%=; \ +l1_%=: /* r0 = *(r7->type); */ \ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0 */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Same as the first test, but not-null should be inferred on the JEQ true branch */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch") +__success __failure_unpriv __msg_unpriv("R7 pointer comparison") +__retval(0) +__naked void socket_for_jeq_true_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk; */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == null) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 == 0 goto l0_%=; \ + /* if (r0 != r7) return 0; */ \ + if r0 == r7 goto l1_%=; /* Use ! JEQ ! */\ + goto l0_%=; \ +l1_%=: /* r0 = *(r7->type); */ \ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0; */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Same as above, but verify that the other branch of JEQ still + * prohibits access to PTR_MAYBE_NULL. 
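+ * On the fall-through (not-equal) path the r0 == r7 relation is never
+ * established, so r7 keeps its PTR_TO_SOCKET_OR_NULL type and the
+ * dereference has to be rejected, as the expected 'sock_or_null'
+ * message below checks.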
+ */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch") +__failure __msg("R7 invalid mem access 'sock_or_null'") +__failure_unpriv __msg_unpriv("R7 pointer comparison") +__naked void unchanged_for_jeq_false_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk; */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == null) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 == 0 goto l0_%=; \ + /* if (r0 != r7) r0 = *(r7->type); */ \ + if r0 == r7 goto l0_%=; /* Use ! JEQ ! */\ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0; */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Maps are treated in a different branch of `mark_ptr_not_null_reg`, + * so separate test for maps case. + */ +SEC("xdp") +__description("jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE") +__success __retval(0) +__naked void null_ptr_to_map_value(void) +{ + asm volatile (" \ + /* r9 = &some stack to use as key */ \ + r1 = 0; \ + *(u32*)(r10 - 8) = r1; \ + r9 = r10; \ + r9 += -8; \ + /* r8 = process local map */ \ + r8 = %[map_xskmap] ll; \ + /* r6 = map_lookup_elem(r8, r9); */ \ + r1 = r8; \ + r2 = r9; \ + call %[bpf_map_lookup_elem]; \ + r6 = r0; \ + /* r7 = map_lookup_elem(r8, r9); */ \ + r1 = r8; \ + r2 = r9; \ + call %[bpf_map_lookup_elem]; \ + r7 = r0; \ + /* if (r6 == 0) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* if (r6 != r7) return 0; */ \ + if r6 != r7 goto l0_%=; \ + /* read *r7; */ \ + r0 = *(u32*)(r7 + %[bpf_xdp_sock_queue_id]); \ +l0_%=: /* return 0; */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_xskmap), + __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_ld_ind.c b/tools/testing/selftests/bpf/progs/verifier_ld_ind.c new file mode 100644 index 000000000000..c925ba9a2e74 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ld_ind.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ld_ind.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +SEC("socket") +__description("ld_ind: check calling conv, r1") +__failure __msg("R1 !read_ok") +__failure_unpriv +__naked void ind_check_calling_conv_r1(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 1; \ + .8byte %[ld_ind]; \ + r0 = r1; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000)) + : __clobber_all); +} + +SEC("socket") +__description("ld_ind: check calling conv, r2") +__failure __msg("R2 !read_ok") +__failure_unpriv +__naked void ind_check_calling_conv_r2(void) +{ + asm volatile (" \ + r6 = r1; \ + r2 = 1; \ + .8byte %[ld_ind]; \ + r0 = r2; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000)) + : __clobber_all); +} + +SEC("socket") +__description("ld_ind: check calling conv, r3") +__failure __msg("R3 !read_ok") +__failure_unpriv +__naked void ind_check_calling_conv_r3(void) +{ + asm volatile (" \ + r6 = r1; \ + r3 = 1; \ + 
.8byte %[ld_ind]; \ + r0 = r3; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000)) + : __clobber_all); +} + +SEC("socket") +__description("ld_ind: check calling conv, r4") +__failure __msg("R4 !read_ok") +__failure_unpriv +__naked void ind_check_calling_conv_r4(void) +{ + asm volatile (" \ + r6 = r1; \ + r4 = 1; \ + .8byte %[ld_ind]; \ + r0 = r4; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000)) + : __clobber_all); +} + +SEC("socket") +__description("ld_ind: check calling conv, r5") +__failure __msg("R5 !read_ok") +__failure_unpriv +__naked void ind_check_calling_conv_r5(void) +{ + asm volatile (" \ + r6 = r1; \ + r5 = 1; \ + .8byte %[ld_ind]; \ + r0 = r5; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000)) + : __clobber_all); +} + +SEC("socket") +__description("ld_ind: check calling conv, r7") +__success __success_unpriv __retval(1) +__naked void ind_check_calling_conv_r7(void) +{ + asm volatile (" \ + r6 = r1; \ + r7 = 1; \ + .8byte %[ld_ind]; \ + r0 = r7; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_leak_ptr.c b/tools/testing/selftests/bpf/progs/verifier_leak_ptr.c new file mode 100644 index 000000000000..d153fbe50055 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_leak_ptr.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/leak_ptr.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("leak pointer into ctx 1") +__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed") +__failure_unpriv __msg_unpriv("R2 leaks addr into mem") +__naked void leak_pointer_into_ctx_1(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r2 = %[map_hash_8b] ll; \ + lock *(u64 *)(r1 + %[__sk_buff_cb_0]) += r2; \ + exit; \ +" : + : __imm_addr(map_hash_8b), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("leak pointer into ctx 2") +__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed") +__failure_unpriv __msg_unpriv("R10 leaks addr into mem") +__naked void leak_pointer_into_ctx_2(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r1 + %[__sk_buff_cb_0]) = r0; \ + lock *(u64 *)(r1 + %[__sk_buff_cb_0]) += r10; \ + exit; \ +" : + : __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("leak pointer into ctx 3") +__success __failure_unpriv __msg_unpriv("R2 leaks addr into ctx") +__retval(0) +__naked void leak_pointer_into_ctx_3(void) +{ + asm volatile (" \ + r0 = 0; \ + r2 = %[map_hash_8b] ll; \ + *(u64*)(r1 + %[__sk_buff_cb_0]) = r2; \ + exit; \ +" : + : __imm_addr(map_hash_8b), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("leak pointer into map val") +__success __failure_unpriv __msg_unpriv("R6 leaks addr into mem") +__retval(0) +__naked void leak_pointer_into_map_val(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call 
%[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r3 = 0; \ + *(u64*)(r0 + 0) = r3; \ + lock *(u64 *)(r0 + 0) += r6; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c new file mode 100644 index 000000000000..5bc86af80a9a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/loops1.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("xdp") +__description("bounded loop, count to 4") +__success __retval(4) +__naked void bounded_loop_count_to_4(void) +{ + asm volatile (" \ + r0 = 0; \ +l0_%=: r0 += 1; \ + if r0 < 4 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count to 20") +__success +__naked void bounded_loop_count_to_20(void) +{ + asm volatile (" \ + r0 = 0; \ +l0_%=: r0 += 3; \ + if r0 < 20 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count from positive unknown to 4") +__success +__naked void from_positive_unknown_to_4(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + if r0 s< 0 goto l0_%=; \ +l1_%=: r0 += 1; \ + if r0 < 4 goto l1_%=; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count from totally unknown to 4") +__success +__naked void from_totally_unknown_to_4(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ +l0_%=: r0 += 1; \ + if r0 < 4 goto l0_%=; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count to 4 with equality") +__success +__naked void count_to_4_with_equality(void) +{ + asm volatile (" \ + r0 = 0; \ +l0_%=: r0 += 1; \ + if r0 != 4 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, start in the middle") +__failure __msg("back-edge") +__naked void loop_start_in_the_middle(void) +{ + asm volatile (" \ + r0 = 0; \ + goto l0_%=; \ +l1_%=: r0 += 1; \ +l0_%=: if r0 < 4 goto l1_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("bounded loop containing a forward jump") +__success __retval(4) +__naked void loop_containing_a_forward_jump(void) +{ + asm volatile (" \ + r0 = 0; \ +l1_%=: r0 += 1; \ + if r0 == r0 goto l0_%=; \ +l0_%=: if r0 < 4 goto l1_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop that jumps out rather than in") +__success +__naked void jumps_out_rather_than_in(void) +{ + asm volatile (" \ + r6 = 0; \ +l1_%=: r6 += 1; \ + if r6 > 10000 goto l0_%=; \ + call %[bpf_get_prandom_u32]; \ + goto l1_%=; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("tracepoint") +__description("infinite loop after a conditional jump") +__failure __msg("program is too large") +__naked void loop_after_a_conditional_jump(void) +{ + asm volatile (" \ + r0 = 5; \ + if r0 < 4 goto l0_%=; \ +l1_%=: r0 += 1; \ + goto l1_%=; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded recursion") +__failure __msg("back-edge") +__naked void bounded_recursion(void) +{ + asm volatile (" \ + r1 = 0; \ + call bounded_recursion__1; \ + exit; \ +" 
::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void bounded_recursion__1(void) +{ + asm volatile (" \ + r1 += 1; \ + r0 = r1; \ + if r1 < 4 goto l0_%=; \ + exit; \ +l0_%=: call bounded_recursion__1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("infinite loop in two jumps") +__failure __msg("loop detected") +__naked void infinite_loop_in_two_jumps(void) +{ + asm volatile (" \ + r0 = 0; \ +l1_%=: goto l0_%=; \ +l0_%=: if r0 < 4 goto l1_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("infinite loop: three-jump trick") +__failure __msg("loop detected") +__naked void infinite_loop_three_jump_trick(void) +{ + asm volatile (" \ + r0 = 0; \ +l2_%=: r0 += 1; \ + r0 &= 1; \ + if r0 < 2 goto l0_%=; \ + exit; \ +l0_%=: r0 += 1; \ + r0 &= 1; \ + if r0 < 2 goto l1_%=; \ + exit; \ +l1_%=: r0 += 1; \ + r0 &= 1; \ + if r0 < 2 goto l2_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("not-taken loop with back jump to 1st insn") +__success __retval(123) +__naked void back_jump_to_1st_insn_1(void) +{ + asm volatile (" \ +l0_%=: r0 = 123; \ + if r0 == 4 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("taken loop with back jump to 1st insn") +__success __retval(55) +__naked void back_jump_to_1st_insn_2(void) +{ + asm volatile (" \ + r1 = 10; \ + r2 = 0; \ + call back_jump_to_1st_insn_2__1; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void back_jump_to_1st_insn_2__1(void) +{ + asm volatile (" \ +l0_%=: r2 += r1; \ + r1 -= 1; \ + if r1 != 0 goto l0_%=; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("taken loop with back jump to 1st insn, 2") +__success __retval(55) +__naked void jump_to_1st_insn_2(void) +{ + asm volatile (" \ + r1 = 10; \ + r2 = 0; \ + call jump_to_1st_insn_2__1; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void jump_to_1st_insn_2__1(void) +{ + asm volatile (" \ +l0_%=: r2 += r1; \ + r1 -= 1; \ + if w1 != 0 goto l0_%=; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_lwt.c b/tools/testing/selftests/bpf/progs/verifier_lwt.c new file mode 100644 index 000000000000..5ab746307309 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_lwt.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/lwt.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("lwt_in") +__description("invalid direct packet write for LWT_IN") +__failure __msg("cannot write into packet") +__naked void packet_write_for_lwt_in(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_out") +__description("invalid direct packet write for LWT_OUT") +__failure __msg("cannot write into packet") +__naked void packet_write_for_lwt_out(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ 
+ exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("direct packet write for LWT_XMIT") +__success __retval(0) +__naked void packet_write_for_lwt_xmit(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_in") +__description("direct packet read for LWT_IN") +__success __retval(0) +__naked void packet_read_for_lwt_in(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_out") +__description("direct packet read for LWT_OUT") +__success __retval(0) +__naked void packet_read_for_lwt_out(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("direct packet read for LWT_XMIT") +__success __retval(0) +__naked void packet_read_for_lwt_xmit(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("overlapping checks for direct packet access") +__success __retval(0) +__naked void checks_for_direct_packet_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r1 = r2; \ + r1 += 6; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u16*)(r2 + 6); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("make headroom for LWT_XMIT") +__success __retval(0) +__naked void make_headroom_for_lwt_xmit(void) +{ + asm volatile (" \ + r6 = r1; \ + r2 = 34; \ + r3 = 0; \ + call %[bpf_skb_change_head]; \ + /* split for s390 to succeed */ \ + r1 = r6; \ + r2 = 42; \ + r3 = 0; \ + call %[bpf_skb_change_head]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_skb_change_head) + : __clobber_all); +} + +SEC("socket") +__description("invalid access of tc_classid for LWT_IN") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void tc_classid_for_lwt_in(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \ + exit; \ +" : + : 
__imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +SEC("socket") +__description("invalid access of tc_classid for LWT_OUT") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void tc_classid_for_lwt_out(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \ + exit; \ +" : + : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +SEC("socket") +__description("invalid access of tc_classid for LWT_XMIT") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void tc_classid_for_lwt_xmit(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \ + exit; \ +" : + : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +SEC("lwt_in") +__description("check skb->tc_classid half load not permitted for lwt prog") +__failure __msg("invalid bpf_context access") +__naked void not_permitted_for_lwt_prog(void) +{ + asm volatile ( + "r0 = 0;" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r0 = *(u16*)(r1 + %[__sk_buff_tc_classid]);" +#else + "r0 = *(u16*)(r1 + %[__imm_0]);" +#endif + "exit;" + : + : __imm_const(__imm_0, offsetof(struct __sk_buff, tc_classid) + 2), + __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c new file mode 100644 index 000000000000..4eaab1468eb7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/map_in_map.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + __array(values, struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + }); +} map_in_map SEC(".maps"); + +SEC("socket") +__description("map in map access") +__success __success_unpriv __retval(0) +__naked void map_in_map_access(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("xdp") +__description("map in map state pruning") +__success __msg("processed 26 insns") +__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ) +__naked void map_in_map_state_pruning(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r6 = r10; \ + r6 += -4; \ + r2 = r6; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r2 = r6; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ + r2 = r6; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l2_%=; \ + exit; \ +l2_%=: r2 = r6; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + 0); \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + 
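+/* The two failure tests below probe the same map-in-map shape as the
+ * access test above. As a rough C sketch (illustrative only, not part
+ * of the original tests), the pattern being exercised is:
+ *
+ *	int key = 0;
+ *	void *inner = bpf_map_lookup_elem(&map_in_map, &key);
+ *	if (!inner)
+ *		return 0;
+ *	return bpf_map_lookup_elem(inner, &key) != NULL;
+ *
+ * The outer lookup yields an inner map pointer (or NULL) that must be
+ * NULL-checked, and must be passed to the inner lookup unmodified: any
+ * pointer arithmetic on it is rejected, as the first test below shows.
+ */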
+SEC("socket") +__description("invalid inner map pointer") +__failure __msg("R1 pointer arithmetic on map_ptr prohibited") +__failure_unpriv +__naked void invalid_inner_map_pointer(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = r0; \ + r1 += 8; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") +__description("forgot null checking on the inner map pointer") +__failure __msg("R1 type=map_value_or_null expected=map_ptr") +__failure_unpriv +__naked void on_the_inner_map_pointer(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_map_ptr.c b/tools/testing/selftests/bpf/progs/verifier_map_ptr.c new file mode 100644 index 000000000000..11a079145966 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_map_ptr.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/map_ptr.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct other_val { + long long foo; + long long bar; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct other_val); +} map_hash_16b SEC(".maps"); + +SEC("socket") +__description("bpf_map_ptr: read with negative offset rejected") +__failure __msg("R1 is bpf_array invalid negative access: off=-8") +__failure_unpriv +__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN") +__naked void read_with_negative_offset_rejected(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 = %[map_array_48b] ll; \ + r6 = *(u64*)(r1 - 8); \ + r0 = 1; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("bpf_map_ptr: write rejected") +__failure __msg("only read from bpf_array is supported") +__failure_unpriv +__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN") +__naked void bpf_map_ptr_write_rejected(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + *(u64*)(r1 + 0) = r2; \ + r0 = 1; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("bpf_map_ptr: read non-existent field rejected") +__failure +__msg("cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4") +__failure_unpriv +__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void read_non_existent_field_rejected(void) +{ + asm volatile (" \ + r6 = 0; \ + r1 = %[map_array_48b] ll; \ + r6 = *(u32*)(r1 + 1); \ + 
r0 = 1; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("bpf_map_ptr: read ops field accepted") +__success __failure_unpriv +__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN") +__retval(1) +__naked void ptr_read_ops_field_accepted(void) +{ + asm volatile (" \ + r6 = 0; \ + r1 = %[map_array_48b] ll; \ + r6 = *(u64*)(r1 + 0); \ + r0 = 1; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("bpf_map_ptr: r = 0, map_ptr = map_ptr + r") +__success __failure_unpriv +__msg_unpriv("R1 has pointer with unsupported alu operation") +__retval(0) +__naked void map_ptr_map_ptr_r(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = r10; \ + r2 += -8; \ + r0 = 0; \ + r1 = %[map_hash_16b] ll; \ + r1 += r0; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("socket") +__description("bpf_map_ptr: r = 0, r = r + map_ptr") +__success __failure_unpriv +__msg_unpriv("R0 has pointer with unsupported alu operation") +__retval(0) +__naked void _0_r_r_map_ptr(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + r0 = %[map_hash_16b] ll; \ + r1 += r0; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c b/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c new file mode 100644 index 000000000000..c5a7c1ddc562 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/map_ptr_mixing.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + __array(values, struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + }); +} map_in_map SEC(".maps"); + +void dummy_prog_42_socket(void); +void dummy_prog_24_socket(void); +void dummy_prog_loop1_socket(void); +void dummy_prog_loop2_socket(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_socket SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_socket, + [1] = (void *)&dummy_prog_loop1_socket, + [2] = (void *)&dummy_prog_24_socket, + }, +}; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 8); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog2_socket SEC(".maps") = { + .values = { + [1] = (void *)&dummy_prog_loop2_socket, + [2] = (void *)&dummy_prog_24_socket, + [7] = (void *)&dummy_prog_42_socket, + }, +}; + +SEC("socket") +__auxiliary 
__auxiliary_unpriv +__naked void dummy_prog_42_socket(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_24_socket(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop1_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop2_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog2_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog2_socket) + : __clobber_all); +} + +SEC("tc") +__description("calls: two calls returning different map pointers for lookup (hash, array)") +__success __retval(1) +__naked void pointers_for_lookup_hash_array(void) +{ + asm volatile (" \ + /* main prog */ \ + if r1 != 0 goto l0_%=; \ + call pointers_for_lookup_hash_array__1; \ + goto l1_%=; \ +l0_%=: call pointers_for_lookup_hash_array__2; \ +l1_%=: r1 = r0; \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r2 = r10; \ + r2 += -8; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ + r0 = 1; \ +l2_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void pointers_for_lookup_hash_array__1(void) +{ + asm volatile (" \ + r0 = %[map_hash_48b] ll; \ + exit; \ +" : + : __imm_addr(map_hash_48b) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void pointers_for_lookup_hash_array__2(void) +{ + asm volatile (" \ + r0 = %[map_array_48b] ll; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("tc") +__description("calls: two calls returning different map pointers for lookup (hash, map in map)") +__failure __msg("only read from bpf_array is supported") +__naked void lookup_hash_map_in_map(void) +{ + asm volatile (" \ + /* main prog */ \ + if r1 != 0 goto l0_%=; \ + call lookup_hash_map_in_map__1; \ + goto l1_%=; \ +l0_%=: call lookup_hash_map_in_map__2; \ +l1_%=: r1 = r0; \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r2 = r10; \ + r2 += -8; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ + r0 = 1; \ +l2_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void lookup_hash_map_in_map__1(void) +{ + asm volatile (" \ + r0 = %[map_array_48b] ll; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void lookup_hash_map_in_map__2(void) +{ + asm volatile (" \ + r0 = %[map_in_map] ll; \ + exit; \ +" : + : __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") +__description("cond: two branches returning different map pointers for lookup (tail, tail)") +__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") +__retval(42) +__naked void pointers_for_lookup_tail_tail_1(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + if r6 != 0 goto l0_%=; \ + r2 = %[map_prog2_socket] ll; \ + goto l1_%=; \ +l0_%=: r2 = %[map_prog1_socket] ll; \ +l1_%=: r3 = 7; \ + call 
%[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("cond: two branches returning same map pointers for lookup (tail, tail)") +__success __success_unpriv __retval(42) +__naked void pointers_for_lookup_tail_tail_2(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + if r6 == 0 goto l0_%=; \ + r2 = %[map_prog2_socket] ll; \ + goto l1_%=; \ +l0_%=: r2 = %[map_prog2_socket] ll; \ +l1_%=: r3 = 7; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_map_ret_val.c b/tools/testing/selftests/bpf/progs/verifier_map_ret_val.c new file mode 100644 index 000000000000..1639628b832d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_map_ret_val.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/map_ret_val.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("invalid map_fd for function call") +__failure __msg("fd 0 is not pointing to valid bpf_map") +__failure_unpriv +__naked void map_fd_for_function_call(void) +{ + asm volatile (" \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r2 = r10; \ + r2 += -8; \ + .8byte %[ld_map_fd]; \ + .8byte 0; \ + call %[bpf_map_delete_elem]; \ + exit; \ +" : + : __imm(bpf_map_delete_elem), + __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 0)) + : __clobber_all); +} + +SEC("socket") +__description("don't check return value before access") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +__failure_unpriv +__naked void check_return_value_before_access(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r1 = 0; \ + *(u64*)(r0 + 0) = r1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("access memory with incorrect alignment") +__failure __msg("misaligned value access") +__failure_unpriv +__flag(BPF_F_STRICT_ALIGNMENT) +__naked void access_memory_with_incorrect_alignment_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r0 + 4) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("sometimes access memory with incorrect alignment") +__failure __msg("R0 invalid mem access") +__msg_unpriv("R0 leaks addr") +__flag(BPF_F_STRICT_ALIGNMENT) +__naked void access_memory_with_incorrect_alignment_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r0 + 0) = r1; 
\ + exit; \ +l0_%=: r1 = 1; \ + *(u64*)(r0 + 0) = r1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_masking.c b/tools/testing/selftests/bpf/progs/verifier_masking.c new file mode 100644 index 000000000000..5732cc1b4c47 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_masking.c @@ -0,0 +1,410 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/masking.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("socket") +__description("masking, test out of bounds 1") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_1(void) +{ + asm volatile (" \ + w1 = 5; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 5 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 2") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_2(void) +{ + asm volatile (" \ + w1 = 1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 3") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_3(void) +{ + asm volatile (" \ + w1 = 0xffffffff; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 4") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_4(void) +{ + asm volatile (" \ + w1 = 0xffffffff; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 5") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_5(void) +{ + asm volatile (" \ + w1 = -1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 6") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_6(void) +{ + asm volatile (" \ + w1 = -1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 7") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_7(void) +{ + asm volatile (" \ + r1 = 5; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 5 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 8") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_8(void) +{ + asm volatile (" \ + r1 = 1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + 
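+/* A note on the idiom repeated throughout this file (an editorial
+ * sketch of the arithmetic, not part of the original tests): for a
+ * value v and an exclusive bound b, the sequence
+ *
+ *	r2 = b - 1; r2 -= v; r2 |= v; r2 = -r2; r2 s>>= 63; v &= r2;
+ *
+ * leaves v unchanged when 0 <= v <= b - 1 and forces it to 0 otherwise:
+ * bit 63 of ((b - 1 - v) | v) is set exactly when v falls outside that
+ * range, and negating followed by the arithmetic shift turns this into
+ * a zero mask for out-of-range values and an all-ones mask for in-range
+ * ones (the degenerate v == 0 case yields a zero mask, which is
+ * harmless). Hence the "out of bounds" tests expect a result of 0 and
+ * the "in bounds" tests expect v itself.
+ */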
+SEC("socket") +__description("masking, test out of bounds 9") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_9(void) +{ + asm volatile (" \ + r1 = 0xffffffff; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 10") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_10(void) +{ + asm volatile (" \ + r1 = 0xffffffff; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 11") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_11(void) +{ + asm volatile (" \ + r1 = -1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 12") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_12(void) +{ + asm volatile (" \ + r1 = -1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 1") +__success __success_unpriv __retval(4) +__naked void masking_test_in_bounds_1(void) +{ + asm volatile (" \ + w1 = 4; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 5 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 2") +__success __success_unpriv __retval(0) +__naked void masking_test_in_bounds_2(void) +{ + asm volatile (" \ + w1 = 0; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 3") +__success __success_unpriv __retval(0xfffffffe) +__naked void masking_test_in_bounds_3(void) +{ + asm volatile (" \ + w1 = 0xfffffffe; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 4") +__success __success_unpriv __retval(0xabcde) +__naked void masking_test_in_bounds_4(void) +{ + asm volatile (" \ + w1 = 0xabcde; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xabcdef - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 5") +__success __success_unpriv __retval(0) +__naked void masking_test_in_bounds_5(void) +{ + asm volatile (" \ + w1 = 0; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 6") +__success __success_unpriv __retval(46) +__naked void masking_test_in_bounds_6(void) +{ + asm volatile (" \ + w1 = 46; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; 
\ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 47 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 7") +__success __success_unpriv __retval(46) +__naked void masking_test_in_bounds_7(void) +{ + asm volatile (" \ + r3 = -46; \ + r3 *= -1; \ + w2 = %[__imm_0]; \ + r2 -= r3; \ + r2 |= r3; \ + r2 = -r2; \ + r2 s>>= 63; \ + r3 &= r2; \ + r0 = r3; \ + exit; \ +" : + : __imm_const(__imm_0, 47 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 8") +__success __success_unpriv __retval(0) +__naked void masking_test_in_bounds_8(void) +{ + asm volatile (" \ + r3 = -47; \ + r3 *= -1; \ + w2 = %[__imm_0]; \ + r2 -= r3; \ + r2 |= r3; \ + r2 = -r2; \ + r2 s>>= 63; \ + r3 &= r2; \ + r0 = r3; \ + exit; \ +" : + : __imm_const(__imm_0, 47 - 1) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_meta_access.c b/tools/testing/selftests/bpf/progs/verifier_meta_access.c new file mode 100644 index 000000000000..d81722fb5f19 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_meta_access.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/meta_access.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("xdp") +__description("meta access, test1") +__success __retval(0) +__naked void meta_access_test1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test2") +__failure __msg("invalid access to packet, off=-8") +__naked void meta_access_test2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r2; \ + r0 -= 8; \ + r4 = r2; \ + r4 += 8; \ + if r4 > r3 goto l0_%=; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test3") +__failure __msg("invalid access to packet") +__naked void meta_access_test3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test4") +__failure __msg("invalid access to packet") +__naked void meta_access_test4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r4 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r4; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") 
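+/*
+ * The tests below all turn on one rule: the metadata area created by
+ * bpf_xdp_adjust_meta() sits immediately in front of the packet, so a
+ * load through data_meta is provably safe only after bounding it
+ * against data -- never against data_end, and never across a helper
+ * call that may move the buffers. A C-level sketch of the accepted
+ * shape (variable names illustrative, not from this file):
+ *
+ *	void *meta = (void *)(long)ctx->data_meta;
+ *	void *data = (void *)(long)ctx->data;
+ *
+ *	if (meta + 8 > data)	// bound meta against data
+ *		return XDP_PASS;
+ *	v = *(__u8 *)meta;	// now provably in bounds
+ */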
+__description("meta access, test5") +__failure __msg("R3 !read_ok") +__naked void meta_access_test5(void) +{ + asm volatile (" \ + r3 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r4 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r3; \ + r0 += 8; \ + if r0 > r4 goto l0_%=; \ + r2 = -8; \ + call %[bpf_xdp_adjust_meta]; \ + r0 = *(u8*)(r3 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_xdp_adjust_meta), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test6") +__failure __msg("invalid access to packet") +__naked void meta_access_test6(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r3; \ + r0 += 8; \ + r4 = r2; \ + r4 += 8; \ + if r4 > r0 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test7") +__success __retval(0) +__naked void meta_access_test7(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r3; \ + r0 += 8; \ + r4 = r2; \ + r4 += 8; \ + if r4 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test8") +__success __retval(0) +__naked void meta_access_test8(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r4 = r2; \ + r4 += 0xFFFF; \ + if r4 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test9") +__failure __msg("invalid access to packet") +__naked void meta_access_test9(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r4 = r2; \ + r4 += 0xFFFF; \ + r4 += 1; \ + if r4 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test10") +__failure __msg("invalid access to packet") +__naked void meta_access_test10(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r4 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r5 = 42; \ + r6 = 24; \ + *(u64*)(r10 - 8) = r5; \ + lock *(u64 *)(r10 - 8) += r6; \ + r5 = *(u64*)(r10 - 8); \ + if r5 > 100 goto l0_%=; \ + r3 += r5; \ + r5 = r3; \ + r6 = r2; \ + r6 += 8; \ + if r6 > r5 goto l0_%=; \ + r2 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test11") +__success __retval(0) +__naked void meta_access_test11(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + 
%[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r5 = 42; \ + r6 = 24; \ + *(u64*)(r10 - 8) = r5; \ + lock *(u64 *)(r10 - 8) += r6; \ + r5 = *(u64*)(r10 - 8); \ + if r5 > 100 goto l0_%=; \ + r2 += r5; \ + r5 = r2; \ + r6 = r2; \ + r6 += 8; \ + if r6 > r3 goto l0_%=; \ + r5 = *(u8*)(r5 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test12") +__success __retval(0) +__naked void meta_access_test12(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r4 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r5 = r3; \ + r5 += 16; \ + if r5 > r4 goto l0_%=; \ + r0 = *(u8*)(r3 + 0); \ + r5 = r2; \ + r5 += 16; \ + if r5 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c new file mode 100644 index 000000000000..65bba330e7e5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" + +#include "bpf_misc.h" + +#include <bpf/bpf_endian.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +SEC("netfilter") +__description("netfilter invalid context access, size too short") +__failure __msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test1(void) +{ + asm volatile (" \ + r2 = *(u8*)(r1 + %[__bpf_nf_ctx_state]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_state, offsetof(struct bpf_nf_ctx, state)) + : __clobber_all); +} + +SEC("netfilter") +__description("netfilter invalid context access, size too short") +__failure __msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test2(void) +{ + asm volatile (" \ + r2 = *(u16*)(r1 + %[__bpf_nf_ctx_skb]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_skb, offsetof(struct bpf_nf_ctx, skb)) + : __clobber_all); +} + +SEC("netfilter") +__description("netfilter invalid context access, past end of ctx") +__failure __msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test3(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + %[__bpf_nf_ctx_size]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_size, sizeof(struct bpf_nf_ctx)) + : __clobber_all); +} + +SEC("netfilter") +__description("netfilter invalid context, write") +__failure __msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test4(void) +{ + asm volatile (" \ + r2 = r1; \ + *(u64*)(r2 + 0) = r1; \ + r0 = 1; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_skb, offsetof(struct bpf_nf_ctx, skb)) + : __clobber_all); +} + +#define NF_DROP 0 +#define NF_ACCEPT 1 + +SEC("netfilter") +__description("netfilter valid context read and invalid write") +__failure __msg("only read is supported") +int with_invalid_ctx_access_test5(struct bpf_nf_ctx *ctx) +{ + struct nf_hook_state *state = (void *)ctx->state; + + state->sk = NULL; + return NF_ACCEPT; +} + +extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags, + struct bpf_dynptr 
*ptr__uninit) __ksym; +extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset, + void *buffer, uint32_t buffer__sz) __ksym; + +SEC("netfilter") +__description("netfilter test prog with skb and state read access") +__success __failure_unpriv +__retval(0) +int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx) +{ + const struct nf_hook_state *state = ctx->state; + struct sk_buff *skb = ctx->skb; + const struct iphdr *iph; + const struct tcphdr *th; + u8 buffer_iph[20] = {}; + u8 buffer_th[40] = {}; + struct bpf_dynptr ptr; + uint8_t ihl; + + if (skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr)) + return NF_ACCEPT; + + iph = bpf_dynptr_slice(&ptr, 0, buffer_iph, sizeof(buffer_iph)); + if (!iph) + return NF_ACCEPT; + + if (state->pf != 2) + return NF_ACCEPT; + + ihl = iph->ihl << 2; + + th = bpf_dynptr_slice(&ptr, ihl, buffer_th, sizeof(buffer_th)); + if (!th) + return NF_ACCEPT; + + return th->dest == bpf_htons(22) ? NF_ACCEPT : NF_DROP; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c new file mode 100644 index 000000000000..353ae6da00e1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("netfilter") +__description("bpf_exit with invalid return code. test1") +__failure __msg("R0 is not a known value") +__naked void with_invalid_return_code_test1(void) +{ + asm volatile (" \ + r0 = *(u64*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("netfilter") +__description("bpf_exit with valid return code. test2") +__success +__naked void with_valid_return_code_test2(void) +{ + asm volatile (" \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("netfilter") +__description("bpf_exit with valid return code. test3") +__success +__naked void with_valid_return_code_test3(void) +{ + asm volatile (" \ + r0 = 1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("netfilter") +__description("bpf_exit with invalid return code. 
test4") +__failure __msg("R0 has value (0x2; 0x0)") +__naked void with_invalid_return_code_test4(void) +{ + asm volatile (" \ + r0 = 2; \ + exit; \ +" ::: __clobber_all); +} diff --git a/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c b/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c new file mode 100644 index 000000000000..8d27c780996f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/prevent_map_lookup.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_STACK_TRACE); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} map_stacktrace SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 8); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog2_socket SEC(".maps"); + +SEC("perf_event") +__description("prevent map lookup in stack trace") +__failure __msg("cannot pass map_type 7 into func bpf_map_lookup_elem") +__naked void map_lookup_in_stack_trace(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_stacktrace] ll; \ + call %[bpf_map_lookup_elem]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_stacktrace) + : __clobber_all); +} + +SEC("socket") +__description("prevent map lookup in prog array") +__failure __msg("cannot pass map_type 3 into func bpf_map_lookup_elem") +__failure_unpriv +__naked void map_lookup_in_prog_array(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_prog2_socket] ll; \ + call %[bpf_map_lookup_elem]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_prog2_socket) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c new file mode 100644 index 000000000000..efbfc3a4ad6a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/raw_stack.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("tc") +__description("raw_stack: no skb_load_bytes") +__failure __msg("invalid read from stack R6 off=-8 size=8") +__naked void stack_no_skb_load_bytes(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = r6; \ + r4 = 8; \ + /* Call to skb_load_bytes() omitted. 
*/ \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, negative len") +__failure __msg("R4 min value is negative") +__naked void skb_load_bytes_negative_len(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = r6; \ + r4 = -8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, negative len 2") +__failure __msg("R4 min value is negative") +__naked void load_bytes_negative_len_2(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = r6; \ + r4 = %[__imm_0]; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__imm_0, ~0) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, zero len") +__failure __msg("invalid zero-sized read") +__naked void skb_load_bytes_zero_len(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = r6; \ + r4 = 0; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, no init") +__success __retval(0) +__naked void skb_load_bytes_no_init(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, init") +__success __retval(0) +__naked void stack_skb_load_bytes_init(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = 0xcafe; \ + *(u64*)(r6 + 0) = r3; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, spilled regs around bounds") +__success __retval(0) +__naked void bytes_spilled_regs_around_bounds(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -16; \ + *(u64*)(r6 - 8) = r1; \ + *(u64*)(r6 + 8) = r1; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 - 8); \ + r2 = *(u64*)(r6 + 8); \ + r0 = *(u32*)(r0 + %[__sk_buff_mark]); \ + r2 = *(u32*)(r2 + %[__sk_buff_priority]); \ + r0 += r2; \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, spilled regs corruption") +__failure __msg("R0 invalid mem access 'scalar'") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void load_bytes_spilled_regs_corruption(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + r0 = *(u32*)(r0 + %[__sk_buff_mark]); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, spilled regs corruption 2") +__failure __msg("R3 invalid mem access 'scalar'") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void bytes_spilled_regs_corruption_2(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -16; \ + *(u64*)(r6 - 8) = r1; \ + *(u64*)(r6 + 0) = r1; \ 
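+	/* the call below writes [r6 + 0, r6 + 8), clobbering this spill; the reload then yields a scalar, not a pointer */ \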
+ *(u64*)(r6 + 8) = r1; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 - 8); \ + r2 = *(u64*)(r6 + 8); \ + r3 = *(u64*)(r6 + 0); \ + r0 = *(u32*)(r0 + %[__sk_buff_mark]); \ + r2 = *(u32*)(r2 + %[__sk_buff_priority]); \ + r0 += r2; \ + r3 = *(u32*)(r3 + %[__sk_buff_pkt_type]); \ + r0 += r3; \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)), + __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, spilled regs + data") +__success __retval(0) +__naked void load_bytes_spilled_regs_data(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -16; \ + *(u64*)(r6 - 8) = r1; \ + *(u64*)(r6 + 0) = r1; \ + *(u64*)(r6 + 8) = r1; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 - 8); \ + r2 = *(u64*)(r6 + 8); \ + r3 = *(u64*)(r6 + 0); \ + r0 = *(u32*)(r0 + %[__sk_buff_mark]); \ + r2 = *(u32*)(r2 + %[__sk_buff_priority]); \ + r0 += r2; \ + r0 += r3; \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 1") +__failure __msg("invalid indirect access to stack R3 off=-513 size=8") +__naked void load_bytes_invalid_access_1(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -513; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 2") +__failure __msg("invalid indirect access to stack R3 off=-1 size=8") +__naked void load_bytes_invalid_access_2(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -1; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 3") +__failure __msg("R4 min value is negative") +__naked void load_bytes_invalid_access_3(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += 0xffffffff; \ + r3 = r6; \ + r4 = 0xffffffff; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 4") +__failure +__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'") +__naked void load_bytes_invalid_access_4(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -1; \ + r3 = r6; \ + r4 = 0x7fffffff; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 5") +__failure +__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'") +__naked void load_bytes_invalid_access_5(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -512; \ + r3 = r6; \ + r4 = 0x7fffffff; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 6") +__failure __msg("invalid zero-sized 
read") +__naked void load_bytes_invalid_access_6(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -512; \ + r3 = r6; \ + r4 = 0; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, large access") +__success __retval(0) +__naked void skb_load_bytes_large_access(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -512; \ + r3 = r6; \ + r4 = 512; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c b/tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c new file mode 100644 index 000000000000..14a0172e2141 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/raw_tp_writable.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("raw_tracepoint.w") +__description("raw_tracepoint_writable: reject variable offset") +__failure +__msg("R6 invalid variable buffer offset: off=0, var_off=(0x0; 0xffffffff)") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void tracepoint_writable_reject_variable_offset(void) +{ + asm volatile (" \ + /* r6 is our tp buffer */ \ + r6 = *(u64*)(r1 + 0); \ + r1 = %[map_hash_8b] ll; \ + /* move the key (== 0) to r10-8 */ \ + w0 = 0; \ + r2 = r10; \ + r2 += -8; \ + *(u64*)(r2 + 0) = r0; \ + /* lookup in the map */ \ + call %[bpf_map_lookup_elem]; \ + /* exit clean if null */ \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: /* shift the buffer pointer to a variable location */\ + r0 = *(u32*)(r0 + 0); \ + r6 += r0; \ + /* clobber whatever's there */ \ + r7 = 4242; \ + *(u64*)(r6 + 0) = r7; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c new file mode 100644 index 000000000000..c4c6da21265e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c @@ -0,0 +1,1495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ref_tracking.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +#define BPF_SK_LOOKUP(func) \ + /* struct bpf_sock_tuple tuple = {} */ \ + "r2 = 0;" \ + "*(u32*)(r10 - 8) = r2;" \ + "*(u64*)(r10 - 16) = r2;" \ + "*(u64*)(r10 - 24) = r2;" \ + "*(u64*)(r10 - 32) = r2;" \ + "*(u64*)(r10 - 40) = r2;" \ + "*(u64*)(r10 - 48) = r2;" \ + /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \ + "r2 = r10;" \ + "r2 += -48;" \ + "r3 = %[sizeof_bpf_sock_tuple];"\ + "r4 = 0;" \ + "r5 = 0;" \ + "call %[" #func "];" + +struct bpf_key {} __attribute__((preserve_access_index)); + +extern void bpf_key_put(struct bpf_key *key) __ksym; +extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; +extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; + +/* BTF FUNC records are not generated for kfuncs referenced + * from inline 
assembly. These records are necessary for + * libbpf to link the program. The function below is a hack + * to ensure that BTF FUNC records are generated. + */ +void __kfunc_btf_root(void) +{ + bpf_key_put(0); + bpf_lookup_system_key(0); + bpf_lookup_user_key(0, 0); +} + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); +} map_ringbuf SEC(".maps"); + +void dummy_prog_42_tc(void); +void dummy_prog_24_tc(void); +void dummy_prog_loop1_tc(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_tc SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_tc, + [1] = (void *)&dummy_prog_loop1_tc, + [2] = (void *)&dummy_prog_24_tc, + }, +}; + +SEC("tc") +__auxiliary +__naked void dummy_prog_42_tc(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("tc") +__auxiliary +__naked void dummy_prog_24_tc(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("tc") +__auxiliary +__naked void dummy_prog_loop1_tc(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_tc] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_tc) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference") +__failure __msg("Unreleased reference") +__naked void reference_tracking_leak_potential_reference(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference to sock_common") +__failure __msg("Unreleased reference") +__naked void potential_reference_to_sock_common_1(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" r6 = r0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference on stack") +__failure __msg("Unreleased reference") +__naked void leak_potential_reference_on_stack(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r4 = r10; \ + r4 += -8; \ + *(u64*)(r4 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference on stack 2") +__failure __msg("Unreleased reference") +__naked void potential_reference_on_stack_2(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r4 = r10; \ + r4 += -8; \ + *(u64*)(r4 + 0) = r0; \ + r0 = 0; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: zero potential reference") +__failure __msg("Unreleased reference") +__naked void reference_tracking_zero_potential_reference(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r0 = 0; /* leak reference */ \ + exit; \ +" : + : 
__imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: zero potential reference to sock_common") +__failure __msg("Unreleased reference") +__naked void potential_reference_to_sock_common_2(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" r0 = 0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: copy and zero potential references") +__failure __msg("Unreleased reference") +__naked void copy_and_zero_potential_references(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r7 = r0; \ + r0 = 0; \ + r7 = 0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: acquire/release user key reference") +__success +__naked void acquire_release_user_key_reference(void) +{ + asm volatile (" \ + r1 = -3; \ + r2 = 0; \ + call %[bpf_lookup_user_key]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_key_put]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_user_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: acquire/release system key reference") +__success +__naked void acquire_release_system_key_reference(void) +{ + asm volatile (" \ + r1 = 1; \ + call %[bpf_lookup_system_key]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_key_put]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_system_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: release user key reference without check") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +__naked void user_key_reference_without_check(void) +{ + asm volatile (" \ + r1 = -3; \ + r2 = 0; \ + call %[bpf_lookup_user_key]; \ + r1 = r0; \ + call %[bpf_key_put]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_user_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: release system key reference without check") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +__naked void system_key_reference_without_check(void) +{ + asm volatile (" \ + r1 = 1; \ + call %[bpf_lookup_system_key]; \ + r1 = r0; \ + call %[bpf_key_put]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_system_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: release with NULL key pointer") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +__naked void release_with_null_key_pointer(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_key_put]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: leak potential reference to user key") +__failure __msg("Unreleased reference") +__naked void potential_reference_to_user_key(void) +{ + asm volatile (" \ + r1 = -3; \ + r2 = 0; \ + call %[bpf_lookup_user_key]; \ + exit; \ +" : + : __imm(bpf_lookup_user_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: leak potential reference to system key") +__failure __msg("Unreleased reference") +__naked void potential_reference_to_system_key(void) +{ + asm volatile (" \ + r1 
= 1; \ + call %[bpf_lookup_system_key]; \ + exit; \ +" : + : __imm(bpf_lookup_system_key) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference without check") +__failure __msg("type=sock_or_null expected=sock") +__naked void tracking_release_reference_without_check(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* reference in r0 may be NULL */ \ + r1 = r0; \ + r2 = 0; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference to sock_common without check") +__failure __msg("type=sock_common_or_null expected=sock") +__naked void to_sock_common_without_check(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" /* reference in r0 may be NULL */ \ + r1 = r0; \ + r2 = 0; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_release), + __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference") +__success __retval(0) +__naked void reference_tracking_release_reference(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference to sock_common") +__success __retval(0) +__naked void release_reference_to_sock_common(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release), + __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference 2") +__success __retval(0) +__naked void reference_tracking_release_reference_2(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference twice") +__failure __msg("type=scalar expected=sock") +__naked void reference_tracking_release_reference_twice(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + r6 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference twice inside branch") +__failure __msg("type=scalar expected=sock") +__naked void release_reference_twice_inside_branch(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + r6 = r0; \ + if r0 == 0 goto l0_%=; /* goto end */ \ + call %[bpf_sk_release]; \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : 
__clobber_all); +} + +SEC("tc") +__description("reference tracking: alloc, check, free in one subbranch") +__failure __msg("Unreleased reference") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void check_free_in_one_subbranch(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 16; \ + /* if (offsetof(skb, mark) > data_len) exit; */ \ + if r0 <= r3 goto l0_%=; \ + exit; \ +l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r6 == 0 goto l1_%=; /* mark == 0? */\ + /* Leak reference in R0 */ \ + exit; \ +l1_%=: if r0 == 0 goto l2_%=; /* sk NULL? */ \ + r1 = r0; \ + call %[bpf_sk_release]; \ +l2_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: alloc, check, free in both subbranches") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void check_free_in_both_subbranches(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 16; \ + /* if (offsetof(skb, mark) > data_len) exit; */ \ + if r0 <= r3 goto l0_%=; \ + exit; \ +l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r6 == 0 goto l1_%=; /* mark == 0? */\ + if r0 == 0 goto l2_%=; /* sk NULL? */ \ + r1 = r0; \ + call %[bpf_sk_release]; \ +l2_%=: exit; \ +l1_%=: if r0 == 0 goto l3_%=; /* sk NULL? */ \ + r1 = r0; \ + call %[bpf_sk_release]; \ +l3_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: free reference in subprog") +__success __retval(0) +__naked void call_free_reference_in_subprog(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; /* unchecked reference */ \ + call call_free_reference_in_subprog__1; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void call_free_reference_in_subprog__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r2 = r1; \ + if r2 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: free reference in subprog and outside") +__failure __msg("type=scalar expected=sock") +__naked void reference_in_subprog_and_outside(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; /* unchecked reference */ \ + r6 = r0; \ + call reference_in_subprog_and_outside__1; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void 
reference_in_subprog_and_outside__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r2 = r1; \ + if r2 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: alloc & leak reference in subprog") +__failure __msg("Unreleased reference") +__naked void alloc_leak_reference_in_subprog(void) +{ + asm volatile (" \ + r4 = r10; \ + r4 += -8; \ + call alloc_leak_reference_in_subprog__1; \ + r1 = r0; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void alloc_leak_reference_in_subprog__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r6 = r4; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* spill unchecked sk_ptr into stack of caller */\ + *(u64*)(r6 + 0) = r0; \ + r1 = r0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: alloc in subprog, release outside") +__success __retval(POINTER_VALUE) +__naked void alloc_in_subprog_release_outside(void) +{ + asm volatile (" \ + r4 = r10; \ + call alloc_in_subprog_release_outside__1; \ + r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void alloc_in_subprog_release_outside__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" exit; /* return sk */ \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: sk_ptr leak into caller stack") +__failure __msg("Unreleased reference") +__naked void ptr_leak_into_caller_stack(void) +{ + asm volatile (" \ + r4 = r10; \ + r4 += -8; \ + call ptr_leak_into_caller_stack__1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_leak_into_caller_stack__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r5 = r10; \ + r5 += -8; \ + *(u64*)(r5 + 0) = r4; \ + call ptr_leak_into_caller_stack__2; \ + /* spill unchecked sk_ptr into stack of caller */\ + r5 = r10; \ + r5 += -8; \ + r4 = *(u64*)(r5 + 0); \ + *(u64*)(r4 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_leak_into_caller_stack__2(void) +{ + asm volatile (" \ + /* subprog 2 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: sk_ptr spill into caller stack") +__success __retval(0) +__naked void ptr_spill_into_caller_stack(void) +{ + asm volatile (" \ + r4 = r10; \ + r4 += -8; \ + call ptr_spill_into_caller_stack__1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_spill_into_caller_stack__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r5 = r10; \ + r5 += -8; \ + *(u64*)(r5 + 0) = r4; \ + call ptr_spill_into_caller_stack__2; \ + /* spill unchecked sk_ptr into stack of caller */\ + r5 = r10; \ + r5 += -8; \ + r4 = *(u64*)(r5 + 0); \ + *(u64*)(r4 + 0) = r0; \ + if r0 == 0 goto l0_%=; \ + /* now the sk_ptr is verified, free the reference */\ + r1 = *(u64*)(r4 + 0); \ + call %[bpf_sk_release]; \ +l0_%=: 
exit; \ +" : + : __imm(bpf_sk_release) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_spill_into_caller_stack__2(void) +{ + asm volatile (" \ + /* subprog 2 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: allow LD_ABS") +__success __retval(0) +__naked void reference_tracking_allow_ld_abs(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r0 = *(u8*)skb[0]; \ + r0 = *(u16*)skb[0]; \ + r0 = *(u32*)skb[0]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: forbid LD_ABS while holding reference") +__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references") +__naked void ld_abs_while_holding_reference(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r0 = *(u8*)skb[0]; \ + r0 = *(u16*)skb[0]; \ + r0 = *(u32*)skb[0]; \ + r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: allow LD_IND") +__success __retval(1) +__naked void reference_tracking_allow_ld_ind(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r7 = 1; \ + .8byte %[ld_ind]; \ + r0 = r7; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)), + __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: forbid LD_IND while holding reference") +__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references") +__naked void ld_ind_while_holding_reference(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r4 = r0; \ + r7 = 1; \ + .8byte %[ld_ind]; \ + r0 = r7; \ + r1 = r4; \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)), + __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: check reference or tail call") +__success __retval(0) +__naked void check_reference_or_tail_call(void) +{ + asm volatile (" \ + r7 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* if (sk) bpf_sk_release() */ \ + r1 = r0; \ + if r1 != 0 goto l0_%=; \ + /* bpf_tail_call() */ \ + r3 = 3; \ + r2 = %[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference then tail call") +__success __retval(0) +__naked void release_reference_then_tail_call(void) +{ + asm volatile (" \ + r7 = 
r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* if (sk) bpf_sk_release() */ \ + r1 = r0; \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: /* bpf_tail_call() */ \ + r3 = 3; \ + r2 = %[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak possible reference over tail call") +__failure __msg("tail_call would lead to reference leak") +__naked void possible_reference_over_tail_call(void) +{ + asm volatile (" \ + r7 = r1; \ + /* Look up socket and store in REG_6 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* bpf_tail_call() */ \ + r6 = r0; \ + r3 = 3; \ + r2 = %[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + /* if (sk) bpf_sk_release() */ \ + r1 = r6; \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak checked reference over tail call") +__failure __msg("tail_call would lead to reference leak") +__naked void checked_reference_over_tail_call(void) +{ + asm volatile (" \ + r7 = r1; \ + /* Look up socket and store in REG_6 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + /* if (!sk) goto end */ \ + if r0 == 0 goto l0_%=; \ + /* bpf_tail_call() */ \ + r3 = 0; \ + r2 = %[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + r1 = r6; \ +l0_%=: call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: mangle and release sock_or_null") +__failure __msg("R1 pointer arithmetic on sock_or_null prohibited") +__naked void and_release_sock_or_null(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + r1 += 5; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: mangle and release sock") +__failure __msg("R1 pointer arithmetic on sock prohibited") +__naked void tracking_mangle_and_release_sock(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + r1 += 5; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: access member") +__success __retval(0) +__naked void reference_tracking_access_member(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u32*)(r0 + 4); \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: write to member") 
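+/*
+ * A PTR_TO_SOCKET is read-only. Roughly, in C (a sketch, not the
+ * original source of this test):
+ *
+ *	struct bpf_sock *sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ *	if (sk) {
+ *		sk->mark = 42;		// rejected: "cannot write into sock"
+ *		bpf_sk_release(sk);
+ *	}
+ */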
+__failure __msg("cannot write into sock") +__naked void reference_tracking_write_to_member(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + if r0 == 0 goto l0_%=; \ + r1 = r6; \ + r2 = 42 ll; \ + *(u32*)(r1 + %[bpf_sock_mark]) = r2; \ + r1 = r6; \ +l0_%=: call %[bpf_sk_release]; \ + r0 = 0 ll; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: invalid 64-bit access of member") +__failure __msg("invalid sock access off=0 size=8") +__naked void _64_bit_access_of_member(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u64*)(r0 + 0); \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: access after release") +__failure __msg("!read_ok") +__naked void reference_tracking_access_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ + r2 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: direct access for lookup") +__success __retval(0) +__naked void tracking_direct_access_for_lookup(void) +{ + asm volatile (" \ + /* Check that the packet is at least 64B long */\ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 64; \ + if r0 > r3 goto l0_%=; \ + /* sk = sk_lookup_tcp(ctx, skb->data, ...) 
*/ \ + r3 = %[sizeof_bpf_sock_tuple]; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_sk_lookup_tcp]; \ + r6 = r0; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u32*)(r0 + 4); \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_tcp_sock() after release") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void bpf_tcp_sock_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r7 + %[bpf_tcp_sock_snd_cwnd]); \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_sk_fullsock() after release") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void bpf_sk_fullsock_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_sk_fullsock(tp) after release") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void sk_fullsock_tp_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_fullsock]; \ + r1 = r6; \ + r6 = r0; \ + call %[bpf_sk_release]; \ + if r6 != 0 goto l2_%=; \ + exit; \ +l2_%=: r0 = *(u32*)(r6 + %[bpf_sock_type]); \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use sk after bpf_sk_release(tp)") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void after_bpf_sk_release_tp(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r6 + %[bpf_sock_type]); \ + exit; \ +" : + : 
__imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)") +__success __retval(0) +__naked void after_bpf_sk_release_sk(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_get_listener_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r6; \ + r6 = r0; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r6 + %[bpf_sock_src_port]); \ + exit; \ +" : + : __imm(bpf_get_listener_sock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: bpf_sk_release(listen_sk)") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_sk_release_listen_sk(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_get_listener_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r6 + %[bpf_sock_type]); \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_get_listener_sock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */ +SEC("tc") +__description("reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)") +__failure __msg("invalid mem access") +__naked void and_bpf_tcp_sock_sk(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + r1 = r6; \ + call %[bpf_tcp_sock]; \ + r8 = r0; \ + if r7 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r0 = *(u32*)(r8 + %[bpf_tcp_sock_snd_cwnd]); \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: branch tracking valid pointer null comparison") +__success __retval(0) +__naked void tracking_valid_pointer_null_comparison(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + r3 = 1; \ + if r6 != 0 goto l0_%=; \ + r3 = 0; \ +l0_%=: if r6 == 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l1_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: branch tracking valid pointer value comparison") +__failure __msg("Unreleased reference") +__naked void tracking_valid_pointer_value_comparison(void) +{ + asm volatile ( + 
BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + r3 = 1; \ + if r6 == 0 goto l0_%=; \ + r3 = 0; \ + if r6 == 1234 goto l0_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: bpf_sk_release(btf_tcp_sock)") +__success +__retval(0) +__naked void sk_release_btf_tcp_sock(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_skc_to_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_skc_to_tcp_sock), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_skc_to_tcp_sock() after release") +__failure __msg("invalid mem access") +__naked void to_tcp_sock_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_skc_to_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + r0 = *(u8*)(r7 + 0); \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_skc_to_tcp_sock), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("socket") +__description("reference tracking: try to leak released ptr reg") +__success __failure_unpriv __msg_unpriv("R8 !read_ok") +__retval(0) +__naked void to_leak_released_ptr_reg(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u32*)(r10 - 4) = r0; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r9 = r0; \ + r0 = 0; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r8 = r0; \ + r1 = r8; \ + r2 = 0; \ + call %[bpf_ringbuf_discard]; \ + r0 = 0; \ + *(u64*)(r9 + 0) = r8; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_ringbuf_discard), + __imm(bpf_ringbuf_reserve), + __imm_addr(map_array_48b), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_reg_equal.c b/tools/testing/selftests/bpf/progs/verifier_reg_equal.c new file mode 100644 index 000000000000..dc1d8c30fb0e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_reg_equal.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("socket") +__description("check w reg equal if r reg upper32 bits 0") +__success +__naked void subreg_equality_1(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64 *)(r10 - 8) = r0; \ + r2 = *(u32 *)(r10 - 8); \ + /* At this point upper 4-bytes of r2 are 0, \ + * thus insn w3 = w2 should propagate reg id, \ + * and w2 < 9 comparison would also propagate \ + * the range for r3. 
\ + */ \ + w3 = w2; \ + if w2 < 9 goto l0_%=; \ + exit; \ +l0_%=: if r3 < 9 goto l1_%=; \ + /* r1 read is illegal at this point */ \ + r0 -= r1; \ +l1_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check w reg not equal if r reg upper32 bits not 0") +__failure __msg("R1 !read_ok") +__naked void subreg_equality_2(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + r2 = r0; \ + /* Upper 4-bytes of r2 may not be 0, thus insn \ + * w3 = w2 should not propagate reg id, and \ + * w2 < 9 comparison should not propagate \ + * the range for r3 either. \ + */ \ + w3 = w2; \ + if w2 < 9 goto l0_%=; \ + exit; \ +l0_%=: if r3 < 9 goto l1_%=; \ + /* r1 read is illegal at this point */ \ + r0 -= r1; \ +l1_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_regalloc.c b/tools/testing/selftests/bpf/progs/verifier_regalloc.c new file mode 100644 index 000000000000..ee5ddea87c91 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_regalloc.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/regalloc.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("tracepoint") +__description("regalloc basic") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_basic(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 20 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc negative") +__failure __msg("invalid access to map value, value_size=48 off=48 size=1") +__naked void regalloc_negative(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 24 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u8*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc src_reg mark") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_src_reg_mark(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 20 goto l0_%=; \ + r3 = 0; \ + if r3 s>= r2 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") 
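+/* A rough C sketch of the pattern shared by the regalloc tests, assuming
+ * "val" and "idx" are hypothetical names for a struct test_val pointer
+ * from map_hash_48b and the random scalar:
+ *
+ *	long idx = (long)bpf_get_prandom_u32();
+ *	long idx2 = idx;
+ *	if (idx > 20 || idx2 < 0)
+ *		return 0;
+ *	return *(long *)((char *)val + idx + idx2);	(off <= 40, within 48)
+ *
+ * The final load is safe only if the signed bounds learned on one
+ * register also apply to its copy. The "negative" variants relax one
+ * bound just enough that the worst-case offset runs past the 48-byte
+ * map value and must be rejected.
+ */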
+__description("regalloc src_reg negative") +__failure __msg("invalid access to map value, value_size=48 off=44 size=8") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_src_reg_negative(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 22 goto l0_%=; \ + r3 = 0; \ + if r3 s>= r2 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc and spill") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_and_spill(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 20 goto l0_%=; \ + /* r0 has upper bound that should propagate into r2 */\ + *(u64*)(r10 - 8) = r2; /* spill r2 */ \ + r0 = 0; \ + r2 = 0; /* clear r0 and r2 */\ + r3 = *(u64*)(r10 - 8); /* fill r3 */ \ + if r0 s>= r3 goto l0_%=; \ + /* r3 has lower and upper bounds */ \ + r7 += r3; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc and spill negative") +__failure __msg("invalid access to map value, value_size=48 off=48 size=8") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_and_spill_negative(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 48 goto l0_%=; \ + /* r0 has upper bound that should propagate into r2 */\ + *(u64*)(r10 - 8) = r2; /* spill r2 */ \ + r0 = 0; \ + r2 = 0; /* clear r0 and r2 */\ + r3 = *(u64*)(r10 - 8); /* fill r3 */\ + if r0 s>= r3 goto l0_%=; \ + /* r3 has lower and upper bounds */ \ + r7 += r3; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc three regs") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_three_regs(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + r4 = r2; \ + if r0 s> 12 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r7 += r4; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc after call") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_after_call(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r8 = r0; \ + r9 = r0; \ + call 
regalloc_after_call__1; \ + if r8 s> 20 goto l0_%=; \ + if r9 s< 0 goto l0_%=; \ + r7 += r8; \ + r7 += r9; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void regalloc_after_call__1(void) +{ + asm volatile (" \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("regalloc in callee") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_in_callee(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r2 = r0; \ + r3 = r7; \ + call regalloc_in_callee__1; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void regalloc_in_callee__1(void) +{ + asm volatile (" \ + if r1 s> 20 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r3 += r1; \ + r3 += r2; \ + r0 = *(u64*)(r3 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("regalloc, spill, JEQ") +__success +__naked void regalloc_spill_jeq(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + *(u64*)(r10 - 8) = r0; /* spill r0 */ \ + if r0 == 0 goto l0_%=; \ +l0_%=: /* The verifier will walk the rest twice with r0 == 0 and r0 == map_value */\ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r2 == 20 goto l1_%=; \ +l1_%=: /* The verifier will walk the rest two more times with r0 == 20 and r0 == unknown */\ + r3 = *(u64*)(r10 - 8); /* fill r3 with map_value */\ + if r3 == 0 goto l2_%=; /* skip ldx if map_value == NULL */\ + /* Buggy verifier will think that r3 == 20 here */\ + r0 = *(u64*)(r3 + 0); /* read from map_value */\ +l2_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_ringbuf.c b/tools/testing/selftests/bpf/progs/verifier_ringbuf.c new file mode 100644 index 000000000000..ae1d521f326c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ringbuf.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ringbuf.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); +} map_ringbuf SEC(".maps"); + +SEC("socket") +__description("ringbuf: invalid reservation offset 1") +__failure __msg("R1 must have zero offset when passed to release func") +__failure_unpriv +__naked void ringbuf_invalid_reservation_offset_1(void) +{ + asm volatile (" \ + /* reserve 8 byte ringbuf memory */ \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + /* store a pointer to the reserved memory in R6 */\ + r6 = r0; \ + /* check whether the reservation was successful */\ + if r0 == 0 goto l0_%=; \ + /* spill R6(mem) into the stack */ \ + *(u64*)(r10 - 8) = r6; \ + /* fill it back in R7 */ \ + r7 = *(u64*)(r10 - 8); \ + /* should be able to access *(R7) = 
0 */ \ + r1 = 0; \ + *(u64*)(r7 + 0) = r1; \ + /* submit the reserved ringbuf memory */ \ + r1 = r7; \ + /* add invalid offset to reserved ringbuf memory */\ + r1 += 0xcafe; \ + r2 = 0; \ + call %[bpf_ringbuf_submit]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ringbuf_reserve), + __imm(bpf_ringbuf_submit), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("socket") +__description("ringbuf: invalid reservation offset 2") +__failure __msg("R7 min value is outside of the allowed memory range") +__failure_unpriv +__naked void ringbuf_invalid_reservation_offset_2(void) +{ + asm volatile (" \ + /* reserve 8 byte ringbuf memory */ \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + /* store a pointer to the reserved memory in R6 */\ + r6 = r0; \ + /* check whether the reservation was successful */\ + if r0 == 0 goto l0_%=; \ + /* spill R6(mem) into the stack */ \ + *(u64*)(r10 - 8) = r6; \ + /* fill it back in R7 */ \ + r7 = *(u64*)(r10 - 8); \ + /* add invalid offset to reserved ringbuf memory */\ + r7 += 0xcafe; \ + /* should be able to access *(R7) = 0 */ \ + r1 = 0; \ + *(u64*)(r7 + 0) = r1; \ + /* submit the reserved ringbuf memory */ \ + r1 = r7; \ + r2 = 0; \ + call %[bpf_ringbuf_submit]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ringbuf_reserve), + __imm(bpf_ringbuf_submit), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("xdp") +__description("ringbuf: check passing rb mem to helpers") +__success __retval(0) +__naked void passing_rb_mem_to_helpers(void) +{ + asm volatile (" \ + r6 = r1; \ + /* reserve 8 byte ringbuf memory */ \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + r7 = r0; \ + /* check whether the reservation was successful */\ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: /* pass allocated ring buffer memory to fib lookup */\ + r1 = r6; \ + r2 = r0; \ + r3 = 8; \ + r4 = 0; \ + call %[bpf_fib_lookup]; \ + /* submit the ringbuf memory */ \ + r1 = r7; \ + r2 = 0; \ + call %[bpf_ringbuf_submit]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_fib_lookup), + __imm(bpf_ringbuf_reserve), + __imm(bpf_ringbuf_submit), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c b/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c new file mode 100644 index 000000000000..27ebfc1fd9ee --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c @@ -0,0 +1,360 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/runtime_jit.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +void dummy_prog_42_socket(void); +void dummy_prog_24_socket(void); +void dummy_prog_loop1_socket(void); +void dummy_prog_loop2_socket(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_socket SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_socket, + [1] = (void *)&dummy_prog_loop1_socket, + [2] = (void *)&dummy_prog_24_socket, + }, +}; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 8); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog2_socket SEC(".maps") = { + .values = { + [1] = (void *)&dummy_prog_loop2_socket, + [2] = (void *)&dummy_prog_24_socket, + [7] = (void 
*)&dummy_prog_42_socket, + }, +}; + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_42_socket(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_24_socket(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop1_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop2_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog2_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog2_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, prog once") +__success __success_unpriv __retval(42) +__naked void call_within_bounds_prog_once(void) +{ + asm volatile (" \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, prog loop") +__success __success_unpriv __retval(41) +__naked void call_within_bounds_prog_loop(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, no prog") +__success __success_unpriv __retval(1) +__naked void call_within_bounds_no_prog(void) +{ + asm volatile (" \ + r3 = 3; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 2") +__success __success_unpriv __retval(24) +__naked void call_within_bounds_key_2(void) +{ + asm volatile (" \ + r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 2 / key 2, first branch") +__success __success_unpriv __retval(24) +__naked void _2_key_2_first_branch(void) +{ + asm volatile (" \ + r0 = 13; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 2 / key 2, second branch") +__success __success_unpriv __retval(24) +__naked void _2_key_2_second_branch(void) +{ + asm volatile (" \ + r0 = 14; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + 
__imm_addr(map_prog1_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 0 / key 2, first branch") +__success __success_unpriv __retval(24) +__naked void _0_key_2_first_branch(void) +{ + asm volatile (" \ + r0 = 13; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 0 / key 2, second branch") +__success __success_unpriv __retval(42) +__naked void _0_key_2_second_branch(void) +{ + asm volatile (" \ + r0 = 14; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, different maps, first branch") +__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") +__retval(1) +__naked void bounds_different_maps_first_branch(void) +{ + asm volatile (" \ + r0 = 13; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 0; \ + r2 = %[map_prog2_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, different maps, second branch") +__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") +__retval(42) +__naked void bounds_different_maps_second_branch(void) +{ + asm volatile (" \ + r0 = 14; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 0; \ + r2 = %[map_prog2_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call out of bounds") +__success __success_unpriv __retval(2) +__naked void tail_call_out_of_bounds(void) +{ + asm volatile (" \ + r3 = 256; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 2; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: pass negative index to tail_call") +__success __success_unpriv __retval(2) +__naked void negative_index_to_tail_call(void) +{ + asm volatile (" \ + r3 = -1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 2; \ + exit; \ +" : + : 
__imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: pass > 32bit index to tail_call") +__success __success_unpriv __retval(42) +/* Verifier rewrite for unpriv skips tail call here. */ +__retval_unpriv(2) +__naked void _32bit_index_to_tail_call(void) +{ + asm volatile (" \ + r3 = 0x100000000 ll; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 2; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_search_pruning.c b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c new file mode 100644 index 000000000000..5a14498d352f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("pointer/scalar confusion in state equality check (way 1)") +__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value") +__retval(POINTER_VALUE) +__naked void state_equality_check_way_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u64*)(r0 + 0); \ + goto l1_%=; \ +l0_%=: r0 = r10; \ +l1_%=: goto l2_%=; \ +l2_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("pointer/scalar confusion in state equality check (way 2)") +__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value") +__retval(POINTER_VALUE) +__naked void state_equality_check_way_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + r0 = r10; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r0 + 0); \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("lwt_in") +__description("liveness pruning and write screening") +__failure __msg("R0 !read_ok") +__naked void liveness_pruning_and_write_screening(void) +{ + asm volatile (" \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* branch conditions teach us nothing about R2 */\ + if r2 >= 0 goto l0_%=; \ + r0 = 0; \ +l0_%=: if r2 >= 0 goto l1_%=; \ + r0 = 0; \ +l1_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("varlen_map_value_access pruning") +__failure __msg("R0 unbounded memory access") +__failure_unpriv __msg_unpriv("R0 leaks addr") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void varlen_map_value_access_pruning(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r0 + 0); \ + w2 = %[max_entries]; \ + if 
r2 s> r1 goto l1_%=; \ + w1 = 0; \ +l1_%=: w1 <<= 2; \ + r0 += r1; \ + goto l2_%=; \ +l2_%=: r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(max_entries, MAX_ENTRIES), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("search pruning: all branches should be verified (nop operation)") +__failure __msg("R6 invalid mem access 'scalar'") +__naked void should_be_verified_nop_operation(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r3 = *(u64*)(r0 + 0); \ + if r3 == 0xbeef goto l1_%=; \ + r4 = 0; \ + goto l2_%=; \ +l1_%=: r4 = 1; \ +l2_%=: *(u64*)(r10 - 16) = r4; \ + call %[bpf_ktime_get_ns]; \ + r5 = *(u64*)(r10 - 16); \ + if r5 == 0 goto l0_%=; \ + r6 = 0; \ + r1 = 0xdead; \ + *(u64*)(r6 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("search pruning: all branches should be verified (invalid stack access)") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid read from stack off -16+0 size 8") +__retval(0) +__naked void be_verified_invalid_stack_access(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r3 = *(u64*)(r0 + 0); \ + r4 = 0; \ + if r3 == 0xbeef goto l1_%=; \ + *(u64*)(r10 - 16) = r4; \ + goto l2_%=; \ +l1_%=: *(u64*)(r10 - 24) = r4; \ +l2_%=: call %[bpf_ktime_get_ns]; \ + r5 = *(u64*)(r10 - 16); \ +l0_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("precision tracking for u32 spill/fill") +__failure __msg("R0 min value is outside of the allowed memory range") +__naked void tracking_for_u32_spill_fill(void) +{ + asm volatile (" \ + r7 = r1; \ + call %[bpf_get_prandom_u32]; \ + w6 = 32; \ + if r0 == 0 goto l0_%=; \ + w6 = 4; \ +l0_%=: /* Additional insns to introduce a pruning point. */\ + call %[bpf_get_prandom_u32]; \ + r3 = 0; \ + r3 = 0; \ + if r0 == 0 goto l1_%=; \ + r3 = 0; \ +l1_%=: /* u32 spill/fill */ \ + *(u32*)(r10 - 8) = r6; \ + r8 = *(u32*)(r10 - 8); \ + /* out-of-bound map value access for r6=32 */ \ + r1 = 0; \ + *(u64*)(r10 - 16) = r1; \ + r2 = r10; \ + r2 += -16; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r0 += r8; \ + r1 = *(u32*)(r0 + 0); \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("precision tracking for u32 spills, u64 fill") +__failure __msg("div by zero") +__naked void for_u32_spills_u64_fill(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + w7 = 0xffffffff; \ + /* Additional insns to introduce a pruning point. 
*/\ + r3 = 1; \ + r3 = 1; \ + r3 = 1; \ + r3 = 1; \ + call %[bpf_get_prandom_u32]; \ + if r0 == 0 goto l0_%=; \ + r3 = 1; \ +l0_%=: w3 /= 0; \ + /* u32 spills, u64 fill */ \ + *(u32*)(r10 - 4) = r6; \ + *(u32*)(r10 - 8) = r7; \ + r8 = *(u64*)(r10 - 8); \ + /* if r8 != X goto pc+1 r8 known in fallthrough branch */\ + if r8 != 0xffffffff goto l1_%=; \ + r3 = 1; \ +l1_%=: /* if r8 == X goto pc+1 condition always true on first\ + * traversal, so starts backtracking to mark r8 as requiring\ + * precision. r7 marked as needing precision. r6 not marked\ + * since it's not tracked. \ + */ \ + if r8 == 0xffffffff goto l2_%=; \ + /* fails if r8 correctly marked unknown after fill. */\ + w3 /= 0; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("allocated_stack") +__success __msg("processed 15 insns") +__success_unpriv __msg_unpriv("") __log_level(1) __retval(0) +__naked void allocated_stack(void) +{ + asm volatile (" \ + r6 = r1; \ + call %[bpf_get_prandom_u32]; \ + r7 = r0; \ + if r0 == 0 goto l0_%=; \ + r0 = 0; \ + *(u64*)(r10 - 8) = r6; \ + r6 = *(u64*)(r10 - 8); \ + *(u8*)(r10 - 9) = r7; \ + r7 = *(u8*)(r10 - 9); \ +l0_%=: if r0 != 0 goto l1_%=; \ +l1_%=: if r0 != 0 goto l2_%=; \ +l2_%=: if r0 != 0 goto l3_%=; \ +l3_%=: if r0 != 0 goto l4_%=; \ +l4_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* The test performs a conditional 64-bit write to a stack location + * fp[-8], this is followed by an unconditional 8-bit write to fp[-8], + * then data is read from fp[-8]. This sequence is unsafe. + * + * The test would be mistakenly marked as safe w/o dst register parent + * preservation in verifier.c:copy_register_state() function. + * + * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the + * checkpoint state after conditional 64-bit assignment. 
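+ *
+ * In rough pseudo-C, with fp standing in for the r10 frame pointer:
+ *
+ *	if (bpf_ktime_get_ns() <= r6)
+ *		*(u64 *)(fp - 8) = 0xdeadbeef;	(may be skipped)
+ *	*(u8 *)(fp - 8) = 42;			(initializes 1 of 8 bytes)
+ *	r2 = *(u64 *)(fp - 8);			(bytes at -8+1.. may be
+ *						 uninitialized)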
+ */ + +SEC("socket") +__description("write tracking and register parent chain bug") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid read from stack off -8+1 size 8") +__retval(0) __flag(BPF_F_TEST_STATE_FREQ) +__naked void and_register_parent_chain_bug(void) +{ + asm volatile (" \ + /* r6 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r6 = r0; \ + /* r0 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + /* if r0 > r6 goto +1 */ \ + if r0 > r6 goto l0_%=; \ + /* *(u64 *)(r10 - 8) = 0xdeadbeef */ \ + r0 = 0xdeadbeef; \ + *(u64*)(r10 - 8) = r0; \ +l0_%=: r1 = 42; \ + *(u8*)(r10 - 8) = r1; \ + r2 = *(u64*)(r10 - 8); \ + /* exit(0) */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c new file mode 100644 index 000000000000..ee76b51005ab --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_sock.c @@ -0,0 +1,980 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/sock.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) + +struct { + __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} map_reuseport_array SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKHASH); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_sockhash SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_sockmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_xskmap SEC(".maps"); + +struct val { + int cnt; + struct bpf_spin_lock l; +}; + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(max_entries, 0); + __type(key, int); + __type(value, struct val); + __uint(map_flags, BPF_F_NO_PREALLOC); +} sk_storage_map SEC(".maps"); + +SEC("cgroup/skb") +__description("skb->sk: no NULL check") +__failure __msg("invalid mem access 'sock_common_or_null'") +__failure_unpriv +__naked void skb_sk_no_null_check(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + r0 = *(u32*)(r1 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("skb->sk: sk->family [non fullsock field]") +__success __success_unpriv __retval(0) +__naked void sk_family_non_fullsock_field_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_family]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("skb->sk: sk->type [fullsock field]") +__failure __msg("invalid sock_common access") +__failure_unpriv +__naked void sk_sk_type_fullsock_field_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r0 = *(u32*)(r1 + 
%[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_sk_fullsock(skb->sk): no !skb->sk check") +__failure __msg("type=sock_common_or_null expected=sock_common") +__failure_unpriv +__naked void sk_no_skb_sk_check_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + call %[bpf_sk_fullsock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): no NULL check on ret") +__failure __msg("invalid mem access 'sock_or_null'") +__failure_unpriv +__naked void no_null_check_on_ret_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->type [fullsock field]") +__success __success_unpriv __retval(0) +__naked void sk_sk_type_fullsock_field_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->family [non fullsock field]") +__success __success_unpriv __retval(0) +__naked void sk_family_non_fullsock_field_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_family]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->state [narrow load]") +__success __success_unpriv __retval(0) +__naked void sk_sk_state_narrow_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_state]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)") +__success __success_unpriv __retval(0) +__naked void port_word_load_backward_compatibility(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_dst_port]); \ + r0 = 0; \ + exit; \ 
+" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [half load]") +__success __success_unpriv __retval(0) +__naked void sk_dst_port_half_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void dst_port_half_load_invalid_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u16*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [byte load]") +__success __success_unpriv __retval(0) +__naked void sk_dst_port_byte_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r2 = *(u8*)(r0 + %[bpf_sock_dst_port]); \ + r2 = *(u8*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void dst_port_byte_load_invalid(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void dst_port_half_load_invalid_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): 
sk->dst_ip6 [load 2nd byte]") +__success __success_unpriv __retval(0) +__naked void dst_ip6_load_2nd_byte(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->type [narrow load]") +__success __success_unpriv __retval(0) +__naked void sk_sk_type_narrow_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->protocol [narrow load]") +__success __success_unpriv __retval(0) +__naked void sk_sk_protocol_narrow_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_protocol]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): beyond last field") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void skb_sk_beyond_last_field_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): no !skb->sk check") +__failure __msg("type=sock_common_or_null expected=sock_common") +__failure_unpriv +__naked void sk_no_skb_sk_check_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + call %[bpf_tcp_sock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): no NULL check on ret") +__failure __msg("invalid mem access 'tcp_sock_or_null'") +__failure_unpriv +__naked void no_null_check_on_ret_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) + : __clobber_all); +} + +SEC("cgroup/skb") 
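+/* A rough C sketch of the checked-access shape the next tests expect,
+ * assuming a cgroup/skb program:
+ *
+ *	struct bpf_sock *sk = skb->sk;
+ *	if (!sk)
+ *		return 0;
+ *	struct bpf_tcp_sock *tp = bpf_tcp_sock(sk);
+ *	if (!tp)
+ *		return 0;
+ *	__u32 cwnd = tp->snd_cwnd;	(legal only after both NULL checks)
+ *	return 0;
+ */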
+__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd") +__success __success_unpriv __retval(0) +__naked void skb_sk_tp_snd_cwnd_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): tp->bytes_acked") +__success __success_unpriv __retval(0) +__naked void skb_sk_tp_bytes_acked(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): beyond last field") +__failure __msg("invalid tcp_sock access") +__failure_unpriv +__naked void skb_sk_beyond_last_field_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd") +__success __success_unpriv __retval(0) +__naked void skb_sk_tp_snd_cwnd_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l2_%=; \ + exit; \ +l2_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_sk_release(skb->sk)") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_sk_release_skb_sk(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_release), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_sk_fullsock_skb_sk(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_release), + 
__imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_sk_release(bpf_tcp_sock(skb->sk))") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_tcp_sock_skb_sk(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL") +__success __retval(0) +__naked void sk_null_0_value_null(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r4 = 0; \ + r3 = 0; \ + r2 = r0; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_sk_storage_get]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_storage_get), + __imm_addr(sk_storage_map), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("sk_storage_get(map, skb->sk, 1, 1): value == 1") +__failure __msg("R3 type=scalar expected=fp") +__naked void sk_1_1_value_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r4 = 1; \ + r3 = 1; \ + r2 = r0; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_sk_storage_get]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_storage_get), + __imm_addr(sk_storage_map), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value") +__success __retval(0) +__naked void stack_value_1_stack_value(void) +{ + asm volatile (" \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r4 = 1; \ + r3 = r10; \ + r3 += -8; \ + r2 = r0; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_sk_storage_get]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_storage_get), + __imm_addr(sk_storage_map), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_map_lookup_elem(smap, &key)") +__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem") +__naked void map_lookup_elem_smap_key(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(sk_storage_map) + : __clobber_all); +} + +SEC("xdp") +__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id") +__success __retval(0) +__naked void xskmap_key_xs_queue_id(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_xskmap] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]); \ + r0 = 0; \ + exit; \ +" : + : 
__imm(bpf_map_lookup_elem), + __imm_addr(map_xskmap), + __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id)) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockmap, &key)") +__failure __msg("Unreleased reference id=2 alloc_insn=6") +__naked void map_lookup_elem_sockmap_key(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockmap] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_sockmap) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockhash, &key)") +__failure __msg("Unreleased reference id=2 alloc_insn=6") +__naked void map_lookup_elem_sockhash_key(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockhash] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_sockhash) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)") +__success +__naked void field_bpf_sk_release_sk_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockmap] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = r0; \ + r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_sk_release), + __imm_addr(map_sockmap), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)") +__success +__naked void field_bpf_sk_release_sk_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockhash] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = r0; \ + r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_sk_release), + __imm_addr(map_sockhash), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("sk_reuseport") +__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)") +__success +__naked void ctx_reuseport_array_key_flags(void) +{ + asm volatile (" \ + r4 = 0; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r3 = r10; \ + r3 += -4; \ + r2 = %[map_reuseport_array] ll; \ + call %[bpf_sk_select_reuseport]; \ + exit; \ +" : + : __imm(bpf_sk_select_reuseport), + __imm_addr(map_reuseport_array) + : __clobber_all); +} + +SEC("sk_reuseport") +__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)") +__success +__naked void reuseport_ctx_sockmap_key_flags(void) +{ + asm volatile (" \ + r4 = 0; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r3 = r10; \ + r3 += -4; \ + r2 = %[map_sockmap] ll; \ + call %[bpf_sk_select_reuseport]; \ + exit; \ +" : + : __imm(bpf_sk_select_reuseport), + __imm_addr(map_sockmap) + : __clobber_all); +} + +SEC("sk_reuseport") +__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)") +__success +__naked void reuseport_ctx_sockhash_key_flags(void) +{ + asm volatile (" \ + r4 = 0; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r3 = r10; \ + r3 += -4; \ + r2 = %[map_sockmap] ll; \ + call %[bpf_sk_select_reuseport]; \ + exit; \ +" : + : __imm(bpf_sk_select_reuseport), + 
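The sockmap/sockhash pairs above state one rule from both sides: on these map types bpf_map_lookup_elem() returns a referenced socket, so the bare lookup leaks ("Unreleased reference") while lookup, fullsock field read, then bpf_sk_release() verifies cleanly. A minimal C sketch of the balanced form (assuming this file's map_sockmap; verdict semantics trimmed):

	__u32 key = 0;
	struct bpf_sock *sk;

	sk = bpf_map_lookup_elem(&map_sockmap, &key);
	if (!sk)
		return 0;
	/* sk is referenced here: fullsock fields such as sk->type
	 * are readable, but every path must release it.
	 */
	bpf_sk_release(sk);
	return 0;
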
__imm_addr(map_sockmap) + : __clobber_all); +} + +SEC("tc") +__description("mark null check on return value of bpf_skc_to helpers") +__failure __msg("invalid mem access") +__naked void of_bpf_skc_to_helpers(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r6 = r1; \ + call %[bpf_skc_to_tcp_sock]; \ + r7 = r0; \ + r1 = r6; \ + call %[bpf_skc_to_tcp_request_sock]; \ + r8 = r0; \ + if r8 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r7 + 0); \ + exit; \ +" : + : __imm(bpf_skc_to_tcp_request_sock), + __imm(bpf_skc_to_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c new file mode 100644 index 000000000000..136e5530b72c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c @@ -0,0 +1,374 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/spill_fill.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); +} map_ringbuf SEC(".maps"); + +SEC("socket") +__description("check valid spill/fill") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(POINTER_VALUE) +__naked void check_valid_spill_fill(void) +{ + asm volatile (" \ + /* spill R1(ctx) into stack */ \ + *(u64*)(r10 - 8) = r1; \ + /* fill it back into R2 */ \ + r2 = *(u64*)(r10 - 8); \ + /* should be able to access R0 = *(R2 + 8) */ \ + /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */\ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check valid spill/fill, skb mark") +__success __success_unpriv __retval(0) +__naked void valid_spill_fill_skb_mark(void) +{ + asm volatile (" \ + r6 = r1; \ + *(u64*)(r10 - 8) = r6; \ + r0 = *(u64*)(r10 - 8); \ + r0 = *(u32*)(r0 + %[__sk_buff_mark]); \ + exit; \ +" : + : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("check valid spill/fill, ptr to mem") +__success __success_unpriv __retval(0) +__naked void spill_fill_ptr_to_mem(void) +{ + asm volatile (" \ + /* reserve 8 byte ringbuf memory */ \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + /* store a pointer to the reserved memory in R6 */\ + r6 = r0; \ + /* check whether the reservation was successful */\ + if r0 == 0 goto l0_%=; \ + /* spill R6(mem) into the stack */ \ + *(u64*)(r10 - 8) = r6; \ + /* fill it back in R7 */ \ + r7 = *(u64*)(r10 - 8); \ + /* should be able to access *(R7) = 0 */ \ + r1 = 0; \ + *(u64*)(r7 + 0) = r1; \ + /* submit the reserved ringbuf memory */ \ + r1 = r7; \ + r2 = 0; \ + call %[bpf_ringbuf_submit]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ringbuf_reserve), + __imm(bpf_ringbuf_submit), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("socket") +__description("check with invalid reg offset 0") +__failure __msg("R0 pointer arithmetic on ringbuf_mem_or_null prohibited") +__failure_unpriv +__naked void with_invalid_reg_offset_0(void) +{ + asm volatile (" \ + /* reserve 8 byte ringbuf memory */ \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + /* store a pointer to the 
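For orientation, the reserve/check/submit sequence these spill/fill tests encode in raw asm looks like this in plain BPF C (a sketch reusing the file's 4096-byte map_ringbuf and the usual bpf_ringbuf_* helper signatures):

	__u64 *slot;

	/* The result is ringbuf_mem_or_null: no arithmetic on it
	 * and no access through it is allowed before the NULL check.
	 */
	slot = bpf_ringbuf_reserve(&map_ringbuf, sizeof(*slot), 0);
	if (!slot)
		return 0;
	*slot = 0;
	bpf_ringbuf_submit(slot, 0);
	return 0;
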
reserved memory in R6 */\ + r6 = r0; \ + /* add invalid offset to memory or NULL */ \ + r0 += 1; \ + /* check whether the reservation was successful */\ + if r0 == 0 goto l0_%=; \ + /* should not be able to access *(R7) = 0 */ \ + r1 = 0; \ + *(u32*)(r6 + 0) = r1; \ + /* submit the reserved ringbuf memory */ \ + r1 = r6; \ + r2 = 0; \ + call %[bpf_ringbuf_submit]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ringbuf_reserve), + __imm(bpf_ringbuf_submit), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("socket") +__description("check corrupted spill/fill") +__failure __msg("R0 invalid mem access 'scalar'") +__msg_unpriv("attempt to corrupt spilled") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void check_corrupted_spill_fill(void) +{ + asm volatile (" \ + /* spill R1(ctx) into stack */ \ + *(u64*)(r10 - 8) = r1; \ + /* mess up with R1 pointer on stack */ \ + r0 = 0x23; \ + *(u8*)(r10 - 7) = r0; \ + /* fill back into R0 is fine for priv. \ + * R0 now becomes SCALAR_VALUE. \ + */ \ + r0 = *(u64*)(r10 - 8); \ + /* Load from R0 should fail. */ \ + r0 = *(u64*)(r0 + 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check corrupted spill/fill, LSB") +__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled") +__retval(POINTER_VALUE) +__naked void check_corrupted_spill_fill_lsb(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r1; \ + r0 = 0xcafe; \ + *(u16*)(r10 - 8) = r0; \ + r0 = *(u64*)(r10 - 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check corrupted spill/fill, MSB") +__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled") +__retval(POINTER_VALUE) +__naked void check_corrupted_spill_fill_msb(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r1; \ + r0 = 0x12345678; \ + *(u32*)(r10 - 4) = r0; \ + r0 = *(u64*)(r10 - 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("Spill and refill a u32 const scalar. Offset to skb->data") +__success __retval(0) +__naked void scalar_offset_to_skb_data_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + w4 = 20; \ + *(u32*)(r10 - 8) = r4; \ + r4 = *(u32*)(r10 - 8); \ + r0 = r2; \ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */ \ + r0 += r4; \ + /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\ + if r0 > r3 goto l0_%=; \ + /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */\ + r0 = *(u32*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("socket") +__description("Spill a u32 const, refill from another half of the uninit u32 from the stack") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid read from stack off -4+0 size 4") +__retval(0) +__naked void uninit_u32_from_the_stack(void) +{ + asm volatile (" \ + w4 = 20; \ + *(u32*)(r10 - 8) = r4; \ + /* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/ \ + r4 = *(u32*)(r10 - 4); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("Spill a u32 const scalar. Refill as u16. 
Offset to skb->data") +__failure __msg("invalid access to packet") +__naked void u16_offset_to_skb_data(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + w4 = 20; \ + *(u32*)(r10 - 8) = r4; \ + r4 = *(u16*)(r10 - 8); \ + r0 = r2; \ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ + r0 += r4; \ + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\ + if r0 > r3 goto l0_%=; \ + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\ + r0 = *(u32*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("Spill u32 const scalars. Refill as u64. Offset to skb->data") +__failure __msg("invalid access to packet") +__naked void u64_offset_to_skb_data(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + w6 = 0; \ + w7 = 20; \ + *(u32*)(r10 - 4) = r6; \ + *(u32*)(r10 - 8) = r7; \ + r4 = *(u16*)(r10 - 8); \ + r0 = r2; \ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ + r0 += r4; \ + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\ + if r0 > r3 goto l0_%=; \ + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\ + r0 = *(u32*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data") +__failure __msg("invalid access to packet") +__naked void _6_offset_to_skb_data(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + w4 = 20; \ + *(u32*)(r10 - 8) = r4; \ + r4 = *(u16*)(r10 - 6); \ + r0 = r2; \ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ + r0 += r4; \ + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\ + if r0 > r3 goto l0_%=; \ + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\ + r0 = *(u32*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data") +__failure __msg("invalid access to packet") +__naked void addr_offset_to_skb_data(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + w4 = 20; \ + *(u32*)(r10 - 8) = r4; \ + *(u32*)(r10 - 4) = r4; \ + r4 = *(u32*)(r10 - 4); \ + r0 = r2; \ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */\ + r0 += r4; \ + /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\ + if r0 > r3 goto l0_%=; \ + /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\ + r0 = *(u32*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("Spill and refill a umax=40 bounded scalar. 
Offset to skb->data") +__success __retval(0) +__naked void scalar_offset_to_skb_data_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r4 = *(u64*)(r1 + %[__sk_buff_tstamp]); \ + if r4 <= 40 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: /* *(u32 *)(r10 -8) = r4 R4=umax=40 */ \ + *(u32*)(r10 - 8) = r4; \ + /* r4 = (*u32 *)(r10 - 8) */ \ + r4 = *(u32*)(r10 - 8); \ + /* r2 += r4 R2=pkt R4=umax=40 */ \ + r2 += r4; \ + /* r0 = r2 R2=pkt,umax=40 R4=umax=40 */ \ + r0 = r2; \ + /* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */ \ + r2 += 20; \ + /* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */\ + if r2 > r3 goto l1_%=; \ + /* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */\ + r0 = *(u32*)(r0 + 0); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp)) + : __clobber_all); +} + +SEC("tc") +__description("Spill a u32 scalar at fp-4 and then at fp-8") +__success __retval(0) +__naked void and_then_at_fp_8(void) +{ + asm volatile (" \ + w4 = 4321; \ + *(u32*)(r10 - 4) = r4; \ + *(u32*)(r10 - 8) = r4; \ + r4 = *(u64*)(r10 - 8); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c new file mode 100644 index 000000000000..9c1aa69650f8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/spin_lock.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct val { + int cnt; + struct bpf_spin_lock l; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct val); +} map_spin_lock SEC(".maps"); + +SEC("cgroup/skb") +__description("spin_lock: test1 success") +__success __failure_unpriv __msg_unpriv("") +__retval(0) +__naked void spin_lock_test1_success(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test2 direct ld/st") +__failure __msg("cannot be accessed directly") +__failure_unpriv __msg_unpriv("") +__naked void lock_test2_direct_ld_st(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r1 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test3 direct ld/st") +__failure __msg("cannot be accessed directly") 
+__failure_unpriv __msg_unpriv("") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void lock_test3_direct_ld_st(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 1); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test4 direct ld/st") +__failure __msg("cannot be accessed directly") +__failure_unpriv __msg_unpriv("") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void lock_test4_direct_ld_st(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u16*)(r6 + 3); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test5 call within a locked region") +__failure __msg("calls are not allowed") +__failure_unpriv __msg_unpriv("") +__naked void call_within_a_locked_region(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + call %[bpf_get_prandom_u32]; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test6 missing unlock") +__failure __msg("unlock is missing") +__failure_unpriv __msg_unpriv("") +__naked void spin_lock_test6_missing_unlock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + if r0 != 0 goto l1_%=; \ + call %[bpf_spin_unlock]; \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test7 unlock without lock") +__failure __msg("without taking a lock") +__failure_unpriv __msg_unpriv("") +__naked void lock_test7_unlock_without_lock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + if r1 != 0 goto l1_%=; \ + call %[bpf_spin_lock]; \ +l1_%=: r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : 
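In plain C, the discipline these tests enforce is the usual lock/touch/unlock sequence on the map value's embedded lock; the recurring "r1 += 4" in the asm is just computing &v->l. A sketch using this file's struct val and map_spin_lock:

	SEC("cgroup/skb")
	int lock_cnt(struct __sk_buff *skb)
	{
		int key = 0;
		struct val *v;

		v = bpf_map_lookup_elem(&map_spin_lock, &key);
		if (!v)
			return 0;
		bpf_spin_lock(&v->l);
		v->cnt++;		/* no helper calls, ld_abs, or	  */
					/* direct lock-word access here	  */
		bpf_spin_unlock(&v->l);
		return 0;
	}
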
__clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test8 double lock") +__failure __msg("calls are not allowed") +__failure_unpriv __msg_unpriv("") +__naked void spin_lock_test8_double_lock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test9 different lock") +__failure __msg("unlock of different lock") +__failure_unpriv __msg_unpriv("") +__naked void spin_lock_test9_different_lock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test10 lock in subprog without unlock") +__failure __msg("unlock is missing") +__failure_unpriv __msg_unpriv("") +__naked void lock_in_subprog_without_unlock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call lock_in_subprog_without_unlock__1; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void lock_in_subprog_without_unlock__1(void) +{ + asm volatile (" \ + call %[bpf_spin_lock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_spin_lock) + : __clobber_all); +} + +SEC("tc") +__description("spin_lock: test11 ld_abs under lock") +__failure __msg("inside bpf_spin_lock") +__naked void test11_ld_abs_under_lock(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r7 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r0 = *(u8*)skb[0]; \ + r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("tc") +__description("spin_lock: regsafe compare reg->id for map value") +__failure __msg("bpf_spin_unlock of different lock") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void reg_id_for_map_value(void) +{ + asm volatile (" \ + r6 = r1; \ + r6 = *(u32*)(r6 + %[__sk_buff_mark]); \ + r1 = %[map_spin_lock] ll; \ + r9 = r1; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r2 = r10; \ + r2 += 
-4; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r7 = r0; \ + r1 = r9; \ + r2 = r10; \ + r2 += -4; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r8 = r0; \ + r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + if r6 == 0 goto l2_%=; \ + goto l3_%=; \ +l2_%=: r7 = r8; \ +l3_%=: r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +/* Make sure that regsafe() compares ids for spin lock records using + * check_ids(): + * 1: r9 = map_lookup_elem(...) ; r9.id == 1 + * 2: r8 = map_lookup_elem(...) ; r8.id == 2 + * 3: r7 = ktime_get_ns() + * 4: r6 = ktime_get_ns() + * 5: if r6 > r7 goto <9> + * 6: spin_lock(r8) + * 7: r9 = r8 + * 8: goto <10> + * 9: spin_lock(r9) + * 10: spin_unlock(r9) ; r9.id == 1 || r9.id == 2 and lock is active, + * ; second visit to (10) should be considered safe + * ; if check_ids() is used. + * 11: exit(0) + */ + +SEC("cgroup/skb") +__description("spin_lock: regsafe() check_ids() similar id mappings") +__success __msg("29: safe") +__failure_unpriv __msg_unpriv("") +__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ) +__naked void check_ids_similar_id_mappings(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + /* r9 = map_lookup_elem(...) */ \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r9 = r0; \ + /* r8 = map_lookup_elem(...) */ \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l1_%=; \ + r8 = r0; \ + /* r7 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r7 = r0; \ + /* r6 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r6 = r0; \ + /* if r6 > r7 goto +5 ; no new information about the state is derived from\ + * ; this check, thus produced verifier states differ\ + * ; only in 'insn_idx' \ + * spin_lock(r8) \ + * r9 = r8 \ + * goto unlock \ + */ \ + if r6 > r7 goto l2_%=; \ + r1 = r8; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r9 = r8; \ + goto l3_%=; \ +l2_%=: /* spin_lock(r9) */ \ + r1 = r9; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ +l3_%=: /* spin_unlock(r9) */ \ + r1 = r9; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ +l0_%=: /* exit(0) */ \ + r0 = 0; \ +l1_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c b/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c new file mode 100644 index 000000000000..e0f77e3e7869 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/stack_ptr.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <limits.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +SEC("socket") +__description("PTR_TO_STACK store/load") +__success __success_unpriv __retval(0xfaceb00c) +__naked 
void ptr_to_stack_store_load(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -10; \ + r0 = 0xfaceb00c; \ + *(u64*)(r1 + 2) = r0; \ + r0 = *(u64*)(r1 + 2); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK store/load - bad alignment on off") +__failure __msg("misaligned stack access off (0x0; 0x0)+-8+2 size 8") +__failure_unpriv +__naked void load_bad_alignment_on_off(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -8; \ + r0 = 0xfaceb00c; \ + *(u64*)(r1 + 2) = r0; \ + r0 = *(u64*)(r1 + 2); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK store/load - bad alignment on reg") +__failure __msg("misaligned stack access off (0x0; 0x0)+-10+8 size 8") +__failure_unpriv +__naked void load_bad_alignment_on_reg(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -10; \ + r0 = 0xfaceb00c; \ + *(u64*)(r1 + 8) = r0; \ + r0 = *(u64*)(r1 + 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK store/load - out of bounds low") +__failure __msg("invalid write to stack R1 off=-79992 size=8") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void load_out_of_bounds_low(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -80000; \ + r0 = 0xfaceb00c; \ + *(u64*)(r1 + 8) = r0; \ + r0 = *(u64*)(r1 + 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK store/load - out of bounds high") +__failure __msg("invalid write to stack R1 off=0 size=8") +__failure_unpriv +__naked void load_out_of_bounds_high(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -8; \ + r0 = 0xfaceb00c; \ + *(u64*)(r1 + 8) = r0; \ + r0 = *(u64*)(r1 + 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 1") +__success __success_unpriv __retval(42) +__naked void to_stack_check_high_1(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -1; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 2") +__success __success_unpriv __retval(42) +__naked void to_stack_check_high_2(void) +{ + asm volatile (" \ + r1 = r10; \ + r0 = 42; \ + *(u8*)(r1 - 1) = r0; \ + r0 = *(u8*)(r1 - 1); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 3") +__success __failure_unpriv +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__retval(42) +__naked void to_stack_check_high_3(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += 0; \ + r0 = 42; \ + *(u8*)(r1 - 1) = r0; \ + r0 = *(u8*)(r1 - 1); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 4") +__failure __msg("invalid write to stack R1 off=0 size=1") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_high_4(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += 0; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 5") +__failure __msg("invalid write to stack R1") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_high_5(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" : + : __imm_const(__imm_0, (1 << 29) - 1) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 6") +__failure __msg("invalid write to stack") 
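These high/low probes trace the exact window for PTR_TO_STACK: accesses must land in [r10 - 512, r10), i.e. strictly below the frame pointer and within the 512-byte BPF stack, with spills kept naturally aligned (the misaligned variants above are rejected). At the C level this is simply the stack budget; a sketch, assuming the compiler places the array at the frame bottom as a full-size frame forces:

	SEC("socket")
	int stack_window(void *ctx)
	{
		char buf[512];	/* fills the whole BPF stack frame */

		buf[0] = 42;	/* lowest valid byte:  r10 - 512   */
		buf[511] = 42;	/* highest valid byte: r10 - 1	   */
		return buf[0];
	}
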
+__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_high_6(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 + %[shrt_max]) = r0; \ + r0 = *(u8*)(r1 + %[shrt_max]); \ + exit; \ +" : + : __imm_const(__imm_0, (1 << 29) - 1), + __imm_const(shrt_max, SHRT_MAX) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 7") +__failure __msg("fp pointer offset") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_high_7(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 + %[shrt_max]) = r0; \ + r0 = *(u8*)(r1 + %[shrt_max]); \ + exit; \ +" : + : __imm_const(__imm_0, (1 << 29) - 1), + __imm_const(shrt_max, SHRT_MAX) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 1") +__success __success_unpriv __retval(42) +__naked void to_stack_check_low_1(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -512; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 2") +__success __failure_unpriv +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__retval(42) +__naked void to_stack_check_low_2(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -513; \ + r0 = 42; \ + *(u8*)(r1 + 1) = r0; \ + r0 = *(u8*)(r1 + 1); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 3") +__failure __msg("invalid write to stack R1 off=-513 size=1") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_low_3(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -513; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 4") +__failure __msg("math between fp pointer") +__failure_unpriv +__naked void to_stack_check_low_4(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[int_min]; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 5") +__failure __msg("invalid write to stack") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_low_5(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" : + : __imm_const(__imm_0, -((1 << 29) - 1)) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 6") +__failure __msg("invalid write to stack") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_low_6(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 %[shrt_min]) = r0; \ + r0 = *(u8*)(r1 %[shrt_min]); \ + exit; \ +" : + : __imm_const(__imm_0, -((1 << 29) - 1)), + __imm_const(shrt_min, SHRT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 7") +__failure __msg("fp pointer offset") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_low_7(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 %[shrt_min]) = r0; \ + r0 = *(u8*)(r1 %[shrt_min]); \ + exit; \ +" : + : __imm_const(__imm_0, -((1 << 29) - 1)), + __imm_const(shrt_min, SHRT_MIN) 
+ : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK mixed reg/k, 1") +__success __success_unpriv __retval(42) +__naked void stack_mixed_reg_k_1(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -3; \ + r2 = -3; \ + r1 += r2; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK mixed reg/k, 2") +__success __success_unpriv __retval(42) +__naked void stack_mixed_reg_k_2(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + r0 = 0; \ + *(u64*)(r10 - 16) = r0; \ + r1 = r10; \ + r1 += -3; \ + r2 = -3; \ + r1 += r2; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r5 = r10; \ + r0 = *(u8*)(r5 - 6); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK mixed reg/k, 3") +__success __success_unpriv __retval(-3) +__naked void stack_mixed_reg_k_3(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -3; \ + r2 = -3; \ + r1 += r2; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK reg") +__success __success_unpriv __retval(42) +__naked void ptr_to_stack_reg(void) +{ + asm volatile (" \ + r1 = r10; \ + r2 = -3; \ + r1 += r2; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("stack pointer arithmetic") +__success __success_unpriv __retval(0) +__naked void stack_pointer_arithmetic(void) +{ + asm volatile (" \ + r1 = 4; \ + goto l0_%=; \ +l0_%=: r7 = r10; \ + r7 += -10; \ + r7 += -10; \ + r2 = r7; \ + r2 += r1; \ + r0 = 0; \ + *(u32*)(r2 + 4) = r0; \ + r2 = r7; \ + r2 += 8; \ + r0 = 0; \ + *(u32*)(r2 + 4) = r0; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("store PTR_TO_STACK in R10 to array map using BPF_B") +__success __retval(42) +__naked void array_map_using_bpf_b(void) +{ + asm volatile (" \ + /* Load pointer to map. */ \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + r0 = 2; \ + exit; \ +l0_%=: r1 = r0; \ + /* Copy R10 to R9. */ \ + r9 = r10; \ + /* Pollute other registers with unaligned values. */\ + r2 = -1; \ + r3 = -1; \ + r4 = -1; \ + r5 = -1; \ + r6 = -1; \ + r7 = -1; \ + r8 = -1; \ + /* Store both R9 and R10 with BPF_B and read back. */\ + *(u8*)(r1 + 0) = r10; \ + r2 = *(u8*)(r1 + 0); \ + *(u8*)(r1 + 0) = r9; \ + r3 = *(u8*)(r1 + 0); \ + /* Should read back as same value. */ \ + if r2 == r3 goto l1_%=; \ + r0 = 1; \ + exit; \ +l1_%=: r0 = 42; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_subreg.c b/tools/testing/selftests/bpf/progs/verifier_subreg.c new file mode 100644 index 000000000000..8613ea160dcd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_subreg.c @@ -0,0 +1,673 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/subreg.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +/* This file contains sub-register zero extension checks for insns defining + * sub-registers, meaning: + * - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width + * forms (BPF_END) could define sub-registers. + * - Narrow direct loads, BPF_B/H/W | BPF_LDX. 
+ * - BPF_LD is not exposed to JIT back-ends, so no need for testing. + * + * "get_prandom_u32" is used to initialize low 32-bit of some registers to + * prevent potential optimizations done by verifier or JIT back-ends which could + * optimize register back into constant when range info shows one register is a + * constant. + */ + +SEC("socket") +__description("add32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void add32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000000 ll; \ + w0 += w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("add32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void add32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + /* An insn could have no effect on the low 32-bit, for example:\ + * a = a + 0 \ + * a = a | 0 \ + * a = a & -1 \ + * But, they should still zero high 32-bit. \ + */ \ + w0 += 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 += -2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("sub32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void sub32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x1ffffffff ll; \ + w0 -= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("sub32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void sub32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 -= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 -= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mul32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void mul32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000001 ll; \ + w0 *= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mul32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void mul32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 *= 1; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 *= -1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("div32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void div32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = -1; \ + w0 /= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("div32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void div32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 /= 1; \ + r0 >>= 32; \ 
+ r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 /= 2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("or32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void or32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000001 ll; \ + w0 |= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("or32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void or32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 |= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 |= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("and32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void and32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x100000000 ll; \ + r1 |= r0; \ + r0 = 0x1ffffffff ll; \ + w0 &= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("and32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void and32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 &= -1; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 &= -2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("lsh32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void lsh32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x100000000 ll; \ + r0 |= r1; \ + r1 = 1; \ + w0 <<= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("lsh32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void lsh32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 <<= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 <<= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("rsh32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void rsh32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r1 = 1; \ + w0 >>= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("rsh32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void rsh32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 >>= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 >>= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("neg32 
reg zero extend check") +__success __success_unpriv __retval(0) +__naked void neg32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 = -w0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mod32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void mod32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = -1; \ + w0 %%= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mod32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void mod32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 %%= 1; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 %%= 2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("xor32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void xor32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000000 ll; \ + w0 ^= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("xor32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void xor32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 ^= 1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mov32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void mov32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x100000000 ll; \ + r1 |= r0; \ + r0 = 0x100000000 ll; \ + w0 = w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mov32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void mov32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 = 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 = 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("arsh32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void arsh32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r1 = 1; \ + w0 s>>= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("arsh32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void arsh32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 s>>= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 s>>= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("end16 (to_le) reg zero extend check") 
+__success __success_unpriv __retval(0) +__naked void le_reg_zero_extend_check_1(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = le16 r0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("end32 (to_le) reg zero extend check") +__success __success_unpriv __retval(0) +__naked void le_reg_zero_extend_check_2(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = le32 r0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("end16 (to_be) reg zero extend check") +__success __success_unpriv __retval(0) +__naked void be_reg_zero_extend_check_1(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = be16 r0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("end32 (to_be) reg zero extend check") +__success __success_unpriv __retval(0) +__naked void be_reg_zero_extend_check_2(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = be32 r0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("ldx_b zero extend check") +__success __success_unpriv __retval(0) +__naked void ldx_b_zero_extend_check(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -4; \ + r7 = 0xfaceb00c; \ + *(u32*)(r6 + 0) = r7; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r0 = *(u8*)(r6 + 0); \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("ldx_h zero extend check") +__success __success_unpriv __retval(0) +__naked void ldx_h_zero_extend_check(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -4; \ + r7 = 0xfaceb00c; \ + *(u32*)(r6 + 0) = r7; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r0 = *(u16*)(r6 + 0); \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("ldx_w zero extend check") +__success __success_unpriv __retval(0) +__naked void ldx_w_zero_extend_check(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -4; \ + r7 = 0xfaceb00c; \ + *(u32*)(r6 + 0) = r7; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r0 = *(u32*)(r6 + 0); \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_uninit.c b/tools/testing/selftests/bpf/progs/verifier_uninit.c new file mode 100644 index 000000000000..7718cd7d19ce --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_uninit.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/uninit.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +SEC("socket") +__description("read uninitialized register") +__failure __msg("R2 !read_ok") +__failure_unpriv +__naked void read_uninitialized_register(void) +{ + asm volatile (" \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") 
+__description("read invalid register") +__failure __msg("R15 is invalid") +__failure_unpriv +__naked void read_invalid_register(void) +{ + asm volatile (" \ + .8byte %[mov64_reg]; \ + exit; \ +" : + : __imm_insn(mov64_reg, BPF_MOV64_REG(BPF_REG_0, -1)) + : __clobber_all); +} + +SEC("socket") +__description("program doesn't init R0 before exit") +__failure __msg("R0 !read_ok") +__failure_unpriv +__naked void t_init_r0_before_exit(void) +{ + asm volatile (" \ + r2 = r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("program doesn't init R0 before exit in all branches") +__failure __msg("R0 !read_ok") +__msg_unpriv("R1 pointer comparison") +__naked void before_exit_in_all_branches(void) +{ + asm volatile (" \ + if r1 >= 0 goto l0_%=; \ + r0 = 1; \ + r0 += 2; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c new file mode 100644 index 000000000000..7ea535bfbacd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c @@ -0,0 +1,726 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +#define BPF_SK_LOOKUP(func) \ + /* struct bpf_sock_tuple tuple = {} */ \ + "r2 = 0;" \ + "*(u32*)(r10 - 8) = r2;" \ + "*(u64*)(r10 - 16) = r2;" \ + "*(u64*)(r10 - 24) = r2;" \ + "*(u64*)(r10 - 32) = r2;" \ + "*(u64*)(r10 - 40) = r2;" \ + "*(u64*)(r10 - 48) = r2;" \ + /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \ + "r2 = r10;" \ + "r2 += -48;" \ + "r3 = %[sizeof_bpf_sock_tuple];"\ + "r4 = 0;" \ + "r5 = 0;" \ + "call %[" #func "];" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +void dummy_prog_42_socket(void); +void dummy_prog_24_socket(void); +void dummy_prog_loop1_socket(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_socket SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_socket, + [1] = (void *)&dummy_prog_loop1_socket, + [2] = (void *)&dummy_prog_24_socket, + }, +}; + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_42_socket(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_24_socket(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop1_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: return pointer") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(POINTER_VALUE) +__naked void unpriv_return_pointer(void) +{ + asm volatile (" \ + r0 = r10; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: add const to pointer") +__success __success_unpriv __retval(0) +__naked void unpriv_add_const_to_pointer(void) +{ + asm volatile (" \ + r1 += 8; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: add pointer to pointer") +__failure __msg("R1 pointer += 
pointer") +__failure_unpriv +__naked void unpriv_add_pointer_to_pointer(void) +{ + asm volatile (" \ + r1 += r10; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: neg pointer") +__success __failure_unpriv __msg_unpriv("R1 pointer arithmetic") +__retval(0) +__naked void unpriv_neg_pointer(void) +{ + asm volatile (" \ + r1 = -r1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp pointer with const") +__success __failure_unpriv __msg_unpriv("R1 pointer comparison") +__retval(0) +__naked void unpriv_cmp_pointer_with_const(void) +{ + asm volatile (" \ + if r1 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp pointer with pointer") +__success __failure_unpriv __msg_unpriv("R10 pointer comparison") +__retval(0) +__naked void unpriv_cmp_pointer_with_pointer(void) +{ + asm volatile (" \ + if r1 == r10 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("unpriv: check that printk is disallowed") +__success +__naked void check_that_printk_is_disallowed(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = r10; \ + r1 += -8; \ + r2 = 8; \ + r3 = r1; \ + call %[bpf_trace_printk]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_trace_printk) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: pass pointer to helper function") +__success __failure_unpriv __msg_unpriv("R4 leaks addr") +__retval(0) +__naked void pass_pointer_to_helper_function(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + r3 = r2; \ + r4 = r2; \ + call %[bpf_map_update_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_update_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: indirectly pass pointer on stack to helper function") +__success __failure_unpriv +__msg_unpriv("invalid indirect read from stack R2 off -8+0 size 8") +__retval(0) +__naked void on_stack_to_helper_function(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r10; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: mangle pointer on stack 1") +__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled") +__retval(0) +__naked void mangle_pointer_on_stack_1(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r10; \ + r0 = 0; \ + *(u32*)(r10 - 8) = r0; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: mangle pointer on stack 2") +__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled") +__retval(0) +__naked void mangle_pointer_on_stack_2(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r10; \ + r0 = 0; \ + *(u8*)(r10 - 1) = r0; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: read pointer from stack in small chunks") +__failure __msg("invalid size") +__failure_unpriv +__naked void from_stack_in_small_chunks(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r10; \ + r0 = *(u32*)(r10 - 8); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: write pointer into ctx") +__failure __msg("invalid bpf_context access") +__failure_unpriv __msg_unpriv("R1 leaks addr") +__naked void 
unpriv_write_pointer_into_ctx(void) +{ + asm volatile (" \ + *(u64*)(r1 + 0) = r1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: spill/fill of ctx") +__success __success_unpriv __retval(0) +__naked void unpriv_spill_fill_of_ctx(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + r1 = *(u64*)(r6 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of ctx 2") +__success __retval(0) +__naked void spill_fill_of_ctx_2(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + r1 = *(u64*)(r6 + 0); \ + call %[bpf_get_hash_recalc]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_hash_recalc) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of ctx 3") +__failure __msg("R1 type=fp expected=ctx") +__naked void spill_fill_of_ctx_3(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + *(u64*)(r6 + 0) = r10; \ + r1 = *(u64*)(r6 + 0); \ + call %[bpf_get_hash_recalc]; \ + exit; \ +" : + : __imm(bpf_get_hash_recalc) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of ctx 4") +__failure __msg("R1 type=scalar expected=ctx") +__naked void spill_fill_of_ctx_4(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + r0 = 1; \ + lock *(u64 *)(r10 - 8) += r0; \ + r1 = *(u64*)(r6 + 0); \ + call %[bpf_get_hash_recalc]; \ + exit; \ +" : + : __imm(bpf_get_hash_recalc) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx") +__failure __msg("same insn cannot be used with different pointers") +__naked void fill_of_different_pointers_stx(void) +{ + asm volatile (" \ + r3 = 42; \ + r6 = r10; \ + r6 += -8; \ + if r1 == 0 goto l0_%=; \ + r2 = r10; \ + r2 += -16; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: r1 = *(u64*)(r6 + 0); \ + *(u32*)(r1 + %[__sk_buff_mark]) = r3; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +/* Same as above, but use BPF_ST_MEM to save 42 + * instead of BPF_STX_MEM. 
+ */ +SEC("tc") +__description("unpriv: spill/fill of different pointers st") +__failure __msg("same insn cannot be used with different pointers") +__naked void fill_of_different_pointers_st(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + if r1 == 0 goto l0_%=; \ + r2 = r10; \ + r2 += -16; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: r1 = *(u64*)(r6 + 0); \ + .8byte %[st_mem]; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_insn(st_mem, + BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - ctx and sock") +__failure __msg("type=ctx expected=sock") +__naked void pointers_stx_ctx_and_sock(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb == NULL) *target = sock; */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: /* else *target = skb; */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: /* struct __sk_buff *skb = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* skb->mark = 42; */ \ + r3 = 42; \ + *(u32*)(r1 + %[__sk_buff_mark]) = r3; \ + /* if (sk) bpf_sk_release(sk) */ \ + if r1 == 0 goto l2_%=; \ + call %[bpf_sk_release]; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - leak sock") +__failure +//.errstr = "same insn cannot be used with different pointers", +__msg("Unreleased reference") +__naked void different_pointers_stx_leak_sock(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb == NULL) *target = sock; */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: /* else *target = skb; */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: /* struct __sk_buff *skb = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* skb->mark = 42; */ \ + r3 = 42; \ + *(u32*)(r1 + %[__sk_buff_mark]) = r3; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - sock and ctx (read)") +__failure __msg("same insn cannot be used with different pointers") +__naked void stx_sock_and_ctx_read(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb) *target = skb */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r1; \ +l0_%=: /* else *target = sock */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r2; \ +l1_%=: /* struct bpf_sock *sk = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */\ + if r1 == 0 goto l2_%=; \ + r3 = *(u32*)(r1 + 
%[bpf_sock_mark]); \ + call %[bpf_sk_release]; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - sock and ctx (write)") +__failure +//.errstr = "same insn cannot be used with different pointers", +__msg("cannot write into sock") +__naked void stx_sock_and_ctx_write(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb) *target = skb */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r1; \ +l0_%=: /* else *target = sock */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r2; \ +l1_%=: /* struct bpf_sock *sk = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* if (sk) sk->mark = 42; bpf_sk_release(sk); */\ + if r1 == 0 goto l2_%=; \ + r3 = 42; \ + *(u32*)(r1 + %[bpf_sock_mark]) = r3; \ + call %[bpf_sk_release]; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: write pointer into map elem value") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void pointer_into_map_elem_value(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + *(u64*)(r0 + 0) = r0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("alu32: mov u32 const") +__success __failure_unpriv __msg_unpriv("R7 invalid mem access 'scalar'") +__retval(0) +__naked void alu32_mov_u32_const(void) +{ + asm volatile (" \ + w7 = 0; \ + w7 &= 1; \ + w0 = w7; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: partial copy of pointer") +__success __failure_unpriv __msg_unpriv("R10 partial copy") +__retval(0) +__naked void unpriv_partial_copy_of_pointer(void) +{ + asm volatile (" \ + w1 = w10; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: pass pointer to tail_call") +__success __failure_unpriv __msg_unpriv("R3 leaks addr into helper") +__retval(0) +__naked void pass_pointer_to_tail_call(void) +{ + asm volatile (" \ + r3 = r1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp map pointer with zero") +__success __failure_unpriv __msg_unpriv("R1 pointer comparison") +__retval(0) +__naked void cmp_map_pointer_with_zero(void) +{ + asm volatile (" \ + r1 = 0; \ + r1 = %[map_hash_8b] ll; \ + if r1 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: write into frame pointer") +__failure __msg("frame pointer is read only") +__failure_unpriv +__naked void unpriv_write_into_frame_pointer(void) +{ + asm volatile (" \ + r10 = r1; \ + r0 = 0; \ + exit; 
\ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: spill/fill frame pointer") +__failure __msg("frame pointer is read only") +__failure_unpriv +__naked void unpriv_spill_fill_frame_pointer(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r10; \ + r10 = *(u64*)(r6 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp of frame pointer") +__success __failure_unpriv __msg_unpriv("R10 pointer comparison") +__retval(0) +__naked void unpriv_cmp_of_frame_pointer(void) +{ + asm volatile (" \ + if r10 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: adding of fp, reg") +__success __failure_unpriv +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__retval(0) +__naked void unpriv_adding_of_fp_reg(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = 0; \ + r1 += r10; \ + *(u64*)(r1 - 8) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: adding of fp, imm") +__success __failure_unpriv +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__retval(0) +__naked void unpriv_adding_of_fp_imm(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = r10; \ + r1 += 0; \ + *(u64*)(r1 - 8) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp of stack pointer") +__success __failure_unpriv __msg_unpriv("R2 pointer comparison") +__retval(0) +__naked void unpriv_cmp_of_stack_pointer(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + if r2 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c b/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c new file mode 100644 index 000000000000..4d77407a0a79 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("perf_event") +__description("unpriv: spill/fill of different pointers ldx") +__failure __msg("same insn cannot be used with different pointers") +__naked void fill_of_different_pointers_ldx(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + if r1 == 0 goto l0_%=; \ + r2 = r10; \ + r2 += %[__imm_0]; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: r1 = *(u64*)(r6 + 0); \ + r1 = *(u64*)(r1 + %[sample_period]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, + -(__s32) offsetof(struct bpf_perf_event_data, sample_period) - 8), + __imm_const(sample_period, + offsetof(struct bpf_perf_event_data, sample_period)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_value.c b/tools/testing/selftests/bpf/progs/verifier_value.c new file mode 100644 index 000000000000..b5af6b6f5acd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_value.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/value.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + 
__type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("map element value store of cleared call register") +__failure __msg("R1 !read_ok") +__failure_unpriv __msg_unpriv("R1 !read_ok") +__naked void store_of_cleared_call_register(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value with unaligned store") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void element_value_with_unaligned_store(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 += 3; \ + r1 = 42; \ + *(u64*)(r0 + 0) = r1; \ + r1 = 43; \ + *(u64*)(r0 + 2) = r1; \ + r1 = 44; \ + *(u64*)(r0 - 2) = r1; \ + r8 = r0; \ + r1 = 32; \ + *(u64*)(r8 + 0) = r1; \ + r1 = 33; \ + *(u64*)(r8 + 2) = r1; \ + r1 = 34; \ + *(u64*)(r8 - 2) = r1; \ + r8 += 5; \ + r1 = 22; \ + *(u64*)(r8 + 0) = r1; \ + r1 = 23; \ + *(u64*)(r8 + 4) = r1; \ + r1 = 24; \ + *(u64*)(r8 - 7) = r1; \ + r7 = r8; \ + r7 += 3; \ + r1 = 22; \ + *(u64*)(r7 + 0) = r1; \ + r1 = 23; \ + *(u64*)(r7 + 4) = r1; \ + r1 = 24; \ + *(u64*)(r7 - 4) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value with unaligned load") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void element_value_with_unaligned_load(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + if r1 >= %[max_entries] goto l0_%=; \ + r0 += 3; \ + r7 = *(u64*)(r0 + 0); \ + r7 = *(u64*)(r0 + 2); \ + r8 = r0; \ + r7 = *(u64*)(r8 + 0); \ + r7 = *(u64*)(r8 + 2); \ + r0 += 5; \ + r7 = *(u64*)(r0 + 0); \ + r7 = *(u64*)(r0 + 4); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(max_entries, MAX_ENTRIES) + : __clobber_all); +} + +SEC("socket") +__description("map element value is preserved across register spilling") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void is_preserved_across_register_spilling(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 += %[test_val_foo]; \ + r1 = 42; \ + *(u64*)(r0 + 0) = r1; \ + r1 = r10; \ + r1 += -184; \ + *(u64*)(r1 + 0) = r0; \ + r3 = *(u64*)(r1 + 0); \ + r1 = 42; \ + *(u64*)(r3 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c b/tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c new file mode 100644 index 000000000000..d7a5ba9bbe6a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c @@ -0,0 +1,78 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/value_adj_spill.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("map element value is preserved across register spilling") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void is_preserved_across_register_spilling(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 42; \ + *(u64*)(r0 + 0) = r1; \ + r1 = r10; \ + r1 += -184; \ + *(u64*)(r1 + 0) = r0; \ + r3 = *(u64*)(r1 + 0); \ + r1 = 42; \ + *(u64*)(r3 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value or null is marked on register spilling") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void is_marked_on_register_spilling(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + r1 = r10; \ + r1 += -152; \ + *(u64*)(r1 + 0) = r0; \ + if r0 == 0 goto l0_%=; \ + r3 = *(u64*)(r1 + 0); \ + r1 = 42; \ + *(u64*)(r3 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c new file mode 100644 index 000000000000..71814a753216 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/value_illegal_alu.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("map element value illegal alu op, 1") +__failure __msg("R0 bitwise operator &= on pointer") +__failure_unpriv +__naked void value_illegal_alu_op_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 &= 8; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 2") +__failure __msg("R0 32-bit pointer arithmetic prohibited") +__failure_unpriv +__naked void value_illegal_alu_op_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + w0 += 0; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : 
__clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 3") +__failure __msg("R0 pointer arithmetic with /= operator") +__failure_unpriv +__naked void value_illegal_alu_op_3(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 /= 42; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 4") +__failure __msg("invalid mem access 'scalar'") +__failure_unpriv __msg_unpriv("R0 pointer arithmetic prohibited") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void value_illegal_alu_op_4(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 = be64 r0; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 5") +__failure __msg("R0 invalid mem access 'scalar'") +__msg_unpriv("leaking pointer from stack off -8") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void value_illegal_alu_op_5(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r3 = 4096; \ + r2 = r10; \ + r2 += -8; \ + *(u64*)(r2 + 0) = r0; \ + lock *(u64 *)(r2 + 0) += r3; \ + r0 = *(u64*)(r2 + 0); \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_value_or_null.c b/tools/testing/selftests/bpf/progs/verifier_value_or_null.c new file mode 100644 index 000000000000..8ff668a242eb --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_value_or_null.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/value_or_null.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("tc") +__description("multiple registers share map_lookup_elem result") +__success __retval(0) +__naked void share_map_lookup_elem_result(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r4 = r0; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("alu ops on ptr_to_map_value_or_null, 1") +__failure __msg("R4 pointer arithmetic on map_value_or_null") +__naked void map_value_or_null_1(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call 
%[bpf_map_lookup_elem]; \ + r4 = r0; \ + r4 += -2; \ + r4 += 2; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("alu ops on ptr_to_map_value_or_null, 2") +__failure __msg("R4 pointer arithmetic on map_value_or_null") +__naked void map_value_or_null_2(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r4 = r0; \ + r4 &= -1; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("alu ops on ptr_to_map_value_or_null, 3") +__failure __msg("R4 pointer arithmetic on map_value_or_null") +__naked void map_value_or_null_3(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r4 = r0; \ + r4 <<= 1; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("invalid memory access with multiple map_lookup_elem calls") +__failure __msg("R4 !read_ok") +__naked void multiple_map_lookup_elem_calls(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + r8 = r1; \ + r7 = r2; \ + call %[bpf_map_lookup_elem]; \ + r4 = r0; \ + r1 = r8; \ + r2 = r7; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("valid indirect map_lookup_elem access with 2nd lookup in branch") +__success __retval(0) +__naked void with_2nd_lookup_in_branch(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + r8 = r1; \ + r7 = r2; \ + call %[bpf_map_lookup_elem]; \ + r2 = 10; \ + if r2 != 0 goto l0_%=; \ + r1 = r8; \ + r2 = r7; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r4 = r0; \ + if r0 == 0 goto l1_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access from else condition") +__failure __msg("R0 unbounded memory access") +__failure_unpriv __msg_unpriv("R0 leaks addr") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void map_access_from_else_condition(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + if r1 >= %[__imm_0] goto l1_%=; \ + r1 += 1; \ +l1_%=: r1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, MAX_ENTRIES-1), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tc") +__description("map lookup and null branch prediction") +__success __retval(0) +__naked void lookup_and_null_branch_prediction(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call 
%[bpf_map_lookup_elem]; \ + r6 = r0; \ + if r6 == 0 goto l0_%=; \ + if r6 != 0 goto l0_%=; \ + r10 += 10; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("MAP_VALUE_OR_NULL check_ids() in regsafe()") +__failure __msg("R8 invalid mem access 'map_value_or_null'") +__failure_unpriv __msg_unpriv("") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void null_check_ids_in_regsafe(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + /* r9 = map_lookup_elem(...) */ \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r9 = r0; \ + /* r8 = map_lookup_elem(...) */ \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r8 = r0; \ + /* r7 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r7 = r0; \ + /* r6 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r6 = r0; \ + /* if r6 > r7 goto +1 ; no new information about the state is derived from\ + * ; this check, thus produced verifier states differ\ + * ; only in 'insn_idx' \ + * r9 = r8 ; optionally share ID between r9 and r8\ + */ \ + if r6 > r7 goto l0_%=; \ + r9 = r8; \ +l0_%=: /* if r9 == 0 goto <exit> */ \ + if r9 == 0 goto l1_%=; \ + /* read map value via r8, this is not always \ + * safe because r8 might be not equal to r9. \ + */ \ + r0 = *(u64*)(r8 + 0); \ +l1_%=: /* exit 0 */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c new file mode 100644 index 000000000000..5ba6e53571c8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c @@ -0,0 +1,1423 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/value_ptr_arith.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <errno.h> +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct other_val { + long long foo; + long long bar; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct other_val); +} map_hash_16b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs const") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void value_ptr_unknown_vs_const(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ + goto l4_%=; \ +l3_%=: r1 = 3; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : 
__imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr const vs unknown") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void value_ptr_const_vs_unknown(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 3; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr const vs const (ne)") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void ptr_const_vs_const_ne(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 3; \ + goto l4_%=; \ +l3_%=: r1 = 5; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr const vs const (eq)") +__success __success_unpriv __retval(1) +__naked void ptr_const_vs_const_eq(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 5; \ + goto l4_%=; \ +l3_%=: r1 = 5; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs unknown (eq)") +__success __success_unpriv __retval(1) +__naked void ptr_unknown_vs_unknown_eq(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ +l4_%=: r1 += r0; \ + r0 = 
*(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs unknown (lt)") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void ptr_unknown_vs_unknown_lt(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 = -r1; \ + r1 &= 0x3; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs unknown (gt)") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void ptr_unknown_vs_unknown_gt(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x3; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr from different maps") +__success __success_unpriv __retval(1) +__naked void value_ptr_from_different_maps(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = 4; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar from different maps") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(1) +__naked void known_scalar_from_different_maps(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = 4; \ + r0 -= r1; \ + r0 += r1; \ + 
r0 = *(u8*)(r0 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr from different maps, but same value properties") +__success __success_unpriv __retval(1) +__naked void maps_but_same_value_properties(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_48b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = 4; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_48b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: mixing value pointer and scalar, 1") +__success __failure_unpriv __msg_unpriv("R2 pointer comparison prohibited") +__retval(0) +__naked void value_pointer_and_scalar_1(void) +{ + asm volatile (" \ + /* load map value pointer into r0 and r2 */ \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r2 = r10; \ + r2 += -16; \ + r6 = 0; \ + *(u64*)(r10 - 16) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: /* load some number from the map into r1 */ \ + r1 = *(u8*)(r0 + 0); \ + /* depending on r1, branch: */ \ + if r1 != 0 goto l1_%=; \ + /* branch A */ \ + r2 = r0; \ + r3 = 0; \ + goto l2_%=; \ +l1_%=: /* branch B */ \ + r2 = 0; \ + r3 = 0x100000; \ +l2_%=: /* common instruction */ \ + r2 += r3; \ + /* depending on r1, branch: */ \ + if r1 != 0 goto l3_%=; \ + /* branch A */ \ + goto l4_%=; \ +l3_%=: /* branch B */ \ + r0 = 0x13371337; \ + /* verifier follows fall-through */ \ + if r2 != 0x100000 goto l4_%=; \ + r0 = 0; \ + exit; \ +l4_%=: /* fake-dead code; targeted from branch A to \ + * prevent dead code sanitization \ + */ \ + r0 = *(u8*)(r0 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: mixing value pointer and scalar, 2") +__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__retval(0) +__naked void value_pointer_and_scalar_2(void) +{ + asm volatile (" \ + /* load map value pointer into r0 and r2 */ \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r2 = r10; \ + r2 += -16; \ + r6 = 0; \ + *(u64*)(r10 - 16) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: /* load some number from the map into r1 */ \ + r1 = *(u8*)(r0 + 0); \ + /* depending on r1, branch: */ \ + if r1 == 0 goto l1_%=; \ + /* branch A */ \ + r2 = 0; \ + r3 = 0x100000; \ + goto l2_%=; \ +l1_%=: /* branch B */ \ + r2 = r0; \ + r3 = 0; \ +l2_%=: /* common instruction */ \ + r2 += r3; \ + /* depending on r1, branch: */ \ + if r1 != 0 goto l3_%=; \ + /* branch A */ \ + goto l4_%=; \ +l3_%=: /* branch B */ \ + r0 = 0x13371337; \ + /* verifier follows fall-through */ \ + if r2 != 0x100000 goto l4_%=; \ + r0 = 0; \ + exit; \ +l4_%=: /* fake-dead code; targeted from branch A to \ + * prevent dead code sanitization, rejected \ + * via branch B however \ + */ \ + r0 = *(u8*)(r0 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + 
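+/* The three "sanitation" tests below exercise the verifier's ALU
+ * sanitation logic (speculative-execution hardening): the same ALU
+ * instruction is reached from different paths with different scalar
+ * operands, and the patched program must still compute the original
+ * result at runtime.  Test 1 assumes the test runner pre-populates
+ * map_array_48b with a non-zero test_val.index, so the fall-through
+ * branch computes 0 + 0x100000, matching __retval(0x100000).  Tests 2
+ * and 3 sum two -EINVAL scalars (test 2 obtains them from
+ * bpf_map_delete_elem on an array map, which fails with -EINVAL;
+ * test 3 computes them directly) and then add EINVAL back twice,
+ * cancelling to the expected __retval(0).
+ */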
+SEC("socket") +__description("sanitation: alu with different scalars 1") +__success __success_unpriv __retval(0x100000) +__naked void alu_with_different_scalars_1(void) +{ + asm volatile (" \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r2 = r10; \ + r2 += -16; \ + r6 = 0; \ + *(u64*)(r10 - 16) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u32*)(r0 + 0); \ + if r1 == 0 goto l1_%=; \ + r2 = 0; \ + r3 = 0x100000; \ + goto l2_%=; \ +l1_%=: r2 = 42; \ + r3 = 0x100001; \ +l2_%=: r2 += r3; \ + r0 = r2; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("sanitation: alu with different scalars 2") +__success __success_unpriv __retval(0) +__naked void alu_with_different_scalars_2(void) +{ + asm volatile (" \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r6 = r1; \ + r2 = r10; \ + r2 += -16; \ + r7 = 0; \ + *(u64*)(r10 - 16) = r7; \ + call %[bpf_map_delete_elem]; \ + r7 = r0; \ + r1 = r6; \ + r2 = r10; \ + r2 += -16; \ + call %[bpf_map_delete_elem]; \ + r6 = r0; \ + r8 = r6; \ + r8 += r7; \ + r0 = r8; \ + r0 += %[einval]; \ + r0 += %[einval]; \ + exit; \ +" : + : __imm(bpf_map_delete_elem), + __imm_addr(map_array_48b), + __imm_const(einval, EINVAL) + : __clobber_all); +} + +SEC("socket") +__description("sanitation: alu with different scalars 3") +__success __success_unpriv __retval(0) +__naked void alu_with_different_scalars_3(void) +{ + asm volatile (" \ + r0 = %[einval]; \ + r0 *= -1; \ + r7 = r0; \ + r0 = %[einval]; \ + r0 *= -1; \ + r6 = r0; \ + r8 = r6; \ + r8 += r7; \ + r0 = r8; \ + r0 += %[einval]; \ + r0 += %[einval]; \ + exit; \ +" : + : __imm_const(einval, EINVAL) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, upper oob arith, test 1") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(1) +__naked void upper_oob_arith_test_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 48; \ + r0 += r1; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, upper oob arith, test 2") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(1) +__naked void upper_oob_arith_test_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 49; \ + r0 += r1; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, upper oob arith, test 3") +__success __success_unpriv __retval(1) +__naked void upper_oob_arith_test_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map 
access: value_ptr -= known scalar, lower oob arith, test 1") +__failure __msg("R0 min value is outside of the allowed memory range") +__failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__naked void lower_oob_arith_test_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r1 = 48; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar, lower oob arith, test 2") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(1) +__naked void lower_oob_arith_test_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r1 = 48; \ + r0 -= r1; \ + r1 = 1; \ + r0 += r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar, lower oob arith, test 3") +__success __success_unpriv __retval(1) +__naked void lower_oob_arith_test_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r1 = 47; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr") +__success __success_unpriv __retval(1) +__naked void access_known_scalar_value_ptr_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 1") +__success __success_unpriv __retval(1) +__naked void value_ptr_known_scalar_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 2") +__failure __msg("invalid access to map value") +__failure_unpriv +__naked void value_ptr_known_scalar_2_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 49; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 3") +__failure __msg("invalid access to map value") +__failure_unpriv 
+__naked void value_ptr_known_scalar_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = -1; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 4") +__success __success_unpriv __retval(1) +__naked void value_ptr_known_scalar_4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 5; \ + r0 += r1; \ + r1 = -2; \ + r0 += r1; \ + r1 = -1; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 5") +__success __success_unpriv __retval(0xabcdef12) +__naked void value_ptr_known_scalar_5(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = %[__imm_0]; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_const(__imm_0, (6 + 1) * sizeof(int)) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 6") +__success __success_unpriv __retval(0xabcdef12) +__naked void value_ptr_known_scalar_6(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = %[__imm_0]; \ + r0 += r1; \ + r1 = %[__imm_1]; \ + r0 += r1; \ + r0 = *(u32*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_const(__imm_0, (3 + 1) * sizeof(int)), + __imm_const(__imm_1, 3 * sizeof(int)) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += N, value_ptr -= N known scalar") +__success __success_unpriv __retval(0x12345678) +__naked void value_ptr_n_known_scalar(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + w1 = 0x12345678; \ + *(u32*)(r0 + 0) = r1; \ + r0 += 2; \ + r1 = 2; \ + r0 -= r1; \ + r0 = *(u32*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 1") +__success __success_unpriv __retval(1) +__naked void unknown_scalar_value_ptr_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 2") +__success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) +__naked void unknown_scalar_value_ptr_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ 
+ r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 3") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) +__naked void unknown_scalar_value_ptr_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = -1; \ + r0 += r1; \ + r1 = 1; \ + r0 += r1; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 4") +__failure __msg("R1 max value is outside of the allowed memory range") +__msg_unpriv("R1 pointer arithmetic of map value goes out of range") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void unknown_scalar_value_ptr_4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 19; \ + r0 += r1; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += unknown scalar, 1") +__success __success_unpriv __retval(1) +__naked void value_ptr_unknown_scalar_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += unknown scalar, 2") +__success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) +__naked void value_ptr_unknown_scalar_2_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r0 += r1; \ + r0 = *(u32*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += unknown scalar, 3") +__success __success_unpriv __retval(1) +__naked void value_ptr_unknown_scalar_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r0 + 0); \ + r2 = *(u64*)(r0 + 8); \ + r3 = *(u64*)(r0 + 16); \ + r1 &= 0xf; \ + r3 &= 1; \ + r3 |= 1; \ + if r2 > r3 goto l0_%=; \ + r0 += r3; \ + r0 = *(u8*)(r0 + 0); \ + r0 = 1; \ +l1_%=: exit; \ +l0_%=: r0 = 2; \ + goto l1_%=; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += value_ptr") +__failure __msg("R0 pointer 
+= pointer prohibited") +__failure_unpriv +__naked void access_value_ptr_value_ptr_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 += r0; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar -= value_ptr") +__failure __msg("R1 tried to subtract pointer from scalar") +__failure_unpriv +__naked void access_known_scalar_value_ptr_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r1 -= r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar") +__failure __msg("R0 min value is outside of the allowed memory range") +__failure_unpriv +__naked void access_value_ptr_known_scalar(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r0 -= r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar, 2") +__success __success_unpriv __retval(1) +__naked void value_ptr_known_scalar_2_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 6; \ + r2 = 4; \ + r0 += r1; \ + r0 -= r2; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar -= value_ptr") +__failure __msg("R1 tried to subtract pointer from scalar") +__failure_unpriv +__naked void access_unknown_scalar_value_ptr(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r1 -= r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= unknown scalar") +__failure __msg("R0 min value is negative") +__failure_unpriv +__naked void access_value_ptr_unknown_scalar(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r0 -= r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= unknown scalar, 2") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(1) +__naked void value_ptr_unknown_scalar_2_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = 
%[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r1 |= 0x7; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0x7; \ + r0 -= r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= value_ptr") +__failure __msg("R0 invalid mem access 'scalar'") +__msg_unpriv("R0 pointer -= pointer prohibited") +__naked void access_value_ptr_value_ptr_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 -= r0; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: trying to leak tainted dst reg") +__failure __msg("math between map_value pointer and 4294967295 is not allowed") +__failure_unpriv +__naked void to_leak_tainted_dst_reg(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r2 = r0; \ + w1 = 0xFFFFFFFF; \ + w1 = w1; \ + r2 -= r1; \ + *(u64*)(r0 + 0) = r2; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("tc") +__description("32bit pkt_ptr -= scalar") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void _32bit_pkt_ptr_scalar(void) +{ + asm volatile (" \ + r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data]); \ + r6 = r7; \ + r6 += 40; \ + if r6 > r8 goto l0_%=; \ + w4 = w7; \ + w6 -= w4; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("32bit scalar -= pkt_ptr") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void _32bit_scalar_pkt_ptr(void) +{ + asm volatile (" \ + r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data]); \ + r6 = r7; \ + r6 += 40; \ + if r6 > r8 goto l0_%=; \ + w4 = w6; \ + w4 -= w7; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_var_off.c b/tools/testing/selftests/bpf/progs/verifier_var_off.c new file mode 100644 index 000000000000..83a90afba785 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_var_off.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/var_off.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("lwt_in") +__description("variable-offset ctx access") +__failure __msg("variable ctx access var_off=(0x0; 0x4)") +__naked void variable_offset_ctx_access(void) +{ + asm volatile (" \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned */ \ + r2 &= 4; \ + 
/* add it to skb. We now have either &skb->len or\ + * &skb->pkt_type, but we don't know which \ + */ \ + r1 += r2; \ + /* dereference it */ \ + r0 = *(u32*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/skb") +__description("variable-offset stack read, priv vs unpriv") +__success __failure_unpriv +__msg_unpriv("R2 variable stack access prohibited for !root") +__retval(0) +__naked void stack_read_priv_vs_unpriv(void) +{ + asm volatile (" \ + /* Fill the top 8 bytes of the stack */ \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned */ \ + r2 &= 4; \ + r2 -= 8; \ + /* add it to fp. We now have either fp-4 or fp-8, but\ + * we don't know which \ + */ \ + r2 += r10; \ + /* dereference it for a stack read */ \ + r0 = *(u32*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("lwt_in") +__description("variable-offset stack read, uninitialized") +__failure __msg("invalid variable-offset read from stack R2") +__naked void variable_offset_stack_read_uninitialized(void) +{ + asm volatile (" \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned */ \ + r2 &= 4; \ + r2 -= 8; \ + /* add it to fp. We now have either fp-4 or fp-8, but\ + * we don't know which \ + */ \ + r2 += r10; \ + /* dereference it for a stack read */ \ + r0 = *(u32*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("variable-offset stack write, priv vs unpriv") +__success __failure_unpriv +/* Variable stack access is rejected for unprivileged. + */ +__msg_unpriv("R2 variable stack access prohibited for !root") +__retval(0) +__naked void stack_write_priv_vs_unpriv(void) +{ + asm volatile (" \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 8-byte aligned */ \ + r2 &= 8; \ + r2 -= 16; \ + /* Add it to fp. We now have either fp-8 or fp-16, but\ + * we don't know which \ + */ \ + r2 += r10; \ + /* Dereference it for a stack write */ \ + r0 = 0; \ + *(u64*)(r2 + 0) = r0; \ + /* Now read from the address we just wrote. This shows\ + * that, after a variable-offset write, a privileged\ + * program can read the slots that were in the range of\ + * that write (even if the verifier doesn't actually know\ + * if the slot being read was really written to or not).\ + */ \ + r3 = *(u64*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("variable-offset stack write clobbers spilled regs") +__failure +/* In the privileged case, dereferencing a spilled-and-then-filled + * register is rejected because the previous variable-offset stack + * write might have overwritten the spilled pointer (i.e. we lose track + * of the spilled register when we analyze the write). + */ +__msg("R2 invalid mem access 'scalar'") +__failure_unpriv +/* The unprivileged case is not too interesting; variable + * stack access is rejected. + */ +__msg_unpriv("R2 variable stack access prohibited for !root") +__naked void stack_write_clobbers_spilled_regs(void) +{ + asm volatile (" \ + /* Dummy instruction; needed because we need to patch the next one\ + * and we can't patch the first instruction. \ + */ \ + r6 = 0; \ + /* Make R0 a map ptr */ \ + r0 = %[map_hash_8b] ll; \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 8-byte aligned */ \ + r2 &= 8; \ + r2 -= 16; \ + /* Add it to fp. We now have either fp-8 or fp-16, but\ + * we don't know which. 
\ + */ \ + r2 += r10; \ + /* Spill R0(map ptr) into stack */ \ + *(u64*)(r10 - 8) = r0; \ + /* Dereference the unknown value for a stack write */\ + r0 = 0; \ + *(u64*)(r2 + 0) = r0; \ + /* Fill the register back into R2 */ \ + r2 = *(u64*)(r10 - 8); \ + /* Try to dereference R2 for a memory load */ \ + r0 = *(u64*)(r2 + 8); \ + exit; \ +" : + : __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("sockops") +__description("indirect variable-offset stack access, unbounded") +__failure __msg("invalid unbounded variable-offset indirect access to stack R4") +__naked void variable_offset_stack_access_unbounded(void) +{ + asm volatile (" \ + r2 = 6; \ + r3 = 28; \ + /* Fill the top 16 bytes of the stack. */ \ + r4 = 0; \ + *(u64*)(r10 - 16) = r4; \ + r4 = 0; \ + *(u64*)(r10 - 8) = r4; \ + /* Get an unknown value. */ \ + r4 = *(u64*)(r1 + %[bpf_sock_ops_bytes_received]);\ + /* Check the lower bound but don't check the upper one. */\ + if r4 s< 0 goto l0_%=; \ + /* Point the lower bound to initialized stack. Offset is now in range\ + * from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded.\ + */ \ + r4 -= 16; \ + r4 += r10; \ + r5 = 8; \ + /* Dereference it indirectly. */ \ + call %[bpf_getsockopt]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_getsockopt), + __imm_const(bpf_sock_ops_bytes_received, offsetof(struct bpf_sock_ops, bytes_received)) + : __clobber_all); +} + +SEC("lwt_in") +__description("indirect variable-offset stack access, max out of bound") +__failure __msg("invalid variable-offset indirect access to stack R2") +__naked void access_max_out_of_bound(void) +{ + asm volatile (" \ + /* Fill the top 8 bytes of the stack */ \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned */ \ + r2 &= 4; \ + r2 -= 8; \ + /* add it to fp. We now have either fp-4 or fp-8, but\ + * we don't know which \ + */ \ + r2 += r10; \ + /* dereference it indirectly */ \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("lwt_in") +__description("indirect variable-offset stack access, min out of bound") +__failure __msg("invalid variable-offset indirect access to stack R2") +__naked void access_min_out_of_bound(void) +{ + asm volatile (" \ + /* Fill the top 8 bytes of the stack */ \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned */ \ + r2 &= 4; \ + r2 -= 516; \ + /* add it to fp. We now have either fp-516 or fp-512, but\ + * we don't know which \ + */ \ + r2 += r10; \ + /* dereference it indirectly */ \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("lwt_in") +__description("indirect variable-offset stack access, min_off < min_initialized") +__failure __msg("invalid indirect read from stack R2 var_off") +__naked void access_min_off_min_initialized(void) +{ + asm volatile (" \ + /* Fill only the top 8 bytes of the stack. */ \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned. */ \ + r2 &= 4; \ + r2 -= 16; \ + /* Add it to fp. We now have either fp-12 or fp-16, but we don't know\ + * which. fp-16 size 8 is partially uninitialized stack.\ + */ \ + r2 += r10; \ + /* Dereference it indirectly. 
*/ \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("indirect variable-offset stack access, priv vs unpriv") +__success __failure_unpriv +__msg_unpriv("R2 variable stack access prohibited for !root") +__retval(0) +__naked void stack_access_priv_vs_unpriv(void) +{ + asm volatile (" \ + /* Fill the top 16 bytes of the stack. */ \ + r2 = 0; \ + *(u64*)(r10 - 16) = r2; \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + /* Get an unknown value. */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned. */ \ + r2 &= 4; \ + r2 -= 16; \ + /* Add it to fp. We now have either fp-12 or fp-16, we don't know\ + * which, but either way it points to initialized stack.\ + */ \ + r2 += r10; \ + /* Dereference it indirectly. */ \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("lwt_in") +__description("indirect variable-offset stack access, ok") +__success __retval(0) +__naked void variable_offset_stack_access_ok(void) +{ + asm volatile (" \ + /* Fill the top 16 bytes of the stack. */ \ + r2 = 0; \ + *(u64*)(r10 - 16) = r2; \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + /* Get an unknown value. */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned. */ \ + r2 &= 4; \ + r2 -= 16; \ + /* Add it to fp. We now have either fp-12 or fp-16, we don't know\ + * which, but either way it points to initialized stack.\ + */ \ + r2 += r10; \ + /* Dereference it indirectly. */ \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_xadd.c b/tools/testing/selftests/bpf/progs/verifier_xadd.c new file mode 100644 index 000000000000..05a0a55adb45 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_xadd.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/xadd.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("tc") +__description("xadd/w check unaligned stack") +__failure __msg("misaligned stack access off") +__naked void xadd_w_check_unaligned_stack(void) +{ + asm volatile (" \ + r0 = 1; \ + *(u64*)(r10 - 8) = r0; \ + lock *(u32 *)(r10 - 7) += w0; \ + r0 = *(u64*)(r10 - 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("xadd/w check unaligned map") +__failure __msg("misaligned value access off") +__naked void xadd_w_check_unaligned_map(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = 1; \ + lock *(u32 *)(r0 + 3) += w1; \ + r0 = *(u32*)(r0 + 3); \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("xdp") +__description("xadd/w check unaligned pkt") +__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void xadd_w_check_unaligned_pkt(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + 
%[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + r0 = 99; \ + goto l1_%=; \ +l0_%=: r0 = 1; \ + r1 = 0; \ + *(u32*)(r2 + 0) = r1; \ + r1 = 0; \ + *(u32*)(r2 + 3) = r1; \ + lock *(u32 *)(r2 + 1) += w0; \ + lock *(u32 *)(r2 + 2) += w0; \ + r0 = *(u32*)(r2 + 1); \ +l1_%=: exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("xadd/w check whether src/dst got mangled, 1") +__success __retval(3) +__naked void src_dst_got_mangled_1(void) +{ + asm volatile (" \ + r0 = 1; \ + r6 = r0; \ + r7 = r10; \ + *(u64*)(r10 - 8) = r0; \ + lock *(u64 *)(r10 - 8) += r0; \ + lock *(u64 *)(r10 - 8) += r0; \ + if r6 != r0 goto l0_%=; \ + if r7 != r10 goto l0_%=; \ + r0 = *(u64*)(r10 - 8); \ + exit; \ +l0_%=: r0 = 42; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("xadd/w check whether src/dst got mangled, 2") +__success __retval(3) +__naked void src_dst_got_mangled_2(void) +{ + asm volatile (" \ + r0 = 1; \ + r6 = r0; \ + r7 = r10; \ + *(u32*)(r10 - 8) = r0; \ + lock *(u32 *)(r10 - 8) += w0; \ + lock *(u32 *)(r10 - 8) += w0; \ + if r6 != r0 goto l0_%=; \ + if r7 != r10 goto l0_%=; \ + r0 = *(u32*)(r10 - 8); \ + exit; \ +l0_%=: r0 = 42; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_xdp.c b/tools/testing/selftests/bpf/progs/verifier_xdp.c new file mode 100644 index 000000000000..50768ed179b3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_xdp.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/xdp.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("xdp") +__description("XDP, using ifindex from netdev") +__success __retval(1) +__naked void xdp_using_ifindex_from_netdev(void) +{ + asm volatile (" \ + r0 = 0; \ + r2 = *(u32*)(r1 + %[xdp_md_ingress_ifindex]); \ + if r2 < 1 goto l0_%=; \ + r0 = 1; \ +l0_%=: exit; \ +" : + : __imm_const(xdp_md_ingress_ifindex, offsetof(struct xdp_md, ingress_ifindex)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c new file mode 100644 index 000000000000..df2dfd1b15d1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c @@ -0,0 +1,1722 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("xdp") +__description("XDP pkt read, pkt_end mangling, bad access 1") +__failure __msg("R3 pointer arithmetic on pkt_end") +__naked void end_mangling_bad_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + r3 += 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end mangling, bad access 2") +__failure __msg("R3 pointer arithmetic on pkt_end") +__naked void end_mangling_bad_access_2(void) +{ + 
asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + r3 -= 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' > pkt_end, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_corner_case_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' > pkt_end, bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_1_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 4); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' > pkt_end, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_2_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' > pkt_end, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 9; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 9); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end > pkt_data', good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_pkt_data_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > 
r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end > pkt_data', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 6; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 6); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end > pkt_data', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end > pkt_data', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end > pkt_data', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' < pkt_end, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_pkt_end_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 6; \ + if r1 < r3 goto l0_%=; \ + 
goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 6); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' < pkt_end, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_2_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' < pkt_end, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_corner_case_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' < pkt_end, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end < pkt_data', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end < pkt_data', bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_1_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 4); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end < pkt_data', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ + r0 = 0; \ + exit; \ +" : + : 
__imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end < pkt_data', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 9; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 9); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end < pkt_data', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' >= pkt_end, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_pkt_end_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u32*)(r1 - 5); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_5(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 6; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 6); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' >= pkt_end, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_2_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 >= r3 goto l0_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' >= pkt_end, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_corner_case_good_access_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) 
+ : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_5(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end >= pkt_data', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end >= pkt_data', bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_1_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 4); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end >= pkt_data', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 >= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end >= pkt_data', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_6(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 9; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 9); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_6(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + 
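
The comparison-based tests above and below all encode, at the instruction level, the bounds check that a C-language XDP program expresses directly: advance a copy of the data pointer past the bytes to be read, compare it against data_end, and dereference only on the proven in-bounds path. Below is a minimal C sketch of that pattern; the program name and the fixed 8-byte read are illustrative assumptions, not part of the converted tests.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical sketch: the C counterpart of "r1 = data; r1 += 8;
 * if r1 > data_end goto out;" followed by an 8-byte read at r1 - 8.
 */
SEC("xdp")
int xdp_bounds_sketch(struct xdp_md *ctx)
{
        void *data = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;

        /* The verifier accepts the read below only because this
         * comparison proves [data, data + 8) lies inside the packet.
         */
        if (data + 8 > data_end)
                return XDP_PASS;

        return *(__u64 *)data ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";

The "corner case" variants probe exactly this boundary: with r1 = data + 8, the 8-byte read at r1 - 8 is the largest access the comparison can justify, while the "-1" variants show that proving only data + 7 <= data_end is not enough and is rejected.
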
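The pkt_meta' tests further below repeat the same proof between data_meta and data instead of data and data_end. A hedged sketch of the C analogue follows; it assumes the metadata area was grown beforehand with bpf_xdp_adjust_meta(), since data_meta == data on entry otherwise and the check would always fail.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical sketch only: reads 8 bytes of XDP metadata after
 * proving [meta, meta + 8) sits in front of the packet data.
 */
SEC("xdp")
int xdp_meta_bounds_sketch(struct xdp_md *ctx)
{
        void *meta = (void *)(long)ctx->data_meta;
        void *data = (void *)(long)ctx->data;

        /* Mirrors "r2 = data_meta; r3 = data; r1 = r2; r1 += 8;
         * if r1 > r3 goto out;" from the tests below: the read is
         * legal only if the metadata area is at least 8 bytes.
         */
        if (meta + 8 > data)
                return XDP_PASS;

        return *(__u64 *)meta ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";
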
+SEC("xdp") +__description("XDP pkt read, pkt_data' <= pkt_end, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_corner_case_good_access_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' <= pkt_end, bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_1_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 4); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' <= pkt_end, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_2_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 <= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_7(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 9; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 9); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_7(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end <= pkt_data', good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_pkt_data_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u32*)(r1 - 5); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end 
<= pkt_data', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_8(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 6; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 6); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end <= pkt_data', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 <= r1 goto l0_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end <= pkt_data', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end <= pkt_data', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_8(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' > pkt_data, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_5(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' > pkt_data, bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_1_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 4); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' > pkt_data, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) 
+__naked void pkt_data_bad_access_2_5(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_9(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 9; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 9); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_9(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data > pkt_meta', good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_pkt_meta_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_10(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 6; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 6); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data > pkt_meta', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_meta_bad_access_2_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data > pkt_meta', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void meta_corner_case_good_access_1(void) +{ + asm 
volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data > pkt_meta', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_10(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' < pkt_data, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void meta_pkt_data_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_11(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 6; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 6); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' < pkt_data, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_6(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' < pkt_data, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_6(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_11(void) +{ + asm volatile (" \ + r2 = 
*(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data < pkt_meta', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void meta_corner_case_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data < pkt_meta', bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_meta_bad_access_1_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 4); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data < pkt_meta', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_meta_bad_access_2_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data < pkt_meta', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_12(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 9; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 9); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_12(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' >= pkt_data, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void meta_pkt_data_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 
8;					\
+	if r1 >= r3 goto l0_%=;		\
+	r0 = *(u32*)(r1 - 5);		\
+l0_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_13(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 6;			\
+	if r1 >= r3 goto l0_%=;		\
+	r0 = *(u64*)(r1 - 6);		\
+l0_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' >= pkt_data, bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_2_7(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 8;			\
+	if r1 >= r3 goto l0_%=;		\
+l0_%=:	r0 = *(u32*)(r1 - 5);		\
+	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' >= pkt_data, corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_corner_case_good_access_7(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 7;			\
+	if r1 >= r3 goto l0_%=;		\
+	r0 = *(u64*)(r1 - 7);		\
+l0_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_13(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 8;			\
+	if r1 >= r3 goto l0_%=;		\
+	r0 = *(u64*)(r1 - 8);		\
+l0_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data >= pkt_meta', corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void meta_corner_case_good_access_3(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 8;			\
+	if r3 >= r1 goto l0_%=;		\
+	goto l1_%=;			\
+l0_%=:	r0 = *(u64*)(r1 - 8);		\
+l1_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data >= pkt_meta', bad access 1")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_meta_bad_access_1_2(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 8;			\
+	if r3 >= r1 goto l0_%=;		\
+	goto l1_%=;			\
+l0_%=:	r0 = *(u64*)(r1 - 4);		\
+l1_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data >= pkt_meta', bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_meta_bad_access_2_3(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 8;			\
+	if r3 >= r1 goto l0_%=;		\
+	r0 = *(u64*)(r1 - 8);		\
+l0_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_14(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 9;			\
+	if r3 >= r1 goto l0_%=;		\
+	goto l1_%=;			\
+l0_%=:	r0 = *(u64*)(r1 - 9);		\
+l1_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_14(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 7;			\
+	if r3 >= r1 goto l0_%=;		\
+	goto l1_%=;			\
+l0_%=:	r0 = *(u64*)(r1 - 7);		\
+l1_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' <= pkt_data, corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_corner_case_good_access_8(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 8;			\
+	if r1 <= r3 goto l0_%=;		\
+	goto l1_%=;			\
+l0_%=:	r0 = *(u64*)(r1 - 8);		\
+l1_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' <= pkt_data, bad access 1")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_1_4(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 8;			\
+	if r1 <= r3 goto l0_%=;		\
+	goto l1_%=;			\
+l0_%=:	r0 = *(u64*)(r1 - 4);		\
+l1_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' <= pkt_data, bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_data_bad_access_2_8(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 8;			\
+	if r1 <= r3 goto l0_%=;		\
+	r0 = *(u64*)(r1 - 8);		\
+l0_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_15(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 9;			\
+	if r1 <= r3 goto l0_%=;		\
+	goto l1_%=;			\
+l0_%=:	r0 = *(u64*)(r1 - 9);		\
+l1_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_15(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 7;			\
+	if r1 <= r3 goto l0_%=;		\
+	goto l1_%=;			\
+l0_%=:	r0 = *(u64*)(r1 - 7);		\
+l1_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data <= pkt_meta', good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void data_pkt_meta_good_access_2(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 8;			\
+	if r3 <= r1 goto l0_%=;		\
+	r0 = *(u32*)(r1 - 5);		\
+l0_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_bad_access_16(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 6;			\
+	if r3 <= r1 goto l0_%=;		\
+	r0 = *(u64*)(r1 - 6);		\
+l0_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data <= pkt_meta', bad access 2")
+__failure __msg("R1 offset is outside of the packet")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void pkt_meta_bad_access_2_4(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 8;			\
+	if r3 <= r1 goto l0_%=;		\
+l0_%=:	r0 = *(u32*)(r1 - 5);		\
+	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data <= pkt_meta', corner case, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void meta_corner_case_good_access_4(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 7;			\
+	if r3 <= r1 goto l0_%=;		\
+	r0 = *(u64*)(r1 - 7);		\
+l0_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+SEC("xdp")
+__description("XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access")
+__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
+__naked void corner_case_1_good_access_16(void)
+{
+	asm volatile ("			\
+	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
+	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
+	r1 = r2;			\
+	r1 += 8;			\
+	if r3 <= r1 goto l0_%=;		\
+	r0 = *(u64*)(r1 - 8);		\
+l0_%=:	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+	: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_features.c b/tools/testing/selftests/bpf/progs/xdp_features.c
index 87c247d56f72..67424084a38a 100644
--- a/tools/testing/selftests/bpf/progs/xdp_features.c
+++ b/tools/testing/selftests/bpf/progs/xdp_features.c
@@ -70,7 +70,6 @@ xdp_process_echo_packet(struct xdp_md *xdp, bool dut)
 	struct tlv_hdr *tlv;
 	struct udphdr *uh;
 	__be16 port;
-	__u8 *cmd;
 
 	if (eh + 1 > (struct ethhdr *)data_end)
 		return -EINVAL;
diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c
index 4ad73847b8a5..54cf1765118b 100644
--- a/tools/testing/selftests/bpf/progs/xdping_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdping_kern.c
@@ -89,7 +89,6 @@ static __always_inline int icmp_check(struct xdp_md *ctx, int type)
 SEC("xdp")
 int xdping_client(struct xdp_md *ctx)
 {
-	void *data_end = (void *)(long)ctx->data_end;
 	void *data = (void *)(long)ctx->data;
 	struct pinginfo *pinginfo = NULL;
 	struct ethhdr *eth = data;
@@ -153,7 +152,6 @@ int xdping_client(struct xdp_md *ctx)
 SEC("xdp")
 int xdping_server(struct xdp_md *ctx)
 {
-	void *data_end = (void *)(long)ctx->data_end;
 	void *data = (void *)(long)ctx->data;
 	struct ethhdr *eth = data;
 	struct icmphdr *icmph;
diff --git a/tools/testing/selftests/bpf/progs/xdpwall.c b/tools/testing/selftests/bpf/progs/xdpwall.c
index 7a891a0c3a39..c2dd0c28237a 100644
--- a/tools/testing/selftests/bpf/progs/xdpwall.c
+++ b/tools/testing/selftests/bpf/progs/xdpwall.c
@@ -321,7 +321,6 @@ int edgewall(struct xdp_md *ctx)
 	void *data = (void *)(long)(ctx->data);
 	struct fw_match_info match_info = {};
 	struct pkt_info info = {};
-	__u8 parse_err = NO_ERR;
 	void *transport_hdr;
 	struct ethhdr *eth;
 	bool filter_res;
diff --git a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
index 744a01d0e57d..a630c95c7471 100644
--- a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
+++ b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
@@ -3,6 +3,7 @@
 
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
+#include "xsk_xdp_metadata.h"
 
 struct {
 	__uint(type, BPF_MAP_TYPE_XSKMAP);
@@ -12,6 +13,7 @@ struct {
 } xsk SEC(".maps");
 
 static unsigned int idx;
+int count = 0;
 
 SEC("xdp") int xsk_def_prog(struct xdp_md *xdp)
 {
@@ -27,4 +29,27 @@ SEC("xdp") int xsk_xdp_drop(struct xdp_md *xdp)
 	return bpf_redirect_map(&xsk, 0, XDP_DROP);
 }
 
+SEC("xdp") int xsk_xdp_populate_metadata(struct xdp_md *xdp)
+{
+	void *data, *data_meta;
+	struct xdp_info *meta;
+	int err;
+
+	/* Reserve enough for all custom metadata. */
+	err = bpf_xdp_adjust_meta(xdp, -(int)sizeof(struct xdp_info));
+	if (err)
+		return XDP_DROP;
+
+	data = (void *)(long)xdp->data;
+	data_meta = (void *)(long)xdp->data_meta;
+
+	if (data_meta + sizeof(struct xdp_info) > data)
+		return XDP_DROP;
+
+	meta = data_meta;
+	meta->count = count++;
+
+	return bpf_redirect_map(&xsk, 0, XDP_DROP);
+}
+
 char _license[] SEC("license") = "GPL";
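For reference, a minimal sketch of how the metadata written by xsk_xdp_populate_metadata() could be consumed: bpf_xdp_adjust_meta() grows the metadata area so that it ends exactly where the packet payload begins, so an AF_XDP receiver can read it immediately in front of each RX frame. The struct xdp_info layout below (a single __u64 count) is assumed from xsk_xdp_metadata.h, which this diff does not show, and check_metadata()/umem_area are illustrative names rather than part of the selftests.

#include <linux/types.h>

/* Assumed layout of the custom metadata; see xsk_xdp_metadata.h. */
struct xdp_info {
	__u64 count;
};

/* Hypothetical user-space check: addr is an RX descriptor address
 * within the mapped umem; the metadata sits sizeof(struct xdp_info)
 * bytes in front of the payload that addr points at.
 */
static int check_metadata(void *umem_area, __u64 addr, __u64 expected)
{
	struct xdp_info *meta;

	meta = (struct xdp_info *)((char *)umem_area + addr) - 1;
	return meta->count == expected ? 0 : -1;
}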