author     Jakub Kicinski <kuba@kernel.org>  2022-05-24 02:07:13 +0300
committer  Jakub Kicinski <kuba@kernel.org>  2022-05-24 02:07:14 +0300
commit     1ef0736c0711e2633a59b540931406de626f2836 (patch)
tree       3c230f459eda15a2263bb1fb9ac99eb0f79d802b /include
parent     9fa87dd23251574a29cf948fd16cf39075762f3e (diff)
parent     608b638ebf368f18431f47bbbd0d93828cbbdf83 (diff)
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2022-05-23

We've added 113 non-merge commits during the last 26 day(s) which contain
a total of 121 files changed, 7425 insertions(+), 1586 deletions(-).

The main changes are:

1) Speed up symbol resolution for kprobes multi-link attachments, from Jiri Olsa.

2) Add BPF dynamic pointer infrastructure e.g. to allow for dynamically sized
   ringbuf reservations without extra memory copies, from Joanne Koong.

3) Big batch of libbpf improvements towards libbpf 1.0 release, from Andrii Nakryiko.

4) Add BPF link iterator to traverse links via seq_file ops, from Dmitrii Dolgov.

5) Add source IP address to BPF tunnel key infrastructure, from Kaixi Fan.

6) Refine unprivileged BPF to disable only object-creating commands, from Alan Maguire.

7) Fix JIT blinding of ld_imm64 when they point to subprogs, from Alexei Starovoitov.

8) Add BPF access to mptcp_sock structures and their meta data, from Geliang Tang.

9) Add new BPF helper for access to remote CPU's BPF map elements, from Feng Zhou.

10) Allow attaching 64-bit cookie to BPF link of fentry/fexit/fmod_ret, from Kui-Feng Lee.

11) Follow-ups to typed pointer support in BPF maps, from Kumar Kartikeya Dwivedi.

12) Add busy-poll test cases to the XSK selftest suite, from Magnus Karlsson.

13) Improvements in BPF selftest test_progs subtest output, from Mykola Lysenko.

14) Fill bpf_prog_pack allocator areas with illegal instructions, from Song Liu.

15) Add generic batch operations for BPF map-in-map cases, from Takshak Chahande.

16) Make bpf_jit_enable more user friendly when permanently on 1, from Tiezhu Yang.

17) Fix an array overflow in bpf_trampoline_get_progs(), from Yuntao Wang.
====================

Link: https://lore.kernel.org/r/20220523223805.27931-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
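
To make item 2) concrete, here is a minimal sketch of the new dynptr ringbuf
flow (bpf_ringbuf_reserve_dynptr() / bpf_dynptr_data() /
bpf_ringbuf_submit_dynptr()); the map name, attach point, and event layout
are illustrative assumptions, not part of this series:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} rb SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_execve")
int log_execve(void *ctx)
{
	struct bpf_dynptr ptr;
	__u64 *ts;

	/* Reserve 8 bytes; on failure the dynptr is marked invalid and the
	 * data/submit calls below degrade to no-ops. */
	bpf_ringbuf_reserve_dynptr(&rb, sizeof(*ts), 0, &ptr);

	/* Statically sized slice into the reservation. */
	ts = bpf_dynptr_data(&ptr, 0, sizeof(*ts));
	if (ts)
		*ts = bpf_ktime_get_ns();

	/* Always pair the reservation with submit/discard, even when it
	 * failed -- the verifier enforces this, per the helper docs below. */
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";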
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bpf.h           122
-rw-r--r--  include/linux/bpf_types.h       1
-rw-r--r--  include/linux/bpf_verifier.h   20
-rw-r--r--  include/linux/btf_ids.h         3
-rw-r--r--  include/linux/ftrace.h          6
-rw-r--r--  include/linux/kallsyms.h        7
-rw-r--r--  include/net/mptcp.h             6
-rw-r--r--  include/uapi/linux/bpf.h      113
8 files changed, 251 insertions(+), 27 deletions(-)
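
Similarly, item 9)'s cross-CPU lookup helper can be exercised with a short
sketch (the map shape, attach point, and fixed CPU bound are illustrative
assumptions):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_counters(void *ctx)
{
	__u32 key = 0, cpu;
	__u64 sum = 0;

	/* Walk a fixed number of CPUs; the helper returns NULL for an
	 * invalid cpu or a missing entry. */
	for (cpu = 0; cpu < 8; cpu++) {
		__u64 *val = bpf_map_lookup_percpu_elem(&counters, &key, cpu);

		if (val)
			sum += *val;
	}
	bpf_printk("sum across CPUs: %llu", sum);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";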
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index be94833d390a..a7080c86fa76 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -89,6 +89,7 @@ struct bpf_map_ops {
int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
int (*map_pop_elem)(struct bpf_map *map, void *value);
int (*map_peek_elem)(struct bpf_map *map, void *value);
+ void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
@@ -389,9 +390,20 @@ enum bpf_type_flag {
*/
PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS),
- __BPF_TYPE_LAST_FLAG = PTR_UNTRUSTED,
+ MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to memory local to the bpf program. */
+ DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to a ringbuf record. */
+ DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS),
+
+ __BPF_TYPE_FLAG_MAX,
+ __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
};
+#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF)
+
/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
@@ -408,16 +420,11 @@ enum bpf_arg_type {
ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
- ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
- /* the following constraints used to prototype bpf_memcmp() and other
- * functions that access data on eBPF program stack
+ /* Used to prototype bpf_memcmp() and other functions that access data
+ * on eBPF program stack
*/
ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
- ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
- * helper function must fill all bytes or clear
- * them in error case.
- */
ARG_CONST_SIZE, /* number of bytes accessed from memory */
ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
@@ -439,6 +446,7 @@ enum bpf_arg_type {
ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
ARG_PTR_TO_KPTR, /* pointer to referenced kptr */
+ ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
__BPF_ARG_TYPE_MAX,
/* Extended arg_types. */
@@ -449,6 +457,10 @@ enum bpf_arg_type {
ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
+ /* pointer to memory does not need to be initialized, helper function must fill
+ * all bytes or clear them in error case.
+ */
+ ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM,
/* This must be the last entry. Its purpose is to ensure the enum is
* wide enough to hold the higher bits reserved for bpf_type_flag.
@@ -476,6 +488,7 @@ enum bpf_return_type {
RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
+ RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM,
RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
/* This must be the last entry. Its purpose is to ensure the enum is
@@ -721,15 +734,17 @@ struct btf_func_model {
#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
- * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
+ * bytes on x86.
*/
-#define BPF_MAX_TRAMP_PROGS 38
+#define BPF_MAX_TRAMP_LINKS 38
-struct bpf_tramp_progs {
- struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
- int nr_progs;
+struct bpf_tramp_links {
+ struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
+ int nr_links;
};
+struct bpf_tramp_run_ctx;
+
/* Different use cases for BPF trampoline:
* 1. replace nop at the function entry (kprobe equivalent)
* flags = BPF_TRAMP_F_RESTORE_REGS
@@ -753,13 +768,14 @@ struct bpf_tramp_progs {
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
- struct bpf_tramp_progs *tprogs,
+ struct bpf_tramp_links *tlinks,
void *orig_call);
/* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
@@ -852,9 +868,10 @@ static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
{
return bpf_func(ctx, insnsi);
}
+
#ifdef CONFIG_BPF_JIT
-int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
-int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
@@ -905,12 +922,12 @@ int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
-static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
+static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
struct bpf_trampoline *tr)
{
return -ENOTSUPP;
}
-static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
+static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
struct bpf_trampoline *tr)
{
return -ENOTSUPP;
@@ -1009,7 +1026,6 @@ struct bpf_prog_aux {
bool tail_call_reachable;
bool xdp_has_frags;
bool use_bpf_prog_pack;
- struct hlist_node tramp_hlist;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
/* function name for valid attach_btf_id */
@@ -1096,6 +1112,19 @@ struct bpf_link_ops {
struct bpf_link_info *info);
};
+struct bpf_tramp_link {
+ struct bpf_link link;
+ struct hlist_node tramp_hlist;
+ u64 cookie;
+};
+
+struct bpf_tracing_link {
+ struct bpf_tramp_link link;
+ enum bpf_attach_type attach_type;
+ struct bpf_trampoline *trampoline;
+ struct bpf_prog *tgt_prog;
+};
+
struct bpf_link_primer {
struct bpf_link *link;
struct file *file;
@@ -1133,8 +1162,8 @@ bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
void *value);
-int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
- struct bpf_prog *prog,
+int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
+ struct bpf_tramp_link *link,
const struct btf_func_model *model,
void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
@@ -1339,6 +1368,12 @@ struct bpf_trace_run_ctx {
u64 bpf_cookie;
};
+struct bpf_tramp_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+ struct bpf_run_ctx *saved_run_ctx;
+};
+
static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
struct bpf_run_ctx *old_ctx = NULL;
@@ -1544,6 +1579,7 @@ void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
+struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);
@@ -2161,6 +2197,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
@@ -2198,12 +2235,16 @@ extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
+extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
@@ -2223,6 +2264,7 @@ extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_strncmp_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
+extern const struct bpf_func_proto bpf_kptr_xchg_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -2336,6 +2378,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *addr1, void *addr2);
void *bpf_arch_text_copy(void *dst, void *src, size_t len);
+int bpf_arch_text_invalidate(void *dst, size_t len);
struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
@@ -2346,4 +2389,33 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);
+/* the implementation of the opaque uapi struct bpf_dynptr */
+struct bpf_dynptr_kern {
+ void *data;
+ /* Size represents the number of usable bytes of dynptr data.
+ * If for example the offset is at 4 for a local dynptr whose data is
+ * of type u64, the number of usable bytes is 4.
+ *
+ * The upper 8 bits are reserved. It is as follows:
+ * Bits 0 - 23 = size
+ * Bits 24 - 30 = dynptr type
+ * Bit 31 = whether dynptr is read-only
+ */
+ u32 size;
+ u32 offset;
+} __aligned(8);
+
+enum bpf_dynptr_type {
+ BPF_DYNPTR_TYPE_INVALID,
+ /* Points to memory that is local to the bpf program */
+ BPF_DYNPTR_TYPE_LOCAL,
+ /* Underlying data is a ringbuf record */
+ BPF_DYNPTR_TYPE_RINGBUF,
+};
+
+void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+ enum bpf_dynptr_type type, u32 offset, u32 size);
+void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
+int bpf_dynptr_check_size(u32 size);
+
#endif /* _LINUX_BPF_H */
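
The size/type/read-only packing documented in bpf_dynptr_kern above can be
decoded as in this sketch (the mask and accessor names are illustrative; the
kernel's own accessors may differ):

#include <stdbool.h>
#include <stdint.h>

#define DYNPTR_SIZE_MASK	0x00ffffffU	/* bits 0-23: size */
#define DYNPTR_TYPE_SHIFT	24		/* bits 24-30: dynptr type */
#define DYNPTR_RDONLY_BIT	(1U << 31)	/* bit 31: read-only flag */

static inline uint32_t dynptr_size(uint32_t size_field)
{
	return size_field & DYNPTR_SIZE_MASK;
}

static inline uint32_t dynptr_type(uint32_t size_field)
{
	/* Mask off the read-only bit before shifting out the 24 size bits. */
	return (size_field & ~DYNPTR_RDONLY_BIT) >> DYNPTR_TYPE_SHIFT;
}

static inline bool dynptr_rdonly(uint32_t size_field)
{
	return size_field & DYNPTR_RDONLY_BIT;
}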
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 3e24ad0c4b3c..2b9112b80171 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -141,3 +141,4 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
#endif
BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
+BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1f1e7f2ea967..e8439f6cbe57 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -72,6 +72,18 @@ struct bpf_reg_state {
u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+ /* For dynptr stack slots */
+ struct {
+ enum bpf_dynptr_type type;
+ /* A dynptr is 16 bytes so it takes up 2 stack slots.
+ * We need to track which slot is the first slot
+ * to protect against cases where the user may try to
+ * pass in an address starting at the second slot of the
+ * dynptr.
+ */
+ bool first_slot;
+ } dynptr;
+
/* Max size from any of the above. */
struct {
unsigned long raw1;
@@ -88,6 +100,8 @@ struct bpf_reg_state {
* for the purpose of tracking that it's freed.
* For PTR_TO_SOCKET this is used to share which pointers retain the
* same reference to the socket, to determine proper reference freeing.
+ * For stack slots that are dynptrs, this is used to track references to
+ * the dynptr to determine proper reference freeing.
*/
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
@@ -174,9 +188,15 @@ enum bpf_stack_slot_type {
STACK_SPILL, /* register spilled into stack */
STACK_MISC, /* BPF program wrote some data into this slot */
STACK_ZERO, /* BPF program wrote constant zero */
+ /* A dynptr is stored in this stack slot. The type of dynptr
+ * is stored in bpf_stack_state->spilled_ptr.dynptr.type
+ */
+ STACK_DYNPTR,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
+#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
struct bpf_stack_state {
struct bpf_reg_state spilled_ptr;
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index bc5d9cc34e4c..335a19092368 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -178,7 +178,8 @@ extern struct btf_id_set name;
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
- BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock)
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock)
enum {
#define BTF_SOCK_TYPE(name, str) name,
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 4816b7e11047..820500430eae 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -303,6 +303,8 @@ int unregister_ftrace_function(struct ftrace_ops *ops);
extern void ftrace_stub(unsigned long a0, unsigned long a1,
struct ftrace_ops *op, struct ftrace_regs *fregs);
+
+int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
* (un)register_ftrace_function must be a macro since the ops parameter
@@ -313,6 +315,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1,
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
+static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_FUNCTION_TRACER */
struct ftrace_func_entry {
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index ce1bd2fbf23e..ad39636e0c3f 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -65,11 +65,11 @@ static inline void *dereference_symbol_descriptor(void *ptr)
return ptr;
}
+#ifdef CONFIG_KALLSYMS
int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
unsigned long),
void *data);
-#ifdef CONFIG_KALLSYMS
/* Lookup the address for a symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name);
@@ -163,6 +163,11 @@ static inline bool kallsyms_show_value(const struct cred *cred)
return false;
}
+static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
+ unsigned long), void *data)
+{
+ return -EOPNOTSUPP;
+}
#endif /*CONFIG_KALLSYMS*/
static inline void print_ip_sym(const char *loglvl, unsigned long ip)
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index d4ec894ce67b..4d761ad530c9 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -284,4 +284,10 @@ static inline int mptcpv6_init(void) { return 0; }
static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
#endif
+#if defined(CONFIG_MPTCP) && defined(CONFIG_BPF_SYSCALL)
+struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk);
+#else
+static inline struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) { return NULL; }
+#endif
+
#endif /* __NET_MPTCP_H */
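
A minimal sketch of how a program might use the new MPTCP plumbing via
bpf_skc_to_mptcp_sock(), documented in the uapi diff below (the attach point
and printed message are illustrative assumptions; mptcp_sock comes from
vmlinux.h on a CONFIG_MPTCP kernel):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("sockops")
int check_mptcp(struct bpf_sock_ops *skops)
{
	struct bpf_sock *sk = skops->sk;
	struct mptcp_sock *msk;

	if (!sk)
		return 1;

	/* NULL if sk is not a subflow of an MPTCP connection. */
	msk = bpf_skc_to_mptcp_sock(sk);
	if (msk)
		bpf_printk("subflow belongs to an MPTCP connection");
	return 1;
}

char LICENSE[] SEC("license") = "GPL";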
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 444fe6f1cf35..f4009dbdf62d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1013,6 +1013,7 @@ enum bpf_link_type {
BPF_LINK_TYPE_XDP = 6,
BPF_LINK_TYPE_PERF_EVENT = 7,
BPF_LINK_TYPE_KPROBE_MULTI = 8,
+ BPF_LINK_TYPE_STRUCT_OPS = 9,
MAX_BPF_LINK_TYPE,
};
@@ -1489,6 +1490,15 @@ union bpf_attr {
__aligned_u64 addrs;
__aligned_u64 cookies;
} kprobe_multi;
+ struct {
+ /* this is overlaid with the target_btf_id above. */
+ __u32 target_btf_id;
+ /* black box user-provided value passed through
+ * to BPF program at the execution time and
+ * accessible through bpf_get_attach_cookie() BPF helper
+ */
+ __u64 cookie;
+ } tracing;
};
} link_create;
@@ -5154,6 +5164,91 @@ union bpf_attr {
* if not NULL, is a reference which must be released using its
* corresponding release function, or moved into a BPF map before
* program exit.
+ *
+ * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
+ * Description
+ * Perform a lookup in *percpu map* for an entry associated to
+ * *key* on *cpu*.
+ * Return
+ * Map value associated to *key* on *cpu*, or **NULL** if no entry
+ * was found or *cpu* is invalid.
+ *
+ * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ * Description
+ * Get a dynptr to local memory *data*.
+ *
+ * *data* must be a ptr to a map value.
+ * The maximum *size* supported is DYNPTR_MAX_SIZE.
+ * *flags* is currently unused.
+ * Return
+ * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
+ * -EINVAL if flags is not 0.
+ *
+ * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ * Description
+ * Reserve *size* bytes of payload in a ring buffer *ringbuf*
+ * through the dynptr interface. *flags* must be 0.
+ *
+ * Please note that a corresponding bpf_ringbuf_submit_dynptr or
+ * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
+ * reservation fails. This is enforced by the verifier.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ * Description
+ * Submit reserved ring buffer sample, pointed to by *data*,
+ * through the dynptr interface. This is a no-op if the dynptr is
+ * invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_submit'.
+ * Return
+ * Nothing. Always succeeds.
+ *
+ * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ * Description
+ * Discard reserved ring buffer sample through the dynptr
+ * interface. This is a no-op if the dynptr is invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_discard'.
+ * Return
+ * Nothing. Always succeeds.
+ *
+ * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset)
+ * Description
+ * Read *len* bytes from *src* into *dst*, starting from *offset*
+ * into *src*.
+ * Return
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *src*'s data, -EINVAL if *src* is an invalid dynptr.
+ *
+ * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len)
+ * Description
+ * Write *len* bytes from *src* into *dst*, starting from *offset*
+ * into *dst*.
+ * Return
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
+ * is a read-only dynptr.
+ *
+ * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
+ * Description
+ * Get a pointer to the underlying dynptr data.
+ *
+ * *len* must be a statically known value. The returned data slice
+ * is invalidated whenever the dynptr is invalidated.
+ * Return
+ * Pointer to the underlying dynptr data, NULL if the dynptr is
+ * read-only, if the dynptr is invalid, or if the offset and length
+ * is out of bounds.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5351,6 +5446,15 @@ union bpf_attr {
FN(skb_set_tstamp), \
FN(ima_file_hash), \
FN(kptr_xchg), \
+ FN(map_lookup_percpu_elem), \
+ FN(skc_to_mptcp_sock), \
+ FN(dynptr_from_mem), \
+ FN(ringbuf_reserve_dynptr), \
+ FN(ringbuf_submit_dynptr), \
+ FN(ringbuf_discard_dynptr), \
+ FN(dynptr_read), \
+ FN(dynptr_write), \
+ FN(dynptr_data), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -5604,6 +5708,10 @@ struct bpf_tunnel_key {
__u8 tunnel_ttl;
__u16 tunnel_ext; /* Padding, future use. */
__u32 tunnel_label;
+ union {
+ __u32 local_ipv4;
+ __u32 local_ipv6[4];
+ };
};
/* user accessible mirror of in-kernel xfrm_state.
@@ -6498,6 +6606,11 @@ struct bpf_timer {
__u64 :64;
} __attribute__((aligned(8)));
+struct bpf_dynptr {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.