summaryrefslogtreecommitdiff
path: root/include/linux/bpf.h
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@kernel.org>2020-03-13 05:23:12 +0300
committerAlexei Starovoitov <ast@kernel.org>2020-03-13 22:49:52 +0300
commit1afbcd9466f2fd979dde57ad424524a2fc5572e3 (patch)
treeb693861f42cde02243deb69d151fa80fcee09364 /include/linux/bpf.h
parent98868668367b24487c0b0b3298d7ca98409baf07 (diff)
parent7ac88eba185b4d0e06a71678e54bc092edcd3af3 (diff)
downloadlinux-1afbcd9466f2fd979dde57ad424524a2fc5572e3.tar.xz
Merge branch 'generalize-bpf-ksym'
Jiri Olsa says: ==================== this patchset adds trampoline and dispatcher objects to be visible in /proc/kallsyms. $ sudo cat /proc/kallsyms | tail -20 ... ffffffffa050f000 t bpf_prog_5a2b06eab81b8f51 [bpf] ffffffffa0511000 t bpf_prog_6deef7357e7b4530 [bpf] ffffffffa0542000 t bpf_trampoline_13832 [bpf] ffffffffa0548000 t bpf_prog_96f1b5bf4e4cc6dc_mutex_lock [bpf] ffffffffa0572000 t bpf_prog_d1c63e29ad82c4ab_bpf_prog1 [bpf] ffffffffa0585000 t bpf_prog_e314084d332a5338__dissect [bpf] ffffffffa0587000 t bpf_prog_59785a79eac7e5d2_mutex_unlock [bpf] ffffffffa0589000 t bpf_prog_d0db6e0cac050163_mutex_lock [bpf] ffffffffa058d000 t bpf_prog_d8f047721e4d8321_bpf_prog2 [bpf] ffffffffa05df000 t bpf_trampoline_25637 [bpf] ffffffffa05e3000 t bpf_prog_d8f047721e4d8321_bpf_prog2 [bpf] ffffffffa05e5000 t bpf_prog_3b185187f1855c4c [bpf] ffffffffa05e7000 t bpf_prog_d8f047721e4d8321_bpf_prog2 [bpf] ffffffffa05eb000 t bpf_prog_93cebb259dd5c4b2_do_sys_open [bpf] ffffffffa0677000 t bpf_dispatcher_xdp [bpf] v5 changes: - keeping just 1 bpf_tree for all the objects and adding flag to recognize bpf_objects when searching for exception tables [Alexei] - no need for is_bpf_image_address call in kernel_text_address [Alexei] - removed the bpf_image tree, because it's no longer needed v4 changes: - add trampoline and dispatcher to kallsyms once it's allocated [Alexei] - omit the symbols sorting for kallsyms [Alexei] - small title change in one patch [Song] - some function renames: bpf_get_prog_name to bpf_prog_ksym_set_name bpf_get_prog_addr_region to bpf_prog_ksym_set_addr - added acks to changelogs - I checked and there'll be conflict on perftool side with upcoming changes from Adrian Hunter (text poke events), so I think it's better if Arnaldo takes the perf changes via perf tree and we will solve all conflicts there v3 changes: - use container_of directly in bpf_get_ksym_start [Daniel] - add more changelog explanations for ksym addresses [Daniel] v2 changes: - omit extra 
condition in __bpf_ksym_add for sorting code (Andrii) - rename bpf_kallsyms_tree_ops to bpf_ksym_tree (Andrii) - expose only executable code in kallsyms (Andrii) - use full trampoline key as its kallsyms id (Andrii) - explained the BPF_TRAMP_REPLACE case (Andrii) - small format changes in bpf_trampoline_link_prog/bpf_trampoline_unlink_prog (Andrii) - propagate error value in bpf_dispatcher_update and update kallsym if it's successful (Andrii) - get rid of __always_inline for bpf_ksym_tree callbacks (Andrii) - added KSYMBOL notification for bpf_image add/removal - added perf tools changes to properly display trampoline/dispatcher ==================== Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'include/linux/bpf.h')
-rw-r--r--include/linux/bpf.h65
1 files changed, 40 insertions, 25 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c2f815e9f7d0..bdb981c204fa 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -18,6 +18,7 @@
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/kallsyms.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -471,6 +472,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
+struct bpf_ksym {
+ unsigned long start;
+ unsigned long end;
+ char name[KSYM_NAME_LEN];
+ struct list_head lnode;
+ struct latch_tree_node tnode;
+ bool prog;
+};
+
enum bpf_tramp_prog_type {
BPF_TRAMP_FENTRY,
BPF_TRAMP_FEXIT,
@@ -503,6 +513,7 @@ struct bpf_trampoline {
/* Executable image of trampoline */
void *image;
u64 selector;
+ struct bpf_ksym ksym;
};
#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
@@ -520,9 +531,10 @@ struct bpf_dispatcher {
int num_progs;
void *image;
u32 image_off;
+ struct bpf_ksym ksym;
};
-static __always_inline unsigned int bpf_dispatcher_nopfunc(
+static __always_inline unsigned int bpf_dispatcher_nop_func(
const void *ctx,
const struct bpf_insn *insnsi,
unsigned int (*bpf_func)(const void *,
@@ -535,17 +547,21 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
-#define BPF_DISPATCHER_INIT(name) { \
- .mutex = __MUTEX_INITIALIZER(name.mutex), \
- .func = &name##func, \
- .progs = {}, \
- .num_progs = 0, \
- .image = NULL, \
- .image_off = 0 \
+#define BPF_DISPATCHER_INIT(_name) { \
+ .mutex = __MUTEX_INITIALIZER(_name.mutex), \
+ .func = &_name##_func, \
+ .progs = {}, \
+ .num_progs = 0, \
+ .image = NULL, \
+ .image_off = 0, \
+ .ksym = { \
+ .name = #_name, \
+ .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
+ }, \
}
#define DEFINE_BPF_DISPATCHER(name) \
- noinline unsigned int name##func( \
+ noinline unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
unsigned int (*bpf_func)(const void *, \
@@ -553,26 +569,26 @@ void bpf_trampoline_put(struct bpf_trampoline *tr);
{ \
return bpf_func(ctx, insnsi); \
} \
- EXPORT_SYMBOL(name##func); \
- struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name);
+ EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
+ struct bpf_dispatcher bpf_dispatcher_##name = \
+ BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name) \
- unsigned int name##func( \
+ unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
unsigned int (*bpf_func)(const void *, \
const struct bpf_insn *)); \
- extern struct bpf_dispatcher name;
-#define BPF_DISPATCHER_FUNC(name) name##func
-#define BPF_DISPATCHER_PTR(name) (&name)
+ extern struct bpf_dispatcher bpf_dispatcher_##name;
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
+#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to);
-struct bpf_image {
- struct latch_tree_node tnode;
- unsigned char data[];
-};
-#define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
-bool is_bpf_image_address(unsigned long address);
-void *bpf_image_alloc(void);
+/* Called only from JIT-enabled code, so there's no need for stubs. */
+void *bpf_jit_alloc_exec_page(void);
+void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
+void bpf_image_ksym_del(struct bpf_ksym *ksym);
+void bpf_ksym_add(struct bpf_ksym *ksym);
+void bpf_ksym_del(struct bpf_ksym *ksym);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
@@ -589,7 +605,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
-#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
struct bpf_prog *from,
@@ -650,8 +666,7 @@ struct bpf_prog_aux {
void *jit_data; /* JIT specific data. arch dependent */
struct bpf_jit_poke_descriptor *poke_tab;
u32 size_poke_tab;
- struct latch_tree_node ksym_tnode;
- struct list_head ksym_lnode;
+ struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;