From 7bd3a33ae6d2b820bc44a206f9b81b96840219fd Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 17 Jun 2020 11:31:32 -0700 Subject: libbpf: Bump version to 0.1.0 Bump libbpf version to 0.1.0, as a new development cycle starts. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200617183132.1970836-1-andriin@fb.com --- tools/lib/bpf/libbpf.map | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index f732c77b7ed0..c914347f5065 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -270,3 +270,6 @@ LIBBPF_0.0.9 { ring_buffer__new; ring_buffer__poll; } LIBBPF_0.0.8; + +LIBBPF_0.1.0 { +} LIBBPF_0.0.9; -- cgit v1.2.3 From 1bdb6c9a1c43fdf9b83b2331dfc6229bd2e71d9b Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sat, 20 Jun 2020 23:21:12 -0700 Subject: libbpf: Add a bunch of attribute getters/setters for map definitions Add a bunch of getters for various aspects of a BPF map. Some of these attributes (e.g., key_size, value_size, type, etc.) are available right now in struct bpf_map_def, but this patch adds getters that allow fetching them individually. The bpf_map_def approach isn't very scalable once ABI stability requirements are taken into account. It's much easier to extend libbpf and add support for new features when each aspect of a BPF map has a separate getter/setter. Getters follow the common naming convention of not explicitly having "get" in their names: bpf_map__type() returns the map type, bpf_map__key_size() returns the key_size. Setters, though, explicitly have "set" in their names: bpf_map__set_type(), bpf_map__set_key_size(). This patch ensures we now have a getter and a setter for the following map attributes: - type; - max_entries; - map_flags; - numa_node; - key_size; - value_size; - ifindex. bpf_map__resize() enforces an unnecessary restriction of max_entries > 0. It is unnecessary because libbpf actually supports zero max_entries for some cases (e.g., for a PERF_EVENT_ARRAY map) and treats it specially at map creation time. To allow setting max_entries=0, a new bpf_map__set_max_entries() setter is added. bpf_map__resize()'s behavior is preserved for backwards-compatibility reasons. A map ifindex getter is added as well; there was a setter already but no corresponding getter, so this fixes that asymmetry too. bpf_map__set_ifindex() itself is converted from a void function into an error-returning one, similar to other setters. The only error returned right now is -EBUSY, if the BPF map is already loaded and has a corresponding FD. The one attribute that previously could be neither read, set, nor specified declaratively is numa_node. This patch fixes that gap by adding a programmatic getter/setter as well as support for the numa_node field in BTF-defined maps. 
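A usage sketch (illustrative only, not part of the commit; the object file name, map name, and omitted error handling are assumptions):

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj = bpf_object__open("prog.bpf.o");            /* hypothetical file */
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "events");  /* hypothetical map */

	/* setters only work before load; afterwards they return -EBUSY */
	bpf_map__set_max_entries(map, 0); /* 0 is now legal, e.g. for PERF_EVENT_ARRAY */
	bpf_map__set_numa_node(map, 0);

	bpf_object__load(obj);

	/* getters work at any time */
	printf("type=%d key=%u value=%u flags=%u\n",
	       bpf_map__type(map), bpf_map__key_size(map),
	       bpf_map__value_size(map), bpf_map__map_flags(map));

	bpf_object__close(obj);
	return 0;
}
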
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Toke Høiland-Jørgensen Link: https://lore.kernel.org/bpf/20200621062112.3006313-1-andriin@fb.com --- tools/lib/bpf/libbpf.c | 100 +++++++++++++++++++++++++++++++++++++++++++---- tools/lib/bpf/libbpf.h | 30 ++++++++++++-- tools/lib/bpf/libbpf.map | 14 +++++++ 3 files changed, 134 insertions(+), 10 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 477c679ed945..259a6360475f 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -310,6 +310,7 @@ struct bpf_map { int map_ifindex; int inner_map_fd; struct bpf_map_def def; + __u32 numa_node; __u32 btf_var_idx; __u32 btf_key_type_id; __u32 btf_value_type_id; @@ -1957,6 +1958,10 @@ static int parse_btf_map_def(struct bpf_object *obj, return -EINVAL; pr_debug("map '%s': found map_flags = %u.\n", map->name, map->def.map_flags); + } else if (strcmp(name, "numa_node") == 0) { + if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node)) + return -EINVAL; + pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node); } else if (strcmp(name, "key_size") == 0) { __u32 sz; @@ -3222,20 +3227,27 @@ err_free_new_name: return err; } -int bpf_map__resize(struct bpf_map *map, __u32 max_entries) +__u32 bpf_map__max_entries(const struct bpf_map *map) { - if (!map || !max_entries) - return -EINVAL; + return map->def.max_entries; +} - /* If map already created, its attributes can't be changed. */ +int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) +{ if (map->fd >= 0) return -EBUSY; - map->def.max_entries = max_entries; - return 0; } +int bpf_map__resize(struct bpf_map *map, __u32 max_entries) +{ + if (!map || !max_entries) + return -EINVAL; + + return bpf_map__set_max_entries(map, max_entries); +} + static int bpf_object__probe_loading(struct bpf_object *obj) { @@ -3603,6 +3615,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) create_attr.map_flags = def->map_flags; create_attr.key_size = def->key_size; create_attr.value_size = def->value_size; + create_attr.numa_node = map->numa_node; if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) { int nr_cpus; @@ -7088,6 +7101,71 @@ const char *bpf_map__name(const struct bpf_map *map) return map ? 
map->name : NULL; } +enum bpf_map_type bpf_map__type(const struct bpf_map *map) +{ + return map->def.type; +} + +int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.type = type; + return 0; +} + +__u32 bpf_map__map_flags(const struct bpf_map *map) +{ + return map->def.map_flags; +} + +int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.map_flags = flags; + return 0; +} + +__u32 bpf_map__numa_node(const struct bpf_map *map) +{ + return map->numa_node; +} + +int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) +{ + if (map->fd >= 0) + return -EBUSY; + map->numa_node = numa_node; + return 0; +} + +__u32 bpf_map__key_size(const struct bpf_map *map) +{ + return map->def.key_size; +} + +int bpf_map__set_key_size(struct bpf_map *map, __u32 size) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.key_size = size; + return 0; +} + +__u32 bpf_map__value_size(const struct bpf_map *map) +{ + return map->def.value_size; +} + +int bpf_map__set_value_size(struct bpf_map *map, __u32 size) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.value_size = size; + return 0; +} + __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) { return map ? map->btf_key_type_id : 0; @@ -7140,9 +7218,17 @@ bool bpf_map__is_internal(const struct bpf_map *map) return map->libbpf_type != LIBBPF_MAP_UNSPEC; } -void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) +__u32 bpf_map__ifindex(const struct bpf_map *map) +{ + return map->map_ifindex; +} + +int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) { + if (map->fd >= 0) + return -EBUSY; map->map_ifindex = ifindex; + return 0; } int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 334437af3014..fdd279fb1866 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -418,11 +418,38 @@ bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj); LIBBPF_API struct bpf_map * bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj); +/* get/set map FD */ LIBBPF_API int bpf_map__fd(const struct bpf_map *map); +LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); +/* get map definition */ LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map); +/* get map name */ LIBBPF_API const char *bpf_map__name(const struct bpf_map *map); +/* get/set map type */ +LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type); +/* get/set map size (max_entries) */ +LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries); +LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); +/* get/set map flags */ +LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags); +/* get/set map NUMA node */ +LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node); +/* get/set map key size */ +LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size); +/* get/set map value size */ +LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_value_size(struct 
bpf_map *map, __u32 size); +/* get map key/value BTF type IDs */ LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map); LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map); +/* get/set map if_index */ +LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, @@ -430,11 +457,8 @@ LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, LIBBPF_API void *bpf_map__priv(const struct bpf_map *map); LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map, const void *data, size_t size); -LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); -LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map); LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); -LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path); LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map); LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index c914347f5065..9914e0db4859 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -272,4 +272,18 @@ LIBBPF_0.0.9 { } LIBBPF_0.0.8; LIBBPF_0.1.0 { + global: + bpf_map__ifindex; + bpf_map__key_size; + bpf_map__map_flags; + bpf_map__max_entries; + bpf_map__numa_node; + bpf_map__set_key_size; + bpf_map__set_map_flags; + bpf_map__set_max_entries; + bpf_map__set_numa_node; + bpf_map__set_type; + bpf_map__set_value_size; + bpf_map__type; + bpf_map__value_size; } LIBBPF_0.0.9; -- cgit v1.2.3 From 2e33efe32e019328916ce653dc1265d637261993 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 16:16:55 -0700 Subject: libbpf: Generalize libbpf externs support Switch existing Kconfig externs to be just one of a few possible kinds of more generic externs. This refactoring is in preparation for ksymbol extern support, added in the follow-up patch. There are no functional changes intended. 
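For orientation (a sketch, not from the patch): the pre-existing Kconfig extern kind looks roughly like this from BPF C code, using the __kconfig macro and enum libbpf_tristate from bpf_helpers.h; the config name, section, and logic are made up:

#include <bpf/bpf_helpers.h>

extern int LINUX_KERNEL_VERSION __kconfig;   /* resolved by libbpf at load time */
extern enum libbpf_tristate CONFIG_BPF_JIT __kconfig __attribute__((weak)); /* weak: stays 0 if absent */

SEC("raw_tp/sys_enter")
int dump_cfg(void *ctx)
{
	if (CONFIG_BPF_JIT == TRI_YES)
		bpf_printk("JIT on, kernel version code %d\n", LINUX_KERNEL_VERSION);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
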
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hao Luo Link: https://lore.kernel.org/bpf/20200619231703.738941-2-andriin@fb.com --- tools/lib/bpf/libbpf.c | 346 +++++++++++++++++++++++++++++-------------------- 1 file changed, 206 insertions(+), 140 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 259a6360475f..ffccb5af32a5 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -330,24 +330,35 @@ struct bpf_map { enum extern_type { EXT_UNKNOWN, - EXT_CHAR, - EXT_BOOL, - EXT_INT, - EXT_TRISTATE, - EXT_CHAR_ARR, + EXT_KCFG, +}; + +enum kcfg_type { + KCFG_UNKNOWN, + KCFG_CHAR, + KCFG_BOOL, + KCFG_INT, + KCFG_TRISTATE, + KCFG_CHAR_ARR, }; struct extern_desc { - const char *name; + enum extern_type type; int sym_idx; int btf_id; - enum extern_type type; - int sz; - int align; - int data_off; - bool is_signed; - bool is_weak; + int sec_btf_id; + const char *name; bool is_set; + bool is_weak; + union { + struct { + enum kcfg_type type; + int sz; + int align; + int data_off; + bool is_signed; + } kcfg; + }; }; static LIST_HEAD(bpf_objects_list); @@ -1424,19 +1435,19 @@ static struct extern_desc *find_extern_by_name(const struct bpf_object *obj, return NULL; } -static int set_ext_value_tri(struct extern_desc *ext, void *ext_val, - char value) +static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val, + char value) { - switch (ext->type) { - case EXT_BOOL: + switch (ext->kcfg.type) { + case KCFG_BOOL: if (value == 'm') { - pr_warn("extern %s=%c should be tristate or char\n", + pr_warn("extern (kcfg) %s=%c should be tristate or char\n", ext->name, value); return -EINVAL; } *(bool *)ext_val = value == 'y' ? true : false; break; - case EXT_TRISTATE: + case KCFG_TRISTATE: if (value == 'y') *(enum libbpf_tristate *)ext_val = TRI_YES; else if (value == 'm') @@ -1444,14 +1455,14 @@ static int set_ext_value_tri(struct extern_desc *ext, void *ext_val, else /* value == 'n' */ *(enum libbpf_tristate *)ext_val = TRI_NO; break; - case EXT_CHAR: + case KCFG_CHAR: *(char *)ext_val = value; break; - case EXT_UNKNOWN: - case EXT_INT: - case EXT_CHAR_ARR: + case KCFG_UNKNOWN: + case KCFG_INT: + case KCFG_CHAR_ARR: default: - pr_warn("extern %s=%c should be bool, tristate, or char\n", + pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n", ext->name, value); return -EINVAL; } @@ -1459,29 +1470,29 @@ static int set_ext_value_tri(struct extern_desc *ext, void *ext_val, return 0; } -static int set_ext_value_str(struct extern_desc *ext, char *ext_val, - const char *value) +static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, + const char *value) { size_t len; - if (ext->type != EXT_CHAR_ARR) { - pr_warn("extern %s=%s should char array\n", ext->name, value); + if (ext->kcfg.type != KCFG_CHAR_ARR) { + pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value); return -EINVAL; } len = strlen(value); if (value[len - 1] != '"') { - pr_warn("extern '%s': invalid string config '%s'\n", + pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", ext->name, value); return -EINVAL; } /* strip quotes */ len -= 2; - if (len >= ext->sz) { - pr_warn("extern '%s': long string config %s of (%zu bytes) truncated to %d bytes\n", - ext->name, value, len, ext->sz - 1); - len = ext->sz - 1; + if (len >= ext->kcfg.sz) { + pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n", + ext->name, value, len, ext->kcfg.sz - 1); + len = ext->kcfg.sz - 1; } 
memcpy(ext_val, value + 1, len); ext_val[len] = '\0'; @@ -1508,11 +1519,11 @@ static int parse_u64(const char *value, __u64 *res) return 0; } -static bool is_ext_value_in_range(const struct extern_desc *ext, __u64 v) +static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v) { - int bit_sz = ext->sz * 8; + int bit_sz = ext->kcfg.sz * 8; - if (ext->sz == 8) + if (ext->kcfg.sz == 8) return true; /* Validate that value stored in u64 fits in integer of `ext->sz` @@ -1527,26 +1538,26 @@ static bool is_ext_value_in_range(const struct extern_desc *ext, __u64 v) * For unsigned target integer, check that all the (64 - Y) bits are * zero. */ - if (ext->is_signed) + if (ext->kcfg.is_signed) return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); else return (v >> bit_sz) == 0; } -static int set_ext_value_num(struct extern_desc *ext, void *ext_val, - __u64 value) +static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val, + __u64 value) { - if (ext->type != EXT_INT && ext->type != EXT_CHAR) { - pr_warn("extern %s=%llu should be integer\n", + if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { + pr_warn("extern (kcfg) %s=%llu should be integer\n", ext->name, (unsigned long long)value); return -EINVAL; } - if (!is_ext_value_in_range(ext, value)) { - pr_warn("extern %s=%llu value doesn't fit in %d bytes\n", - ext->name, (unsigned long long)value, ext->sz); + if (!is_kcfg_value_in_range(ext, value)) { + pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n", + ext->name, (unsigned long long)value, ext->kcfg.sz); return -ERANGE; } - switch (ext->sz) { + switch (ext->kcfg.sz) { case 1: *(__u8 *)ext_val = value; break; case 2: *(__u16 *)ext_val = value; break; case 4: *(__u32 *)ext_val = value; break; @@ -1592,30 +1603,30 @@ static int bpf_object__process_kconfig_line(struct bpf_object *obj, if (!ext || ext->is_set) return 0; - ext_val = data + ext->data_off; + ext_val = data + ext->kcfg.data_off; value = sep + 1; switch (*value) { case 'y': case 'n': case 'm': - err = set_ext_value_tri(ext, ext_val, *value); + err = set_kcfg_value_tri(ext, ext_val, *value); break; case '"': - err = set_ext_value_str(ext, ext_val, value); + err = set_kcfg_value_str(ext, ext_val, value); break; default: /* assume integer */ err = parse_u64(value, &num); if (err) { - pr_warn("extern %s=%s should be integer\n", + pr_warn("extern (kcfg) %s=%s should be integer\n", ext->name, value); return err; } - err = set_ext_value_num(ext, ext_val, num); + err = set_kcfg_value_num(ext, ext_val, num); break; } if (err) return err; - pr_debug("extern %s=%s\n", ext->name, value); + pr_debug("extern (kcfg) %s=%s\n", ext->name, value); return 0; } @@ -1686,16 +1697,20 @@ static int bpf_object__read_kconfig_mem(struct bpf_object *obj, static int bpf_object__init_kconfig_map(struct bpf_object *obj) { - struct extern_desc *last_ext; + struct extern_desc *last_ext = NULL, *ext; size_t map_sz; - int err; + int i, err; - if (obj->nr_extern == 0) - return 0; + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type == EXT_KCFG) + last_ext = ext; + } - last_ext = &obj->externs[obj->nr_extern - 1]; - map_sz = last_ext->data_off + last_ext->sz; + if (!last_ext) + return 0; + map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, obj->efile.symbols_shndx, NULL, map_sz); @@ -2714,8 +2729,33 @@ static int find_extern_btf_id(const struct btf *btf, const char *ext_name) return -ENOENT; } -static enum extern_type 
find_extern_type(const struct btf *btf, int id, - bool *is_signed) +static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { + const struct btf_var_secinfo *vs; + const struct btf_type *t; + int i, j, n; + + if (!btf) + return -ESRCH; + + n = btf__get_nr_types(btf); + for (i = 1; i <= n; i++) { + t = btf__type_by_id(btf, i); + + if (!btf_is_datasec(t)) + continue; + + vs = btf_var_secinfos(t); + for (j = 0; j < btf_vlen(t); j++, vs++) { + if (vs->type == ext_btf_id) + return i; + } + } + + return -ENOENT; +} + +static enum kcfg_type find_kcfg_type(const struct btf *btf, int id, + bool *is_signed) { const struct btf_type *t; const char *name; @@ -2730,29 +2770,29 @@ static enum extern_type find_extern_type(const struct btf *btf, int id, int enc = btf_int_encoding(t); if (enc & BTF_INT_BOOL) - return t->size == 1 ? EXT_BOOL : EXT_UNKNOWN; + return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; if (is_signed) *is_signed = enc & BTF_INT_SIGNED; if (t->size == 1) - return EXT_CHAR; + return KCFG_CHAR; if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) - return EXT_UNKNOWN; - return EXT_INT; + return KCFG_UNKNOWN; + return KCFG_INT; } case BTF_KIND_ENUM: if (t->size != 4) - return EXT_UNKNOWN; + return KCFG_UNKNOWN; if (strcmp(name, "libbpf_tristate")) - return EXT_UNKNOWN; - return EXT_TRISTATE; + return KCFG_UNKNOWN; + return KCFG_TRISTATE; case BTF_KIND_ARRAY: if (btf_array(t)->nelems == 0) - return EXT_UNKNOWN; - if (find_extern_type(btf, btf_array(t)->type, NULL) != EXT_CHAR) - return EXT_UNKNOWN; - return EXT_CHAR_ARR; + return KCFG_UNKNOWN; + if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) + return KCFG_UNKNOWN; + return KCFG_CHAR_ARR; default: - return EXT_UNKNOWN; + return KCFG_UNKNOWN; } } @@ -2761,23 +2801,29 @@ static int cmp_externs(const void *_a, const void *_b) const struct extern_desc *a = _a; const struct extern_desc *b = _b; - /* descending order by alignment requirements */ - if (a->align != b->align) - return a->align > b->align ? -1 : 1; - /* ascending order by size, within same alignment class */ - if (a->sz != b->sz) - return a->sz < b->sz ? -1 : 1; + if (a->type != b->type) + return a->type < b->type ? -1 : 1; + + if (a->type == EXT_KCFG) { + /* descending order by alignment requirements */ + if (a->kcfg.align != b->kcfg.align) + return a->kcfg.align > b->kcfg.align ? -1 : 1; + /* ascending order by size, within same alignment class */ + if (a->kcfg.sz != b->kcfg.sz) + return a->kcfg.sz < b->kcfg.sz ? 
-1 : 1; + } + /* resolve ties by name */ return strcmp(a->name, b->name); } static int bpf_object__collect_externs(struct bpf_object *obj) { + struct btf_type *sec, *kcfg_sec = NULL; const struct btf_type *t; struct extern_desc *ext; - int i, n, off, btf_id; - struct btf_type *sec; - const char *ext_name; + int i, n, off; + const char *ext_name, *sec_name; Elf_Scn *scn; GElf_Shdr sh; @@ -2823,22 +2869,39 @@ static int bpf_object__collect_externs(struct bpf_object *obj) ext->name = btf__name_by_offset(obj->btf, t->name_off); ext->sym_idx = i; ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK; - ext->sz = btf__resolve_size(obj->btf, t->type); - if (ext->sz <= 0) { - pr_warn("failed to resolve size of extern '%s': %d\n", - ext_name, ext->sz); - return ext->sz; - } - ext->align = btf__align_of(obj->btf, t->type); - if (ext->align <= 0) { - pr_warn("failed to determine alignment of extern '%s': %d\n", - ext_name, ext->align); - return -EINVAL; - } - ext->type = find_extern_type(obj->btf, t->type, - &ext->is_signed); - if (ext->type == EXT_UNKNOWN) { - pr_warn("extern '%s' type is unsupported\n", ext_name); + + ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); + if (ext->sec_btf_id <= 0) { + pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n", + ext_name, ext->btf_id, ext->sec_btf_id); + return ext->sec_btf_id; + } + sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); + sec_name = btf__name_by_offset(obj->btf, sec->name_off); + + if (strcmp(sec_name, KCONFIG_SEC) == 0) { + kcfg_sec = sec; + ext->type = EXT_KCFG; + ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); + if (ext->kcfg.sz <= 0) { + pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n", + ext_name, ext->kcfg.sz); + return ext->kcfg.sz; + } + ext->kcfg.align = btf__align_of(obj->btf, t->type); + if (ext->kcfg.align <= 0) { + pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n", + ext_name, ext->kcfg.align); + return -EINVAL; + } + ext->kcfg.type = find_kcfg_type(obj->btf, t->type, + &ext->kcfg.is_signed); + if (ext->kcfg.type == KCFG_UNKNOWN) { + pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name); + return -ENOTSUP; + } + } else { + pr_warn("unrecognized extern section '%s'\n", sec_name); return -ENOTSUP; } } @@ -2847,42 +2910,40 @@ static int bpf_object__collect_externs(struct bpf_object *obj) if (!obj->nr_extern) return 0; - /* sort externs by (alignment, size, name) and calculate their offsets - * within a map */ + /* sort externs by type, for kcfg ones also by (align, size, name) */ qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); - off = 0; - for (i = 0; i < obj->nr_extern; i++) { - ext = &obj->externs[i]; - ext->data_off = roundup(off, ext->align); - off = ext->data_off + ext->sz; - pr_debug("extern #%d: symbol %d, off %u, name %s\n", - i, ext->sym_idx, ext->data_off, ext->name); - } - btf_id = btf__find_by_name(obj->btf, KCONFIG_SEC); - if (btf_id <= 0) { - pr_warn("no BTF info found for '%s' datasec\n", KCONFIG_SEC); - return -ESRCH; - } + if (kcfg_sec) { + sec = kcfg_sec; + /* for kcfg externs calculate their offsets within a .kconfig map */ + off = 0; + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type != EXT_KCFG) + continue; - sec = (struct btf_type *)btf__type_by_id(obj->btf, btf_id); - sec->size = off; - n = btf_vlen(sec); - for (i = 0; i < n; i++) { - struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; - - t = btf__type_by_id(obj->btf, vs->type); - ext_name = btf__name_by_offset(obj->btf, t->name_off); 
- ext = find_extern_by_name(obj, ext_name); - if (!ext) { - pr_warn("failed to find extern definition for BTF var '%s'\n", - ext_name); - return -ESRCH; + ext->kcfg.data_off = roundup(off, ext->kcfg.align); + off = ext->kcfg.data_off + ext->kcfg.sz; + pr_debug("extern #%d (kcfg): symbol %d, off %u, name %s\n", + i, ext->sym_idx, ext->kcfg.data_off, ext->name); + } + sec->size = off; + n = btf_vlen(sec); + for (i = 0; i < n; i++) { + struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; + + t = btf__type_by_id(obj->btf, vs->type); + ext_name = btf__name_by_offset(obj->btf, t->name_off); + ext = find_extern_by_name(obj, ext_name); + if (!ext) { + pr_warn("failed to find extern definition for BTF var '%s'\n", + ext_name); + return -ESRCH; + } + btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; + vs->offset = ext->kcfg.data_off; } - vs->offset = ext->data_off; - btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; } - return 0; } @@ -3012,11 +3073,11 @@ static int bpf_program__record_reloc(struct bpf_program *prog, sym_idx); return -LIBBPF_ERRNO__RELOC; } - pr_debug("found extern #%d '%s' (sym %d, off %u) for insn %u\n", - i, ext->name, ext->sym_idx, ext->data_off, insn_idx); + pr_debug("found extern #%d '%s' (sym %d) for insn %u\n", + i, ext->name, ext->sym_idx, insn_idx); reloc_desc->type = RELO_EXTERN; reloc_desc->insn_idx = insn_idx; - reloc_desc->sym_off = ext->data_off; + reloc_desc->sym_off = i; /* sym_off stores extern index */ return 0; } @@ -4941,6 +5002,7 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) for (i = 0; i < prog->nr_reloc; i++) { struct reloc_desc *relo = &prog->reloc_desc[i]; struct bpf_insn *insn = &prog->insns[relo->insn_idx]; + struct extern_desc *ext; if (relo->insn_idx + 1 >= (int)prog->insns_cnt) { pr_warn("relocation out of range: '%s'\n", @@ -4959,9 +5021,10 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) insn[0].imm = obj->maps[relo->map_idx].fd; break; case RELO_EXTERN: + ext = &obj->externs[relo->sym_off]; insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; - insn[1].imm = relo->sym_off; + insn[1].imm = ext->kcfg.data_off; break; case RELO_CALL: err = bpf_program__reloc_text(prog, obj, relo); @@ -5585,30 +5648,33 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, { bool need_config = false; struct extern_desc *ext; + void *kcfg_data = NULL; int err, i; - void *data; if (obj->nr_extern == 0) return 0; - data = obj->maps[obj->kconfig_map_idx].mmaped; + if (obj->kconfig_map_idx >= 0) + kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; - if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { - void *ext_val = data + ext->data_off; + if (ext->type == EXT_KCFG && + strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { + void *ext_val = kcfg_data + ext->kcfg.data_off; __u32 kver = get_kernel_version(); if (!kver) { pr_warn("failed to get kernel version\n"); return -EINVAL; } - err = set_ext_value_num(ext, ext_val, kver); + err = set_kcfg_value_num(ext, ext_val, kver); if (err) return err; - pr_debug("extern %s=0x%x\n", ext->name, kver); - } else if (strncmp(ext->name, "CONFIG_", 7) == 0) { + pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver); + } else if (ext->type == EXT_KCFG && + strncmp(ext->name, "CONFIG_", 7) == 0) { need_config = true; } else { pr_warn("unrecognized extern '%s'\n", ext->name); @@ -5616,20 +5682,20 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, } } if (need_config 
&& extra_kconfig) { - err = bpf_object__read_kconfig_mem(obj, extra_kconfig, data); + err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data); if (err) return -EINVAL; need_config = false; for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; - if (!ext->is_set) { + if (ext->type == EXT_KCFG && !ext->is_set) { need_config = true; break; } } } if (need_config) { - err = bpf_object__read_kconfig_file(obj, data); + err = bpf_object__read_kconfig_file(obj, kcfg_data); if (err) return -EINVAL; } -- cgit v1.2.3 From 1c0c7074fefd769f62dda155e881ca90c9e3e75a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 16:16:56 -0700 Subject: libbpf: Add support for extracting kernel symbol addresses Add support for another (in addition to the existing Kconfig) special kind of externs in BPF code: kernel symbol externs. Such externs allow BPF code to "know" a kernel symbol address and either use it for comparisons with kernel data structures (e.g., struct file's f_op pointer, to distinguish different kinds of file), or, with the help of bpf_probe_read_kernel(), to follow pointers and read data from global variables. Kernel symbol addresses are found through /proc/kallsyms, which should be present in the system. Currently, such kernel symbol variables are typeless: they have to be defined as `extern const void` and the only operation you can do (in C code) with them is to take their address. Such externs should reside in a special section, '.ksyms'. The bpf_helpers.h header provides the __ksym macro for this. Strong vs. weak semantics stay the same as with Kconfig externs. If a symbol is not found in /proc/kallsyms, this is a failure for a strong (non-weak) extern, but the value defaults to 0 for weak externs. If the same symbol is defined multiple times in /proc/kallsyms and any of the associated addresses differ, it is an error: the address is ambiguous, so libbpf errs on the side of caution rather than confusing the user with a randomly chosen address. In the future, once the kernel is extended with BTF information for variables, such ksym externs will be supported in a typed version, which will allow a BPF program to read the variable's contents directly, similarly to how it's done for fentry/fexit input arguments. 
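An illustrative sketch (not from the patch) of a typeless ksym extern; the kernel symbol and attach point are assumptions for the example:

#include "vmlinux.h"            /* assumed to provide struct file */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

extern const void socket_file_ops __ksym; /* assumed kernel symbol */

SEC("fentry/fput")
int BPF_PROG(trace_fput, struct file *file)
{
	/* taking the extern's address is the only legal C operation on it */
	if (BPF_CORE_READ(file, f_op) == &socket_file_ops)
		bpf_printk("socket file dropped\n");
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
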
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hao Luo Link: https://lore.kernel.org/bpf/20200619231703.738941-3-andriin@fb.com --- tools/lib/bpf/bpf_helpers.h | 1 + tools/lib/bpf/btf.h | 5 ++ tools/lib/bpf/libbpf.c | 144 ++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 144 insertions(+), 6 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index f67dce2af802..a510d8ed716f 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -75,5 +75,6 @@ enum libbpf_tristate { }; #define __kconfig __attribute__((section(".kconfig"))) +#define __ksym __attribute__((section(".ksyms"))) #endif diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 70c1b7ec2bd0..06cd1731c154 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -168,6 +168,11 @@ static inline bool btf_kflag(const struct btf_type *t) return BTF_INFO_KFLAG(t->info); } +static inline bool btf_is_void(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_UNKN; +} + static inline bool btf_is_int(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_INT; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index ffccb5af32a5..18461deb1b19 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -285,6 +285,7 @@ struct bpf_struct_ops { #define BSS_SEC ".bss" #define RODATA_SEC ".rodata" #define KCONFIG_SEC ".kconfig" +#define KSYMS_SEC ".ksyms" #define STRUCT_OPS_SEC ".struct_ops" enum libbpf_map_type { @@ -331,6 +332,7 @@ struct bpf_map { enum extern_type { EXT_UNKNOWN, EXT_KCFG, + EXT_KSYM, }; enum kcfg_type { @@ -358,6 +360,9 @@ struct extern_desc { int data_off; bool is_signed; } kcfg; + struct { + unsigned long long addr; + } ksym; }; }; @@ -2817,9 +2822,25 @@ static int cmp_externs(const void *_a, const void *_b) return strcmp(a->name, b->name); } +static int find_int_btf_id(const struct btf *btf) +{ + const struct btf_type *t; + int i, n; + + n = btf__get_nr_types(btf); + for (i = 1; i <= n; i++) { + t = btf__type_by_id(btf, i); + + if (btf_is_int(t) && btf_int_bits(t) == 32) + return i; + } + + return 0; +} + static int bpf_object__collect_externs(struct bpf_object *obj) { - struct btf_type *sec, *kcfg_sec = NULL; + struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL; const struct btf_type *t; struct extern_desc *ext; int i, n, off; @@ -2900,6 +2921,17 @@ static int bpf_object__collect_externs(struct bpf_object *obj) pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name); return -ENOTSUP; } + } else if (strcmp(sec_name, KSYMS_SEC) == 0) { + const struct btf_type *vt; + + ksym_sec = sec; + ext->type = EXT_KSYM; + + vt = skip_mods_and_typedefs(obj->btf, t->type, NULL); + if (!btf_is_void(vt)) { + pr_warn("extern (ksym) '%s' is not typeless (void)\n", ext_name); + return -ENOTSUP; + } } else { pr_warn("unrecognized extern section '%s'\n", sec_name); return -ENOTSUP; @@ -2913,6 +2945,46 @@ static int bpf_object__collect_externs(struct bpf_object *obj) /* sort externs by type, for kcfg ones also by (align, size, name) */ qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); + /* for .ksyms section, we need to turn all externs into allocated + * variables in BTF to pass kernel verification; we do this by + * pretending that each extern is a 8-byte variable + */ + if (ksym_sec) { + /* find existing 4-byte integer type in BTF to use for fake + * extern variables in DATASEC + */ + int int_btf_id = find_int_btf_id(obj->btf); + + for (i = 0; i < 
obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type != EXT_KSYM) + continue; + pr_debug("extern (ksym) #%d: symbol %d, name %s\n", + i, ext->sym_idx, ext->name); + } + + sec = ksym_sec; + n = btf_vlen(sec); + for (i = 0, off = 0; i < n; i++, off += sizeof(int)) { + struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; + struct btf_type *vt; + + vt = (void *)btf__type_by_id(obj->btf, vs->type); + ext_name = btf__name_by_offset(obj->btf, vt->name_off); + ext = find_extern_by_name(obj, ext_name); + if (!ext) { + pr_warn("failed to find extern definition for BTF var '%s'\n", + ext_name); + return -ESRCH; + } + btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; + vt->type = int_btf_id; + vs->offset = off; + vs->size = sizeof(int); + } + sec->size = off; + } + if (kcfg_sec) { sec = kcfg_sec; /* for kcfg externs calculate their offsets within a .kconfig map */ @@ -2924,7 +2996,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj) ext->kcfg.data_off = roundup(off, ext->kcfg.align); off = ext->kcfg.data_off + ext->kcfg.sz; - pr_debug("extern #%d (kcfg): symbol %d, off %u, name %s\n", + pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n", i, ext->sym_idx, ext->kcfg.data_off, ext->name); } sec->size = off; @@ -5022,9 +5094,14 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) break; case RELO_EXTERN: ext = &obj->externs[relo->sym_off]; - insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; - insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; - insn[1].imm = ext->kcfg.data_off; + if (ext->type == EXT_KCFG) { + insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; + insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; + insn[1].imm = ext->kcfg.data_off; + } else /* EXT_KSYM */ { + insn[0].imm = (__u32)ext->ksym.addr; + insn[1].imm = ext->ksym.addr >> 32; + } break; case RELO_CALL: err = bpf_program__reloc_text(prog, obj, relo); @@ -5643,10 +5720,58 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj) return 0; } +static int bpf_object__read_kallsyms_file(struct bpf_object *obj) +{ + char sym_type, sym_name[500]; + unsigned long long sym_addr; + struct extern_desc *ext; + int ret, err = 0; + FILE *f; + + f = fopen("/proc/kallsyms", "r"); + if (!f) { + err = -errno; + pr_warn("failed to open /proc/kallsyms: %d\n", err); + return err; + } + + while (true) { + ret = fscanf(f, "%llx %c %499s%*[^\n]\n", + &sym_addr, &sym_type, sym_name); + if (ret == EOF && feof(f)) + break; + if (ret != 3) { + pr_warn("failed to read kallasyms entry: %d\n", ret); + err = -EINVAL; + goto out; + } + + ext = find_extern_by_name(obj, sym_name); + if (!ext || ext->type != EXT_KSYM) + continue; + + if (ext->is_set && ext->ksym.addr != sym_addr) { + pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n", + sym_name, ext->ksym.addr, sym_addr); + err = -EINVAL; + goto out; + } + if (!ext->is_set) { + ext->is_set = true; + ext->ksym.addr = sym_addr; + pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr); + } + } + +out: + fclose(f); + return err; +} + static int bpf_object__resolve_externs(struct bpf_object *obj, const char *extra_kconfig) { - bool need_config = false; + bool need_config = false, need_kallsyms = false; struct extern_desc *ext; void *kcfg_data = NULL; int err, i; @@ -5676,6 +5801,8 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, } else if (ext->type == EXT_KCFG && strncmp(ext->name, "CONFIG_", 7) == 0) { need_config = true; + } else if (ext->type == EXT_KSYM) { + need_kallsyms = true; } else { pr_warn("unrecognized extern '%s'\n", 
ext->name); return -EINVAL; @@ -5699,6 +5826,11 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, if (err) return -EINVAL; } + if (need_kallsyms) { + err = bpf_object__read_kallsyms_file(obj); + if (err) + return -EINVAL; + } for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; -- cgit v1.2.3 From bd9bedf84b87289b9a87eebfe7917e54373e99f9 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 16:17:01 -0700 Subject: libbpf: Wrap source argument of BPF_CORE_READ macro in parentheses Wrap source argument of BPF_CORE_READ family of macros into parentheses to allow uses like this: BPF_CORE_READ((struct cast_struct *)src, a, b, c); Fixes: 7db3822ab991 ("libbpf: Add BPF_CORE_READ/BPF_CORE_READ_INTO helpers") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200619231703.738941-8-andriin@fb.com --- tools/lib/bpf/bpf_core_read.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h index 7009dc90e012..eae5cccff761 100644 --- a/tools/lib/bpf/bpf_core_read.h +++ b/tools/lib/bpf/bpf_core_read.h @@ -217,7 +217,7 @@ enum bpf_field_info_kind { */ #define BPF_CORE_READ_INTO(dst, src, a, ...) \ ({ \ - ___core_read(bpf_core_read, dst, src, a, ##__VA_ARGS__) \ + ___core_read(bpf_core_read, dst, (src), a, ##__VA_ARGS__) \ }) /* @@ -227,7 +227,7 @@ enum bpf_field_info_kind { */ #define BPF_CORE_READ_STR_INTO(dst, src, a, ...) \ ({ \ - ___core_read(bpf_core_read_str, dst, src, a, ##__VA_ARGS__) \ + ___core_read(bpf_core_read_str, dst, (src), a, ##__VA_ARGS__)\ }) /* @@ -254,8 +254,8 @@ enum bpf_field_info_kind { */ #define BPF_CORE_READ(src, a, ...) \ ({ \ - ___type(src, a, ##__VA_ARGS__) __r; \ - BPF_CORE_READ_INTO(&__r, src, a, ##__VA_ARGS__); \ + ___type((src), a, ##__VA_ARGS__) __r; \ + BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \ __r; \ }) -- cgit v1.2.3 From 135c783f4794fbdeace4a969dea6eabd27f8a501 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 23 Jun 2020 09:42:07 +0100 Subject: libbpf: Fix spelling mistake "kallasyms" -> "kallsyms" There is a spelling mistake in a pr_warn message. Fix it. Signed-off-by: Colin Ian King Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200623084207.149253-1-colin.king@canonical.com --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 18461deb1b19..deea27aadcef 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -5741,7 +5741,7 @@ static int bpf_object__read_kallsyms_file(struct bpf_object *obj) if (ret == EOF && feof(f)) break; if (ret != 3) { - pr_warn("failed to read kallasyms entry: %d\n", ret); + pr_warn("failed to read kallsyms entry: %d\n", ret); err = -EINVAL; goto out; } -- cgit v1.2.3 From 192b6638eea5d40c99964291671fc0371b858f6e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 23 Jun 2020 21:38:05 -0700 Subject: libbpf: Prevent loading vmlinux BTF twice Prevent loading/parsing vmlinux BTF twice in some cases: for CO-RE relocations and for BTF-aware hooks (tp_btf, fentry/fexit, etc). 
Fixes: a6ed02cac690 ("libbpf: Load btf_vmlinux only once per object.") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200624043805.1794620-1-andriin@fb.com --- tools/lib/bpf/libbpf.c | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index deea27aadcef..6b4955d170ff 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2504,22 +2504,31 @@ static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog) static int bpf_object__load_vmlinux_btf(struct bpf_object *obj) { + bool need_vmlinux_btf = false; struct bpf_program *prog; int err; + /* CO-RE relocations need kernel BTF */ + if (obj->btf_ext && obj->btf_ext->field_reloc_info.len) + need_vmlinux_btf = true; + bpf_object__for_each_program(prog, obj) { if (libbpf_prog_needs_vmlinux_btf(prog)) { - obj->btf_vmlinux = libbpf_find_kernel_btf(); - if (IS_ERR(obj->btf_vmlinux)) { - err = PTR_ERR(obj->btf_vmlinux); - pr_warn("Error loading vmlinux BTF: %d\n", err); - obj->btf_vmlinux = NULL; - return err; - } - return 0; + need_vmlinux_btf = true; + break; } } + if (!need_vmlinux_btf) + return 0; + + obj->btf_vmlinux = libbpf_find_kernel_btf(); + if (IS_ERR(obj->btf_vmlinux)) { + err = PTR_ERR(obj->btf_vmlinux); + pr_warn("Error loading vmlinux BTF: %d\n", err); + obj->btf_vmlinux = NULL; + return err; + } return 0; } @@ -4945,8 +4954,8 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path) if (targ_btf_path) targ_btf = btf__parse_elf(targ_btf_path, NULL); else - targ_btf = libbpf_find_kernel_btf(); - if (IS_ERR(targ_btf)) { + targ_btf = obj->btf_vmlinux; + if (IS_ERR_OR_NULL(targ_btf)) { pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf)); return PTR_ERR(targ_btf); } @@ -4987,7 +4996,9 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path) } out: - btf__free(targ_btf); + /* obj->btf_vmlinux is freed at the end of object load phase */ + if (targ_btf != obj->btf_vmlinux) + btf__free(targ_btf); if (!IS_ERR_OR_NULL(cand_cache)) { hashmap__for_each_entry(cand_cache, entry, i) { bpf_core_free_cands(entry->value); -- cgit v1.2.3 From d929758101fc0674008169dc1de963e3181c587b Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 25 Jun 2020 16:26:28 -0700 Subject: libbpf: Support disabling auto-loading BPF programs Currently, bpf_object__load() (and, by extension, a skeleton's load) will always attempt to prepare, relocate, and load into the kernel every single BPF program found inside the BPF object file. This is often convenient, the right thing to do, and what users expect. But there are plenty of cases (especially with BPF development constantly picking up the pace) where a BPF application is intended to work with old kernels, with a potentially reduced set of features. On kernels supporting extra features, though, it would like to take full advantage of them by employing extra BPF programs. This could be a choice of using fentry/fexit over kprobe/kretprobe if the kernel is recent enough and built with BTF. Or a BPF program might provide an optimized bpf_iter-based solution that user-space wants to use whenever available. And so on. With libbpf and BPF CO-RE in particular, it's advantageous not to have to maintain two separate BPF object files to achieve this. So to enable such use cases, this patch adds the ability to opt out of auto-loading for chosen BPF programs. 
For such programs, libbpf won't attempt to perform relocations (which might fail on an old kernel), won't try to resolve BTF types for BTF-aware (tp_btf/fentry/fexit/etc.) program types (because BTF might not be present), and so on. The skeleton will also automatically skip the auto-attachment step for such non-loaded BPF programs. Overall, this feature simplifies development and deployment of real-world BPF applications with complicated compatibility requirements. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200625232629.3444003-2-andriin@fb.com --- tools/lib/bpf/libbpf.c | 48 ++++++++++++++++++++++++++++++++++++++++-------- tools/lib/bpf/libbpf.h | 2 ++ tools/lib/bpf/libbpf.map | 2 ++ 3 files changed, 44 insertions(+), 8 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 6b4955d170ff..4ea7f4f1a691 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -230,6 +230,7 @@ struct bpf_program { struct bpf_insn *insns; size_t insns_cnt, main_prog_cnt; enum bpf_prog_type type; + bool load; struct reloc_desc *reloc_desc; int nr_reloc; @@ -541,6 +542,7 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx, prog->instances.fds = NULL; prog->instances.nr = -1; prog->type = BPF_PROG_TYPE_UNSPEC; + prog->load = true; return 0; errout: @@ -2513,6 +2515,8 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj) need_vmlinux_btf = true; bpf_object__for_each_program(prog, obj) { + if (!prog->load) + continue; if (libbpf_prog_needs_vmlinux_btf(prog)) { need_vmlinux_btf = true; break; @@ -5445,6 +5449,12 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) { int err = 0, fd, i, btf_id; + if (prog->obj->loaded) { + pr_warn("prog '%s'('%s'): can't load after object was loaded\n", + prog->name, prog->section_name); + return -EINVAL; + } + if ((prog->type == BPF_PROG_TYPE_TRACING || prog->type == BPF_PROG_TYPE_LSM || prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { @@ -5533,16 +5543,21 @@ static bool bpf_program__is_function_storage(const struct bpf_program *prog, static int bpf_object__load_progs(struct bpf_object *obj, int log_level) { + struct bpf_program *prog; size_t i; int err; for (i = 0; i < obj->nr_programs; i++) { - if (bpf_program__is_function_storage(&obj->programs[i], obj)) + prog = &obj->programs[i]; + if (bpf_program__is_function_storage(prog, obj)) continue; - obj->programs[i].log_level |= log_level; - err = bpf_program__load(&obj->programs[i], - obj->license, - obj->kern_version); + if (!prog->load) { + pr_debug("prog '%s'('%s'): skipped loading\n", + prog->name, prog->section_name); + continue; + } + prog->log_level |= log_level; + err = bpf_program__load(prog, obj->license, obj->kern_version); if (err) return err; } @@ -5869,12 +5884,10 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) return -EINVAL; if (obj->loaded) { - pr_warn("object should not be loaded twice\n"); + pr_warn("object '%s': load can't be attempted twice\n", obj->name); return -EINVAL; } - obj->loaded = true; - err = bpf_object__probe_loading(obj); err = err ? : bpf_object__probe_caps(obj); err = err ? 
: bpf_object__resolve_externs(obj, obj->kconfig); @@ -5889,6 +5902,8 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) btf__free(obj->btf_vmlinux); obj->btf_vmlinux = NULL; + obj->loaded = true; /* doesn't matter if successfully or not */ + if (err) goto out; @@ -6661,6 +6676,20 @@ const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy) return title; } +bool bpf_program__autoload(const struct bpf_program *prog) +{ + return prog->load; +} + +int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) +{ + if (prog->obj->loaded) + return -EINVAL; + + prog->load = autoload; + return 0; +} + int bpf_program__fd(const struct bpf_program *prog) { return bpf_program__nth_fd(prog, 0); } @@ -9283,6 +9312,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) const struct bpf_sec_def *sec_def; const char *sec_name = bpf_program__title(prog, false); + if (!prog->load) + continue; + sec_def = find_sec_def(sec_name); if (!sec_def || !sec_def->attach_fn) continue; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index fdd279fb1866..2335971ed0bd 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -200,6 +200,8 @@ LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog, LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog); LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy); +LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog); +LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload); /* returns program size in bytes */ LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 9914e0db4859..6544d2cd1ed6 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -286,4 +286,6 @@ LIBBPF_0.1.0 { bpf_map__set_value_size; bpf_map__type; bpf_map__value_size; + bpf_program__autoload; + bpf_program__set_autoload; } LIBBPF_0.0.9; -- cgit v1.2.3 From 30ad688094bcfe8721bfd4003f6a20c9b6ddf964 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 30 Jun 2020 08:21:24 -0700 Subject: libbpf: Make bpf_endian co-exist with vmlinux.h Make bpf_endian.h compatible with vmlinux.h. It is a frequent request from users wanting to use bpf_endian.h in their BPF applications using CO-RE and vmlinux.h. To achieve that, re-implement the byte swap macros and drop all the header includes. This way it can be used both with Linux header includes and with vmlinux.h. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200630152125.3631920-2-andriin@fb.com --- tools/lib/bpf/bpf_endian.h | 43 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/bpf_endian.h b/tools/lib/bpf/bpf_endian.h index fbe28008450f..ec9db4feca9f 100644 --- a/tools/lib/bpf/bpf_endian.h +++ b/tools/lib/bpf/bpf_endian.h @@ -2,8 +2,35 @@ #ifndef __BPF_ENDIAN__ #define __BPF_ENDIAN__ -#include <linux/stddef.h> -#include <linux/swab.h> +/* + * Isolate byte #n and put it into byte #m, for __u##b type. 
+ * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64: + * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx + * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000 + * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn + * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000 + */ +#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8)) + +#define ___bpf_swab16(x) ((__u16)( \ + ___bpf_mvb(x, 16, 0, 1) | \ + ___bpf_mvb(x, 16, 1, 0))) + +#define ___bpf_swab32(x) ((__u32)( \ + ___bpf_mvb(x, 32, 0, 3) | \ + ___bpf_mvb(x, 32, 1, 2) | \ + ___bpf_mvb(x, 32, 2, 1) | \ + ___bpf_mvb(x, 32, 3, 0))) + +#define ___bpf_swab64(x) ((__u64)( \ + ___bpf_mvb(x, 64, 0, 7) | \ + ___bpf_mvb(x, 64, 1, 6) | \ + ___bpf_mvb(x, 64, 2, 5) | \ + ___bpf_mvb(x, 64, 3, 4) | \ + ___bpf_mvb(x, 64, 4, 3) | \ + ___bpf_mvb(x, 64, 5, 2) | \ + ___bpf_mvb(x, 64, 6, 1) | \ + ___bpf_mvb(x, 64, 7, 0))) /* LLVM's BPF target selects the endianness of the CPU * it compiles on, or the user specifies (bpfel/bpfeb), @@ -23,16 +50,16 @@ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ # define __bpf_ntohs(x) __builtin_bswap16(x) # define __bpf_htons(x) __builtin_bswap16(x) -# define __bpf_constant_ntohs(x) ___constant_swab16(x) -# define __bpf_constant_htons(x) ___constant_swab16(x) +# define __bpf_constant_ntohs(x) ___bpf_swab16(x) +# define __bpf_constant_htons(x) ___bpf_swab16(x) # define __bpf_ntohl(x) __builtin_bswap32(x) # define __bpf_htonl(x) __builtin_bswap32(x) -# define __bpf_constant_ntohl(x) ___constant_swab32(x) -# define __bpf_constant_htonl(x) ___constant_swab32(x) +# define __bpf_constant_ntohl(x) ___bpf_swab32(x) +# define __bpf_constant_htonl(x) ___bpf_swab32(x) # define __bpf_be64_to_cpu(x) __builtin_bswap64(x) # define __bpf_cpu_to_be64(x) __builtin_bswap64(x) -# define __bpf_constant_be64_to_cpu(x) ___constant_swab64(x) -# define __bpf_constant_cpu_to_be64(x) ___constant_swab64(x) +# define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x) +# define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x) #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # define __bpf_ntohs(x) (x) # define __bpf_htons(x) (x) -- cgit v1.2.3 From e8b012e9fabe7eafc1b2c72414e174547683860d Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 6 Jul 2020 16:01:26 -0700 Subject: libbpf: Add support for BPF_CGROUP_INET_SOCK_RELEASE Add auto-detection for the cgroup/sock_release programs. 
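An illustrative sketch (not part of the patch) of a program that the new section definition would auto-detect; the logic is made up, and returning 1 allows the operation:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sock_release")
int sock_release_prog(struct bpf_sock *ctx)
{
	/* e.g., account for released sockets per cgroup here */
	return 1; /* allow */
}

char LICENSE[] SEC("license") = "GPL";
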
Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200706230128.4073544-3-sdf@google.com --- tools/include/uapi/linux/bpf.h | 1 + tools/lib/bpf/libbpf.c | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'tools/lib') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index da9bf35a26f8..548a749aebb3 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -226,6 +226,7 @@ enum bpf_attach_type { BPF_CGROUP_INET4_GETSOCKNAME, BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, + BPF_CGROUP_INET_SOCK_RELEASE, __MAX_BPF_ATTACH_TYPE }; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 4ea7f4f1a691..88a483627a2b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6917,6 +6917,10 @@ static const struct bpf_sec_def section_defs[] = { BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_EGRESS), BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), + BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK, + BPF_CGROUP_INET_SOCK_CREATE), + BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK, + BPF_CGROUP_INET_SOCK_RELEASE), BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE), BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, -- cgit v1.2.3 From bfc96656a766bd0566565d8a7b6232b2b036fdfb Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:13 -0700 Subject: libbpf: Make BTF finalization strict With valid ELF and valid BTF, there is no reason (apart from bugs) why BTF finalization should fail. So make it strict and return an error if it fails. This makes CO-RE relocations more reliable, as they are not going to be silently skipped if BTF finalization failed. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-2-andriin@fb.com --- tools/lib/bpf/libbpf.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 88a483627a2b..efd857ebd5ee 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2473,19 +2473,11 @@ static int bpf_object__finalize_btf(struct bpf_object *obj) return 0; err = btf__finalize_data(obj, obj->btf); - if (!err) - return 0; - - pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); - btf__free(obj->btf); - obj->btf = NULL; - btf_ext__free(obj->btf_ext); - obj->btf_ext = NULL; - - if (libbpf_needs_btf(obj)) { - pr_warn("BTF is required, but is missing or corrupted.\n"); - return -ENOENT; + if (err) { + pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); + return err; } + return 0; } -- cgit v1.2.3 From 81372e121802fd57892a0b44d93cc747d9568627 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:14 -0700 Subject: libbpf: Add btf__set_fd() for more control over loaded BTF FD Add a setter for the BTF FD to allow applications more fine-grained control in more advanced scenarios. Storing the BTF FD inside `struct btf` provides little benefit and would probably be better done differently (e.g., btf__load() could just return the FD on success), but we are stuck with this due to backwards compatibility. The main problem is that it's impossible to load BTF and then free the user-space memory but keep the FD intact, because `struct btf` assumes ownership of that FD upon successful load and will attempt to close it during btf__free(). 
To allow callers (e.g., libbpf itself for BTF sanitization) to have more control over this, add btf__set_fd(), which allows resetting the FD arbitrarily, if necessary. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-3-andriin@fb.com --- tools/lib/bpf/btf.c | 7 ++++++- tools/lib/bpf/btf.h | 1 + tools/lib/bpf/libbpf.map | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index bfef3d606b54..c8861c9e3635 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -389,7 +389,7 @@ void btf__free(struct btf *btf) if (!btf) return; - if (btf->fd != -1) + if (btf->fd >= 0) close(btf->fd); free(btf->data); @@ -700,6 +700,11 @@ int btf__fd(const struct btf *btf) return btf->fd; } +void btf__set_fd(struct btf *btf, int fd) +{ + btf->fd = fd; +} + const void *btf__get_raw_data(const struct btf *btf, __u32 *size) { *size = btf->data_size; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 06cd1731c154..173eff23c472 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -79,6 +79,7 @@ LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id); LIBBPF_API int btf__fd(const struct btf *btf); +LIBBPF_API void btf__set_fd(struct btf *btf, int fd); LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size); LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 6544d2cd1ed6..c5d5c7664c3b 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -288,4 +288,5 @@ LIBBPF_0.1.0 { bpf_map__value_size; bpf_program__autoload; bpf_program__set_autoload; + btf__set_fd; } LIBBPF_0.0.9; -- cgit v1.2.3 From 0f0e55d8247c0742a9b026fc097fcc605383b9db Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:15 -0700 Subject: libbpf: Improve BTF sanitization handling Change the sanitization process to preserve the original BTF, which might be used by libbpf itself for Kconfig externs, CO-RE relocations, etc., even if the kernel is old and doesn't support BTF. To achieve that, if libbpf detects the need for BTF sanitization, it clones the original BTF, sanitizes the clone in place, attempts to load it into the kernel, and, if successful, preserves the loaded BTF FD in the original `struct btf` while freeing the sanitized local copy. If the kernel doesn't support BTF at all, the original btf and btf_ext are still preserved to be used later for CO-RE relocations and other BTF-dependent libbpf features, which don't depend on kernel BTF support. The patch takes care not to specify BTF and BTF.ext features when loading BPF programs and/or maps if the kernel was detected not to support BTF. 
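Condensed to its essence (a sketch with error handling omitted, not a literal excerpt; `btf` stands for an already-parsed struct btf *), the clone/sanitize/load flow that btf__set_fd() enables looks like:

__u32 sz;
const void *raw = btf__get_raw_data(btf, &sz);
struct btf *kern_btf = btf__new(raw, sz);    /* independent in-memory copy */

/* ... sanitize kern_btf in place to match kernel capabilities ... */

if (!btf__load(kern_btf)) {
	btf__set_fd(btf, btf__fd(kern_btf)); /* original BTF keeps the kernel FD */
	btf__set_fd(kern_btf, -1);           /* prevent btf__free() from closing it */
}
btf__free(kern_btf); /* sanitized copy goes away, FD lives on in the original */
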
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-4-andriin@fb.com --- tools/lib/bpf/libbpf.c | 103 ++++++++++++++++++++++++++++--------------------- 1 file changed, 58 insertions(+), 45 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index efd857ebd5ee..460e5790a95f 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2338,18 +2338,23 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx) return false; } -static void bpf_object__sanitize_btf(struct bpf_object *obj) +static bool btf_needs_sanitization(struct bpf_object *obj) +{ + bool has_func_global = obj->caps.btf_func_global; + bool has_datasec = obj->caps.btf_datasec; + bool has_func = obj->caps.btf_func; + + return !has_func || !has_datasec || !has_func_global; +} + +static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) { bool has_func_global = obj->caps.btf_func_global; bool has_datasec = obj->caps.btf_datasec; bool has_func = obj->caps.btf_func; - struct btf *btf = obj->btf; struct btf_type *t; int i, j, vlen; - if (!obj->btf || (has_func && has_datasec && has_func_global)) - return; - for (i = 1; i <= btf__get_nr_types(btf); i++) { t = (struct btf_type *)btf__type_by_id(btf, i); @@ -2402,17 +2407,6 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj) } } -static void bpf_object__sanitize_btf_ext(struct bpf_object *obj) -{ - if (!obj->btf_ext) - return; - - if (!obj->caps.btf_func) { - btf_ext__free(obj->btf_ext); - obj->btf_ext = NULL; - } -} - static bool libbpf_needs_btf(const struct bpf_object *obj) { return obj->efile.btf_maps_shndx >= 0 || @@ -2530,30 +2524,50 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj) static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) { + struct btf *kern_btf = obj->btf; + bool btf_mandatory, sanitize; int err = 0; if (!obj->btf) return 0; - bpf_object__sanitize_btf(obj); - bpf_object__sanitize_btf_ext(obj); + sanitize = btf_needs_sanitization(obj); + if (sanitize) { + const void *orig_data; + void *san_data; + __u32 sz; - err = btf__load(obj->btf); - if (err) { - pr_warn("Error loading %s into kernel: %d.\n", - BTF_ELF_SEC, err); - btf__free(obj->btf); - obj->btf = NULL; - /* btf_ext can't exist without btf, so free it as well */ - if (obj->btf_ext) { - btf_ext__free(obj->btf_ext); - obj->btf_ext = NULL; - } + /* clone BTF to sanitize a copy and leave the original intact */ + orig_data = btf__get_raw_data(obj->btf, &sz); + san_data = malloc(sz); + if (!san_data) + return -ENOMEM; + memcpy(san_data, orig_data, sz); + kern_btf = btf__new(san_data, sz); + if (IS_ERR(kern_btf)) + return PTR_ERR(kern_btf); - if (kernel_needs_btf(obj)) - return err; + bpf_object__sanitize_btf(obj, kern_btf); } - return 0; + + err = btf__load(kern_btf); + if (sanitize) { + if (!err) { + /* move fd to libbpf's BTF */ + btf__set_fd(obj->btf, btf__fd(kern_btf)); + btf__set_fd(kern_btf, -1); + } + btf__free(kern_btf); + } + if (err) { + btf_mandatory = kernel_needs_btf(obj); + pr_warn("Error loading .BTF into kernel: %d. %s\n", err, + btf_mandatory ? "BTF is mandatory, can't proceed." 
+ : "BTF is optional, ignoring."); + if (!btf_mandatory) + err = 0; + } + return err; } static int bpf_object__elf_collect(struct bpf_object *obj) @@ -3777,7 +3791,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) create_attr.btf_fd = 0; create_attr.btf_key_type_id = 0; create_attr.btf_value_type_id = 0; - if (obj->btf && !bpf_map_find_btf_info(obj, map)) { + if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { create_attr.btf_fd = btf__fd(obj->btf); create_attr.btf_key_type_id = map->btf_key_type_id; create_attr.btf_value_type_id = map->btf_value_type_id; @@ -5361,18 +5375,17 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.kern_version = kern_version; load_attr.prog_ifindex = prog->prog_ifindex; } - /* if .BTF.ext was loaded, kernel supports associated BTF for prog */ - if (prog->obj->btf_ext) - btf_fd = bpf_object__btf_fd(prog->obj); - else - btf_fd = -1; - load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0; - load_attr.func_info = prog->func_info; - load_attr.func_info_rec_size = prog->func_info_rec_size; - load_attr.func_info_cnt = prog->func_info_cnt; - load_attr.line_info = prog->line_info; - load_attr.line_info_rec_size = prog->line_info_rec_size; - load_attr.line_info_cnt = prog->line_info_cnt; + /* specify func_info/line_info only if kernel supports them */ + btf_fd = bpf_object__btf_fd(prog->obj); + if (btf_fd >= 0 && prog->obj->caps.btf_func) { + load_attr.prog_btf_fd = btf_fd; + load_attr.func_info = prog->func_info; + load_attr.func_info_rec_size = prog->func_info_rec_size; + load_attr.func_info_cnt = prog->func_info_cnt; + load_attr.line_info = prog->line_info; + load_attr.line_info_rec_size = prog->line_info_rec_size; + load_attr.line_info_cnt = prog->line_info_cnt; + } load_attr.log_level = prog->log_level; load_attr.prog_flags = prog->prog_flags; -- cgit v1.2.3 From 0e289487308236903b19273f2ddb4f0adf732b9e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:17 -0700 Subject: libbpf: Handle missing BPF_OBJ_GET_INFO_BY_FD gracefully in perf_buffer perf_buffer__new() is relying on BPF_OBJ_GET_INFO_BY_FD availability for few sanity checks. OBJ_GET_INFO for maps is actually much more recent feature than perf_buffer support itself, so this causes unnecessary problems on old kernels before BPF_OBJ_GET_INFO_BY_FD was added. This patch makes those sanity checks optional and just assumes best if command is not supported. If user specified something incorrectly (e.g., wrong map type), kernel will reject it later anyway, except user won't get a nice explanation as to why it failed. This seems like a good trade off for supporting perf_buffer on old kernels. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-6-andriin@fb.com --- tools/lib/bpf/libbpf.c | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 460e5790a95f..6602eb479596 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8591,7 +8591,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, struct perf_buffer_params *p) { const char *online_cpus_file = "/sys/devices/system/cpu/online"; - struct bpf_map_info map = {}; + struct bpf_map_info map; char msg[STRERR_BUFSIZE]; struct perf_buffer *pb; bool *online = NULL; @@ -8604,19 +8604,28 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, return ERR_PTR(-EINVAL); } + /* best-effort sanity checks */ + memset(&map, 0, sizeof(map)); map_info_len = sizeof(map); err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len); if (err) { err = -errno; - pr_warn("failed to get map info for map FD %d: %s\n", - map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); - return ERR_PTR(err); - } - - if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { - pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", - map.name); - return ERR_PTR(-EINVAL); + /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return + * -EBADFD, -EFAULT, or -E2BIG on real error + */ + if (err != -EINVAL) { + pr_warn("failed to get map info for map FD %d: %s\n", + map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); + return ERR_PTR(err); + } + pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n", + map_fd); + } else { + if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { + pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", + map.name); + return ERR_PTR(-EINVAL); + } } pb = calloc(1, sizeof(*pb)); @@ -8648,7 +8657,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, err = pb->cpu_cnt; goto error; } - if (map.max_entries < pb->cpu_cnt) + if (map.max_entries && map.max_entries < pb->cpu_cnt) pb->cpu_cnt = map.max_entries; } -- cgit v1.2.3 From 5c3320d7fece4612d4a413aa3c8e82cdb5b49fcb Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 9 Jul 2020 18:10:23 -0700 Subject: libbpf: Fix memory leak and optimize BTF sanitization Coverity's static analysis helpfully reported a memory leak introduced by 0f0e55d8247c ("libbpf: Improve BTF sanitization handling"). While fixing it, I realized that btf__new() already creates a memory copy, so there is no need to make an extra copy beforehand. This patch also fixes the misleading btf__new() signature by making data a `const void *` input parameter. And it avoids unnecessary memory allocation and copying in the BTF sanitization code altogether.
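For illustration, a minimal sketch (helper name made up) of what the fixed contract means for callers: btf__new() makes its own copy of the input buffer, so the caller keeps ownership and may free it immediately.

#include <stdlib.h>
#include <bpf/btf.h>

static struct btf *parse_and_release(void *raw_data, __u32 size)
{
	struct btf *btf = btf__new(raw_data, size);

	/* safe: btf__new() copied the data internally */
	free(raw_data);
	return btf; /* may be an ERR_PTR-encoded error */
}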
Fixes: 0f0e55d8247c ("libbpf: Improve BTF sanitization handling") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200710011023.1655008-1-andriin@fb.com --- tools/lib/bpf/btf.c | 2 +- tools/lib/bpf/btf.h | 2 +- tools/lib/bpf/libbpf.c | 11 +++-------- 3 files changed, 5 insertions(+), 10 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index c8861c9e3635..c9e760e120dc 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -397,7 +397,7 @@ void btf__free(struct btf *btf) free(btf); } -struct btf *btf__new(__u8 *data, __u32 size) +struct btf *btf__new(const void *data, __u32 size) { struct btf *btf; int err; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 173eff23c472..a3b7ef9b737f 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -63,7 +63,7 @@ struct btf_ext_header { }; LIBBPF_API void btf__free(struct btf *btf); -LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size); +LIBBPF_API struct btf *btf__new(const void *data, __u32 size); LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 6602eb479596..25e4f77be8d7 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2533,17 +2533,12 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) sanitize = btf_needs_sanitization(obj); if (sanitize) { - const void *orig_data; - void *san_data; + const void *raw_data; __u32 sz; /* clone BTF to sanitize a copy and leave the original intact */ - orig_data = btf__get_raw_data(obj->btf, &sz); - san_data = malloc(sz); - if (!san_data) - return -ENOMEM; - memcpy(san_data, orig_data, sz); - kern_btf = btf__new(san_data, sz); + raw_data = btf__get_raw_data(obj->btf, &sz); + kern_btf = btf__new(raw_data, sz); if (IS_ERR(kern_btf)) return PTR_ERR(kern_btf); -- cgit v1.2.3 From 7c819e701382d4969ca4837b8cbe157895f5d0bf Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 13 Jul 2020 16:24:08 -0700 Subject: libbpf: Support stripping modifiers for btf_dump One important use case in which emitting const/volatile/restrict is undesirable is BPF skeleton generation of DATASEC layouts. These are further memory-mapped and can be written/read from user-space directly. For the important case of .rodata variables, bpftool strips away first-level modifiers to make their use on the user-space side simple and to avoid extra type casts to silence compiler complaints about writing to const variables. This logic works mostly fine, but breaks in some more complicated cases. E.g.: const volatile int params[10]; Because in BTF it's a chain of ARRAY -> CONST -> VOLATILE -> INT, bpftool stops at ARRAY and doesn't strip CONST and VOLATILE. In the skeleton this variable will be emitted as is, so when it is used from user-space, the compiler will complain about writing to a const array. This is problematic, as also mentioned in [0]. To solve this for arrays and other non-trivial cases (e.g., inner const/volatile fields inside the struct), teach btf_dump to strip away any modifiers when requested. This is done as an extra option on the btf_dump__emit_type_decl() API.
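For illustration, a minimal sketch of the new option in use (btf_dump setup elided; the field name and type id are made up):

#include <bpf/btf.h>

static void emit_decl_without_mods(struct btf_dump *d, __u32 type_id)
{
	DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
		.field_name = "params",
		.indent_level = 1,
		.strip_mods = true, /* drop const/volatile/restrict */
	);

	/* for `const volatile int params[10]` this emits `int params[10]` */
	btf_dump__emit_type_decl(d, type_id, &opts);
}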
Reported-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200713232409.3062144-2-andriin@fb.com --- tools/lib/bpf/btf.h | 2 ++ tools/lib/bpf/btf_dump.c | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index a3b7ef9b737f..227d3f844801 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -144,6 +144,8 @@ struct btf_dump_emit_type_decl_opts { * necessary indentation already */ int indent_level; + /* strip all the const/volatile/restrict mods */ + bool strip_mods; }; #define btf_dump_emit_type_decl_opts__last_field indent_level diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index bbb430317260..e1c344504cae 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -60,6 +60,7 @@ struct btf_dump { const struct btf_ext *btf_ext; btf_dump_printf_fn_t printf_fn; struct btf_dump_opts opts; + bool strip_mods; /* per-type auxiliary state */ struct btf_dump_type_aux_state *type_states; @@ -1032,7 +1033,9 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, fname = OPTS_GET(opts, field_name, ""); lvl = OPTS_GET(opts, indent_level, 0); + d->strip_mods = OPTS_GET(opts, strip_mods, false); btf_dump_emit_type_decl(d, id, fname, lvl); + d->strip_mods = false; return 0; } @@ -1045,6 +1048,10 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, stack_start = d->decl_stack_cnt; for (;;) { + t = btf__type_by_id(d->btf, id); + if (d->strip_mods && btf_is_mod(t)) + goto skip_mod; + err = btf_dump_push_decl_stack_id(d, id); if (err < 0) { /* @@ -1056,12 +1063,11 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, d->decl_stack_cnt = stack_start; return; } - +skip_mod: /* VOID */ if (id == 0) break; - t = btf__type_by_id(d->btf, id); switch (btf_kind(t)) { case BTF_KIND_PTR: case BTF_KIND_VOLATILE: -- cgit v1.2.3 From 0b20933d8cfe2bf6473e9b581b5d1ed9a2117ecc Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 13 Jul 2020 16:24:09 -0700 Subject: tools/bpftool: Strip away modifiers from global variables Reliably remove all the type modifiers from read-only (.rodata) global variable definitions, including cases of inner field const modifiers and arrays of const values. Also modify one of the selftests to ensure that a const volatile struct doesn't prevent user-space from modifying a .rodata variable.
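For illustration, a hypothetical user-space snippet mirroring the selftest changed below: with modifiers stripped in the generated skeleton, the .rodata view is writable before load without any casts (skeleton names are illustrative; error cleanup is elided).

#include "test_skeleton.skel.h" /* hypothetical generated skeleton */

static int set_rodata_and_load(void)
{
	struct test_skeleton *skel = test_skeleton__open();

	if (!skel)
		return -1;
	/* plain int in the skeleton, despite `const volatile` in BPF code */
	skel->rodata->in.in6 = 14; /* writable before load, frozen after */
	return test_skeleton__load(skel);
}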
Fixes: 985ead416df3 ("bpftool: Add skeleton codegen command") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200713232409.3062144-3-andriin@fb.com --- tools/bpf/bpftool/gen.c | 23 ++++++++++------------- tools/lib/bpf/btf.h | 2 +- tools/testing/selftests/bpf/prog_tests/skeleton.c | 6 +++--- tools/testing/selftests/bpf/progs/test_skeleton.c | 6 ++++-- 4 files changed, 18 insertions(+), 19 deletions(-) (limited to 'tools/lib') diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 10de76b296ba..b59d26e89367 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -88,7 +88,7 @@ static const char *get_map_ident(const struct bpf_map *map) return NULL; } -static void codegen_btf_dump_printf(void *ct, const char *fmt, va_list args) +static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args) { vprintf(fmt, args); } @@ -104,17 +104,20 @@ static int codegen_datasec_def(struct bpf_object *obj, int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec); const char *sec_ident; char var_ident[256]; + bool strip_mods = false; - if (strcmp(sec_name, ".data") == 0) + if (strcmp(sec_name, ".data") == 0) { sec_ident = "data"; - else if (strcmp(sec_name, ".bss") == 0) + } else if (strcmp(sec_name, ".bss") == 0) { sec_ident = "bss"; - else if (strcmp(sec_name, ".rodata") == 0) + } else if (strcmp(sec_name, ".rodata") == 0) { sec_ident = "rodata"; - else if (strcmp(sec_name, ".kconfig") == 0) + strip_mods = true; + } else if (strcmp(sec_name, ".kconfig") == 0) { sec_ident = "kconfig"; - else + } else { return 0; + } printf(" struct %s__%s {\n", obj_name, sec_ident); for (i = 0; i < vlen; i++, sec_var++) { @@ -123,16 +126,10 @@ static int codegen_datasec_def(struct bpf_object *obj, DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, .field_name = var_ident, .indent_level = 2, + .strip_mods = strip_mods, ); int need_off = sec_var->offset, align_off, align; __u32 var_type_id = var->type; - const struct btf_type *t; - - t = btf__type_by_id(btf, var_type_id); - while (btf_is_mod(t)) { - var_type_id = t->type; - t = btf__type_by_id(btf, var_type_id); - } if (off > need_off) { p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n", diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 227d3f844801..491c7b41ffdc 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -147,7 +147,7 @@ struct btf_dump_emit_type_decl_opts { /* strip all the const/volatile/restrict mods */ bool strip_mods; }; -#define btf_dump_emit_type_decl_opts__last_field indent_level +#define btf_dump_emit_type_decl_opts__last_field strip_mods LIBBPF_API int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c index fa153cf67b1b..fe87b77af459 100644 --- a/tools/testing/selftests/bpf/prog_tests/skeleton.c +++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c @@ -41,7 +41,7 @@ void test_skeleton(void) CHECK(bss->in4 != 0, "in4", "got %lld != exp %lld\n", bss->in4, 0LL); CHECK(bss->out4 != 0, "out4", "got %lld != exp %lld\n", bss->out4, 0LL); - CHECK(rodata->in6 != 0, "in6", "got %d != exp %d\n", rodata->in6, 0); + CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0); CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0); /* validate we can pre-setup global variables, even in .bss */ @@ -49,7 +49,7 @@ void test_skeleton(void) data->in2 = 11; bss->in3 = 12; bss->in4 
= 13; - rodata->in6 = 14; + rodata->in.in6 = 14; err = test_skeleton__load(skel); if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) @@ -60,7 +60,7 @@ void test_skeleton(void) CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL); CHECK(bss->in3 != 12, "in3", "got %d != exp %d\n", bss->in3, 12); CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL); - CHECK(rodata->in6 != 14, "in6", "got %d != exp %d\n", rodata->in6, 14); + CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14); /* now set new values and attach to get them into outX variables */ data->in1 = 1; diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c index 77ae86f44db5..374ccef704e1 100644 --- a/tools/testing/selftests/bpf/progs/test_skeleton.c +++ b/tools/testing/selftests/bpf/progs/test_skeleton.c @@ -20,7 +20,9 @@ long long in4 __attribute__((aligned(64))) = 0; struct s in5 = {}; /* .rodata section */ -const volatile int in6 = 0; +const volatile struct { + const int in6; +} in = {}; /* .data section */ int out1 = -1; @@ -46,7 +48,7 @@ int handler(const void *ctx) out3 = in3; out4 = in4; out5 = in5; - out6 = in6; + out6 = in.in6; bpf_syscall = CONFIG_BPF_SYSCALL; kern_ver = LINUX_KERNEL_VERSION; -- cgit v1.2.3 From 4be556cf5aef416904743f2cee689351f91445b6 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Jul 2020 15:56:40 +0200 Subject: libbpf: Add SEC name for xdp programs attached to CPUMAP As for DEVMAP, support SEC("xdp_cpumap/") as a short cut for loading the program with type BPF_PROG_TYPE_XDP and expected attach type BPF_XDP_CPUMAP. Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/33174c41993a6d860d9c7c1f280a2477ee39ed11.1594734381.git.lorenzo@kernel.org --- tools/lib/bpf/libbpf.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 4489f95f1d1a..f55fd8a5c008 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6912,6 +6912,8 @@ static const struct bpf_sec_def section_defs[] = { .attach_fn = attach_iter), BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP, BPF_XDP_DEVMAP), + BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP, + BPF_XDP_CPUMAP), BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), -- cgit v1.2.3 From 499dd29d90bb78d4ac4f0d6648e26f4a339829b1 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:33 +0200 Subject: libbpf: Add support for SK_LOOKUP program type Make libbpf aware of the newly added program type, and assign it a section name. 
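For illustration, a hypothetical BPF program using the new section name (the program name and trivial pass-through logic are made up):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_lookup/echo_dispatch")
int echo_dispatch(struct bpf_sk_lookup *ctx)
{
	/* no socket selected: fall back to regular socket lookup */
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";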
Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200717103536.397595-13-jakub@cloudflare.com --- tools/lib/bpf/libbpf.c | 3 +++ tools/lib/bpf/libbpf.h | 2 ++ tools/lib/bpf/libbpf.map | 2 ++ tools/lib/bpf/libbpf_probes.c | 3 +++ 4 files changed, 10 insertions(+) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index f55fd8a5c008..846164c79df1 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6799,6 +6799,7 @@ BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT); BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING); BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS); BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT); +BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP); enum bpf_attach_type bpf_program__get_expected_attach_type(struct bpf_program *prog) @@ -6981,6 +6982,8 @@ static const struct bpf_sec_def section_defs[] = { BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT), BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS), + BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP, + BPF_SK_LOOKUP), }; #undef BPF_PROG_SEC_IMPL diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 2335971ed0bd..c2272132e929 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -350,6 +350,7 @@ LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog); LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog); LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog); LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog); LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog); LIBBPF_API void bpf_program__set_type(struct bpf_program *prog, @@ -377,6 +378,7 @@ LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog); /* * No need for __attribute__((packed)), all members of 'bpf_map_def' diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index c5d5c7664c3b..6f0856abe299 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -287,6 +287,8 @@ LIBBPF_0.1.0 { bpf_map__type; bpf_map__value_size; bpf_program__autoload; + bpf_program__is_sk_lookup; bpf_program__set_autoload; + bpf_program__set_sk_lookup; btf__set_fd; } LIBBPF_0.0.9; diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 10cd8d1891f5..5a3d3f078408 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -78,6 +78,9 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT; break; + case BPF_PROG_TYPE_SK_LOOKUP: + xattr.expected_attach_type = BPF_SK_LOOKUP; + break; case BPF_PROG_TYPE_KPROBE: xattr.kern_version = get_kernel_version(); break; -- cgit v1.2.3 From da7a35062bcccd3f67779a357065ba707b02ff2b Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Sun, 19 Jul 2020 23:17:41 -0700 Subject: libbpf bpf_helpers: Use __builtin_offsetof for offsetof The non-builtin route for offsetof has a dependency on size_t from 
stdlib.h/stdint.h that is undeclared and may break targets. The offsetof macro in bpf_helpers may disable the same macro in other headers that have a #ifdef offsetof guard. Rather than add additional dependencies improve the offsetof macro declared here to use the builtin that is available since llvm 3.7 (the first with a BPF backend). Signed-off-by: Ian Rogers Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200720061741.1514673-1-irogers@google.com --- tools/lib/bpf/bpf_helpers.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index a510d8ed716f..bc14db706b88 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -40,7 +40,7 @@ * Helper macro to manipulate data structures */ #ifndef offsetof -#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) +#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER) #endif #ifndef container_of #define container_of(ptr, type, member) \ -- cgit v1.2.3 From cd31039a7347610863aa8b77a9162048999723d0 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:17 -0700 Subject: tools/libbpf: Add support for bpf map element iterator Add map_fd to bpf_iter_attach_opts and flags to bpf_link_create_opts. Later on, bpftool or selftest will be able to create a bpf map element iterator by passing map_fd to the kernel during link creation time. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184117.590673-1-yhs@fb.com --- tools/lib/bpf/bpf.c | 1 + tools/lib/bpf/bpf.h | 3 ++- tools/lib/bpf/libbpf.c | 10 +++++++++- tools/lib/bpf/libbpf.h | 3 ++- 4 files changed, 14 insertions(+), 3 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index a7329b671c41..e1bdf214f75f 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -598,6 +598,7 @@ int bpf_link_create(int prog_fd, int target_fd, attr.link_create.prog_fd = prog_fd; attr.link_create.target_fd = target_fd; attr.link_create.attach_type = attach_type; + attr.link_create.flags = OPTS_GET(opts, flags, 0); return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr)); } diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index dbef24ebcfcb..6d367e01d05e 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -170,8 +170,9 @@ LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd, struct bpf_link_create_opts { size_t sz; /* size of this struct for forward/backward compatibility */ + __u32 flags; }; -#define bpf_link_create_opts__last_field sz +#define bpf_link_create_opts__last_field flags LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, enum bpf_attach_type attach_type, diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 846164c79df1..a05aa7e2bab6 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8282,13 +8282,20 @@ struct bpf_link * bpf_program__attach_iter(struct bpf_program *prog, const struct bpf_iter_attach_opts *opts) { + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int prog_fd, link_fd; + __u32 target_fd = 0; if (!OPTS_VALID(opts, bpf_iter_attach_opts)) return ERR_PTR(-EINVAL); + if (OPTS_HAS(opts, map_fd)) { + target_fd = opts->map_fd; + link_create_opts.flags = BPF_ITER_LINK_MAP_FD; + } + prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { pr_warn("program '%s': can't attach before loaded\n", @@ 
-8301,7 +8308,8 @@ bpf_program__attach_iter(struct bpf_program *prog, return ERR_PTR(-ENOMEM); link->detach = &bpf_link__detach_fd; - link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_ITER, NULL); + link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER, + &link_create_opts); if (link_fd < 0) { link_fd = -errno; free(link); diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c2272132e929..c6813791fa7e 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -264,8 +264,9 @@ LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map); struct bpf_iter_attach_opts { size_t sz; /* size of this struct for forward/backward compatibility */ + __u32 map_fd; }; -#define bpf_iter_attach_opts__last_field sz +#define bpf_iter_attach_opts__last_field map_fd LIBBPF_API struct bpf_link * bpf_program__attach_iter(struct bpf_program *prog, -- cgit v1.2.3 From d4b4dd6ce7709c2d2fe56dcfc15074ee18505bcb Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 23 Jul 2020 11:06:46 -0700 Subject: libbpf: Print hint when PERF_EVENT_IOC_SET_BPF returns -EPROTO The kernel prevents potential unwinder warnings and crashes by blocking BPF programs with bpf_get_[stack|stackid] on a perf_event without PERF_SAMPLE_CALLCHAIN, or with exclude_callchain_[kernel|user]. Print a hint message in libbpf to help the user debug such issues. Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723180648.1429892-4-songliubraving@fb.com --- tools/lib/bpf/libbpf.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a05aa7e2bab6..e51479d60285 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -7833,6 +7833,9 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, pr_warn("program '%s': failed to attach to pfd %d: %s\n", bpf_program__title(prog, false), pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + if (err == -EPROTO) + pr_warn("program '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", + bpf_program__title(prog, false), pfd); return ERR_PTR(err); } if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { -- cgit v1.2.3 From dc8698cac7aada9b61a612cb819341d84591163e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:46:00 -0700 Subject: libbpf: Add support for BPF XDP link Sync UAPI header and add support for using bpf_link-based XDP attachment. Make the xdp/ prog type set the expected attach type. The kernel didn't enforce attach_type for XDP programs before, so there are no backwards compatibility issues there. Also fix the section_names selftest to recognize that xdp prog types now have an expected attach type.
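For illustration, a hypothetical snippet attaching an XDP program via the new bpf_link-based API (the program lookup, interface name, and error handling are illustrative only):

#include <net/if.h>
#include <bpf/libbpf.h>

static int attach_xdp_link(struct bpf_object *obj)
{
	struct bpf_program *prog;
	struct bpf_link *link;
	int ifindex;

	prog = bpf_object__find_program_by_title(obj, "xdp");
	ifindex = if_nametoindex("eth0"); /* illustrative interface */
	if (!prog || !ifindex)
		return -1;

	link = bpf_program__attach_xdp(prog, ifindex);
	return libbpf_get_error(link); /* 0 on success */
}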
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-8-andriin@fb.com --- tools/include/uapi/linux/bpf.h | 10 +++++++++- tools/lib/bpf/libbpf.c | 9 ++++++++- tools/lib/bpf/libbpf.h | 2 ++ tools/lib/bpf/libbpf.map | 1 + tools/testing/selftests/bpf/prog_tests/section_names.c | 2 +- 5 files changed, 21 insertions(+), 3 deletions(-) (limited to 'tools/lib') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 828c2f6438f2..e1ba4ae6a916 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -230,6 +230,7 @@ enum bpf_attach_type { BPF_CGROUP_INET_SOCK_RELEASE, BPF_XDP_CPUMAP, BPF_SK_LOOKUP, + BPF_XDP, __MAX_BPF_ATTACH_TYPE }; @@ -242,6 +243,7 @@ enum bpf_link_type { BPF_LINK_TYPE_CGROUP = 3, BPF_LINK_TYPE_ITER = 4, BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, MAX_BPF_LINK_TYPE, }; @@ -614,7 +616,10 @@ union bpf_attr { struct { /* struct used by BPF_LINK_CREATE command */ __u32 prog_fd; /* eBPF program to attach */ - __u32 target_fd; /* object to attach to */ + union { + __u32 target_fd; /* object to attach to */ + __u32 target_ifindex; /* target ifindex */ + }; __u32 attach_type; /* attach type */ __u32 flags; /* extra flags */ } link_create; @@ -4064,6 +4069,9 @@ struct bpf_link_info { __u32 netns_ino; __u32 attach_type; } netns; + struct { + __u32 ifindex; + } xdp; }; } __attribute__((aligned(8))); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index e51479d60285..54830d603fee 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6915,7 +6915,8 @@ static const struct bpf_sec_def section_defs[] = { BPF_XDP_DEVMAP), BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP, BPF_XDP_CPUMAP), - BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), + BPF_EAPROG_SEC("xdp", BPF_PROG_TYPE_XDP, + BPF_XDP), BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), @@ -8281,6 +8282,12 @@ bpf_program__attach_netns(struct bpf_program *prog, int netns_fd) return bpf_program__attach_fd(prog, netns_fd, "netns"); } +struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex) +{ + /* target_fd/target_ifindex use the same field in LINK_CREATE */ + return bpf_program__attach_fd(prog, ifindex, "xdp"); +} + struct bpf_link * bpf_program__attach_iter(struct bpf_program *prog, const struct bpf_iter_attach_opts *opts) diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c6813791fa7e..9924385462ab 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -257,6 +257,8 @@ LIBBPF_API struct bpf_link * bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd); LIBBPF_API struct bpf_link * bpf_program__attach_netns(struct bpf_program *prog, int netns_fd); +LIBBPF_API struct bpf_link * +bpf_program__attach_xdp(struct bpf_program *prog, int ifindex); struct bpf_map; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 6f0856abe299..ca49a6a7e5b2 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -286,6 +286,7 @@ LIBBPF_0.1.0 { bpf_map__set_value_size; bpf_map__type; bpf_map__value_size; + bpf_program__attach_xdp; bpf_program__autoload; bpf_program__is_sk_lookup; bpf_program__set_autoload; diff --git a/tools/testing/selftests/bpf/prog_tests/section_names.c b/tools/testing/selftests/bpf/prog_tests/section_names.c index 713167449c98..8b571890c57e 100644 --- a/tools/testing/selftests/bpf/prog_tests/section_names.c 
+++ b/tools/testing/selftests/bpf/prog_tests/section_names.c @@ -35,7 +35,7 @@ static struct sec_name_test tests[] = { {-EINVAL, 0}, }, {"raw_tp/", {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0}, {-EINVAL, 0} }, - {"xdp", {0, BPF_PROG_TYPE_XDP, 0}, {-EINVAL, 0} }, + {"xdp", {0, BPF_PROG_TYPE_XDP, BPF_XDP}, {0, BPF_XDP} }, {"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} }, {"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} }, {"lwt_out", {0, BPF_PROG_TYPE_LWT_OUT, 0}, {-EINVAL, 0} }, -- cgit v1.2.3 From 50450fc716c1a570ee8d8bfe198ef5d3cfca36e4 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 Jul 2020 16:21:48 -0700 Subject: libbpf: Make destructors more robust by handling ERR_PTR(err) cases Most libbpf "constructors" on failure return an ERR_PTR(err) result encoded as a pointer. It's a common mistake to eventually pass such malformed pointers into xxx__destroy()/xxx__free() "destructors". So instead of fixing up cleanup code in selftests and user programs, handle such error pointers in the destructors themselves. This already works beautifully for NULL pointers passed to destructors, so it might as well work for error pointers too. Suggested-by: Song Liu Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200729232148.896125-1-andriin@fb.com --- tools/lib/bpf/btf.c | 4 ++-- tools/lib/bpf/btf_dump.c | 2 +- tools/lib/bpf/libbpf.c | 9 ++++----- 3 files changed, 7 insertions(+), 8 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index c9e760e120dc..ded5b29965f9 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -386,7 +386,7 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, void btf__free(struct btf *btf) { - if (!btf) + if (IS_ERR_OR_NULL(btf)) return; if (btf->fd >= 0) @@ -1025,7 +1025,7 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) void btf_ext__free(struct btf_ext *btf_ext) { - if (!btf_ext) + if (IS_ERR_OR_NULL(btf_ext)) return; free(btf_ext->data); free(btf_ext); diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index e1c344504cae..cf711168d34a 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -183,7 +183,7 @@ void btf_dump__free(struct btf_dump *d) { int i, cnt; - if (!d) + if (IS_ERR_OR_NULL(d)) return; free(d->type_states); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 54830d603fee..b9f11f854985 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6504,7 +6504,7 @@ void bpf_object__close(struct bpf_object *obj) { size_t i; - if (!obj) + if (IS_ERR_OR_NULL(obj)) return; if (obj->clear_priv) @@ -7690,7 +7690,7 @@ int bpf_link__destroy(struct bpf_link *link) { int err = 0; - if (!link) + if (IS_ERR_OR_NULL(link)) return 0; if (!link->disconnected && link->detach) @@ -8502,7 +8502,7 @@ void perf_buffer__free(struct perf_buffer *pb) { int i; - if (!pb) + if (IS_ERR_OR_NULL(pb)) return; if (pb->cpu_bufs) { for (i = 0; i < pb->cpu_cnt; i++) { @@ -9379,8 +9379,7 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) for (i = 0; i < s->prog_cnt; i++) { struct bpf_link **link = s->progs[i].link; - if (!IS_ERR_OR_NULL(*link)) - bpf_link__destroy(*link); + bpf_link__destroy(*link); *link = NULL; } } -- cgit v1.2.3 From 1acf8f90ea7ee59006d0474275922145ac291331 Mon Sep 17 00:00:00 2001 From: Jerry Crunchtime Date: Fri, 31 Jul 2020 17:08:01 +0200 Subject: libbpf: Fix register in PT_REGS MIPS macros The o32, n32 and n64 calling conventions require the return value to be stored in $v0, which maps to register $2, i.e., register number 2. Fixes: c1932cd ("bpf: Add MIPS support to samples/bpf.") Signed-off-by: Jerry Crunchtime Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/43707d31-0210-e8f0-9226-1af140907641@web.de --- tools/lib/bpf/bpf_tracing.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 58eceb884df3..eebf020cbe3e 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -215,7 +215,7 @@ struct pt_regs; #define PT_REGS_PARM5(x) ((x)->regs[8]) #define PT_REGS_RET(x) ((x)->regs[31]) #define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */ -#define PT_REGS_RC(x) ((x)->regs[1]) +#define PT_REGS_RC(x) ((x)->regs[2]) #define PT_REGS_SP(x) ((x)->regs[29]) #define PT_REGS_IP(x) ((x)->cp0_epc) @@ -226,7 +226,7 @@ struct pt_regs; #define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), regs[8]) #define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), regs[31]) #define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), regs[30]) -#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[1]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[2]) #define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), cp0_epc) -- cgit v1.2.3 From 2e49527e52486dac910460b1b3f6fce6e21c6b48 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 31 Jul 2020 11:28:27 -0700 Subject: libbpf: Add bpf_link detach APIs Add the low-level bpf_link_detach() API, as well as a higher-level bpf_link__detach() counterpart. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Song Liu Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200731182830.286260-3-andriin@fb.com --- tools/include/uapi/linux/bpf.h | 5 +++++ tools/lib/bpf/bpf.c | 10 ++++++++++ tools/lib/bpf/bpf.h | 2 ++ tools/lib/bpf/libbpf.c | 5 +++++ tools/lib/bpf/libbpf.h | 1 + tools/lib/bpf/libbpf.map | 2 ++ 6 files changed, 25 insertions(+) (limited to 'tools/lib') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index eb5e0c38eb2c..b134e679e9db 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -117,6 +117,7 @@ enum bpf_cmd { BPF_LINK_GET_NEXT_ID, BPF_ENABLE_STATS, BPF_ITER_CREATE, + BPF_LINK_DETACH, }; enum bpf_map_type { @@ -634,6 +635,10 @@ union bpf_attr { __u32 old_prog_fd; } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { /* struct used by BPF_ENABLE_STATS command */ __u32 type; } enable_stats; diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index e1bdf214f75f..eab14c97c15d 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -603,6 +603,16 @@ int bpf_link_create(int prog_fd, int target_fd, return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr)); } +int bpf_link_detach(int link_fd) +{ + union bpf_attr attr; + + memset(&attr, 0, sizeof(attr)); + attr.link_detach.link_fd = link_fd; + + return sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr)); +} + int bpf_link_update(int link_fd, int new_prog_fd, const struct bpf_link_update_opts *opts) { diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 6d367e01d05e..28855fd5b5f4 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -178,6 +178,8 @@ LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, enum bpf_attach_type attach_type, const struct bpf_link_create_opts *opts); +LIBBPF_API int bpf_link_detach(int link_fd); + struct
bpf_link_update_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 flags; /* extra flags */ diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index b9f11f854985..7be04e45d29c 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -7748,6 +7748,11 @@ struct bpf_link *bpf_link__open(const char *path) return link; } +int bpf_link__detach(struct bpf_link *link) +{ + return bpf_link_detach(link->fd) ? -errno : 0; +} + int bpf_link__pin(struct bpf_link *link, const char *path) { int err; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 9924385462ab..3ed1399bfbbc 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -229,6 +229,7 @@ LIBBPF_API int bpf_link__unpin(struct bpf_link *link); LIBBPF_API int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog); LIBBPF_API void bpf_link__disconnect(struct bpf_link *link); +LIBBPF_API int bpf_link__detach(struct bpf_link *link); LIBBPF_API int bpf_link__destroy(struct bpf_link *link); LIBBPF_API struct bpf_link * diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index ca49a6a7e5b2..099863411f7d 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -273,6 +273,8 @@ LIBBPF_0.0.9 { LIBBPF_0.1.0 { global: + bpf_link__detach; + bpf_link_detach; bpf_map__ifindex; bpf_map__key_size; bpf_map__map_flags; -- cgit v1.2.3 From 94a1fedd63edb672933bef44ca9213937e377c05 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sat, 1 Aug 2020 18:32:17 -0700 Subject: libbpf: Add btf__parse_raw() and generic btf__parse() APIs Add public APIs to parse BTF from raw data file (e.g., /sys/kernel/btf/vmlinux), as well as generic btf__parse(), which will try to determine correct format, currently either raw or ELF. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200802013219.864880-2-andriin@fb.com --- tools/lib/bpf/btf.c | 114 ++++++++++++++++++++++++++++++++--------------- tools/lib/bpf/btf.h | 5 ++- tools/lib/bpf/libbpf.map | 2 + 3 files changed, 83 insertions(+), 38 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index ded5b29965f9..856b09a04563 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -562,6 +562,83 @@ done: return btf; } +struct btf *btf__parse_raw(const char *path) +{ + void *data = NULL; + struct btf *btf; + FILE *f = NULL; + __u16 magic; + int err = 0; + long sz; + + f = fopen(path, "rb"); + if (!f) { + err = -errno; + goto err_out; + } + + /* check BTF magic */ + if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) { + err = -EIO; + goto err_out; + } + if (magic != BTF_MAGIC) { + /* definitely not a raw BTF */ + err = -EPROTO; + goto err_out; + } + + /* get file size */ + if (fseek(f, 0, SEEK_END)) { + err = -errno; + goto err_out; + } + sz = ftell(f); + if (sz < 0) { + err = -errno; + goto err_out; + } + /* rewind to the start */ + if (fseek(f, 0, SEEK_SET)) { + err = -errno; + goto err_out; + } + + /* pre-alloc memory and read all of BTF data */ + data = malloc(sz); + if (!data) { + err = -ENOMEM; + goto err_out; + } + if (fread(data, 1, sz, f) < sz) { + err = -EIO; + goto err_out; + } + + /* finally parse BTF data */ + btf = btf__new(data, sz); + +err_out: + free(data); + if (f) + fclose(f); + return err ? 
ERR_PTR(err) : btf; +} + +struct btf *btf__parse(const char *path, struct btf_ext **btf_ext) +{ + struct btf *btf; + + if (btf_ext) + *btf_ext = NULL; + + btf = btf__parse_raw(path); + if (!IS_ERR(btf) || PTR_ERR(btf) != -EPROTO) + return btf; + + return btf__parse_elf(path, btf_ext); +} + static int compare_vsi_off(const void *_a, const void *_b) { const struct btf_var_secinfo *a = _a; @@ -2951,41 +3028,6 @@ static int btf_dedup_remap_types(struct btf_dedup *d) return 0; } -static struct btf *btf_load_raw(const char *path) -{ - struct btf *btf; - size_t read_cnt; - struct stat st; - void *data; - FILE *f; - - if (stat(path, &st)) - return ERR_PTR(-errno); - - data = malloc(st.st_size); - if (!data) - return ERR_PTR(-ENOMEM); - - f = fopen(path, "rb"); - if (!f) { - btf = ERR_PTR(-errno); - goto cleanup; - } - - read_cnt = fread(data, 1, st.st_size, f); - fclose(f); - if (read_cnt < st.st_size) { - btf = ERR_PTR(-EBADF); - goto cleanup; - } - - btf = btf__new(data, read_cnt); - -cleanup: - free(data); - return btf; -} - /* * Probe few well-known locations for vmlinux kernel image and try to load BTF * data out of it to use for target BTF. @@ -3021,7 +3063,7 @@ struct btf *libbpf_find_kernel_btf(void) continue; if (locations[i].raw_btf) - btf = btf_load_raw(path); + btf = btf__parse_raw(path); else btf = btf__parse_elf(path, NULL); diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 491c7b41ffdc..f4a1a1d2b9a3 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -64,8 +64,9 @@ struct btf_ext_header { LIBBPF_API void btf__free(struct btf *btf); LIBBPF_API struct btf *btf__new(const void *data, __u32 size); -LIBBPF_API struct btf *btf__parse_elf(const char *path, - struct btf_ext **btf_ext); +LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext); +LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); +LIBBPF_API struct btf *btf__parse_raw(const char *path); LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); LIBBPF_API int btf__load(struct btf *btf); LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 099863411f7d..0c4722bfdd0a 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -293,5 +293,7 @@ LIBBPF_0.1.0 { bpf_program__is_sk_lookup; bpf_program__set_autoload; bpf_program__set_sk_lookup; + btf__parse; + btf__parse_raw; btf__set_fd; } LIBBPF_0.0.9; -- cgit v1.2.3
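For illustration, a minimal sketch of the new parsing entry points (the helper name is made up): btf__parse() autodetects the format, so raw files like /sys/kernel/btf/vmlinux and ELF objects with a .BTF section go through the same call site.

#include <bpf/btf.h>
#include <bpf/libbpf.h>

static struct btf *load_any_btf(const char *path)
{
	struct btf *btf = btf__parse(path, NULL); /* NULL: no .BTF.ext needed */

	if (libbpf_get_error(btf))
		return NULL; /* not BTF in either format, or an I/O error */
	return btf;
}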