author    Jakub Kicinski <kuba@kernel.org>  2020-12-04 18:48:11 +0300
committer Jakub Kicinski <kuba@kernel.org>  2020-12-04 18:48:12 +0300
commit    a1dd1d86973182458da7798a95f26cfcbea599b4 (patch)
tree      1adda22ea30ccfac7651a7eed7b7c90356f8243a /tools/lib
parent    55fd59b003f6e8fd88cf16590e79823d7ccf3026 (diff)
parent    eceae70bdeaeb6b8ceb662983cf663ff352fbc96 (diff)
download  linux-a1dd1d86973182458da7798a95f26cfcbea599b4.tar.xz
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-12-03

The main changes are:

1) Support BTF in kernel modules, from Andrii.

2) Introduce preferred busy-polling, from Björn.

3) bpf_ima_inode_hash() and bpf_bprm_opts_set() helpers, from KP Singh.

4) Memcg-based memory accounting for bpf objects, from Roman.

5) Allow bpf_{s,g}etsockopt from cgroup bind{4,6} hooks, from Stanislav.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (118 commits)
  selftests/bpf: Fix invalid use of strncat in test_sockmap
  libbpf: Use memcpy instead of strncpy to please GCC
  selftests/bpf: Add fentry/fexit/fmod_ret selftest for kernel module
  selftests/bpf: Add tp_btf CO-RE reloc test for modules
  libbpf: Support attachment of BPF tracing programs to kernel modules
  libbpf: Factor out low-level BPF program loading helper
  bpf: Allow to specify kernel module BTFs when attaching BPF programs
  bpf: Remove hard-coded btf_vmlinux assumption from BPF verifier
  selftests/bpf: Add CO-RE relocs selftest relying on kernel module BTF
  selftests/bpf: Add support for marking sub-tests as skipped
  selftests/bpf: Add bpf_testmod kernel module for testing
  libbpf: Add kernel module BTF support for CO-RE relocations
  libbpf: Refactor CO-RE relocs to not assume a single BTF object
  libbpf: Add internal helper to load BTF data by FD
  bpf: Keep module's btf_data_size intact after load
  bpf: Fix bpf_put_raw_tracepoint()'s use of __module_address()
  selftests/bpf: Add Userspace tests for TCP_WINDOW_CLAMP
  bpf: Adds support for setting window clamp
  samples/bpf: Fix spelling mistake "recieving" -> "receiving"
  bpf: Fix cold build of test_progs-no_alu32
  ...
====================

Link: https://lore.kernel.org/r/20201204021936.85653-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
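[Editor's note: the headline item, kernel module BTF support, is what lets tracing programs attach to functions defined in modules. A minimal hedged sketch of such a program; the attach target name is illustrative, modeled on the bpf_testmod selftest module added in this series, and libbpf resolves its BTF ID by scanning module BTFs (see load_module_btfs() in the libbpf.c diff below):

/* hedged sketch, not part of this diff: fentry on a module function */
#include "vmlinux.h"            /* assumed: generated via bpftool */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/bpf_testmod_test_read")   /* illustrative module function */
int BPF_PROG(handle_testmod_read)
{
	bpf_printk("bpf_testmod_test_read called\n");
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
]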
Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/bpf/bpf.c              104
-rw-r--r--  tools/lib/bpf/btf.c               74
-rw-r--r--  tools/lib/bpf/btf.h                1
-rw-r--r--  tools/lib/bpf/libbpf.c           527
-rw-r--r--  tools/lib/bpf/libbpf.map           3
-rw-r--r--  tools/lib/bpf/libbpf_internal.h   31
-rw-r--r--  tools/lib/bpf/xsk.c               92
-rw-r--r--  tools/lib/bpf/xsk.h               22
8 files changed, 659 insertions(+), 195 deletions(-)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index d27e34133973..bba48ff4c5c0 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -67,11 +67,12 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
{
+ int retries = 5;
int fd;
do {
fd = sys_bpf(BPF_PROG_LOAD, attr, size);
- } while (fd < 0 && errno == EAGAIN);
+ } while (fd < 0 && errno == EAGAIN && retries-- > 0);
return fd;
}
@@ -214,59 +215,55 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
return info;
}
-int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
- char *log_buf, size_t log_buf_sz)
+int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
{
void *finfo = NULL, *linfo = NULL;
union bpf_attr attr;
- __u32 log_level;
int fd;
- if (!load_attr || !log_buf != !log_buf_sz)
+ if (!load_attr->log_buf != !load_attr->log_buf_sz)
return -EINVAL;
- log_level = load_attr->log_level;
- if (log_level > (4 | 2 | 1) || (log_level && !log_buf))
+ if (load_attr->log_level > (4 | 2 | 1) || (load_attr->log_level && !load_attr->log_buf))
return -EINVAL;
memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
- if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
- attr.prog_type == BPF_PROG_TYPE_LSM) {
- attr.attach_btf_id = load_attr->attach_btf_id;
- } else if (attr.prog_type == BPF_PROG_TYPE_TRACING ||
- attr.prog_type == BPF_PROG_TYPE_EXT) {
- attr.attach_btf_id = load_attr->attach_btf_id;
+
+ if (load_attr->attach_prog_fd)
attr.attach_prog_fd = load_attr->attach_prog_fd;
- } else {
- attr.prog_ifindex = load_attr->prog_ifindex;
- attr.kern_version = load_attr->kern_version;
- }
- attr.insn_cnt = (__u32)load_attr->insns_cnt;
+ else
+ attr.attach_btf_obj_fd = load_attr->attach_btf_obj_fd;
+ attr.attach_btf_id = load_attr->attach_btf_id;
+
+ attr.prog_ifindex = load_attr->prog_ifindex;
+ attr.kern_version = load_attr->kern_version;
+
+ attr.insn_cnt = (__u32)load_attr->insn_cnt;
attr.insns = ptr_to_u64(load_attr->insns);
attr.license = ptr_to_u64(load_attr->license);
- attr.log_level = log_level;
- if (log_level) {
- attr.log_buf = ptr_to_u64(log_buf);
- attr.log_size = log_buf_sz;
- } else {
- attr.log_buf = ptr_to_u64(NULL);
- attr.log_size = 0;
+ attr.log_level = load_attr->log_level;
+ if (attr.log_level) {
+ attr.log_buf = ptr_to_u64(load_attr->log_buf);
+ attr.log_size = load_attr->log_buf_sz;
}
attr.prog_btf_fd = load_attr->prog_btf_fd;
+ attr.prog_flags = load_attr->prog_flags;
+
attr.func_info_rec_size = load_attr->func_info_rec_size;
attr.func_info_cnt = load_attr->func_info_cnt;
attr.func_info = ptr_to_u64(load_attr->func_info);
+
attr.line_info_rec_size = load_attr->line_info_rec_size;
attr.line_info_cnt = load_attr->line_info_cnt;
attr.line_info = ptr_to_u64(load_attr->line_info);
+
if (load_attr->name)
memcpy(attr.prog_name, load_attr->name,
- min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
- attr.prog_flags = load_attr->prog_flags;
+ min(strlen(load_attr->name), (size_t)BPF_OBJ_NAME_LEN - 1));
fd = sys_bpf_prog_load(&attr, sizeof(attr));
if (fd >= 0)
@@ -306,19 +303,19 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
}
fd = sys_bpf_prog_load(&attr, sizeof(attr));
-
if (fd >= 0)
goto done;
}
- if (log_level || !log_buf)
+ if (load_attr->log_level || !load_attr->log_buf)
goto done;
/* Try again with log */
- attr.log_buf = ptr_to_u64(log_buf);
- attr.log_size = log_buf_sz;
+ attr.log_buf = ptr_to_u64(load_attr->log_buf);
+ attr.log_size = load_attr->log_buf_sz;
attr.log_level = 1;
- log_buf[0] = 0;
+ load_attr->log_buf[0] = 0;
+
fd = sys_bpf_prog_load(&attr, sizeof(attr));
done:
free(finfo);
@@ -326,6 +323,49 @@ done:
return fd;
}
+int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
+ char *log_buf, size_t log_buf_sz)
+{
+ struct bpf_prog_load_params p = {};
+
+ if (!load_attr || !log_buf != !log_buf_sz)
+ return -EINVAL;
+
+ p.prog_type = load_attr->prog_type;
+ p.expected_attach_type = load_attr->expected_attach_type;
+ switch (p.prog_type) {
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ case BPF_PROG_TYPE_LSM:
+ p.attach_btf_id = load_attr->attach_btf_id;
+ break;
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_EXT:
+ p.attach_btf_id = load_attr->attach_btf_id;
+ p.attach_prog_fd = load_attr->attach_prog_fd;
+ break;
+ default:
+ p.prog_ifindex = load_attr->prog_ifindex;
+ p.kern_version = load_attr->kern_version;
+ }
+ p.insn_cnt = load_attr->insns_cnt;
+ p.insns = load_attr->insns;
+ p.license = load_attr->license;
+ p.log_level = load_attr->log_level;
+ p.log_buf = log_buf;
+ p.log_buf_sz = log_buf_sz;
+ p.prog_btf_fd = load_attr->prog_btf_fd;
+ p.func_info_rec_size = load_attr->func_info_rec_size;
+ p.func_info_cnt = load_attr->func_info_cnt;
+ p.func_info = load_attr->func_info;
+ p.line_info_rec_size = load_attr->line_info_rec_size;
+ p.line_info_cnt = load_attr->line_info_cnt;
+ p.line_info = load_attr->line_info;
+ p.name = load_attr->name;
+ p.prog_flags = load_attr->prog_flags;
+
+ return libbpf__bpf_prog_load(&p);
+}
+
int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
size_t insns_cnt, const char *license,
__u32 kern_version, char *log_buf,
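[Editor's note: bpf_load_program_xattr() keeps its public signature and now just forwards to the internal loader above. A hedged caller sketch; insns and insn_cnt are assumed to be prepared elsewhere:

#include <errno.h>
#include <stdio.h>
#include <bpf/bpf.h>

/* sketch: load a socket filter through the unchanged public wrapper */
static int load_filter(const struct bpf_insn *insns, size_t insn_cnt)
{
	struct bpf_load_program_attr attr = {};
	char log_buf[4096] = {};
	int fd;

	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = insns;
	attr.insns_cnt = insn_cnt;
	attr.license = "GPL";

	fd = bpf_load_program_xattr(&attr, log_buf, sizeof(log_buf));
	if (fd < 0)
		fprintf(stderr, "load failed: %d\n%s", -errno, log_buf);
	return fd;
}
]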
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 2d0d064c6d31..3c3f2bc6c652 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -432,6 +432,11 @@ __u32 btf__get_nr_types(const struct btf *btf)
return btf->start_id + btf->nr_types - 1;
}
+const struct btf *btf__base_btf(const struct btf *btf)
+{
+ return btf->base_btf;
+}
+
/* internal helper returning non-const pointer to a type */
static struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
{
@@ -674,12 +679,12 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)
__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
- __u32 i;
+ __u32 i, nr_types = btf__get_nr_types(btf);
if (!strcmp(type_name, "void"))
return 0;
- for (i = 1; i <= btf->nr_types; i++) {
+ for (i = 1; i <= nr_types; i++) {
const struct btf_type *t = btf__type_by_id(btf, i);
const char *name = btf__name_by_offset(btf, t->name_off);
@@ -693,12 +698,12 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
__u32 kind)
{
- __u32 i;
+ __u32 i, nr_types = btf__get_nr_types(btf);
if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
return 0;
- for (i = 1; i <= btf->nr_types; i++) {
+ for (i = 1; i <= nr_types; i++) {
const struct btf_type *t = btf__type_by_id(btf, i);
const char *name;
@@ -1318,35 +1323,27 @@ const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
return btf__str_by_offset(btf, offset);
}
-int btf__get_from_id(__u32 id, struct btf **btf)
+struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
{
- struct bpf_btf_info btf_info = { 0 };
+ struct bpf_btf_info btf_info;
__u32 len = sizeof(btf_info);
__u32 last_size;
- int btf_fd;
+ struct btf *btf;
void *ptr;
int err;
- err = 0;
- *btf = NULL;
- btf_fd = bpf_btf_get_fd_by_id(id);
- if (btf_fd < 0)
- return 0;
-
/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
* let's start with a sane default - 4KiB here - and resize it only if
* bpf_obj_get_info_by_fd() needs a bigger buffer.
*/
- btf_info.btf_size = 4096;
- last_size = btf_info.btf_size;
+ last_size = 4096;
ptr = malloc(last_size);
- if (!ptr) {
- err = -ENOMEM;
- goto exit_free;
- }
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
- memset(ptr, 0, last_size);
+ memset(&btf_info, 0, sizeof(btf_info));
btf_info.btf = ptr_to_u64(ptr);
+ btf_info.btf_size = last_size;
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
if (!err && btf_info.btf_size > last_size) {
@@ -1355,31 +1352,48 @@ int btf__get_from_id(__u32 id, struct btf **btf)
last_size = btf_info.btf_size;
temp_ptr = realloc(ptr, last_size);
if (!temp_ptr) {
- err = -ENOMEM;
+ btf = ERR_PTR(-ENOMEM);
goto exit_free;
}
ptr = temp_ptr;
- memset(ptr, 0, last_size);
+
+ len = sizeof(btf_info);
+ memset(&btf_info, 0, sizeof(btf_info));
btf_info.btf = ptr_to_u64(ptr);
+ btf_info.btf_size = last_size;
+
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
}
if (err || btf_info.btf_size > last_size) {
- err = errno;
+ btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
goto exit_free;
}
- *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
- if (IS_ERR(*btf)) {
- err = PTR_ERR(*btf);
- *btf = NULL;
- }
+ btf = btf_new(ptr, btf_info.btf_size, base_btf);
exit_free:
- close(btf_fd);
free(ptr);
+ return btf;
+}
- return err;
+int btf__get_from_id(__u32 id, struct btf **btf)
+{
+ struct btf *res;
+ int btf_fd;
+
+ *btf = NULL;
+ btf_fd = bpf_btf_get_fd_by_id(id);
+ if (btf_fd < 0)
+ return -errno;
+
+ res = btf_get_from_fd(btf_fd, NULL);
+ close(btf_fd);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ *btf = res;
+ return 0;
}
int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 1093f6fe6800..1237bcd1dd17 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -51,6 +51,7 @@ LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
const char *type_name, __u32 kind);
LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
+LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
LIBBPF_API size_t btf__pointer_size(const struct btf *btf);
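[Editor's note: a short hedged sketch of the new btf__base_btf() accessor together with btf__get_from_id() as reworked above; the id would come from a bpf_btf_get_next_id() iteration (assumed):

#include <stdio.h>
#include <bpf/btf.h>

/* sketch: inspect a kernel BTF object by ID */
static void inspect_btf(__u32 id)
{
	struct btf *btf = NULL;

	if (btf__get_from_id(id, &btf) || !btf)
		return;

	/* a non-NULL base BTF means this is split BTF, e.g. a module
	 * BTF built on top of vmlinux BTF */
	if (btf__base_btf(btf))
		printf("BTF #%u is split BTF, %u types total\n",
		       id, btf__get_nr_types(btf));

	btf__free(btf);
}
]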
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 28baee7ba1ca..9be88a90a4aa 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -176,6 +176,8 @@ enum kern_feature_id {
FEAT_PROBE_READ_KERN,
/* BPF_PROG_BIND_MAP is supported */
FEAT_PROG_BIND_MAP,
+ /* Kernel support for module BTFs */
+ FEAT_MODULE_BTF,
__FEAT_CNT,
};
@@ -276,6 +278,7 @@ struct bpf_program {
enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
int prog_ifindex;
+ __u32 attach_btf_obj_fd;
__u32 attach_btf_id;
__u32 attach_prog_fd;
void *func_info;
@@ -402,6 +405,13 @@ struct extern_desc {
static LIST_HEAD(bpf_objects_list);
+struct module_btf {
+ struct btf *btf;
+ char *name;
+ __u32 id;
+ int fd;
+};
+
struct bpf_object {
char name[BPF_OBJ_NAME_LEN];
char license[64];
@@ -462,11 +472,19 @@ struct bpf_object {
struct list_head list;
struct btf *btf;
+ struct btf_ext *btf_ext;
+
/* Parse and load BTF vmlinux if any of the programs in the object need
* it at load time.
*/
struct btf *btf_vmlinux;
- struct btf_ext *btf_ext;
+ /* vmlinux BTF override for CO-RE relocations */
+ struct btf *btf_vmlinux_override;
+ /* Lazily initialized kernel module BTFs */
+ struct module_btf *btf_modules;
+ bool btf_modules_loaded;
+ size_t btf_module_cnt;
+ size_t btf_module_cap;
void *priv;
bpf_object_clear_priv_t clear_priv;
@@ -3960,6 +3978,35 @@ static int probe_prog_bind_map(void)
return ret >= 0;
}
+static int probe_module_btf(void)
+{
+ static const char strs[] = "\0int";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ };
+ struct bpf_btf_info info;
+ __u32 len = sizeof(info);
+ char name[16];
+ int fd, err;
+
+ fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
+ if (fd < 0)
+ return 0; /* BTF not supported at all */
+
+ memset(&info, 0, sizeof(info));
+ info.name = ptr_to_u64(name);
+ info.name_len = sizeof(name);
+
+ /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
+ * kernel's module BTF support coincides with support for
+ * name/name_len fields in struct bpf_btf_info.
+ */
+ err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ close(fd);
+ return !err;
+}
+
enum kern_feature_result {
FEAT_UNKNOWN = 0,
FEAT_SUPPORTED = 1,
@@ -4003,7 +4050,10 @@ static struct kern_feature_desc {
},
[FEAT_PROG_BIND_MAP] = {
"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
- }
+ },
+ [FEAT_MODULE_BTF] = {
+ "module BTF support", probe_module_btf,
+ },
};
static bool kernel_supports(enum kern_feature_id feat_id)
@@ -4603,46 +4653,43 @@ static size_t bpf_core_essential_name_len(const char *name)
return n;
}
-/* dynamically sized list of type IDs */
-struct ids_vec {
- __u32 *data;
+struct core_cand
+{
+ const struct btf *btf;
+ const struct btf_type *t;
+ const char *name;
+ __u32 id;
+};
+
+/* dynamically sized list of type IDs and its associated struct btf */
+struct core_cand_list {
+ struct core_cand *cands;
int len;
};
-static void bpf_core_free_cands(struct ids_vec *cand_ids)
+static void bpf_core_free_cands(struct core_cand_list *cands)
{
- free(cand_ids->data);
- free(cand_ids);
+ free(cands->cands);
+ free(cands);
}
-static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
- __u32 local_type_id,
- const struct btf *targ_btf)
+static int bpf_core_add_cands(struct core_cand *local_cand,
+ size_t local_essent_len,
+ const struct btf *targ_btf,
+ const char *targ_btf_name,
+ int targ_start_id,
+ struct core_cand_list *cands)
{
- size_t local_essent_len, targ_essent_len;
- const char *local_name, *targ_name;
- const struct btf_type *t, *local_t;
- struct ids_vec *cand_ids;
- __u32 *new_ids;
- int i, err, n;
-
- local_t = btf__type_by_id(local_btf, local_type_id);
- if (!local_t)
- return ERR_PTR(-EINVAL);
-
- local_name = btf__name_by_offset(local_btf, local_t->name_off);
- if (str_is_empty(local_name))
- return ERR_PTR(-EINVAL);
- local_essent_len = bpf_core_essential_name_len(local_name);
-
- cand_ids = calloc(1, sizeof(*cand_ids));
- if (!cand_ids)
- return ERR_PTR(-ENOMEM);
+ struct core_cand *new_cands, *cand;
+ const struct btf_type *t;
+ const char *targ_name;
+ size_t targ_essent_len;
+ int n, i;
n = btf__get_nr_types(targ_btf);
- for (i = 1; i <= n; i++) {
+ for (i = targ_start_id; i <= n; i++) {
t = btf__type_by_id(targ_btf, i);
- if (btf_kind(t) != btf_kind(local_t))
+ if (btf_kind(t) != btf_kind(local_cand->t))
continue;
targ_name = btf__name_by_offset(targ_btf, t->name_off);
@@ -4653,24 +4700,174 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
if (targ_essent_len != local_essent_len)
continue;
- if (strncmp(local_name, targ_name, local_essent_len) == 0) {
- pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n",
- local_type_id, btf_kind_str(local_t),
- local_name, i, btf_kind_str(t), targ_name);
- new_ids = libbpf_reallocarray(cand_ids->data,
- cand_ids->len + 1,
- sizeof(*cand_ids->data));
- if (!new_ids) {
- err = -ENOMEM;
- goto err_out;
- }
- cand_ids->data = new_ids;
- cand_ids->data[cand_ids->len++] = i;
+ if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
+ continue;
+
+ pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
+ local_cand->id, btf_kind_str(local_cand->t),
+ local_cand->name, i, btf_kind_str(t), targ_name,
+ targ_btf_name);
+ new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
+ sizeof(*cands->cands));
+ if (!new_cands)
+ return -ENOMEM;
+
+ cand = &new_cands[cands->len];
+ cand->btf = targ_btf;
+ cand->t = t;
+ cand->name = targ_name;
+ cand->id = i;
+
+ cands->cands = new_cands;
+ cands->len++;
+ }
+ return 0;
+}
+
+static int load_module_btfs(struct bpf_object *obj)
+{
+ struct bpf_btf_info info;
+ struct module_btf *mod_btf;
+ struct btf *btf;
+ char name[64];
+ __u32 id = 0, len;
+ int err, fd;
+
+ if (obj->btf_modules_loaded)
+ return 0;
+
+ /* don't do this again, even if we find no module BTFs */
+ obj->btf_modules_loaded = true;
+
+ /* kernel too old to support module BTFs */
+ if (!kernel_supports(FEAT_MODULE_BTF))
+ return 0;
+
+ while (true) {
+ err = bpf_btf_get_next_id(id, &id);
+ if (err && errno == ENOENT)
+ return 0;
+ if (err) {
+ err = -errno;
+ pr_warn("failed to iterate BTF objects: %d\n", err);
+ return err;
+ }
+
+ fd = bpf_btf_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue; /* expected race: BTF was unloaded */
+ err = -errno;
+ pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
+ return err;
+ }
+
+ len = sizeof(info);
+ memset(&info, 0, sizeof(info));
+ info.name = ptr_to_u64(name);
+ info.name_len = sizeof(name);
+
+ err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ err = -errno;
+ pr_warn("failed to get BTF object #%d info: %d\n", id, err);
+ goto err_out;
+ }
+
+ /* ignore non-module BTFs */
+ if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
+ close(fd);
+ continue;
}
+
+ btf = btf_get_from_fd(fd, obj->btf_vmlinux);
+ if (IS_ERR(btf)) {
+ pr_warn("failed to load module [%s]'s BTF object #%d: %ld\n",
+ name, id, PTR_ERR(btf));
+ err = PTR_ERR(btf);
+ goto err_out;
+ }
+
+ err = btf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
+ sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
+ if (err)
+ goto err_out;
+
+ mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
+
+ mod_btf->btf = btf;
+ mod_btf->id = id;
+ mod_btf->fd = fd;
+ mod_btf->name = strdup(name);
+ if (!mod_btf->name) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ continue;
+
+err_out:
+ close(fd);
+ return err;
}
- return cand_ids;
+
+ return 0;
+}
+
+static struct core_cand_list *
+bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
+{
+ struct core_cand local_cand = {};
+ struct core_cand_list *cands;
+ const struct btf *main_btf;
+ size_t local_essent_len;
+ int err, i;
+
+ local_cand.btf = local_btf;
+ local_cand.t = btf__type_by_id(local_btf, local_type_id);
+ if (!local_cand.t)
+ return ERR_PTR(-EINVAL);
+
+ local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
+ if (str_is_empty(local_cand.name))
+ return ERR_PTR(-EINVAL);
+ local_essent_len = bpf_core_essential_name_len(local_cand.name);
+
+ cands = calloc(1, sizeof(*cands));
+ if (!cands)
+ return ERR_PTR(-ENOMEM);
+
+ /* Attempt to find target candidates in vmlinux BTF first */
+ main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
+ err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
+ if (err)
+ goto err_out;
+
+ /* if vmlinux BTF has any candidate, don't go for module BTFs */
+ if (cands->len)
+ return cands;
+
+ /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
+ if (obj->btf_vmlinux_override)
+ return cands;
+
+ /* now look through module BTFs, trying to still find candidates */
+ err = load_module_btfs(obj);
+ if (err)
+ goto err_out;
+
+ for (i = 0; i < obj->btf_module_cnt; i++) {
+ err = bpf_core_add_cands(&local_cand, local_essent_len,
+ obj->btf_modules[i].btf,
+ obj->btf_modules[i].name,
+ btf__get_nr_types(obj->btf_vmlinux) + 1,
+ cands);
+ if (err)
+ goto err_out;
+ }
+
+ return cands;
err_out:
- bpf_core_free_cands(cand_ids);
+ bpf_core_free_cands(cands);
return ERR_PTR(err);
}
@@ -5664,7 +5861,6 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
const struct bpf_core_relo *relo,
int relo_idx,
const struct btf *local_btf,
- const struct btf *targ_btf,
struct hashmap *cand_cache)
{
struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
@@ -5672,8 +5868,8 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
struct bpf_core_relo_res cand_res, targ_res;
const struct btf_type *local_type;
const char *local_name;
- struct ids_vec *cand_ids;
- __u32 local_id, cand_id;
+ struct core_cand_list *cands = NULL;
+ __u32 local_id;
const char *spec_str;
int i, j, err;
@@ -5720,24 +5916,24 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
return -EOPNOTSUPP;
}
- if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
- cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
- if (IS_ERR(cand_ids)) {
- pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld",
+ if (!hashmap__find(cand_cache, type_key, (void **)&cands)) {
+ cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
+ if (IS_ERR(cands)) {
+ pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
prog->name, relo_idx, local_id, btf_kind_str(local_type),
- local_name, PTR_ERR(cand_ids));
- return PTR_ERR(cand_ids);
+ local_name, PTR_ERR(cands));
+ return PTR_ERR(cands);
}
- err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
+ err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
if (err) {
- bpf_core_free_cands(cand_ids);
+ bpf_core_free_cands(cands);
return err;
}
}
- for (i = 0, j = 0; i < cand_ids->len; i++) {
- cand_id = cand_ids->data[i];
- err = bpf_core_spec_match(&local_spec, targ_btf, cand_id, &cand_spec);
+ for (i = 0, j = 0; i < cands->len; i++) {
+ err = bpf_core_spec_match(&local_spec, cands->cands[i].btf,
+ cands->cands[i].id, &cand_spec);
if (err < 0) {
pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
prog->name, relo_idx, i);
@@ -5781,7 +5977,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
return -EINVAL;
}
- cand_ids->data[j++] = cand_spec.root_type_id;
+ cands->cands[j++] = cands->cands[i];
}
/*
@@ -5793,7 +5989,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
* depending on relo's kind.
*/
if (j > 0)
- cand_ids->len = j;
+ cands->len = j;
/*
* If no candidates were found, it might be both a programmer error,
@@ -5837,20 +6033,19 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
struct hashmap_entry *entry;
struct hashmap *cand_cache = NULL;
struct bpf_program *prog;
- struct btf *targ_btf;
const char *sec_name;
int i, err = 0, insn_idx, sec_idx;
if (obj->btf_ext->core_relo_info.len == 0)
return 0;
- if (targ_btf_path)
- targ_btf = btf__parse(targ_btf_path, NULL);
- else
- targ_btf = obj->btf_vmlinux;
- if (IS_ERR_OR_NULL(targ_btf)) {
- pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
- return PTR_ERR(targ_btf);
+ if (targ_btf_path) {
+ obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
+ if (IS_ERR_OR_NULL(obj->btf_vmlinux_override)) {
+ err = PTR_ERR(obj->btf_vmlinux_override);
+ pr_warn("failed to parse target BTF: %d\n", err);
+ return err;
+ }
}
cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
@@ -5902,8 +6097,7 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
if (!prog->load)
continue;
- err = bpf_core_apply_relo(prog, rec, i, obj->btf,
- targ_btf, cand_cache);
+ err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
if (err) {
pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
prog->name, i, err);
@@ -5913,9 +6107,10 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
}
out:
- /* obj->btf_vmlinux is freed at the end of object load phase */
- if (targ_btf != obj->btf_vmlinux)
- btf__free(targ_btf);
+ /* obj->btf_vmlinux and module BTFs are freed after object load */
+ btf__free(obj->btf_vmlinux_override);
+ obj->btf_vmlinux_override = NULL;
+
if (!IS_ERR_OR_NULL(cand_cache)) {
hashmap__for_each_entry(cand_cache, entry, i) {
bpf_core_free_cands(entry->value);
@@ -6626,16 +6821,25 @@ static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
char *license, __u32 kern_version, int *pfd)
{
- struct bpf_load_program_attr load_attr;
+ struct bpf_prog_load_params load_attr = {};
char *cp, errmsg[STRERR_BUFSIZE];
size_t log_buf_size = 0;
char *log_buf = NULL;
int btf_fd, ret;
+ if (prog->type == BPF_PROG_TYPE_UNSPEC) {
+ /*
+ * The program type must be set. Most likely we couldn't find a proper
+ * section definition at load time, and thus we didn't infer the type.
+ */
+ pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
+ prog->name, prog->sec_name);
+ return -EINVAL;
+ }
+
if (!insns || !insns_cnt)
return -EINVAL;
- memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
load_attr.prog_type = prog->type;
/* old kernels might not support specifying expected_attach_type */
if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
@@ -6646,19 +6850,17 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
if (kernel_supports(FEAT_PROG_NAME))
load_attr.name = prog->name;
load_attr.insns = insns;
- load_attr.insns_cnt = insns_cnt;
+ load_attr.insn_cnt = insns_cnt;
load_attr.license = license;
- if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
- prog->type == BPF_PROG_TYPE_LSM) {
- load_attr.attach_btf_id = prog->attach_btf_id;
- } else if (prog->type == BPF_PROG_TYPE_TRACING ||
- prog->type == BPF_PROG_TYPE_EXT) {
+ load_attr.attach_btf_id = prog->attach_btf_id;
+ if (prog->attach_prog_fd)
load_attr.attach_prog_fd = prog->attach_prog_fd;
- load_attr.attach_btf_id = prog->attach_btf_id;
- } else {
- load_attr.kern_version = kern_version;
- load_attr.prog_ifindex = prog->prog_ifindex;
- }
+ else
+ load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
+ load_attr.attach_btf_id = prog->attach_btf_id;
+ load_attr.kern_version = kern_version;
+ load_attr.prog_ifindex = prog->prog_ifindex;
+
/* specify func_info/line_info only if kernel supports them */
btf_fd = bpf_object__btf_fd(prog->obj);
if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
@@ -6682,7 +6884,9 @@ retry_load:
*log_buf = 0;
}
- ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
+ load_attr.log_buf = log_buf;
+ load_attr.log_buf_sz = log_buf_size;
+ ret = libbpf__bpf_prog_load(&load_attr);
if (ret >= 0) {
if (log_buf && load_attr.log_level)
@@ -6723,9 +6927,9 @@ retry_load:
pr_warn("-- BEGIN DUMP LOG ---\n");
pr_warn("\n%s\n", log_buf);
pr_warn("-- END LOG --\n");
- } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
+ } else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
pr_warn("Program too large (%zu insns), at most %d insns\n",
- load_attr.insns_cnt, BPF_MAXINSNS);
+ load_attr.insn_cnt, BPF_MAXINSNS);
ret = -LIBBPF_ERRNO__PROG2BIG;
} else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
/* Wrong program type? */
@@ -6733,7 +6937,9 @@ retry_load:
load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
load_attr.expected_attach_type = 0;
- fd = bpf_load_program_xattr(&load_attr, NULL, 0);
+ load_attr.log_buf = NULL;
+ load_attr.log_buf_sz = 0;
+ fd = libbpf__bpf_prog_load(&load_attr);
if (fd >= 0) {
close(fd);
ret = -LIBBPF_ERRNO__PROGTYPE;
@@ -6746,11 +6952,11 @@ out:
return ret;
}
-static int libbpf_find_attach_btf_id(struct bpf_program *prog);
+static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
{
- int err = 0, fd, i, btf_id;
+ int err = 0, fd, i;
if (prog->obj->loaded) {
pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
@@ -6760,10 +6966,14 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
if ((prog->type == BPF_PROG_TYPE_TRACING ||
prog->type == BPF_PROG_TYPE_LSM ||
prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
- btf_id = libbpf_find_attach_btf_id(prog);
- if (btf_id <= 0)
- return btf_id;
- prog->attach_btf_id = btf_id;
+ int btf_obj_fd = 0, btf_type_id = 0;
+
+ err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
+ if (err)
+ return err;
+
+ prog->attach_btf_obj_fd = btf_obj_fd;
+ prog->attach_btf_id = btf_type_id;
}
if (prog->instances.nr < 0 || !prog->instances.fds) {
@@ -6923,9 +7133,12 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
bpf_object__for_each_program(prog, obj) {
prog->sec_def = find_sec_def(prog->sec_name);
- if (!prog->sec_def)
+ if (!prog->sec_def) {
/* couldn't guess, but user might manually specify */
+ pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
+ prog->name, prog->sec_name);
continue;
+ }
if (prog->sec_def->is_sleepable)
prog->prog_flags |= BPF_F_SLEEPABLE;
@@ -7271,6 +7484,15 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
err = err ? : bpf_object__load_progs(obj, attr->log_level);
+ /* clean up module BTFs */
+ for (i = 0; i < obj->btf_module_cnt; i++) {
+ close(obj->btf_modules[i].fd);
+ btf__free(obj->btf_modules[i].btf);
+ free(obj->btf_modules[i].name);
+ }
+ free(obj->btf_modules);
+
+ /* clean up vmlinux BTF */
btf__free(obj->btf_vmlinux);
obj->btf_vmlinux = NULL;
@@ -7649,6 +7871,16 @@ bool bpf_map__is_pinned(const struct bpf_map *map)
return map->pinned;
}
+static void sanitize_pin_path(char *s)
+{
+ /* bpffs disallows periods in path names */
+ while (*s) {
+ if (*s == '.')
+ *s = '_';
+ s++;
+ }
+}
+
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
struct bpf_map *map;
@@ -7678,6 +7910,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
err = -ENAMETOOLONG;
goto err_unpin_maps;
}
+ sanitize_pin_path(buf);
pin_path = buf;
} else if (!map->pin_path) {
continue;
@@ -7722,6 +7955,7 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
return -EINVAL;
else if (len >= PATH_MAX)
return -ENAMETOOLONG;
+ sanitize_pin_path(buf);
pin_path = buf;
} else if (!map->pin_path) {
continue;
@@ -8607,8 +8841,8 @@ static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
return btf__find_by_name_kind(btf, btf_type_name, kind);
}
-static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
- enum bpf_attach_type attach_type)
+static inline int find_attach_btf_id(struct btf *btf, const char *name,
+ enum bpf_attach_type attach_type)
{
int err;
@@ -8624,9 +8858,6 @@ static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
else
err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
- if (err <= 0)
- pr_warn("%s is not found in vmlinux BTF\n", name);
-
return err;
}
@@ -8642,7 +8873,10 @@ int libbpf_find_vmlinux_btf_id(const char *name,
return -EINVAL;
}
- err = __find_vmlinux_btf_id(btf, name, attach_type);
+ err = find_attach_btf_id(btf, name, attach_type);
+ if (err <= 0)
+ pr_warn("%s is not found in vmlinux BTF\n", name);
+
btf__free(btf);
return err;
}
@@ -8680,11 +8914,49 @@ out:
return err;
}
-static int libbpf_find_attach_btf_id(struct bpf_program *prog)
+static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
+ enum bpf_attach_type attach_type,
+ int *btf_obj_fd, int *btf_type_id)
+{
+ int ret, i;
+
+ ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
+ if (ret > 0) {
+ *btf_obj_fd = 0; /* vmlinux BTF */
+ *btf_type_id = ret;
+ return 0;
+ }
+ if (ret != -ENOENT)
+ return ret;
+
+ ret = load_module_btfs(obj);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < obj->btf_module_cnt; i++) {
+ const struct module_btf *mod = &obj->btf_modules[i];
+
+ ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
+ if (ret > 0) {
+ *btf_obj_fd = mod->fd;
+ *btf_type_id = ret;
+ return 0;
+ }
+ if (ret == -ENOENT)
+ continue;
+
+ return ret;
+ }
+
+ return -ESRCH;
+}
+
+static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
{
enum bpf_attach_type attach_type = prog->expected_attach_type;
__u32 attach_prog_fd = prog->attach_prog_fd;
- const char *name = prog->sec_name;
+ const char *name = prog->sec_name, *attach_name;
+ const struct bpf_sec_def *sec = NULL;
int i, err;
if (!name)
@@ -8695,17 +8967,37 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog)
continue;
if (strncmp(name, section_defs[i].sec, section_defs[i].len))
continue;
- if (attach_prog_fd)
- err = libbpf_find_prog_btf_id(name + section_defs[i].len,
- attach_prog_fd);
- else
- err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
- name + section_defs[i].len,
- attach_type);
+
+ sec = &section_defs[i];
+ break;
+ }
+
+ if (!sec) {
+ pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
+ return -ESRCH;
+ }
+ attach_name = name + sec->len;
+
+ /* BPF program's BTF ID */
+ if (attach_prog_fd) {
+ err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
+ if (err < 0) {
+ pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
+ attach_prog_fd, attach_name, err);
+ return err;
+ }
+ *btf_obj_fd = 0;
+ *btf_type_id = err;
+ return 0;
+ }
+
+ /* kernel/module BTF ID */
+ err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+ if (err) {
+ pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
return err;
}
- pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
- return -ESRCH;
+ return 0;
}
int libbpf_attach_type_by_name(const char *name,
@@ -10594,6 +10886,7 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
return btf_id;
prog->attach_btf_id = btf_id;
+ prog->attach_btf_obj_fd = 0;
prog->attach_prog_fd = attach_prog_fd;
return 0;
}
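[Editor's note: the targ_btf_path override consumed by bpf_object__relocate_core() above is reachable through bpf_object__load_xattr(). A hedged sketch; obj is an opened bpf_object and the path is illustrative:

#include <stdio.h>
#include <bpf/libbpf.h>

/* sketch: CO-RE relocate against a custom BTF file instead of the
 * running kernel's vmlinux BTF */
static int load_with_custom_btf(struct bpf_object *obj)
{
	struct bpf_object_load_attr load_attr = {
		.obj = obj,
		.target_btf_path = "/tmp/vmlinux.btf", /* illustrative */
	};
	int err;

	err = bpf_object__load_xattr(&load_attr);
	if (err)
		fprintf(stderr, "load with custom BTF failed: %d\n", err);
	return err;
}
]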
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 29ff4807b909..7c4126542e2b 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -340,9 +340,12 @@ LIBBPF_0.2.0 {
LIBBPF_0.3.0 {
global:
+ btf__base_btf;
btf__parse_elf_split;
btf__parse_raw_split;
btf__parse_split;
btf__new_empty_split;
btf__new_split;
+ xsk_setup_xdp_prog;
+ xsk_socket__update_xskmap;
} LIBBPF_0.2.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index d99bc847bf84..969d0ac592ba 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -151,10 +151,41 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);
+struct bpf_prog_load_params {
+ enum bpf_prog_type prog_type;
+ enum bpf_attach_type expected_attach_type;
+ const char *name;
+ const struct bpf_insn *insns;
+ size_t insn_cnt;
+ const char *license;
+ __u32 kern_version;
+ __u32 attach_prog_fd;
+ __u32 attach_btf_obj_fd;
+ __u32 attach_btf_id;
+ __u32 prog_ifindex;
+ __u32 prog_btf_fd;
+ __u32 prog_flags;
+
+ __u32 func_info_rec_size;
+ const void *func_info;
+ __u32 func_info_cnt;
+
+ __u32 line_info_rec_size;
+ const void *line_info;
+ __u32 line_info_cnt;
+
+ __u32 log_level;
+ char *log_buf;
+ size_t log_buf_sz;
+};
+
+int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);
+
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
__u32 *size);
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
__u32 *off);
+struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
struct btf_ext_info {
/*
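[Editor's note: a hedged sketch of filling the new internal params struct; every value here is illustrative -- real callers live inside libbpf itself (see load_program() in the libbpf.c diff above), since libbpf__bpf_prog_load() is not exported:

struct bpf_prog_load_params p = {
	.prog_type = BPF_PROG_TYPE_TRACING,
	.expected_attach_type = BPF_TRACE_FENTRY,
	.insns = insns,                  /* assumed: prepared instructions */
	.insn_cnt = insn_cnt,
	.license = "GPL",
	.attach_btf_obj_fd = btf_obj_fd, /* module BTF FD, or 0 for vmlinux */
	.attach_btf_id = btf_type_id,
	.log_level = 1,
	.log_buf = log_buf,              /* assumed: caller-provided buffer */
	.log_buf_sz = sizeof(log_buf),
};
int fd = libbpf__bpf_prog_load(&p);
]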
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 9bc537d0b92d..e3e41ceeb1bc 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -566,8 +566,35 @@ static int xsk_set_bpf_maps(struct xsk_socket *xsk)
&xsk->fd, 0);
}
-static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
+static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
{
+ char ifname[IFNAMSIZ];
+ struct xsk_ctx *ctx;
+ char *interface;
+
+ ctx = calloc(1, sizeof(*ctx));
+ if (!ctx)
+ return -ENOMEM;
+
+ interface = if_indextoname(ifindex, &ifname[0]);
+ if (!interface) {
+ free(ctx);
+ return -errno;
+ }
+
+ ctx->ifindex = ifindex;
+ memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
+ ctx->ifname[IFNAMSIZ - 1] = 0;
+
+ xsk->ctx = ctx;
+
+ return 0;
+}
+
+static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp,
+ int *xsks_map_fd)
+{
+ struct xsk_socket *xsk = _xdp;
struct xsk_ctx *ctx = xsk->ctx;
__u32 prog_id = 0;
int err;
@@ -584,8 +611,7 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
err = xsk_load_xdp_prog(xsk);
if (err) {
- xsk_delete_bpf_maps(xsk);
- return err;
+ goto err_load_xdp_prog;
}
} else {
ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id);
@@ -598,15 +624,29 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
}
}
- if (xsk->rx)
+ if (xsk->rx) {
err = xsk_set_bpf_maps(xsk);
- if (err) {
- xsk_delete_bpf_maps(xsk);
- close(ctx->prog_fd);
- return err;
+ if (err) {
+ if (!prog_id) {
+ goto err_set_bpf_maps;
+ } else {
+ close(ctx->prog_fd);
+ return err;
+ }
+ }
}
+ if (xsks_map_fd)
+ *xsks_map_fd = ctx->xsks_map_fd;
return 0;
+
+err_set_bpf_maps:
+ close(ctx->prog_fd);
+ bpf_set_link_xdp_fd(ctx->ifindex, -1, 0);
+err_load_xdp_prog:
+ xsk_delete_bpf_maps(xsk);
+
+ return err;
}
static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
@@ -689,6 +729,40 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
return ctx;
}
+static void xsk_destroy_xsk_struct(struct xsk_socket *xsk)
+{
+ free(xsk->ctx);
+ free(xsk);
+}
+
+int xsk_socket__update_xskmap(struct xsk_socket *xsk, int fd)
+{
+ xsk->ctx->xsks_map_fd = fd;
+ return xsk_set_bpf_maps(xsk);
+}
+
+int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd)
+{
+ struct xsk_socket *xsk;
+ int res;
+
+ xsk = calloc(1, sizeof(*xsk));
+ if (!xsk)
+ return -ENOMEM;
+
+ res = xsk_create_xsk_struct(ifindex, xsk);
+ if (res) {
+ free(xsk);
+ return -EINVAL;
+ }
+
+ res = __xsk_setup_xdp_prog(xsk, xsks_map_fd);
+
+ xsk_destroy_xsk_struct(xsk);
+
+ return res;
+}
+
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
const char *ifname,
__u32 queue_id, struct xsk_umem *umem,
@@ -838,7 +912,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
ctx->prog_fd = -1;
if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
- err = xsk_setup_xdp_prog(xsk);
+ err = __xsk_setup_xdp_prog(xsk, NULL);
if (err)
goto out_mmap_tx;
}
diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h
index 1069c46364ff..e9f121f5d129 100644
--- a/tools/lib/bpf/xsk.h
+++ b/tools/lib/bpf/xsk.h
@@ -113,8 +113,7 @@ static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
return (entries > nb) ? nb : entries;
}
-static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod,
- size_t nb, __u32 *idx)
+static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
{
if (xsk_prod_nb_free(prod, nb) < nb)
return 0;
@@ -125,7 +124,7 @@ static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod,
return nb;
}
-static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb)
+static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
{
/* Make sure everything has been written to the ring before indicating
* this to the kernel by writing the producer pointer.
@@ -135,10 +134,9 @@ static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb)
*prod->producer += nb;
}
-static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
- size_t nb, __u32 *idx)
+static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
{
- size_t entries = xsk_cons_nb_avail(cons, nb);
+ __u32 entries = xsk_cons_nb_avail(cons, nb);
if (entries > 0) {
/* Make sure we do not speculatively read the data before
@@ -153,7 +151,12 @@ static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
return entries;
}
-static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb)
+static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
+{
+ cons->cached_cons -= nb;
+}
+
+static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
{
/* Make sure data has been read before indicating we are done
* with the entries by updating the consumer pointer.
@@ -201,6 +204,11 @@ struct xsk_umem_config {
__u32 flags;
};
+LIBBPF_API int xsk_setup_xdp_prog(int ifindex,
+ int *xsks_map_fd);
+LIBBPF_API int xsk_socket__update_xskmap(struct xsk_socket *xsk,
+ int xsks_map_fd);
+
/* Flags for the libbpf_flags field. */
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)
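[Editor's note: a minimal hedged sketch of the two new XSK entry points used together; ifindex and xsk (an already-created AF_XDP socket) are assumed to be set up elsewhere:

#include <bpf/xsk.h>

/* sketch: attach the default XDP program, then register a socket */
static int attach_and_register(int ifindex, struct xsk_socket *xsk)
{
	int xsks_map_fd = -1;
	int err;

	/* load (or reuse) the XDP program on the interface and fetch
	 * the xskmap FD it uses */
	err = xsk_setup_xdp_prog(ifindex, &xsks_map_fd);
	if (err)
		return err;

	/* insert the AF_XDP socket into that xskmap */
	return xsk_socket__update_xskmap(xsk, xsks_map_fd);
}
]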