Diffstat (limited to 'tools/lib/bpf/btf.c')
-rw-r--r--  tools/lib/bpf/btf.c | 139
1 file changed, 105 insertions(+), 34 deletions(-)
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index fadf089ae8fe..9aa19c89f758 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -454,7 +454,7 @@ const struct btf *btf__base_btf(const struct btf *btf)
}
/* internal helper returning non-const pointer to a type */
-struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
+struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
{
if (type_id == 0)
return &btf_void;
@@ -610,6 +610,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
case BTF_KIND_RESTRICT:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
type_id = t->type;
break;
case BTF_KIND_ARRAY:
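
With BTF_KIND_TYPE_TAG handled as just another modifier kind above, size resolution follows t->type straight through the tag. A minimal sketch of the effect, assuming the btf__add_type_tag() constructor that was added together with this kind (the helper name tagged_int_size() is only illustrative, not part of this patch):

#include <errno.h>
#include <bpf/btf.h>

/* Sketch: a type tag is a zero-size modifier, so the size of a tagged
 * int resolves to the size of the underlying int (4 bytes here).
 */
static int tagged_int_size(void)
{
	struct btf *btf = btf__new_empty();
	int int_id, tag_id;
	__s64 sz;

	if (!btf)
		return -ENOMEM;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);   /* [1] int */
	tag_id = btf__add_type_tag(btf, "user", int_id);         /* [2] type_tag "user" -> [1] */

	sz = btf__resolve_size(btf, tag_id);  /* takes the new TYPE_TAG case -> 4 */
	btf__free(btf);

	return sz == 4 ? 0 : (int)sz;
}
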
@@ -1123,54 +1124,86 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf)
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
-int btf__load_into_kernel(struct btf *btf)
+int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
{
- __u32 log_buf_size = 0, raw_size;
- char *log_buf = NULL;
+ LIBBPF_OPTS(bpf_btf_load_opts, opts);
+ __u32 buf_sz = 0, raw_size;
+ char *buf = NULL, *tmp;
void *raw_data;
int err = 0;
if (btf->fd >= 0)
return libbpf_err(-EEXIST);
+ if (log_sz && !log_buf)
+ return libbpf_err(-EINVAL);
-retry_load:
- if (log_buf_size) {
- log_buf = malloc(log_buf_size);
- if (!log_buf)
- return libbpf_err(-ENOMEM);
-
- *log_buf = 0;
- }
-
+ /* cache native raw data representation */
raw_data = btf_get_raw_data(btf, &raw_size, false);
if (!raw_data) {
err = -ENOMEM;
goto done;
}
- /* cache native raw data representation */
btf->raw_size = raw_size;
btf->raw_data = raw_data;
- btf->fd = bpf_load_btf(raw_data, raw_size, log_buf, log_buf_size, false);
+retry_load:
+ /* if log_level is 0, we won't provide log_buf/log_size to the kernel,
+ * initially. Only if BTF loading fails, we bump log_level to 1 and
+ * retry, using either auto-allocated or custom log_buf. This way
+ * non-NULL custom log_buf provides a buffer just in case, but hopes
+ * for successful load and no need for log_buf.
+ */
+ if (log_level) {
+ /* if caller didn't provide custom log_buf, we'll keep
+ * allocating our own progressively bigger buffers for BTF
+ * verification log
+ */
+ if (!log_buf) {
+ buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
+ tmp = realloc(buf, buf_sz);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto done;
+ }
+ buf = tmp;
+ buf[0] = '\0';
+ }
+
+ opts.log_buf = log_buf ? log_buf : buf;
+ opts.log_size = log_buf ? log_sz : buf_sz;
+ opts.log_level = log_level;
+ }
+
+ btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
if (btf->fd < 0) {
- if (!log_buf || errno == ENOSPC) {
- log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
- log_buf_size << 1);
- free(log_buf);
+ /* time to turn on verbose mode and try again */
+ if (log_level == 0) {
+ log_level = 1;
goto retry_load;
}
+ /* only retry if caller didn't provide custom log_buf, but
+ * make sure we can never overflow buf_sz
+ */
+ if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
+ goto retry_load;
err = -errno;
- pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
- if (*log_buf)
- pr_warn("%s\n", log_buf);
- goto done;
+ pr_warn("BTF loading error: %d\n", err);
+ /* don't print out contents of custom log_buf */
+ if (!log_buf && buf[0])
+ pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
}
done:
- free(log_buf);
+ free(buf);
return libbpf_err(err);
}
+
+int btf__load_into_kernel(struct btf *btf)
+{
+ return btf_load_into_kernel(btf, NULL, 0, 0);
+}
+
int btf__load(struct btf *) __attribute__((alias("btf__load_into_kernel")));
int btf__fd(const struct btf *btf)
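
The retry scheme above (optimistic load without a log, then a single bump to log_level 1) can also be expressed directly against the public bpf_btf_load() API that the rewritten helper calls. A minimal sketch of the same pattern with a caller-provided buffer; load_btf_optimistic() is a hypothetical wrapper, not part of this patch:

#include <bpf/bpf.h>

/* Sketch: first try loading BTF without any verifier log; only if that
 * fails, retry once in verbose mode so the kernel fills the buffer.
 */
static int load_btf_optimistic(const void *raw_data, size_t raw_size,
			       char *log_buf, __u32 log_sz)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	int fd;

	fd = bpf_btf_load(raw_data, raw_size, &opts);  /* no log buffer yet */
	if (fd >= 0)
		return fd;

	/* turn on verbose mode and try again with the caller's buffer */
	opts.log_buf = log_buf;
	opts.log_size = log_sz;
	opts.log_level = 1;
	return bpf_btf_load(raw_data, raw_size, &opts);
}
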
@@ -2730,15 +2763,11 @@ void btf_ext__free(struct btf_ext *btf_ext)
free(btf_ext);
}
-struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
+struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
{
struct btf_ext *btf_ext;
int err;
- err = btf_ext_parse_hdr(data, size);
- if (err)
- return libbpf_err_ptr(err);
-
btf_ext = calloc(1, sizeof(struct btf_ext));
if (!btf_ext)
return libbpf_err_ptr(-ENOMEM);
@@ -2751,6 +2780,10 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
}
memcpy(btf_ext->data, data, size);
+ err = btf_ext_parse_hdr(btf_ext->data, size);
+ if (err)
+ goto done;
+
if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
err = -EINVAL;
goto done;
@@ -3074,7 +3107,7 @@ done:
return libbpf_err(err);
}
-COMPAT_VERSION(bpf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2)
+COMPAT_VERSION(btf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2)
int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts)
{
LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext);
@@ -3476,8 +3509,8 @@ static long btf_hash_struct(struct btf_type *t)
}
/*
- * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
- * IDs. This check is performed during type graph equivalence check and
+ * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
+ * type IDs. This check is performed during type graph equivalence check and
* referenced types equivalence is checked separately.
*/
static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
@@ -3850,6 +3883,31 @@ static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
return btf_equal_array(t1, t2);
}
+/* Check if given two types are identical STRUCT/UNION definitions */
+static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
+{
+ const struct btf_member *m1, *m2;
+ struct btf_type *t1, *t2;
+ int n, i;
+
+ t1 = btf_type_by_id(d->btf, id1);
+ t2 = btf_type_by_id(d->btf, id2);
+
+ if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
+ return false;
+
+ if (!btf_shallow_equal_struct(t1, t2))
+ return false;
+
+ m1 = btf_members(t1);
+ m2 = btf_members(t2);
+ for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
+ if (m1->type != m2->type)
+ return false;
+ }
+ return true;
+}
+
/*
* Check equivalence of BTF type graph formed by candidate struct/union (we'll
* call it "candidate graph" in this description for brevity) to a type graph
@@ -3961,6 +4019,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
hypot_type_id = d->hypot_map[canon_id];
if (hypot_type_id <= BTF_MAX_NR_TYPES) {
+ if (hypot_type_id == cand_id)
+ return 1;
/* In some cases compiler will generate different DWARF types
* for *identical* array type definitions and use them for
* different fields within the *same* struct. This breaks type
@@ -3969,8 +4029,18 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
* types within a single CU. So work around that by explicitly
* allowing identical array types here.
*/
- return hypot_type_id == cand_id ||
- btf_dedup_identical_arrays(d, hypot_type_id, cand_id);
+ if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
+ return 1;
+ /* It turns out that similar situation can happen with
+ * struct/union sometimes, sigh... Handle the case where
+ * structs/unions are exactly the same, down to the referenced
+ * type IDs. Anything more complicated (e.g., if referenced
+ * types are different, but equivalent) is *way more*
+ * complicated and requires a many-to-many equivalence mapping.
+ */
+ if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
+ return 1;
+ return 0;
}
if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
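
The duplication described in the comment above can be reproduced by hand to see what btf_dedup_identical_structs() now tolerates. A much-simplified sketch (an assumed scenario, not code from this patch; dedup_duplicated_structs() is only illustrative): two byte-identical 'struct s' definitions referenced from different fields of the same parent struct, which btf__dedup() collapses into one:

#include <errno.h>
#include <bpf/btf.h>

/* Sketch: hand-built BTF mimicking the compiler behavior described
 * above -- the same struct emitted twice and used by sibling fields.
 */
static int dedup_duplicated_structs(void)
{
	struct btf *btf = btf__new_empty();
	int int_id, s1, s2, p1, p2, err;

	if (!btf)
		return -ENOMEM;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);  /* [1] */

	s1 = btf__add_struct(btf, "s", 4);                       /* [2] struct s { int x; } */
	btf__add_field(btf, "x", int_id, 0, 0);
	s2 = btf__add_struct(btf, "s", 4);                       /* [3] byte-identical duplicate */
	btf__add_field(btf, "x", int_id, 0, 0);

	p1 = btf__add_ptr(btf, s1);                              /* [4] struct s * */
	p2 = btf__add_ptr(btf, s2);                              /* [5] struct s * (duplicate) */

	btf__add_struct(btf, "parent", 16);                      /* [6] uses both copies */
	btf__add_field(btf, "a", p1, 0, 0);
	btf__add_field(btf, "b", p2, 64, 0);

	/* with the identical-struct check, [3] gets mapped onto [2] */
	err = btf__dedup(btf, NULL);
	btf__free(btf);
	return err;
}
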
@@ -4023,6 +4093,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
+ case BTF_KIND_TYPE_TAG:
if (cand_type->info != canon_type->info)
return 0;
return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);