author     David S. Miller <davem@davemloft.net>  2020-02-22 02:22:45 +0300
committer  David S. Miller <davem@davemloft.net>  2020-02-22 02:22:45 +0300
commit     b105e8e281ac2dbea4229982ad57fbefab05963d (patch)
tree       58ea551205dd646d809c789fccb2381b60b00d9e /tools
parent     e65ee2fb54d4745d7b7d9061d7fe33c5c5bf3b06 (diff)
parent     eb1e1478b6f4e70d99fee3f49bb7f7143c8c871d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2020-02-21

The following pull-request contains BPF updates for your *net-next* tree.

We've added 25 non-merge commits during the last 4 day(s) which contain
a total of 33 files changed, 2433 insertions(+), 161 deletions(-).

The main changes are:

1) Allow for adding TCP listen sockets into sock_map/hash so they can be
   used with reuseport BPF programs, from Jakub Sitnicki.

2) Add a new bpf_program__set_attach_target() helper for adding libbpf
   support to specify the tracepoint/function dynamically, from Eelco
   Chaudron.

3) Add bpf_read_branch_records() BPF helper which helps use cases like
   profile guided optimizations, from Daniel Xu.

4) Enable bpf_perf_event_read_value() in all tracing programs, from
   Song Liu.

5) Relax BTF mandatory check if only used for libbpf itself e.g. to process
   BTF defined maps, from Andrii Nakryiko.

6) Move BPF selftests -mcpu compilation attribute from 'probe' to 'v3' as
   it has been observed that former fails in envs with low memlock, from
   Yonghong Song.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
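[Editor's note: as a quick illustration of change 1) above, here is a minimal
user-space sketch, not taken from the patch, of inserting a listening TCP
socket into a SOCKMAP. The u32 key / u64 value layout mirrors the
sockmap_listen selftest below; add_listener_to_sockmap() and the pre-created
mapfd are assumptions made for the example.]

#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <bpf/bpf.h>

/* Sketch: insert a listening TCP socket into an existing
 * BPF_MAP_TYPE_SOCKMAP. 'mapfd' is assumed to be a sockmap created
 * elsewhere with u32 keys and u64 values.
 */
static int add_listener_to_sockmap(int mapfd)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	uint32_t key = 0;
	uint64_t value;
	int s, err;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (s < 0)
		return -errno;

	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) ||
	    listen(s, SOMAXCONN))
		goto err_out;

	/* Before this series the update below was rejected for
	 * TCP_LISTEN sockets; now it succeeds, so reuseport BPF
	 * programs can select the listener out of the map.
	 */
	value = s;
	if (bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST))
		goto err_out;

	return s;
err_out:
	err = -errno;
	close(s);
	return err;
}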
Diffstat (limited to 'tools')
-rw-r--r--  tools/include/uapi/linux/bpf.h                              |   25
-rw-r--r--  tools/lib/bpf/libbpf.c                                      |   38
-rw-r--r--  tools/lib/bpf/libbpf.h                                      |    4
-rw-r--r--  tools/lib/bpf/libbpf.map                                    |    5
-rw-r--r--  tools/testing/selftests/bpf/Makefile                        |    4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/perf_branches.c      |  170
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/select_reuseport.c   |   63
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c       |  124
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_listen.c     | 1496
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/trampoline_count.c   |   25
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c        |   16
-rw-r--r--  tools/testing/selftests/bpf/progs/test_perf_branches.c      |   50
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sockmap_listen.c     |   98
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c        |    4
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c                     |    6
15 files changed, 2091 insertions, 37 deletions
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 22f235260a3a..906e9f2752db 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -2890,6 +2890,25 @@ union bpf_attr {
* Obtain the 64bit jiffies
* Return
* The 64 bit jiffies
+ *
+ * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
+ * Description
+ * For an eBPF program attached to a perf event, retrieve the
+ * branch records (struct perf_branch_entry) associated with
+ * *ctx* and store them in the buffer pointed to by *buf*, up
+ * to *size* bytes.
+ * Return
+ * On success, number of bytes written to *buf*. On error, a
+ * negative value.
+ *
+ * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
+ * instead return the number of bytes required to store all the
+ * branch entries. If this flag is set, *buf* may be NULL.
+ *
+ * **-EINVAL** if the arguments are invalid or **size** is not a
+ * multiple of sizeof(struct perf_branch_entry).
+ *
+ * **-ENOENT** if architecture does not support branch records.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3010,7 +3029,8 @@ union bpf_attr {
FN(probe_read_kernel_str), \
FN(tcp_send_ack), \
FN(send_signal_thread), \
- FN(jiffies64),
+ FN(jiffies64), \
+ FN(read_branch_records),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -3089,6 +3109,9 @@ enum bpf_func_id {
/* BPF_FUNC_sk_storage_get flags */
#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0)
+/* BPF_FUNC_read_branch_records flags. */
+#define BPF_F_GET_BRANCH_RECORDS_SIZE (1ULL << 0)
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
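[Editor's note: to make the helper documentation above concrete, here is a
pared-down perf_event program sketch calling bpf_read_branch_records(); it
mirrors the test_perf_branches selftest further down in this series. It
assumes a toolchain whose bpf_helpers.h already carries the new helper
declaration, and the 16-entry buffer size is an arbitrary choice.]

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: the shape of a perf_event program using the new helper;
 * compare with progs/test_perf_branches.c below.
 */
#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <bpf/bpf_helpers.h>

struct perf_branch_entry entries[16] = {};
int required_bytes = 0;
int written_bytes = 0;

SEC("perf_event")
int dump_branches(void *ctx)
{
	/* Ask how many bytes all branch records would need. */
	required_bytes = bpf_read_branch_records(ctx, NULL, 0,
						 BPF_F_GET_BRANCH_RECORDS_SIZE);
	/* Copy out records; size must be a multiple of the entry size. */
	written_bytes = bpf_read_branch_records(ctx, entries,
						sizeof(entries), 0);
	return 0;
}

char _license[] SEC("license") = "GPL";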
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7469c7dcc15e..996162801f7a 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2286,9 +2286,7 @@ static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
{
- return obj->efile.btf_maps_shndx >= 0 ||
- obj->efile.st_ops_shndx >= 0 ||
- obj->nr_extern > 0;
+ return obj->efile.st_ops_shndx >= 0 || obj->nr_extern > 0;
}
static int bpf_object__init_btf(struct bpf_object *obj,
@@ -4945,8 +4943,8 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
{
int err = 0, fd, i, btf_id;
- if (prog->type == BPF_PROG_TYPE_TRACING ||
- prog->type == BPF_PROG_TYPE_EXT) {
+ if ((prog->type == BPF_PROG_TYPE_TRACING ||
+ prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
btf_id = libbpf_find_attach_btf_id(prog);
if (btf_id <= 0)
return btf_id;
@@ -6589,6 +6587,9 @@ static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
else
err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
+ if (err <= 0)
+ pr_warn("%s is not found in vmlinux BTF\n", name);
+
return err;
}
@@ -6661,8 +6662,6 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog)
err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
name + section_defs[i].len,
attach_type);
- if (err <= 0)
- pr_warn("%s is not found in vmlinux BTF\n", name);
return err;
}
pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
@@ -8138,6 +8137,31 @@ void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
}
}
+int bpf_program__set_attach_target(struct bpf_program *prog,
+ int attach_prog_fd,
+ const char *attach_func_name)
+{
+ int btf_id;
+
+ if (!prog || attach_prog_fd < 0 || !attach_func_name)
+ return -EINVAL;
+
+ if (attach_prog_fd)
+ btf_id = libbpf_find_prog_btf_id(attach_func_name,
+ attach_prog_fd);
+ else
+ btf_id = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
+ attach_func_name,
+ prog->expected_attach_type);
+
+ if (btf_id < 0)
+ return btf_id;
+
+ prog->attach_btf_id = btf_id;
+ prog->attach_prog_fd = attach_prog_fd;
+ return 0;
+}
+
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
int err = 0, n, len, start, end = -1;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 3fe12c9d1f92..02fc58a21a7f 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -334,6 +334,10 @@ LIBBPF_API void
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
+LIBBPF_API int
+bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
+ const char *attach_func_name);
+
LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index b035122142bb..7b014c8cdece 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -235,3 +235,8 @@ LIBBPF_0.0.7 {
btf__align_of;
libbpf_find_kernel_btf;
} LIBBPF_0.0.6;
+
+LIBBPF_0.0.8 {
+ global:
+ bpf_program__set_attach_target;
+} LIBBPF_0.0.7;
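[Editor's note: the new LIBBPF_0.0.8 symbol slots in between open and load. A
minimal sketch follows, assuming an object file "prog.bpf.o" with a program in
a SEC("fentry/FUNC") stub section, as in the test_xdp_bpf2bpf.c change below;
the file name, program title, and target prog fd are placeholders. The
reworked xdp_bpf2bpf selftest later in this diff shows the same flow with a
skeleton.]

#include <bpf/libbpf.h>

/* Sketch: pick the BTF attach target at load time instead of baking it
 * into the ELF section name.
 */
static int load_with_dynamic_target(int target_prog_fd)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;

	obj = bpf_object__open("prog.bpf.o");
	err = libbpf_get_error(obj);
	if (err)
		return err;

	prog = bpf_object__find_program_by_title(obj, "fentry/FUNC");
	if (!prog)
		goto err_out;

	bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
	/* New in libbpf 0.0.8: resolve attach_btf_id against the target
	 * program's BTF (or vmlinux BTF when target_prog_fd is 0).
	 */
	err = bpf_program__set_attach_target(prog, target_prog_fd,
					     "_xdp_tx_iptunnel");
	if (err)
		goto err_out;

	return bpf_object__load(obj);
err_out:
	bpf_object__close(obj);
	return -1;
}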
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 257a1aaaa37d..2a583196fa51 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -209,7 +209,7 @@ define CLANG_BPF_BUILD_RULE
$(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2)
($(CLANG) $3 -O2 -target bpf -emit-llvm \
-c $1 -o - || echo "BPF obj compilation failed") | \
- $(LLC) -mattr=dwarfris -march=bpf -mcpu=probe $4 -filetype=obj -o $2
+ $(LLC) -mattr=dwarfris -march=bpf -mcpu=v3 $4 -filetype=obj -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
define CLANG_NOALU32_BPF_BUILD_RULE
@@ -223,7 +223,7 @@ define CLANG_NATIVE_BPF_BUILD_RULE
$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
($(CLANG) $3 -O2 -emit-llvm \
-c $1 -o - || echo "BPF obj compilation failed") | \
- $(LLC) -march=bpf -mcpu=probe $4 -filetype=obj -o $2
+ $(LLC) -march=bpf -mcpu=v3 $4 -filetype=obj -o $2
endef
# Build BPF object using GCC
define GCC_BPF_BUILD_RULE
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_branches.c b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
new file mode 100644
index 000000000000..e35c444902a7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <pthread.h>
+#include <sched.h>
+#include <sys/socket.h>
+#include <test_progs.h>
+#include "bpf/libbpf_internal.h"
+#include "test_perf_branches.skel.h"
+
+static void check_good_sample(struct test_perf_branches *skel)
+{
+ int written_global = skel->bss->written_global_out;
+ int required_size = skel->bss->required_size_out;
+ int written_stack = skel->bss->written_stack_out;
+ int pbe_size = sizeof(struct perf_branch_entry);
+ int duration = 0;
+
+ if (CHECK(!skel->bss->valid, "output not valid",
+ "no valid sample from prog"))
+ return;
+
+ /*
+ * It's hard to validate the contents of the branch entries b/c it
+ * would require some kind of disassembler and also encoding the
+ * valid jump instructions for supported architectures. So just check
+ * the easy stuff for now.
+ */
+ CHECK(required_size <= 0, "read_branches_size", "err %d\n", required_size);
+ CHECK(written_stack < 0, "read_branches_stack", "err %d\n", written_stack);
+ CHECK(written_stack % pbe_size != 0, "read_branches_stack",
+ "stack bytes written=%d not multiple of struct size=%d\n",
+ written_stack, pbe_size);
+ CHECK(written_global < 0, "read_branches_global", "err %d\n", written_global);
+ CHECK(written_global % pbe_size != 0, "read_branches_global",
+ "global bytes written=%d not multiple of struct size=%d\n",
+ written_global, pbe_size);
+ CHECK(written_global < written_stack, "read_branches_size",
+ "written_global=%d < written_stack=%d\n", written_global, written_stack);
+}
+
+static void check_bad_sample(struct test_perf_branches *skel)
+{
+ int written_global = skel->bss->written_global_out;
+ int required_size = skel->bss->required_size_out;
+ int written_stack = skel->bss->written_stack_out;
+ int duration = 0;
+
+ if (CHECK(!skel->bss->valid, "output not valid",
+ "no valid sample from prog"))
+ return;
+
+ CHECK((required_size != -EINVAL && required_size != -ENOENT),
+ "read_branches_size", "err %d\n", required_size);
+ CHECK((written_stack != -EINVAL && written_stack != -ENOENT),
+ "read_branches_stack", "written %d\n", written_stack);
+ CHECK((written_global != -EINVAL && written_global != -ENOENT),
+ "read_branches_global", "written %d\n", written_global);
+}
+
+static void test_perf_branches_common(int perf_fd,
+ void (*cb)(struct test_perf_branches *))
+{
+ struct test_perf_branches *skel;
+ int err, i, duration = 0;
+ bool detached = false;
+ struct bpf_link *link;
+ volatile int j = 0;
+ cpu_set_t cpu_set;
+
+ skel = test_perf_branches__open_and_load();
+ if (CHECK(!skel, "test_perf_branches_load",
+ "perf_branches skeleton failed\n"))
+ return;
+
+ /* attach perf_event */
+ link = bpf_program__attach_perf_event(skel->progs.perf_branches, perf_fd);
+ if (CHECK(IS_ERR(link), "attach_perf_event", "err %ld\n", PTR_ERR(link)))
+ goto out_destroy_skel;
+
+ /* generate some branches on cpu 0 */
+ CPU_ZERO(&cpu_set);
+ CPU_SET(0, &cpu_set);
+ err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
+ if (CHECK(err, "set_affinity", "cpu #0, err %d\n", err))
+ goto out_destroy;
+ /* spin the loop for a while (random high number) */
+ for (i = 0; i < 1000000; ++i)
+ ++j;
+
+ test_perf_branches__detach(skel);
+ detached = true;
+
+ cb(skel);
+out_destroy:
+ bpf_link__destroy(link);
+out_destroy_skel:
+ if (!detached)
+ test_perf_branches__detach(skel);
+ test_perf_branches__destroy(skel);
+}
+
+static void test_perf_branches_hw(void)
+{
+ struct perf_event_attr attr = {0};
+ int duration = 0;
+ int pfd;
+
+ /* create perf event */
+ attr.size = sizeof(attr);
+ attr.type = PERF_TYPE_HARDWARE;
+ attr.config = PERF_COUNT_HW_CPU_CYCLES;
+ attr.freq = 1;
+ attr.sample_freq = 4000;
+ attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
+ attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
+ pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+
+ /*
+ * Some setups don't support branch records (virtual machines, !x86),
+ * so skip test in this case.
+ */
+ if (pfd == -1) {
+ if (errno == ENOENT || errno == EOPNOTSUPP) {
+ printf("%s:SKIP:no PERF_SAMPLE_BRANCH_STACK\n",
+ __func__);
+ test__skip();
+ return;
+ }
+ if (CHECK(pfd < 0, "perf_event_open", "err %d errno %d\n",
+ pfd, errno))
+ return;
+ }
+
+ test_perf_branches_common(pfd, check_good_sample);
+
+ close(pfd);
+}
+
+/*
+ * Tests negative case -- run bpf_read_branch_records() on improperly configured
+ * perf event.
+ */
+static void test_perf_branches_no_hw(void)
+{
+ struct perf_event_attr attr = {0};
+ int duration = 0;
+ int pfd;
+
+ /* create perf event */
+ attr.size = sizeof(attr);
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_CPU_CLOCK;
+ attr.freq = 1;
+ attr.sample_freq = 4000;
+ pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+ if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd))
+ return;
+
+ test_perf_branches_common(pfd, check_bad_sample);
+
+ close(pfd);
+}
+
+void test_perf_branches(void)
+{
+ if (test__start_subtest("perf_branches_hw"))
+ test_perf_branches_hw();
+ if (test__start_subtest("perf_branches_no_hw"))
+ test_perf_branches_no_hw();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
index 0800036ed654..68d452bb9fd9 100644
--- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
@@ -36,6 +36,7 @@ static int result_map, tmp_index_ovr_map, linum_map, data_check_map;
static __u32 expected_results[NR_RESULTS];
static int sk_fds[REUSEPORT_ARRAY_SIZE];
static int reuseport_array = -1, outer_map = -1;
+static enum bpf_map_type inner_map_type;
static int select_by_skb_data_prog;
static int saved_tcp_syncookie = -1;
static struct bpf_object *obj;
@@ -63,13 +64,15 @@ static union sa46 {
} \
})
-static int create_maps(void)
+static int create_maps(enum bpf_map_type inner_type)
{
struct bpf_create_map_attr attr = {};
+ inner_map_type = inner_type;
+
/* Creating reuseport_array */
attr.name = "reuseport_array";
- attr.map_type = BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
+ attr.map_type = inner_type;
attr.key_size = sizeof(__u32);
attr.value_size = sizeof(__u32);
attr.max_entries = REUSEPORT_ARRAY_SIZE;
@@ -728,12 +731,36 @@ static void cleanup_per_test(bool no_inner_map)
static void cleanup(void)
{
- if (outer_map != -1)
+ if (outer_map != -1) {
close(outer_map);
- if (reuseport_array != -1)
+ outer_map = -1;
+ }
+
+ if (reuseport_array != -1) {
close(reuseport_array);
- if (obj)
+ reuseport_array = -1;
+ }
+
+ if (obj) {
bpf_object__close(obj);
+ obj = NULL;
+ }
+
+ memset(expected_results, 0, sizeof(expected_results));
+}
+
+static const char *maptype_str(enum bpf_map_type type)
+{
+ switch (type) {
+ case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+ return "reuseport_sockarray";
+ case BPF_MAP_TYPE_SOCKMAP:
+ return "sockmap";
+ case BPF_MAP_TYPE_SOCKHASH:
+ return "sockhash";
+ default:
+ return "unknown";
+ }
}
static const char *family_str(sa_family_t family)
@@ -781,13 +808,21 @@ static void test_config(int sotype, sa_family_t family, bool inany)
const struct test *t;
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
- snprintf(s, sizeof(s), "%s/%s %s %s",
+ snprintf(s, sizeof(s), "%s %s/%s %s %s",
+ maptype_str(inner_map_type),
family_str(family), sotype_str(sotype),
inany ? "INANY" : "LOOPBACK", t->name);
if (!test__start_subtest(s))
continue;
+ if (sotype == SOCK_DGRAM &&
+ inner_map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
+ /* SOCKMAP/SOCKHASH don't support UDP yet */
+ test__skip();
+ continue;
+ }
+
setup_per_test(sotype, family, inany, t->no_inner_map);
t->fn(sotype, family);
cleanup_per_test(t->no_inner_map);
@@ -816,13 +851,20 @@ static void test_all(void)
test_config(c->sotype, c->family, c->inany);
}
-void test_select_reuseport(void)
+void test_map_type(enum bpf_map_type mt)
{
- if (create_maps())
+ if (create_maps(mt))
goto out;
if (prepare_bpf_obj())
goto out;
+ test_all();
+out:
+ cleanup();
+}
+
+void test_select_reuseport(void)
+{
saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL);
if (saved_tcp_fo < 0)
goto out;
@@ -835,8 +877,9 @@ void test_select_reuseport(void)
if (disable_syncookie())
goto out;
- test_all();
+ test_map_type(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
+ test_map_type(BPF_MAP_TYPE_SOCKMAP);
+ test_map_type(BPF_MAP_TYPE_SOCKHASH);
out:
- cleanup();
restore_sysctls();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
new file mode 100644
index 000000000000..06b86addc181
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Cloudflare
+/*
+ * Tests for sockmap/sockhash holding kTLS sockets.
+ */
+
+#include "test_progs.h"
+
+#define MAX_TEST_NAME 80
+#define TCP_ULP 31
+
+static int tcp_server(int family)
+{
+ int err, s;
+
+ s = socket(family, SOCK_STREAM, 0);
+ if (CHECK_FAIL(s == -1)) {
+ perror("socket");
+ return -1;
+ }
+
+ err = listen(s, SOMAXCONN);
+ if (CHECK_FAIL(err)) {
+ perror("listen");
+ return -1;
+ }
+
+ return s;
+}
+
+static int disconnect(int fd)
+{
+ struct sockaddr unspec = { AF_UNSPEC };
+
+ return connect(fd, &unspec, sizeof(unspec));
+}
+
+/* Disconnect (unhash) a kTLS socket after removing it from sockmap. */
+static void test_sockmap_ktls_disconnect_after_delete(int family, int map)
+{
+ struct sockaddr_storage addr = {0};
+ socklen_t len = sizeof(addr);
+ int err, cli, srv, zero = 0;
+
+ srv = tcp_server(family);
+ if (srv == -1)
+ return;
+
+ err = getsockname(srv, (struct sockaddr *)&addr, &len);
+ if (CHECK_FAIL(err)) {
+ perror("getsockopt");
+ goto close_srv;
+ }
+
+ cli = socket(family, SOCK_STREAM, 0);
+ if (CHECK_FAIL(cli == -1)) {
+ perror("socket");
+ goto close_srv;
+ }
+
+ err = connect(cli, (struct sockaddr *)&addr, len);
+ if (CHECK_FAIL(err)) {
+ perror("connect");
+ goto close_cli;
+ }
+
+ err = bpf_map_update_elem(map, &zero, &cli, 0);
+ if (CHECK_FAIL(err)) {
+ perror("bpf_map_update_elem");
+ goto close_cli;
+ }
+
+ err = setsockopt(cli, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
+ if (CHECK_FAIL(err)) {
+ perror("setsockopt(TCP_ULP)");
+ goto close_cli;
+ }
+
+ err = bpf_map_delete_elem(map, &zero);
+ if (CHECK_FAIL(err)) {
+ perror("bpf_map_delete_elem");
+ goto close_cli;
+ }
+
+ err = disconnect(cli);
+ if (CHECK_FAIL(err))
+ perror("disconnect");
+
+close_cli:
+ close(cli);
+close_srv:
+ close(srv);
+}
+
+static void run_tests(int family, enum bpf_map_type map_type)
+{
+ char test_name[MAX_TEST_NAME];
+ int map;
+
+ map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
+ if (CHECK_FAIL(map == -1)) {
+ perror("bpf_map_create");
+ return;
+ }
+
+ snprintf(test_name, MAX_TEST_NAME,
+ "sockmap_ktls disconnect_after_delete %s %s",
+ family == AF_INET ? "IPv4" : "IPv6",
+ map_type == BPF_MAP_TYPE_SOCKMAP ? "SOCKMAP" : "SOCKHASH");
+ if (!test__start_subtest(test_name))
+ return;
+
+ test_sockmap_ktls_disconnect_after_delete(family, map);
+
+ close(map);
+}
+
+void test_sockmap_ktls(void)
+{
+ run_tests(AF_INET, BPF_MAP_TYPE_SOCKMAP);
+ run_tests(AF_INET, BPF_MAP_TYPE_SOCKHASH);
+ run_tests(AF_INET6, BPF_MAP_TYPE_SOCKMAP);
+ run_tests(AF_INET6, BPF_MAP_TYPE_SOCKHASH);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
new file mode 100644
index 000000000000..b1b2acea0638
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -0,0 +1,1496 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Cloudflare
+/*
+ * Test suite for SOCKMAP/SOCKHASH holding listening sockets.
+ * Covers:
+ * 1. BPF map operations - bpf_map_{update,lookup,delete}_elem
+ * 2. BPF redirect helpers - bpf_{sk,msg}_redirect_map
+ * 3. BPF reuseport helper - bpf_sk_select_reuseport
+ */
+
+#include <linux/compiler.h>
+#include <errno.h>
+#include <error.h>
+#include <limits.h>
+#include <netinet/in.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "bpf_util.h"
+#include "test_progs.h"
+#include "test_sockmap_listen.skel.h"
+
+#define MAX_STRERR_LEN 256
+#define MAX_TEST_NAME 80
+
+#define _FAIL(errnum, fmt...) \
+ ({ \
+ error_at_line(0, (errnum), __func__, __LINE__, fmt); \
+ CHECK_FAIL(true); \
+ })
+#define FAIL(fmt...) _FAIL(0, fmt)
+#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt)
+#define FAIL_LIBBPF(err, msg) \
+ ({ \
+ char __buf[MAX_STRERR_LEN]; \
+ libbpf_strerror((err), __buf, sizeof(__buf)); \
+ FAIL("%s: %s", (msg), __buf); \
+ })
+
+/* Wrappers that fail the test on error and report it. */
+
+#define xaccept(fd, addr, len) \
+ ({ \
+ int __ret = accept((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("accept"); \
+ __ret; \
+ })
+
+#define xbind(fd, addr, len) \
+ ({ \
+ int __ret = bind((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("bind"); \
+ __ret; \
+ })
+
+#define xclose(fd) \
+ ({ \
+ int __ret = close((fd)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("close"); \
+ __ret; \
+ })
+
+#define xconnect(fd, addr, len) \
+ ({ \
+ int __ret = connect((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("connect"); \
+ __ret; \
+ })
+
+#define xgetsockname(fd, addr, len) \
+ ({ \
+ int __ret = getsockname((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("getsockname"); \
+ __ret; \
+ })
+
+#define xgetsockopt(fd, level, name, val, len) \
+ ({ \
+ int __ret = getsockopt((fd), (level), (name), (val), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("getsockopt(" #name ")"); \
+ __ret; \
+ })
+
+#define xlisten(fd, backlog) \
+ ({ \
+ int __ret = listen((fd), (backlog)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("listen"); \
+ __ret; \
+ })
+
+#define xsetsockopt(fd, level, name, val, len) \
+ ({ \
+ int __ret = setsockopt((fd), (level), (name), (val), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("setsockopt(" #name ")"); \
+ __ret; \
+ })
+
+#define xsocket(family, sotype, flags) \
+ ({ \
+ int __ret = socket(family, sotype, flags); \
+ if (__ret == -1) \
+ FAIL_ERRNO("socket"); \
+ __ret; \
+ })
+
+#define xbpf_map_delete_elem(fd, key) \
+ ({ \
+ int __ret = bpf_map_delete_elem((fd), (key)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("map_delete"); \
+ __ret; \
+ })
+
+#define xbpf_map_lookup_elem(fd, key, val) \
+ ({ \
+ int __ret = bpf_map_lookup_elem((fd), (key), (val)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("map_lookup"); \
+ __ret; \
+ })
+
+#define xbpf_map_update_elem(fd, key, val, flags) \
+ ({ \
+ int __ret = bpf_map_update_elem((fd), (key), (val), (flags)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("map_update"); \
+ __ret; \
+ })
+
+#define xbpf_prog_attach(prog, target, type, flags) \
+ ({ \
+ int __ret = \
+ bpf_prog_attach((prog), (target), (type), (flags)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("prog_attach(" #type ")"); \
+ __ret; \
+ })
+
+#define xbpf_prog_detach2(prog, target, type) \
+ ({ \
+ int __ret = bpf_prog_detach2((prog), (target), (type)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("prog_detach2(" #type ")"); \
+ __ret; \
+ })
+
+#define xpthread_create(thread, attr, func, arg) \
+ ({ \
+ int __ret = pthread_create((thread), (attr), (func), (arg)); \
+ errno = __ret; \
+ if (__ret) \
+ FAIL_ERRNO("pthread_create"); \
+ __ret; \
+ })
+
+#define xpthread_join(thread, retval) \
+ ({ \
+ int __ret = pthread_join((thread), (retval)); \
+ errno = __ret; \
+ if (__ret) \
+ FAIL_ERRNO("pthread_join"); \
+ __ret; \
+ })
+
+static void init_addr_loopback4(struct sockaddr_storage *ss, socklen_t *len)
+{
+ struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
+
+ addr4->sin_family = AF_INET;
+ addr4->sin_port = 0;
+ addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ *len = sizeof(*addr4);
+}
+
+static void init_addr_loopback6(struct sockaddr_storage *ss, socklen_t *len)
+{
+ struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss));
+
+ addr6->sin6_family = AF_INET6;
+ addr6->sin6_port = 0;
+ addr6->sin6_addr = in6addr_loopback;
+ *len = sizeof(*addr6);
+}
+
+static void init_addr_loopback(int family, struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ switch (family) {
+ case AF_INET:
+ init_addr_loopback4(ss, len);
+ return;
+ case AF_INET6:
+ init_addr_loopback6(ss, len);
+ return;
+ default:
+ FAIL("unsupported address family %d", family);
+ }
+}
+
+static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss)
+{
+ return (struct sockaddr *)ss;
+}
+
+static int enable_reuseport(int s, int progfd)
+{
+ int err, one = 1;
+
+ err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
+ if (err)
+ return -1;
+ err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd,
+ sizeof(progfd));
+ if (err)
+ return -1;
+
+ return 0;
+}
+
+static int listen_loopback_reuseport(int family, int sotype, int progfd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len;
+ int err, s;
+
+ init_addr_loopback(family, &addr, &len);
+
+ s = xsocket(family, sotype, 0);
+ if (s == -1)
+ return -1;
+
+ if (progfd >= 0)
+ enable_reuseport(s, progfd);
+
+ err = xbind(s, sockaddr(&addr), len);
+ if (err)
+ goto close;
+
+ err = xlisten(s, SOMAXCONN);
+ if (err)
+ goto close;
+
+ return s;
+close:
+ xclose(s);
+ return -1;
+}
+
+static int listen_loopback(int family, int sotype)
+{
+ return listen_loopback_reuseport(family, sotype, -1);
+}
+
+static void test_insert_invalid(int family, int sotype, int mapfd)
+{
+ u32 key = 0;
+ u64 value;
+ int err;
+
+ value = -1;
+ err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ if (!err || errno != EINVAL)
+ FAIL_ERRNO("map_update: expected EINVAL");
+
+ value = INT_MAX;
+ err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ if (!err || errno != EBADF)
+ FAIL_ERRNO("map_update: expected EBADF");
+}
+
+static void test_insert_opened(int family, int sotype, int mapfd)
+{
+ u32 key = 0;
+ u64 value;
+ int err, s;
+
+ s = xsocket(family, sotype, 0);
+ if (s == -1)
+ return;
+
+ errno = 0;
+ value = s;
+ err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ if (!err || errno != EOPNOTSUPP)
+ FAIL_ERRNO("map_update: expected EOPNOTSUPP");
+
+ xclose(s);
+}
+
+static void test_insert_bound(int family, int sotype, int mapfd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len;
+ u32 key = 0;
+ u64 value;
+ int err, s;
+
+ init_addr_loopback(family, &addr, &len);
+
+ s = xsocket(family, sotype, 0);
+ if (s == -1)
+ return;
+
+ err = xbind(s, sockaddr(&addr), len);
+ if (err)
+ goto close;
+
+ errno = 0;
+ value = s;
+ err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ if (!err || errno != EOPNOTSUPP)
+ FAIL_ERRNO("map_update: expected EOPNOTSUPP");
+close:
+ xclose(s);
+}
+
+static void test_insert_listening(int family, int sotype, int mapfd)
+{
+ u64 value;
+ u32 key;
+ int s;
+
+ s = listen_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ xclose(s);
+}
+
+static void test_delete_after_insert(int family, int sotype, int mapfd)
+{
+ u64 value;
+ u32 key;
+ int s;
+
+ s = listen_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ xbpf_map_delete_elem(mapfd, &key);
+ xclose(s);
+}
+
+static void test_delete_after_close(int family, int sotype, int mapfd)
+{
+ int err, s;
+ u64 value;
+ u32 key;
+
+ s = listen_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+
+ xclose(s);
+
+ errno = 0;
+ err = bpf_map_delete_elem(mapfd, &key);
+ if (!err || (errno != EINVAL && errno != ENOENT))
+ /* SOCKMAP and SOCKHASH return different error codes */
+ FAIL_ERRNO("map_delete: expected EINVAL/EINVAL");
+}
+
+static void test_lookup_after_insert(int family, int sotype, int mapfd)
+{
+ u64 cookie, value;
+ socklen_t len;
+ u32 key;
+ int s;
+
+ s = listen_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+
+ len = sizeof(cookie);
+ xgetsockopt(s, SOL_SOCKET, SO_COOKIE, &cookie, &len);
+
+ xbpf_map_lookup_elem(mapfd, &key, &value);
+
+ if (value != cookie) {
+ FAIL("map_lookup: have %#llx, want %#llx",
+ (unsigned long long)value, (unsigned long long)cookie);
+ }
+
+ xclose(s);
+}
+
+static void test_lookup_after_delete(int family, int sotype, int mapfd)
+{
+ int err, s;
+ u64 value;
+ u32 key;
+
+ s = listen_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ xbpf_map_delete_elem(mapfd, &key);
+
+ errno = 0;
+ err = bpf_map_lookup_elem(mapfd, &key, &value);
+ if (!err || errno != ENOENT)
+ FAIL_ERRNO("map_lookup: expected ENOENT");
+
+ xclose(s);
+}
+
+static void test_lookup_32_bit_value(int family, int sotype, int mapfd)
+{
+ u32 key, value32;
+ int err, s;
+
+ s = listen_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ mapfd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(key),
+ sizeof(value32), 1, 0);
+ if (mapfd < 0) {
+ FAIL_ERRNO("map_create");
+ goto close;
+ }
+
+ key = 0;
+ value32 = s;
+ xbpf_map_update_elem(mapfd, &key, &value32, BPF_NOEXIST);
+
+ errno = 0;
+ err = bpf_map_lookup_elem(mapfd, &key, &value32);
+ if (!err || errno != ENOSPC)
+ FAIL_ERRNO("map_lookup: expected ENOSPC");
+
+ xclose(mapfd);
+close:
+ xclose(s);
+}
+
+static void test_update_listening(int family, int sotype, int mapfd)
+{
+ int s1, s2;
+ u64 value;
+ u32 key;
+
+ s1 = listen_loopback(family, sotype);
+ if (s1 < 0)
+ return;
+
+ s2 = listen_loopback(family, sotype);
+ if (s2 < 0)
+ goto close_s1;
+
+ key = 0;
+ value = s1;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+
+ value = s2;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_EXIST);
+ xclose(s2);
+close_s1:
+ xclose(s1);
+}
+
+/* Exercise the code path where we destroy child sockets that never
+ * got accept()'ed, aka orphans, when parent socket gets closed.
+ */
+static void test_destroy_orphan_child(int family, int sotype, int mapfd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len;
+ int err, s, c;
+ u64 value;
+ u32 key;
+
+ s = listen_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+
+ c = xsocket(family, sotype, 0);
+ if (c == -1)
+ goto close_srv;
+
+ xconnect(c, sockaddr(&addr), len);
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+/* Perform a passive open after removing listening socket from SOCKMAP
+ * to ensure that callbacks get restored properly.
+ */
+static void test_clone_after_delete(int family, int sotype, int mapfd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len;
+ int err, s, c;
+ u64 value;
+ u32 key;
+
+ s = listen_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ xbpf_map_delete_elem(mapfd, &key);
+
+ c = xsocket(family, sotype, 0);
+ if (c < 0)
+ goto close_srv;
+
+ xconnect(c, sockaddr(&addr), len);
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+/* Check that child socket that got created while parent was in a
+ * SOCKMAP, but got accept()'ed only after the parent has been removed
+ * from SOCKMAP, gets cloned without parent psock state or callbacks.
+ */
+static void test_accept_after_delete(int family, int sotype, int mapfd)
+{
+ struct sockaddr_storage addr;
+ const u32 zero = 0;
+ int err, s, c, p;
+ socklen_t len;
+ u64 value;
+
+ s = listen_loopback(family, sotype);
+ if (s == -1)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ value = s;
+ err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv;
+
+ c = xsocket(family, sotype, 0);
+ if (c == -1)
+ goto close_srv;
+
+ /* Create child while parent is in sockmap */
+ err = xconnect(c, sockaddr(&addr), len);
+ if (err)
+ goto close_cli;
+
+ /* Remove parent from sockmap */
+ err = xbpf_map_delete_elem(mapfd, &zero);
+ if (err)
+ goto close_cli;
+
+ p = xaccept(s, NULL, NULL);
+ if (p == -1)
+ goto close_cli;
+
+ /* Check that child sk_user_data is not set */
+ value = p;
+ xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
+
+ xclose(p);
+close_cli:
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+/* Check that child socket that got created and accepted while parent
+ * was in a SOCKMAP is cloned without parent psock state or callbacks.
+ */
+static void test_accept_before_delete(int family, int sotype, int mapfd)
+{
+ struct sockaddr_storage addr;
+ const u32 zero = 0, one = 1;
+ int err, s, c, p;
+ socklen_t len;
+ u64 value;
+
+ s = listen_loopback(family, sotype);
+ if (s == -1)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ value = s;
+ err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv;
+
+ c = xsocket(family, sotype, 0);
+ if (c == -1)
+ goto close_srv;
+
+ /* Create & accept child while parent is in sockmap */
+ err = xconnect(c, sockaddr(&addr), len);
+ if (err)
+ goto close_cli;
+
+ p = xaccept(s, NULL, NULL);
+ if (p == -1)
+ goto close_cli;
+
+ /* Check that child sk_user_data is not set */
+ value = p;
+ xbpf_map_update_elem(mapfd, &one, &value, BPF_NOEXIST);
+
+ xclose(p);
+close_cli:
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+struct connect_accept_ctx {
+ int sockfd;
+ unsigned int done;
+ unsigned int nr_iter;
+};
+
+static bool is_thread_done(struct connect_accept_ctx *ctx)
+{
+ return READ_ONCE(ctx->done);
+}
+
+static void *connect_accept_thread(void *arg)
+{
+ struct connect_accept_ctx *ctx = arg;
+ struct sockaddr_storage addr;
+ int family, socktype;
+ socklen_t len;
+ int err, i, s;
+
+ s = ctx->sockfd;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto done;
+
+ len = sizeof(family);
+ err = xgetsockopt(s, SOL_SOCKET, SO_DOMAIN, &family, &len);
+ if (err)
+ goto done;
+
+ len = sizeof(socktype);
+ err = xgetsockopt(s, SOL_SOCKET, SO_TYPE, &socktype, &len);
+ if (err)
+ goto done;
+
+ for (i = 0; i < ctx->nr_iter; i++) {
+ int c, p;
+
+ c = xsocket(family, socktype, 0);
+ if (c < 0)
+ break;
+
+ err = xconnect(c, (struct sockaddr *)&addr, sizeof(addr));
+ if (err) {
+ xclose(c);
+ break;
+ }
+
+ p = xaccept(s, NULL, NULL);
+ if (p < 0) {
+ xclose(c);
+ break;
+ }
+
+ xclose(p);
+ xclose(c);
+ }
+done:
+ WRITE_ONCE(ctx->done, 1);
+ return NULL;
+}
+
+static void test_syn_recv_insert_delete(int family, int sotype, int mapfd)
+{
+ struct connect_accept_ctx ctx = { 0 };
+ struct sockaddr_storage addr;
+ socklen_t len;
+ u32 zero = 0;
+ pthread_t t;
+ int err, s;
+ u64 value;
+
+ s = listen_loopback(family, sotype | SOCK_NONBLOCK);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close;
+
+ ctx.sockfd = s;
+ ctx.nr_iter = 1000;
+
+ err = xpthread_create(&t, NULL, connect_accept_thread, &ctx);
+ if (err)
+ goto close;
+
+ value = s;
+ while (!is_thread_done(&ctx)) {
+ err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
+ if (err)
+ break;
+
+ err = xbpf_map_delete_elem(mapfd, &zero);
+ if (err)
+ break;
+ }
+
+ xpthread_join(t, NULL);
+close:
+ xclose(s);
+}
+
+static void *listen_thread(void *arg)
+{
+ struct sockaddr unspec = { AF_UNSPEC };
+ struct connect_accept_ctx *ctx = arg;
+ int err, i, s;
+
+ s = ctx->sockfd;
+
+ for (i = 0; i < ctx->nr_iter; i++) {
+ err = xlisten(s, 1);
+ if (err)
+ break;
+ err = xconnect(s, &unspec, sizeof(unspec));
+ if (err)
+ break;
+ }
+
+ WRITE_ONCE(ctx->done, 1);
+ return NULL;
+}
+
+static void test_race_insert_listen(int family, int socktype, int mapfd)
+{
+ struct connect_accept_ctx ctx = { 0 };
+ const u32 zero = 0;
+ const int one = 1;
+ pthread_t t;
+ int err, s;
+ u64 value;
+
+ s = xsocket(family, socktype, 0);
+ if (s < 0)
+ return;
+
+ err = xsetsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
+ if (err)
+ goto close;
+
+ ctx.sockfd = s;
+ ctx.nr_iter = 10000;
+
+ err = pthread_create(&t, NULL, listen_thread, &ctx);
+ if (err)
+ goto close;
+
+ value = s;
+ while (!is_thread_done(&ctx)) {
+ err = bpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
+ /* Expecting EOPNOTSUPP before listen() */
+ if (err && errno != EOPNOTSUPP) {
+ FAIL_ERRNO("map_update");
+ break;
+ }
+
+ err = bpf_map_delete_elem(mapfd, &zero);
+ /* Expecting no entry after unhash on connect(AF_UNSPEC) */
+ if (err && errno != EINVAL && errno != ENOENT) {
+ FAIL_ERRNO("map_delete");
+ break;
+ }
+ }
+
+ xpthread_join(t, NULL);
+close:
+ xclose(s);
+}
+
+static void zero_verdict_count(int mapfd)
+{
+ unsigned int zero = 0;
+ int key;
+
+ key = SK_DROP;
+ xbpf_map_update_elem(mapfd, &key, &zero, BPF_ANY);
+ key = SK_PASS;
+ xbpf_map_update_elem(mapfd, &key, &zero, BPF_ANY);
+}
+
+enum redir_mode {
+ REDIR_INGRESS,
+ REDIR_EGRESS,
+};
+
+static const char *redir_mode_str(enum redir_mode mode)
+{
+ switch (mode) {
+ case REDIR_INGRESS:
+ return "ingress";
+ case REDIR_EGRESS:
+ return "egress";
+ default:
+ return "unknown";
+ }
+}
+
+static void redir_to_connected(int family, int sotype, int sock_mapfd,
+ int verd_mapfd, enum redir_mode mode)
+{
+ const char *log_prefix = redir_mode_str(mode);
+ struct sockaddr_storage addr;
+ int s, c0, c1, p0, p1;
+ unsigned int pass;
+ socklen_t len;
+ int err, n;
+ u64 value;
+ u32 key;
+ char b;
+
+ zero_verdict_count(verd_mapfd);
+
+ s = listen_loopback(family, sotype | SOCK_NONBLOCK);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ c0 = xsocket(family, sotype, 0);
+ if (c0 < 0)
+ goto close_srv;
+ err = xconnect(c0, sockaddr(&addr), len);
+ if (err)
+ goto close_cli0;
+
+ p0 = xaccept(s, NULL, NULL);
+ if (p0 < 0)
+ goto close_cli0;
+
+ c1 = xsocket(family, sotype, 0);
+ if (c1 < 0)
+ goto close_peer0;
+ err = xconnect(c1, sockaddr(&addr), len);
+ if (err)
+ goto close_cli1;
+
+ p1 = xaccept(s, NULL, NULL);
+ if (p1 < 0)
+ goto close_cli1;
+
+ key = 0;
+ value = p0;
+ err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_peer1;
+
+ key = 1;
+ value = p1;
+ err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_peer1;
+
+ n = write(mode == REDIR_INGRESS ? c1 : p1, "a", 1);
+ if (n < 0)
+ FAIL_ERRNO("%s: write", log_prefix);
+ if (n == 0)
+ FAIL("%s: incomplete write", log_prefix);
+ if (n < 1)
+ goto close_peer1;
+
+ key = SK_PASS;
+ err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
+ if (err)
+ goto close_peer1;
+ if (pass != 1)
+ FAIL("%s: want pass count 1, have %d", log_prefix, pass);
+
+ n = read(c0, &b, 1);
+ if (n < 0)
+ FAIL_ERRNO("%s: read", log_prefix);
+ if (n == 0)
+ FAIL("%s: incomplete read", log_prefix);
+
+close_peer1:
+ xclose(p1);
+close_cli1:
+ xclose(c1);
+close_peer0:
+ xclose(p0);
+close_cli0:
+ xclose(c0);
+close_srv:
+ xclose(s);
+}
+
+static void test_skb_redir_to_connected(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family,
+ int sotype)
+{
+ int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ int parser = bpf_program__fd(skel->progs.prog_skb_parser);
+ int verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ int sock_map = bpf_map__fd(inner_map);
+ int err;
+
+ err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0);
+ if (err)
+ return;
+ err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (err)
+ goto detach;
+
+ redir_to_connected(family, sotype, sock_map, verdict_map,
+ REDIR_INGRESS);
+
+ xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT);
+detach:
+ xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER);
+}
+
+static void test_msg_redir_to_connected(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family,
+ int sotype)
+{
+ int verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
+ int verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ int sock_map = bpf_map__fd(inner_map);
+ int err;
+
+ err = xbpf_prog_attach(verdict, sock_map, BPF_SK_MSG_VERDICT, 0);
+ if (err)
+ return;
+
+ redir_to_connected(family, sotype, sock_map, verdict_map, REDIR_EGRESS);
+
+ xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT);
+}
+
+static void redir_to_listening(int family, int sotype, int sock_mapfd,
+ int verd_mapfd, enum redir_mode mode)
+{
+ const char *log_prefix = redir_mode_str(mode);
+ struct sockaddr_storage addr;
+ int s, c, p, err, n;
+ unsigned int drop;
+ socklen_t len;
+ u64 value;
+ u32 key;
+
+ zero_verdict_count(verd_mapfd);
+
+ s = listen_loopback(family, sotype | SOCK_NONBLOCK);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ c = xsocket(family, sotype, 0);
+ if (c < 0)
+ goto close_srv;
+ err = xconnect(c, sockaddr(&addr), len);
+ if (err)
+ goto close_cli;
+
+ p = xaccept(s, NULL, NULL);
+ if (p < 0)
+ goto close_cli;
+
+ key = 0;
+ value = s;
+ err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_peer;
+
+ key = 1;
+ value = p;
+ err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_peer;
+
+ n = write(mode == REDIR_INGRESS ? c : p, "a", 1);
+ if (n < 0 && errno != EACCES)
+ FAIL_ERRNO("%s: write", log_prefix);
+ if (n == 0)
+ FAIL("%s: incomplete write", log_prefix);
+ if (n < 1)
+ goto close_peer;
+
+ key = SK_DROP;
+ err = xbpf_map_lookup_elem(verd_mapfd, &key, &drop);
+ if (err)
+ goto close_peer;
+ if (drop != 1)
+ FAIL("%s: want drop count 1, have %d", log_prefix, drop);
+
+close_peer:
+ xclose(p);
+close_cli:
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+static void test_skb_redir_to_listening(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family,
+ int sotype)
+{
+ int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ int parser = bpf_program__fd(skel->progs.prog_skb_parser);
+ int verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ int sock_map = bpf_map__fd(inner_map);
+ int err;
+
+ err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0);
+ if (err)
+ return;
+ err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (err)
+ goto detach;
+
+ redir_to_listening(family, sotype, sock_map, verdict_map,
+ REDIR_INGRESS);
+
+ xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT);
+detach:
+ xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER);
+}
+
+static void test_msg_redir_to_listening(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family,
+ int sotype)
+{
+ int verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
+ int verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ int sock_map = bpf_map__fd(inner_map);
+ int err;
+
+ err = xbpf_prog_attach(verdict, sock_map, BPF_SK_MSG_VERDICT, 0);
+ if (err)
+ return;
+
+ redir_to_listening(family, sotype, sock_map, verdict_map, REDIR_EGRESS);
+
+ xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT);
+}
+
+static void test_reuseport_select_listening(int family, int sotype,
+ int sock_map, int verd_map,
+ int reuseport_prog)
+{
+ struct sockaddr_storage addr;
+ unsigned int pass;
+ int s, c, p, err;
+ socklen_t len;
+ u64 value;
+ u32 key;
+
+ zero_verdict_count(verd_map);
+
+ s = listen_loopback_reuseport(family, sotype, reuseport_prog);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ key = 0;
+ value = s;
+ err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv;
+
+ c = xsocket(family, sotype, 0);
+ if (c < 0)
+ goto close_srv;
+ err = xconnect(c, sockaddr(&addr), len);
+ if (err)
+ goto close_cli;
+
+ p = xaccept(s, NULL, NULL);
+ if (p < 0)
+ goto close_cli;
+
+ key = SK_PASS;
+ err = xbpf_map_lookup_elem(verd_map, &key, &pass);
+ if (err)
+ goto close_peer;
+ if (pass != 1)
+ FAIL("want pass count 1, have %d", pass);
+
+close_peer:
+ xclose(p);
+close_cli:
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+static void test_reuseport_select_connected(int family, int sotype,
+ int sock_map, int verd_map,
+ int reuseport_prog)
+{
+ struct sockaddr_storage addr;
+ int s, c0, c1, p0, err;
+ unsigned int drop;
+ socklen_t len;
+ u64 value;
+ u32 key;
+
+ zero_verdict_count(verd_map);
+
+ s = listen_loopback_reuseport(family, sotype, reuseport_prog);
+ if (s < 0)
+ return;
+
+ /* Populate sock_map[0] to avoid ENOENT on first connection */
+ key = 0;
+ value = s;
+ err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ c0 = xsocket(family, sotype, 0);
+ if (c0 < 0)
+ goto close_srv;
+
+ err = xconnect(c0, sockaddr(&addr), len);
+ if (err)
+ goto close_cli0;
+
+ p0 = xaccept(s, NULL, NULL);
+ if (p0 < 0)
+ goto close_cli0;
+
+ /* Update sock_map[0] to redirect to a connected socket */
+ key = 0;
+ value = p0;
+ err = xbpf_map_update_elem(sock_map, &key, &value, BPF_EXIST);
+ if (err)
+ goto close_peer0;
+
+ c1 = xsocket(family, sotype, 0);
+ if (c1 < 0)
+ goto close_peer0;
+
+ errno = 0;
+ err = connect(c1, sockaddr(&addr), len);
+ if (!err || errno != ECONNREFUSED)
+ FAIL_ERRNO("connect: expected ECONNREFUSED");
+
+ key = SK_DROP;
+ err = xbpf_map_lookup_elem(verd_map, &key, &drop);
+ if (err)
+ goto close_cli1;
+ if (drop != 1)
+ FAIL("want drop count 1, have %d", drop);
+
+close_cli1:
+ xclose(c1);
+close_peer0:
+ xclose(p0);
+close_cli0:
+ xclose(c0);
+close_srv:
+ xclose(s);
+}
+
+/* Check that redirecting across reuseport groups is not allowed. */
+static void test_reuseport_mixed_groups(int family, int sotype, int sock_map,
+ int verd_map, int reuseport_prog)
+{
+ struct sockaddr_storage addr;
+ int s1, s2, c, err;
+ unsigned int drop;
+ socklen_t len;
+ u64 value;
+ u32 key;
+
+ zero_verdict_count(verd_map);
+
+ /* Create two listeners, each in its own reuseport group */
+ s1 = listen_loopback_reuseport(family, sotype, reuseport_prog);
+ if (s1 < 0)
+ return;
+
+ s2 = listen_loopback_reuseport(family, sotype, reuseport_prog);
+ if (s2 < 0)
+ goto close_srv1;
+
+ key = 0;
+ value = s1;
+ err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv2;
+
+ key = 1;
+ value = s2;
+ err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv2;
+
+ /* Connect to s2, reuseport BPF selects s1 via sock_map[0] */
+ len = sizeof(addr);
+ err = xgetsockname(s2, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv2;
+
+ c = xsocket(family, sotype, 0);
+ if (c < 0)
+ goto close_srv2;
+
+ err = connect(c, sockaddr(&addr), len);
+ if (err && errno != ECONNREFUSED) {
+ FAIL_ERRNO("connect: expected ECONNREFUSED");
+ goto close_cli;
+ }
+
+ /* Expect drop, can't redirect outside of reuseport group */
+ key = SK_DROP;
+ err = xbpf_map_lookup_elem(verd_map, &key, &drop);
+ if (err)
+ goto close_cli;
+ if (drop != 1)
+ FAIL("want drop count 1, have %d", drop);
+
+close_cli:
+ xclose(c);
+close_srv2:
+ xclose(s2);
+close_srv1:
+ xclose(s1);
+}
+
+#define TEST(fn) \
+ { \
+ fn, #fn \
+ }
+
+static void test_ops_cleanup(const struct bpf_map *map)
+{
+ const struct bpf_map_def *def;
+ int err, mapfd;
+ u32 key;
+
+ def = bpf_map__def(map);
+ mapfd = bpf_map__fd(map);
+
+ for (key = 0; key < def->max_entries; key++) {
+ err = bpf_map_delete_elem(mapfd, &key);
+ if (err && errno != EINVAL && errno != ENOENT)
+ FAIL_ERRNO("map_delete: expected EINVAL/ENOENT");
+ }
+}
+
+static const char *family_str(sa_family_t family)
+{
+ switch (family) {
+ case AF_INET:
+ return "IPv4";
+ case AF_INET6:
+ return "IPv6";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *map_type_str(const struct bpf_map *map)
+{
+ const struct bpf_map_def *def;
+
+ def = bpf_map__def(map);
+ if (IS_ERR(def))
+ return "invalid";
+
+ switch (def->type) {
+ case BPF_MAP_TYPE_SOCKMAP:
+ return "sockmap";
+ case BPF_MAP_TYPE_SOCKHASH:
+ return "sockhash";
+ default:
+ return "unknown";
+ }
+}
+
+static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
+ int family, int sotype)
+{
+ const struct op_test {
+ void (*fn)(int family, int sotype, int mapfd);
+ const char *name;
+ } tests[] = {
+ /* insert */
+ TEST(test_insert_invalid),
+ TEST(test_insert_opened),
+ TEST(test_insert_bound),
+ TEST(test_insert_listening),
+ /* delete */
+ TEST(test_delete_after_insert),
+ TEST(test_delete_after_close),
+ /* lookup */
+ TEST(test_lookup_after_insert),
+ TEST(test_lookup_after_delete),
+ TEST(test_lookup_32_bit_value),
+ /* update */
+ TEST(test_update_listening),
+ /* races with insert/delete */
+ TEST(test_destroy_orphan_child),
+ TEST(test_syn_recv_insert_delete),
+ TEST(test_race_insert_listen),
+ /* child clone */
+ TEST(test_clone_after_delete),
+ TEST(test_accept_after_delete),
+ TEST(test_accept_before_delete),
+ };
+ const char *family_name, *map_name;
+ const struct op_test *t;
+ char s[MAX_TEST_NAME];
+ int map_fd;
+
+ family_name = family_str(family);
+ map_name = map_type_str(map);
+ map_fd = bpf_map__fd(map);
+
+ for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ snprintf(s, sizeof(s), "%s %s %s", map_name, family_name,
+ t->name);
+
+ if (!test__start_subtest(s))
+ continue;
+
+ t->fn(family, sotype, map_fd);
+ test_ops_cleanup(map);
+ }
+}
+
+static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
+ int family, int sotype)
+{
+ const struct redir_test {
+ void (*fn)(struct test_sockmap_listen *skel,
+ struct bpf_map *map, int family, int sotype);
+ const char *name;
+ } tests[] = {
+ TEST(test_skb_redir_to_connected),
+ TEST(test_skb_redir_to_listening),
+ TEST(test_msg_redir_to_connected),
+ TEST(test_msg_redir_to_listening),
+ };
+ const char *family_name, *map_name;
+ const struct redir_test *t;
+ char s[MAX_TEST_NAME];
+
+ family_name = family_str(family);
+ map_name = map_type_str(map);
+
+ for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ snprintf(s, sizeof(s), "%s %s %s", map_name, family_name,
+ t->name);
+ if (!test__start_subtest(s))
+ continue;
+
+ t->fn(skel, map, family, sotype);
+ }
+}
+
+static void test_reuseport(struct test_sockmap_listen *skel,
+ struct bpf_map *map, int family, int sotype)
+{
+ const struct reuseport_test {
+ void (*fn)(int family, int sotype, int socket_map,
+ int verdict_map, int reuseport_prog);
+ const char *name;
+ } tests[] = {
+ TEST(test_reuseport_select_listening),
+ TEST(test_reuseport_select_connected),
+ TEST(test_reuseport_mixed_groups),
+ };
+ int socket_map, verdict_map, reuseport_prog;
+ const char *family_name, *map_name;
+ const struct reuseport_test *t;
+ char s[MAX_TEST_NAME];
+
+ family_name = family_str(family);
+ map_name = map_type_str(map);
+
+ socket_map = bpf_map__fd(map);
+ verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ reuseport_prog = bpf_program__fd(skel->progs.prog_reuseport);
+
+ for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ snprintf(s, sizeof(s), "%s %s %s", map_name, family_name,
+ t->name);
+
+ if (!test__start_subtest(s))
+ continue;
+
+ t->fn(family, sotype, socket_map, verdict_map, reuseport_prog);
+ }
+}
+
+static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map,
+ int family)
+{
+ test_ops(skel, map, family, SOCK_STREAM);
+ test_redir(skel, map, family, SOCK_STREAM);
+ test_reuseport(skel, map, family, SOCK_STREAM);
+}
+
+void test_sockmap_listen(void)
+{
+ struct test_sockmap_listen *skel;
+
+ skel = test_sockmap_listen__open_and_load();
+ if (!skel) {
+ FAIL("skeleton open/load failed");
+ return;
+ }
+
+ skel->bss->test_sockmap = true;
+ run_tests(skel, skel->maps.sock_map, AF_INET);
+ run_tests(skel, skel->maps.sock_map, AF_INET6);
+
+ skel->bss->test_sockmap = false;
+ run_tests(skel, skel->maps.sock_hash, AF_INET);
+ run_tests(skel, skel->maps.sock_hash, AF_INET6);
+
+ test_sockmap_listen__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
index 1f6ccdaed1ac..781c8d11604b 100644
--- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
+++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
@@ -55,31 +55,40 @@ void test_trampoline_count(void)
/* attach 'allowed' 40 trampoline programs */
for (i = 0; i < MAX_TRAMP_PROGS; i++) {
obj = bpf_object__open_file(object, NULL);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
+ if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {
+ obj = NULL;
goto cleanup;
+ }
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
goto cleanup;
inst[i].obj = obj;
+ obj = NULL;
if (rand() % 2) {
- link = load(obj, fentry_name);
- if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link)))
+ link = load(inst[i].obj, fentry_name);
+ if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link))) {
+ link = NULL;
goto cleanup;
+ }
inst[i].link_fentry = link;
} else {
- link = load(obj, fexit_name);
- if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link)))
+ link = load(inst[i].obj, fexit_name);
+ if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link))) {
+ link = NULL;
goto cleanup;
+ }
inst[i].link_fexit = link;
}
}
/* and try 1 extra.. */
obj = bpf_object__open_file(object, NULL);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
+ if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {
+ obj = NULL;
goto cleanup;
+ }
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
@@ -104,7 +113,9 @@ void test_trampoline_count(void)
cleanup_extra:
bpf_object__close(obj);
cleanup:
- while (--i) {
+ if (i >= MAX_TRAMP_PROGS)
+ i = MAX_TRAMP_PROGS - 1;
+ for (; i >= 0; i--) {
bpf_link__destroy(inst[i].link_fentry);
bpf_link__destroy(inst[i].link_fexit);
bpf_object__close(inst[i].obj);
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
index 6b56bdc73ebc..4ba011031d4c 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
@@ -14,7 +14,7 @@ void test_xdp_bpf2bpf(void)
struct test_xdp *pkt_skel = NULL;
struct test_xdp_bpf2bpf *ftrace_skel = NULL;
struct vip key4 = {.protocol = 6, .family = AF_INET};
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct bpf_program *prog;
/* Load XDP program to introspect */
pkt_skel = test_xdp__open_and_load();
@@ -27,11 +27,21 @@ void test_xdp_bpf2bpf(void)
bpf_map_update_elem(map_fd, &key4, &value4, 0);
/* Load trace program */
- opts.attach_prog_fd = pkt_fd,
- ftrace_skel = test_xdp_bpf2bpf__open_opts(&opts);
+ ftrace_skel = test_xdp_bpf2bpf__open();
if (CHECK(!ftrace_skel, "__open", "ftrace skeleton failed\n"))
goto out;
+ /* Demonstrate the bpf_program__set_attach_target() API rather than
+ * the load with options, i.e. opts.attach_prog_fd.
+ */
+ prog = ftrace_skel->progs.trace_on_entry;
+ bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
+ bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");
+
+ prog = ftrace_skel->progs.trace_on_exit;
+ bpf_program__set_expected_attach_type(prog, BPF_TRACE_FEXIT);
+ bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");
+
err = test_xdp_bpf2bpf__load(ftrace_skel);
if (CHECK(err, "__load", "ftrace skeleton failed\n"))
goto out;
diff --git a/tools/testing/selftests/bpf/progs/test_perf_branches.c b/tools/testing/selftests/bpf/progs/test_perf_branches.c
new file mode 100644
index 000000000000..0f7e27d97567
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_perf_branches.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <stddef.h>
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_trace_helpers.h"
+
+int valid = 0;
+int required_size_out = 0;
+int written_stack_out = 0;
+int written_global_out = 0;
+
+struct {
+ __u64 _a;
+ __u64 _b;
+ __u64 _c;
+} fpbe[30] = {0};
+
+SEC("perf_event")
+int perf_branches(void *ctx)
+{
+ __u64 entries[4 * 3] = {0};
+ int required_size, written_stack, written_global;
+
+ /* write to stack */
+ written_stack = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
+ /* ignore spurious events */
+ if (!written_stack)
+ return 1;
+
+ /* get required size */
+ required_size = bpf_read_branch_records(ctx, NULL, 0,
+ BPF_F_GET_BRANCH_RECORDS_SIZE);
+
+ written_global = bpf_read_branch_records(ctx, fpbe, sizeof(fpbe), 0);
+ /* ignore spurious events */
+ if (!written_global)
+ return 1;
+
+ required_size_out = required_size;
+ written_stack_out = written_stack;
+ written_global_out = written_global;
+ valid = 1;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
new file mode 100644
index 000000000000..a3a366c57ce1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Cloudflare
+
+#include <errno.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} sock_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} sock_hash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, unsigned int);
+} verdict_map SEC(".maps");
+
+static volatile bool test_sockmap; /* toggled by user-space */
+
+SEC("sk_skb/stream_parser")
+int prog_skb_parser(struct __sk_buff *skb)
+{
+ return skb->len;
+}
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ unsigned int *count;
+ __u32 zero = 0;
+ int verdict;
+
+ if (test_sockmap)
+ verdict = bpf_sk_redirect_map(skb, &sock_map, zero, 0);
+ else
+ verdict = bpf_sk_redirect_hash(skb, &sock_hash, &zero, 0);
+
+ count = bpf_map_lookup_elem(&verdict_map, &verdict);
+ if (count)
+ (*count)++;
+
+ return verdict;
+}
+
+SEC("sk_msg")
+int prog_msg_verdict(struct sk_msg_md *msg)
+{
+ unsigned int *count;
+ __u32 zero = 0;
+ int verdict;
+
+ if (test_sockmap)
+ verdict = bpf_msg_redirect_map(msg, &sock_map, zero, 0);
+ else
+ verdict = bpf_msg_redirect_hash(msg, &sock_hash, &zero, 0);
+
+ count = bpf_map_lookup_elem(&verdict_map, &verdict);
+ if (count)
+ (*count)++;
+
+ return verdict;
+}
+
+SEC("sk_reuseport")
+int prog_reuseport(struct sk_reuseport_md *reuse)
+{
+ unsigned int *count;
+ int err, verdict;
+ __u32 zero = 0;
+
+ if (test_sockmap)
+ err = bpf_sk_select_reuseport(reuse, &sock_map, &zero, 0);
+ else
+ err = bpf_sk_select_reuseport(reuse, &sock_hash, &zero, 0);
+ verdict = err ? SK_DROP : SK_PASS;
+
+ count = bpf_map_lookup_elem(&verdict_map, &verdict);
+ if (count)
+ (*count)++;
+
+ return verdict;
+}
+
+int _version SEC("version") = 1;
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
index cb8a04ab7a78..b840fc9e3ed5 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
@@ -28,7 +28,7 @@ struct xdp_buff {
} __attribute__((preserve_access_index));
__u64 test_result_fentry = 0;
-SEC("fentry/_xdp_tx_iptunnel")
+SEC("fentry/FUNC")
int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)
{
test_result_fentry = xdp->rxq->dev->ifindex;
@@ -36,7 +36,7 @@ int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)
}
__u64 test_result_fexit = 0;
-SEC("fexit/_xdp_tx_iptunnel")
+SEC("fexit/FUNC")
int BPF_PROG(trace_on_exit, struct xdp_buff *xdp, int ret)
{
test_result_fexit = ret;
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 02eae1e864c2..c6766b2cff85 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -756,11 +756,7 @@ static void test_sockmap(unsigned int tasks, void *data)
/* Test update without programs */
for (i = 0; i < 6; i++) {
err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
- if (i < 2 && !err) {
- printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n",
- i, sfd[i]);
- goto out_sockmap;
- } else if (i >= 2 && err) {
+ if (err) {
printf("Failed noprog update sockmap '%i:%i'\n",
i, sfd[i]);
goto out_sockmap;