author		David S. Miller <davem@davemloft.net>	2019-02-17 09:56:34 +0300
committer	David S. Miller <davem@davemloft.net>	2019-02-17 09:56:34 +0300
commit		885e63195980ab25abc67336f0c44d4cb4e6e72b (patch)
tree		e16c90fe7e0400646dc73771790b5be47d143012 /tools
parent		f186a82b10dc229f9cd1e9f27f90cb0ce86e879d (diff)
parent		5aab392c55c96f9bb26d9294f965f156a87ee81c (diff)
download	linux-885e63195980ab25abc67336f0c44d4cb4e6e72b.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2019-02-16

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) numerous libbpf API improvements, from Andrii, Andrey, Yonghong.

2) test all bpf progs in alu32 mode, from Jiong.

3) skb->sk access and bpf_sk_fullsock(), bpf_tcp_sock() helpers, from Martin.

4) support for IP encap in lwt bpf progs, from Peter.

5) remove XDP_QUERY_XSK_UMEM dead code, from Jan.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
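For context, a minimal sketch (not taken from this series, program name hypothetical) of how item 3) is meant to be used from a cgroup_skb program; the full version lives in tools/testing/selftests/bpf/progs/test_sock_fields_kern.c further down in this diff:

/* Sketch only: exercises the new skb->sk field plus bpf_sk_fullsock()
 * and bpf_tcp_sock(), assuming the selftests' bpf_helpers.h.
 */
#include <linux/bpf.h>
#include <netinet/in.h>
#include "bpf_helpers.h"

SEC("cgroup_skb/egress")
int probe_snd_cwnd(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;	/* may be NULL or not a full socket */
	struct bpf_tcp_sock *tp;

	if (!sk)
		return 1;			/* 1 == allow the packet */

	sk = bpf_sk_fullsock(sk);		/* NULL unless sk is a full socket */
	if (!sk || sk->protocol != IPPROTO_TCP)
		return 1;

	tp = bpf_tcp_sock(sk);			/* NULL on failure */
	if (!tp)
		return 1;

	/* struct bpf_tcp_sock fields are now readable, e.g. tp->snd_cwnd */
	return 1;
}

char _license[] SEC("license") = "GPL";

Both helpers return NULL on failure, so the NULL checks before any field access are required by the verifier (see the new verifier/sock.c tests below).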
Diffstat (limited to 'tools')
-rw-r--r-- tools/bpf/bpftool/Documentation/bpftool.rst | 4
-rw-r--r-- tools/include/uapi/linux/bpf.h | 98
-rw-r--r-- tools/include/uapi/linux/if_link.h | 1
-rw-r--r-- tools/lib/bpf/bpf.c | 69
-rw-r--r-- tools/lib/bpf/bpf.h | 1
-rw-r--r-- tools/lib/bpf/btf.c | 182
-rw-r--r-- tools/lib/bpf/btf.h | 6
-rw-r--r-- tools/lib/bpf/libbpf.c | 25
-rw-r--r-- tools/lib/bpf/libbpf.h | 4
-rw-r--r-- tools/lib/bpf/libbpf.map | 6
-rw-r--r-- tools/testing/selftests/bpf/.gitignore | 1
-rw-r--r-- tools/testing/selftests/bpf/Makefile | 52
-rw-r--r-- tools/testing/selftests/bpf/bpf_helpers.h | 4
-rw-r--r-- tools/testing/selftests/bpf/bpf_util.h | 9
-rw-r--r-- tools/testing/selftests/bpf/progs/bpf_flow.c (renamed from tools/testing/selftests/bpf/bpf_flow.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/connect4_prog.c (renamed from tools/testing/selftests/bpf/connect4_prog.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/connect6_prog.c (renamed from tools/testing/selftests/bpf/connect6_prog.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/dev_cgroup.c (renamed from tools/testing/selftests/bpf/dev_cgroup.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c (renamed from tools/testing/selftests/bpf/get_cgroup_id_kern.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/netcnt_prog.c (renamed from tools/testing/selftests/bpf/netcnt_prog.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/sample_map_ret0.c (renamed from tools/testing/selftests/bpf/sample_map_ret0.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/sample_ret0.c (renamed from tools/testing/selftests/bpf/sample_ret0.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/sendmsg4_prog.c (renamed from tools/testing/selftests/bpf/sendmsg4_prog.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/sendmsg6_prog.c (renamed from tools/testing/selftests/bpf/sendmsg6_prog.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/socket_cookie_prog.c (renamed from tools/testing/selftests/bpf/socket_cookie_prog.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/sockmap_parse_prog.c (renamed from tools/testing/selftests/bpf/sockmap_parse_prog.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c (renamed from tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c (renamed from tools/testing/selftests/bpf/sockmap_verdict_prog.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_adjust_tail.c (renamed from tools/testing/selftests/bpf/test_adjust_tail.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_btf_haskv.c (renamed from tools/testing/selftests/bpf/test_btf_haskv.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_btf_nokv.c (renamed from tools/testing/selftests/bpf/test_btf_nokv.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c (renamed from tools/testing/selftests/bpf/test_get_stack_rawtp.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_l4lb.c (renamed from tools/testing/selftests/bpf/test_l4lb.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_l4lb_noinline.c (renamed from tools/testing/selftests/bpf/test_l4lb_noinline.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c (renamed from tools/testing/selftests/bpf/test_lirc_mode2_kern.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c | 85
-rw-r--r-- tools/testing/selftests/bpf/progs/test_lwt_seg6local.c (renamed from tools/testing/selftests/bpf/test_lwt_seg6local.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_map_in_map.c (renamed from tools/testing/selftests/bpf/test_map_in_map.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_map_lock.c (renamed from tools/testing/selftests/bpf/test_map_lock.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_obj_id.c (renamed from tools/testing/selftests/bpf/test_obj_id.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_pkt_access.c (renamed from tools/testing/selftests/bpf/test_pkt_access.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_pkt_md_access.c (renamed from tools/testing/selftests/bpf/test_pkt_md_access.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_queue_map.c (renamed from tools/testing/selftests/bpf/test_queue_map.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c (renamed from tools/testing/selftests/bpf/test_select_reuseport_kern.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c (renamed from tools/testing/selftests/bpf/test_sk_lookup_kern.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c (renamed from tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_sock_fields_kern.c | 152
-rw-r--r-- tools/testing/selftests/bpf/progs/test_sockhash_kern.c (renamed from tools/testing/selftests/bpf/test_sockhash_kern.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_sockmap_kern.c (renamed from tools/testing/selftests/bpf/test_sockmap_kern.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_spin_lock.c (renamed from tools/testing/selftests/bpf/test_spin_lock.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_stack_map.c (renamed from tools/testing/selftests/bpf/test_stack_map.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c (renamed from tools/testing/selftests/bpf/test_stacktrace_build_id.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_stacktrace_map.c (renamed from tools/testing/selftests/bpf/test_stacktrace_map.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_tcp_estats.c (renamed from tools/testing/selftests/bpf/test_tcp_estats.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c (renamed from tools/testing/selftests/bpf/test_tcpbpf_kern.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c (renamed from tools/testing/selftests/bpf/test_tcpnotify_kern.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_tracepoint.c (renamed from tools/testing/selftests/bpf/test_tracepoint.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_tunnel_kern.c (renamed from tools/testing/selftests/bpf/test_tunnel_kern.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_xdp.c (renamed from tools/testing/selftests/bpf/test_xdp.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_xdp_meta.c (renamed from tools/testing/selftests/bpf/test_xdp_meta.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_xdp_noinline.c (renamed from tools/testing/selftests/bpf/test_xdp_noinline.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_xdp_redirect.c (renamed from tools/testing/selftests/bpf/test_xdp_redirect.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/test_xdp_vlan.c (renamed from tools/testing/selftests/bpf/test_xdp_vlan.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/progs/xdp_dummy.c (renamed from tools/testing/selftests/bpf/xdp_dummy.c) | 0
-rw-r--r-- tools/testing/selftests/bpf/test_btf.c | 39
-rwxr-xr-x tools/testing/selftests/bpf/test_lwt_ip_encap.sh | 376
-rw-r--r-- tools/testing/selftests/bpf/test_sock.c | 9
-rw-r--r-- tools/testing/selftests/bpf/test_sock_fields.c | 327
-rw-r--r-- tools/testing/selftests/bpf/verifier/ref_tracking.c | 4
-rw-r--r-- tools/testing/selftests/bpf/verifier/sock.c | 384
-rw-r--r-- tools/testing/selftests/bpf/verifier/unpriv.c | 2
71 files changed, 1658 insertions, 182 deletions
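Several of the libbpf files above export new public entry points (btf__load(), btf__get_raw_data(), bpf_object__btf(), bpf_map__resize(); see the libbpf.map hunk below). A rough, hypothetical user-space sketch of how they might fit together; the object path and map name are placeholders, and the include paths assume an installed libbpf:

#include <stdio.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>

int open_resize_load(const char *obj_path)
{
	struct bpf_object *obj;
	struct bpf_map *map;
	struct btf *btf;
	__u32 raw_size;

	obj = bpf_object__open(obj_path);
	if (libbpf_get_error(obj))
		return -1;

	/* bpf_map__resize() must run before load: it returns -EBUSY once the map fd exists */
	map = bpf_object__find_map_by_name(obj, "my_map");	/* placeholder map name */
	if (map && bpf_map__resize(map, 64))
		fprintf(stderr, "resize failed\n");

	/* bpf_object__btf() returns NULL when the object carries no .BTF section */
	btf = bpf_object__btf(obj);
	if (btf && btf__get_raw_data(btf, &raw_size))
		printf("raw BTF size: %u bytes\n", raw_size);

	return bpf_object__load(obj);
}

The split of btf__new() and the new btf__load() (visible in the btf.c hunk) means parsing a .BTF section no longer implies loading it into the kernel; callers such as libbpf.c now do both explicitly.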
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 27153bb816ac..4f2188845dd8 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -16,7 +16,7 @@ SYNOPSIS
**bpftool** **version**
- *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** }
+ *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** }
*OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** }
| { **-j** | **--json** } [{ **-p** | **--pretty** }] }
@@ -34,6 +34,8 @@ SYNOPSIS
*NET-COMMANDS* := { **show** | **list** | **help** }
+ *FEATURE-COMMANDS* := { **probe** | **help** }
+
DESCRIPTION
===========
*bpftool* allows for inspection and simple modification of BPF objects
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 1777fa0c61e4..bcdd2474eee7 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -2016,6 +2016,19 @@ union bpf_attr {
* Only works if *skb* contains an IPv6 packet. Insert a
* Segment Routing Header (**struct ipv6_sr_hdr**) inside
* the IPv6 header.
+ * **BPF_LWT_ENCAP_IP**
+ * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
+ * must be IPv4 or IPv6, followed by zero or more
+ * additional headers, up to LWT_BPF_MAX_HEADROOM total
+ * bytes in all prepended headers. Please note that
+ * if skb_is_gso(skb) is true, no more than two headers
+ * can be prepended, and the inner header, if present,
+ * should be either GRE or UDP/GUE.
+ *
+ * BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of
+ * type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called
+ * by bpf programs of types BPF_PROG_TYPE_LWT_IN and
+ * BPF_PROG_TYPE_LWT_XMIT.
*
* A call to this helper is susceptible to change the underlaying
* packet buffer. Therefore, at load time, all checks on pointers
@@ -2329,6 +2342,23 @@ union bpf_attr {
* "**y**".
* Return
* 0
+ *
+ * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
+ * Description
+ * This helper gets a **struct bpf_sock** pointer such
+ * that all the fields in bpf_sock can be accessed.
+ * Return
+ * A **struct bpf_sock** pointer on success, or NULL in
+ * case of failure.
+ *
+ * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
+ * Description
+ * This helper gets a **struct bpf_tcp_sock** pointer from a
+ * **struct bpf_sock** pointer.
+ *
+ * Return
+ * A **struct bpf_tcp_sock** pointer on success, or NULL in
+ * case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2425,7 +2455,9 @@ union bpf_attr {
FN(msg_pop_data), \
FN(rc_pointer_rel), \
FN(spin_lock), \
- FN(spin_unlock),
+ FN(spin_unlock), \
+ FN(sk_fullsock), \
+ FN(tcp_sock),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2498,7 +2530,8 @@ enum bpf_hdr_start_off {
/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6,
- BPF_LWT_ENCAP_SEG6_INLINE
+ BPF_LWT_ENCAP_SEG6_INLINE,
+ BPF_LWT_ENCAP_IP,
};
#define __bpf_md_ptr(type, name) \
@@ -2545,6 +2578,7 @@ struct __sk_buff {
__u64 tstamp;
__u32 wire_len;
__u32 gso_segs;
+ __bpf_md_ptr(struct bpf_sock *, sk);
};
struct bpf_tunnel_key {
@@ -2586,7 +2620,15 @@ enum bpf_ret_code {
BPF_DROP = 2,
/* 3-6 reserved */
BPF_REDIRECT = 7,
- /* >127 are reserved for prog type specific return codes */
+ /* >127 are reserved for prog type specific return codes.
+ *
+ * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
+ * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
+ * changed and should be routed based on its new L3 header.
+ * (This is an L3 redirect, as opposed to L2 redirect
+ * represented by BPF_REDIRECT above).
+ */
+ BPF_LWT_REROUTE = 128,
};
struct bpf_sock {
@@ -2596,14 +2638,52 @@ struct bpf_sock {
__u32 protocol;
__u32 mark;
__u32 priority;
- __u32 src_ip4; /* Allows 1,2,4-byte read.
- * Stored in network byte order.
+ /* IP address also allows 1 and 2 bytes access */
+ __u32 src_ip4;
+ __u32 src_ip6[4];
+ __u32 src_port; /* host byte order */
+ __u32 dst_port; /* network byte order */
+ __u32 dst_ip4;
+ __u32 dst_ip6[4];
+ __u32 state;
+};
+
+struct bpf_tcp_sock {
+ __u32 snd_cwnd; /* Sending congestion window */
+ __u32 srtt_us; /* smoothed round trip time << 3 in usecs */
+ __u32 rtt_min;
+ __u32 snd_ssthresh; /* Slow start size threshold */
+ __u32 rcv_nxt; /* What we want to receive next */
+ __u32 snd_nxt; /* Next sequence we send */
+ __u32 snd_una; /* First byte we want an ack for */
+ __u32 mss_cache; /* Cached effective mss, not including SACKS */
+ __u32 ecn_flags; /* ECN status bits. */
+ __u32 rate_delivered; /* saved rate sample: packets delivered */
+ __u32 rate_interval_us; /* saved rate sample: time elapsed */
+ __u32 packets_out; /* Packets which are "in flight" */
+ __u32 retrans_out; /* Retransmitted packets out */
+ __u32 total_retrans; /* Total retransmits for entire connection */
+ __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
+ * total number of segments in.
*/
- __u32 src_ip6[4]; /* Allows 1,2,4-byte read.
- * Stored in network byte order.
+ __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
+ * total number of data segments in.
+ */
+ __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
+ __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
+ * total number of data segments sent.
+ */
+ __u32 lost_out; /* Lost packets */
+ __u32 sacked_out; /* SACK'd packets */
+ __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ * sum(delta(rcv_nxt)), or how many bytes
+ * were acked.
*/
- __u32 src_port; /* Allows 4-byte read.
- * Stored in host byte order
+ __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
+ * sum(delta(snd_una)), or how many bytes
+ * were acked.
*/
};
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index d6533828123a..5b225ff63b48 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -925,6 +925,7 @@ enum {
enum {
LINK_XSTATS_TYPE_UNSPEC,
LINK_XSTATS_TYPE_BRIDGE,
+ LINK_XSTATS_TYPE_BOND,
__LINK_XSTATS_TYPE_MAX
};
#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 3defad77dc7a..9cd015574e83 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -22,6 +22,7 @@
*/
#include <stdlib.h>
+#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
@@ -214,23 +215,35 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
{
void *finfo = NULL, *linfo = NULL;
union bpf_attr attr;
+ __u32 log_level;
__u32 name_len;
int fd;
- if (!load_attr)
+ if (!load_attr || !log_buf != !log_buf_sz)
+ return -EINVAL;
+
+ log_level = load_attr->log_level;
+ if (log_level > 2 || (log_level && !log_buf))
return -EINVAL;
name_len = load_attr->name ? strlen(load_attr->name) : 0;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
attr.insn_cnt = (__u32)load_attr->insns_cnt;
attr.insns = ptr_to_u64(load_attr->insns);
attr.license = ptr_to_u64(load_attr->license);
- attr.log_buf = ptr_to_u64(NULL);
- attr.log_size = 0;
- attr.log_level = 0;
+
+ attr.log_level = log_level;
+ if (log_level) {
+ attr.log_buf = ptr_to_u64(log_buf);
+ attr.log_size = log_buf_sz;
+ } else {
+ attr.log_buf = ptr_to_u64(NULL);
+ attr.log_size = 0;
+ }
+
attr.kern_version = load_attr->kern_version;
attr.prog_ifindex = load_attr->prog_ifindex;
attr.prog_btf_fd = load_attr->prog_btf_fd;
@@ -286,7 +299,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
goto done;
}
- if (!log_buf || !log_buf_sz)
+ if (log_level || !log_buf)
goto done;
/* Try again with log */
@@ -327,7 +340,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_type = type;
attr.insn_cnt = (__u32)insns_cnt;
attr.insns = ptr_to_u64(insns);
@@ -347,7 +360,7 @@ int bpf_map_update_elem(int fd, const void *key, const void *value,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
@@ -360,7 +373,7 @@ int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
@@ -372,7 +385,7 @@ int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
@@ -385,7 +398,7 @@ int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
@@ -397,7 +410,7 @@ int bpf_map_delete_elem(int fd, const void *key)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
@@ -408,7 +421,7 @@ int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.next_key = ptr_to_u64(next_key);
@@ -420,7 +433,7 @@ int bpf_obj_pin(int fd, const char *pathname)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
attr.bpf_fd = fd;
@@ -431,7 +444,7 @@ int bpf_obj_get(const char *pathname)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
@@ -442,7 +455,7 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
@@ -455,7 +468,7 @@ int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_type = type;
@@ -466,7 +479,7 @@ int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
@@ -480,7 +493,7 @@ int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
union bpf_attr attr;
int ret;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.query.target_fd = target_fd;
attr.query.attach_type = type;
attr.query.query_flags = query_flags;
@@ -501,7 +514,7 @@ int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
union bpf_attr attr;
int ret;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = prog_fd;
attr.test.data_in = ptr_to_u64(data);
attr.test.data_out = ptr_to_u64(data_out);
@@ -526,7 +539,7 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
if (!test_attr->data_out && test_attr->data_size_out > 0)
return -EINVAL;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = test_attr->prog_fd;
attr.test.data_in = ptr_to_u64(test_attr->data_in);
attr.test.data_out = ptr_to_u64(test_attr->data_out);
@@ -546,7 +559,7 @@ int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.start_id = start_id;
err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
@@ -561,7 +574,7 @@ int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.start_id = start_id;
err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
@@ -575,7 +588,7 @@ int bpf_prog_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_id = id;
return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -585,7 +598,7 @@ int bpf_map_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_id = id;
return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -595,7 +608,7 @@ int bpf_btf_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.btf_id = id;
return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -606,7 +619,7 @@ int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.info.bpf_fd = prog_fd;
attr.info.info_len = *info_len;
attr.info.info = ptr_to_u64(info);
@@ -622,7 +635,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index ed09eed2dc3b..6ffdd79bea89 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -85,6 +85,7 @@ struct bpf_load_program_attr {
__u32 line_info_rec_size;
const void *line_info;
__u32 line_info_cnt;
+ __u32 log_level;
};
/* Flags to direct loading requirements */
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index ab6528c935a1..68b50e9bbde1 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -16,7 +16,8 @@
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
-#define BTF_MAX_NR_TYPES 65535
+#define BTF_MAX_NR_TYPES 0x7fffffff
+#define BTF_MAX_STR_OFFSET 0x7fffffff
#define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \
((k) == BTF_KIND_VOLATILE) || \
@@ -41,9 +42,8 @@ struct btf {
struct btf_ext_info {
/*
- * info points to a deep copy of the individual info section
- * (e.g. func_info and line_info) from the .BTF.ext.
- * It does not include the __u32 rec_size.
+ * info points to the individual info section (e.g. func_info and
+ * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
*/
void *info;
__u32 rec_size;
@@ -51,8 +51,13 @@ struct btf_ext_info {
};
struct btf_ext {
+ union {
+ struct btf_ext_header *hdr;
+ void *data;
+ };
struct btf_ext_info func_info;
struct btf_ext_info line_info;
+ __u32 data_size;
};
struct btf_ext_info_sec {
@@ -171,7 +176,7 @@ static int btf_parse_str_sec(struct btf *btf)
const char *start = btf->nohdr_data + hdr->str_off;
const char *end = start + btf->hdr->str_len;
- if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
+ if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
start[0] || end[-1]) {
pr_debug("Invalid BTF string section\n");
return -EINVAL;
@@ -366,8 +371,6 @@ void btf__free(struct btf *btf)
struct btf *btf__new(__u8 *data, __u32 size)
{
- __u32 log_buf_size = 0;
- char *log_buf = NULL;
struct btf *btf;
int err;
@@ -377,15 +380,6 @@ struct btf *btf__new(__u8 *data, __u32 size)
btf->fd = -1;
- log_buf = malloc(BPF_LOG_BUF_SIZE);
- if (!log_buf) {
- err = -ENOMEM;
- goto done;
- }
-
- *log_buf = 0;
- log_buf_size = BPF_LOG_BUF_SIZE;
-
btf->data = malloc(size);
if (!btf->data) {
err = -ENOMEM;
@@ -395,17 +389,6 @@ struct btf *btf__new(__u8 *data, __u32 size)
memcpy(btf->data, data, size);
btf->data_size = size;
- btf->fd = bpf_load_btf(btf->data, btf->data_size,
- log_buf, log_buf_size, false);
-
- if (btf->fd == -1) {
- err = -errno;
- pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
- if (log_buf && *log_buf)
- pr_warning("%s\n", log_buf);
- goto done;
- }
-
err = btf_parse_hdr(btf);
if (err)
goto done;
@@ -417,8 +400,6 @@ struct btf *btf__new(__u8 *data, __u32 size)
err = btf_parse_type_sec(btf);
done:
- free(log_buf);
-
if (err) {
btf__free(btf);
return ERR_PTR(err);
@@ -427,16 +408,45 @@ done:
return btf;
}
+int btf__load(struct btf *btf)
+{
+ __u32 log_buf_size = BPF_LOG_BUF_SIZE;
+ char *log_buf = NULL;
+ int err = 0;
+
+ if (btf->fd >= 0)
+ return -EEXIST;
+
+ log_buf = malloc(log_buf_size);
+ if (!log_buf)
+ return -ENOMEM;
+
+ *log_buf = 0;
+
+ btf->fd = bpf_load_btf(btf->data, btf->data_size,
+ log_buf, log_buf_size, false);
+ if (btf->fd < 0) {
+ err = -errno;
+ pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
+ if (*log_buf)
+ pr_warning("%s\n", log_buf);
+ goto done;
+ }
+
+done:
+ free(log_buf);
+ return err;
+}
+
int btf__fd(const struct btf *btf)
{
return btf->fd;
}
-void btf__get_strings(const struct btf *btf, const char **strings,
- __u32 *str_len)
+const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
{
- *strings = btf->strings;
- *str_len = btf->hdr->str_len;
+ *size = btf->data_size;
+ return btf->data;
}
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
@@ -474,7 +484,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
goto exit_free;
}
- bzero(ptr, last_size);
+ memset(ptr, 0, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
@@ -488,7 +498,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
goto exit_free;
}
ptr = temp_ptr;
- bzero(ptr, last_size);
+ memset(ptr, 0, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
}
@@ -583,7 +593,7 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
return 0;
}
-struct btf_ext_sec_copy_param {
+struct btf_ext_sec_setup_param {
__u32 off;
__u32 len;
__u32 min_rec_size;
@@ -591,20 +601,14 @@ struct btf_ext_sec_copy_param {
const char *desc;
};
-static int btf_ext_copy_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size,
- struct btf_ext_sec_copy_param *ext_sec)
+static int btf_ext_setup_info(struct btf_ext *btf_ext,
+ struct btf_ext_sec_setup_param *ext_sec)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
const struct btf_ext_info_sec *sinfo;
struct btf_ext_info *ext_info;
__u32 info_left, record_size;
/* The start of the info sec (including the __u32 record_size). */
- const void *info;
-
- /* data and data_size do not include btf_ext_header from now on */
- data = data + hdr->hdr_len;
- data_size -= hdr->hdr_len;
+ void *info;
if (ext_sec->off & 0x03) {
pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
@@ -612,16 +616,15 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
return -EINVAL;
}
- if (data_size < ext_sec->off ||
- ext_sec->len > data_size - ext_sec->off) {
+ info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
+ info_left = ext_sec->len;
+
+ if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
- ext_sec->desc, ext_sec->off, ext_sec->len);
+ ext_sec->desc, ext_sec->off, ext_sec->len);
return -EINVAL;
}
- info = data + ext_sec->off;
- info_left = ext_sec->len;
-
/* At least a record size */
if (info_left < sizeof(__u32)) {
pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
@@ -633,7 +636,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
if (record_size < ext_sec->min_rec_size ||
record_size & 0x03) {
pr_debug("%s section in .BTF.ext has invalid record size %u\n",
- ext_sec->desc, record_size);
+ ext_sec->desc, record_size);
return -EINVAL;
}
@@ -679,42 +682,35 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
ext_info = ext_sec->ext_info;
ext_info->len = ext_sec->len - sizeof(__u32);
ext_info->rec_size = record_size;
- ext_info->info = malloc(ext_info->len);
- if (!ext_info->info)
- return -ENOMEM;
- memcpy(ext_info->info, info + sizeof(__u32), ext_info->len);
+ ext_info->info = info + sizeof(__u32);
return 0;
}
-static int btf_ext_copy_func_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size)
+static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
- struct btf_ext_sec_copy_param param = {
- .off = hdr->func_info_off,
- .len = hdr->func_info_len,
+ struct btf_ext_sec_setup_param param = {
+ .off = btf_ext->hdr->func_info_off,
+ .len = btf_ext->hdr->func_info_len,
.min_rec_size = sizeof(struct bpf_func_info_min),
.ext_info = &btf_ext->func_info,
.desc = "func_info"
};
- return btf_ext_copy_info(btf_ext, data, data_size, &param);
+ return btf_ext_setup_info(btf_ext, &param);
}
-static int btf_ext_copy_line_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size)
+static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
- struct btf_ext_sec_copy_param param = {
- .off = hdr->line_info_off,
- .len = hdr->line_info_len,
+ struct btf_ext_sec_setup_param param = {
+ .off = btf_ext->hdr->line_info_off,
+ .len = btf_ext->hdr->line_info_len,
.min_rec_size = sizeof(struct bpf_line_info_min),
.ext_info = &btf_ext->line_info,
.desc = "line_info",
};
- return btf_ext_copy_info(btf_ext, data, data_size, &param);
+ return btf_ext_setup_info(btf_ext, &param);
}
static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
@@ -754,9 +750,7 @@ void btf_ext__free(struct btf_ext *btf_ext)
{
if (!btf_ext)
return;
-
- free(btf_ext->func_info.info);
- free(btf_ext->line_info.info);
+ free(btf_ext->data);
free(btf_ext);
}
@@ -773,13 +767,23 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
if (!btf_ext)
return ERR_PTR(-ENOMEM);
- err = btf_ext_copy_func_info(btf_ext, data, size);
- if (err) {
- btf_ext__free(btf_ext);
- return ERR_PTR(err);
+ btf_ext->data_size = size;
+ btf_ext->data = malloc(size);
+ if (!btf_ext->data) {
+ err = -ENOMEM;
+ goto done;
}
+ memcpy(btf_ext->data, data, size);
- err = btf_ext_copy_line_info(btf_ext, data, size);
+ err = btf_ext_setup_func_info(btf_ext);
+ if (err)
+ goto done;
+
+ err = btf_ext_setup_line_info(btf_ext);
+ if (err)
+ goto done;
+
+done:
if (err) {
btf_ext__free(btf_ext);
return ERR_PTR(err);
@@ -788,6 +792,12 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
return btf_ext;
}
+const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
+{
+ *size = btf_ext->data_size;
+ return btf_ext->data;
+}
+
static int btf_ext_reloc_info(const struct btf *btf,
const struct btf_ext_info *ext_info,
const char *sec_name, __u32 insns_cnt,
@@ -836,7 +846,8 @@ static int btf_ext_reloc_info(const struct btf *btf,
return -ENOENT;
}
-int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ext,
+int btf_ext__reloc_func_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **func_info, __u32 *cnt)
{
@@ -844,7 +855,8 @@ int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ex
insns_cnt, func_info, cnt);
}
-int btf_ext__reloc_line_info(const struct btf *btf, const struct btf_ext *btf_ext,
+int btf_ext__reloc_line_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **line_info, __u32 *cnt)
{
@@ -1871,7 +1883,7 @@ static int btf_dedup_prim_types(struct btf_dedup *d)
*/
static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
{
- return d->map[type_id] <= BTF_MAX_TYPE;
+ return d->map[type_id] <= BTF_MAX_NR_TYPES;
}
/*
@@ -2022,7 +2034,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
canon_id = resolve_fwd_id(d, canon_id);
hypot_type_id = d->hypot_map[canon_id];
- if (hypot_type_id <= BTF_MAX_TYPE)
+ if (hypot_type_id <= BTF_MAX_NR_TYPES)
return hypot_type_id == cand_id;
if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
@@ -2241,7 +2253,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
__u32 h;
/* already deduped or is in process of deduping (loop detected) */
- if (d->map[type_id] <= BTF_MAX_TYPE)
+ if (d->map[type_id] <= BTF_MAX_NR_TYPES)
return 0;
t = d->btf->types[type_id];
@@ -2318,7 +2330,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
if (d->map[type_id] == BTF_IN_PROGRESS_ID)
return -ELOOP;
- if (d->map[type_id] <= BTF_MAX_TYPE)
+ if (d->map[type_id] <= BTF_MAX_NR_TYPES)
return resolve_type_id(d, type_id);
t = d->btf->types[type_id];
@@ -2498,7 +2510,7 @@ static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
resolved_type_id = resolve_type_id(d, type_id);
new_type_id = d->hypot_map[resolved_type_id];
- if (new_type_id > BTF_MAX_TYPE)
+ if (new_type_id > BTF_MAX_NR_TYPES)
return -EINVAL;
return new_type_id;
}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index b393da90cc85..94bbc249b0f1 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -57,6 +57,7 @@ struct btf_ext_header {
LIBBPF_API void btf__free(struct btf *btf);
LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
+LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
const char *type_name);
LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
@@ -65,8 +66,7 @@ LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__fd(const struct btf *btf);
-LIBBPF_API void btf__get_strings(const struct btf *btf, const char **strings,
- __u32 *str_len);
+LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
@@ -76,6 +76,8 @@ LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size);
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
+LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext* btf_ext,
+ __u32 *size);
LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf,
const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 47969aa0faf8..b38dcbe7460a 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -307,7 +307,7 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
return -EINVAL;
}
- bzero(prog, sizeof(*prog));
+ memset(prog, 0, sizeof(*prog));
prog->section_name = strdup(section_name);
if (!prog->section_name) {
@@ -835,7 +835,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
obj->efile.maps_shndx = idx;
else if (strcmp(name, BTF_ELF_SEC) == 0) {
obj->btf = btf__new(data->d_buf, data->d_size);
- if (IS_ERR(obj->btf)) {
+ if (IS_ERR(obj->btf) || btf__load(obj->btf)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_ELF_SEC, PTR_ERR(obj->btf));
obj->btf = NULL;
@@ -1113,6 +1113,20 @@ err_free_new_name:
return -errno;
}
+int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
+{
+ if (!map || !max_entries)
+ return -EINVAL;
+
+ /* If map already created, its attributes can't be changed. */
+ if (map->fd >= 0)
+ return -EBUSY;
+
+ map->def.max_entries = max_entries;
+
+ return 0;
+}
+
static int
bpf_object__probe_name(struct bpf_object *obj)
{
@@ -1576,7 +1590,7 @@ bpf_program__load(struct bpf_program *prog,
struct bpf_prog_prep_result result;
bpf_program_prep_t preprocessor = prog->preprocessor;
- bzero(&result, sizeof(result));
+ memset(&result, 0, sizeof(result));
err = preprocessor(prog, i, prog->insns,
prog->insns_cnt, &result);
if (err) {
@@ -2317,6 +2331,11 @@ unsigned int bpf_object__kversion(struct bpf_object *obj)
return obj ? obj->kern_version : 0;
}
+struct btf *bpf_object__btf(struct bpf_object *obj)
+{
+ return obj ? obj->btf : NULL;
+}
+
int bpf_object__btf_fd(const struct bpf_object *obj)
{
return obj->btf ? btf__fd(obj->btf) : -1;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 69a7c25eaccc..6c0168f8bba5 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -89,6 +89,9 @@ LIBBPF_API int bpf_object__load(struct bpf_object *obj);
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
LIBBPF_API const char *bpf_object__name(struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj);
+
+struct btf;
+LIBBPF_API struct btf *bpf_object__btf(struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
@@ -294,6 +297,7 @@ LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
bpf_map_clear_priv_t clear_priv);
LIBBPF_API void *bpf_map__priv(struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
+LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
LIBBPF_API bool bpf_map__is_offload_neutral(struct bpf_map *map);
LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 89c1149e32ee..99dfa710c818 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -130,15 +130,19 @@ LIBBPF_0.0.2 {
bpf_probe_helper;
bpf_probe_map_type;
bpf_probe_prog_type;
+ bpf_map__resize;
bpf_map_lookup_elem_flags;
+ bpf_object__btf;
bpf_object__find_map_fd_by_name;
bpf_get_link_xdp_id;
btf__dedup;
btf__get_map_kv_tids;
btf__get_nr_types;
- btf__get_strings;
+ btf__get_raw_data;
+ btf__load;
btf_ext__free;
btf_ext__func_info_rec_size;
+ btf_ext__get_raw_data;
btf_ext__line_info_rec_size;
btf_ext__new;
btf_ext__reloc_func_info;
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index dd093bd91aa9..e47168d1257d 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -29,3 +29,4 @@ test_netcnt
test_section_names
test_tcpnotify_user
test_libbpf
+alu32
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 383d2ff13fc7..ccffaa0a0787 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -23,42 +23,19 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
- test_netcnt test_tcpnotify_user
-
-BPF_OBJ_FILES = \
- test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
- sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o \
- test_tcpnotify_kern.o sample_map_ret0.o test_tcpbpf_kern.o \
- sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o \
- test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o \
- test_tunnel_kern.o test_sockhash_kern.o test_lwt_seg6local.o \
- sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
- get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
- test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o test_xdp_vlan.o \
- xdp_dummy.o test_map_in_map.o test_spin_lock.o test_map_lock.o
-
-# Objects are built with default compilation flags and with sub-register
-# code-gen enabled.
-BPF_OBJ_FILES_DUAL_COMPILE = \
- test_pkt_access.o test_pkt_access.o test_xdp.o test_adjust_tail.o \
- test_l4lb.o test_l4lb_noinline.o test_xdp_noinline.o test_tcp_estats.o \
- test_obj_id.o test_pkt_md_access.o test_tracepoint.o \
- test_stacktrace_map.o test_stacktrace_map.o test_stacktrace_build_id.o \
- test_stacktrace_build_id.o test_get_stack_rawtp.o \
- test_get_stack_rawtp.o test_tracepoint.o test_sk_lookup_kern.o \
- test_queue_map.o test_stack_map.o
-
-TEST_GEN_FILES = $(BPF_OBJ_FILES) $(BPF_OBJ_FILES_DUAL_COMPILE)
-
-# Also test sub-register code-gen if LLVM + kernel both has eBPF v3 processor
-# support which is the first version to contain both ALU32 and JMP32
-# instructions.
+ test_netcnt test_tcpnotify_user test_sock_fields
+
+BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
+TEST_GEN_FILES = $(BPF_OBJ_FILES)
+
+# Also test sub-register code-gen if LLVM has eBPF v3 processor support which
+# contains both ALU32 and JMP32 instructions.
SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
$(CLANG) -target bpf -O2 -emit-llvm -S -x c - -o - | \
- $(LLC) -mattr=+alu32 -mcpu=probe 2>&1 | \
+ $(LLC) -mattr=+alu32 -mcpu=v3 2>&1 | \
grep 'if w')
ifneq ($(SUBREG_CODEGEN),)
-TEST_GEN_FILES += $(patsubst %.o,alu32/%.o, $(BPF_OBJ_FILES_DUAL_COMPILE))
+TEST_GEN_FILES += $(patsubst %.o,alu32/%.o, $(BPF_OBJ_FILES))
endif
# Order correspond to 'make run_tests' order
@@ -73,7 +50,8 @@ TEST_PROGS := test_kmod.sh \
test_lirc_mode2.sh \
test_skb_cgroup_id.sh \
test_flow_dissector.sh \
- test_xdp_vlan.sh
+ test_xdp_vlan.sh \
+ test_lwt_ip_encap.sh
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh \
@@ -111,6 +89,7 @@ $(OUTPUT)/test_progs: trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
$(OUTPUT)/test_netcnt: cgroup_helpers.c
+$(OUTPUT)/test_sock_fields: cgroup_helpers.c
.PHONY: force
@@ -188,7 +167,8 @@ $(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(ALU32_BUILD_DIR) \
$(CC) $(CFLAGS) -o $(ALU32_BUILD_DIR)/test_progs_32 $< \
trace_helpers.c $(OUTPUT)/libbpf.a $(LDLIBS)
-$(ALU32_BUILD_DIR)/%.o: %.c $(ALU32_BUILD_DIR) $(ALU32_BUILD_DIR)/test_progs_32
+$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR) \
+ $(ALU32_BUILD_DIR)/test_progs_32
$(CLANG) $(CLANG_FLAGS) \
-O2 -target bpf -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \
@@ -200,7 +180,7 @@ endif
# Have one program compiled without "-target bpf" to test whether libbpf loads
# it successfully
-$(OUTPUT)/test_xdp.o: test_xdp.c
+$(OUTPUT)/test_xdp.o: progs/test_xdp.c
$(CLANG) $(CLANG_FLAGS) \
-O2 -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
@@ -208,7 +188,7 @@ ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
-$(OUTPUT)/%.o: %.c
+$(OUTPUT)/%.o: progs/%.c
$(CLANG) $(CLANG_FLAGS) \
-O2 -target bpf -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 6a0ce0f055c5..d9999f1ed1d2 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -176,6 +176,10 @@ static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
(void *) BPF_FUNC_spin_lock;
static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
(void *) BPF_FUNC_spin_unlock;
+static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
+ (void *) BPF_FUNC_sk_fullsock;
+static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
+ (void *) BPF_FUNC_tcp_sock;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 84fd6f1bf33e..a29206ebbd13 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -58,4 +58,13 @@ static inline unsigned int bpf_num_possible_cpus(void)
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#endif
+
+#ifndef offsetofend
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
+#endif
+
#endif /* __BPF_UTIL__ */
diff --git a/tools/testing/selftests/bpf/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 284660f5aa95..284660f5aa95 100644
--- a/tools/testing/selftests/bpf/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
diff --git a/tools/testing/selftests/bpf/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c
index 1fd244d35ba9..1fd244d35ba9 100644
--- a/tools/testing/selftests/bpf/connect4_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect4_prog.c
diff --git a/tools/testing/selftests/bpf/connect6_prog.c b/tools/testing/selftests/bpf/progs/connect6_prog.c
index 26397ab7b3c7..26397ab7b3c7 100644
--- a/tools/testing/selftests/bpf/connect6_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect6_prog.c
diff --git a/tools/testing/selftests/bpf/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c
index ce41a3475f27..ce41a3475f27 100644
--- a/tools/testing/selftests/bpf/dev_cgroup.c
+++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
index 014dba10b8a5..014dba10b8a5 100644
--- a/tools/testing/selftests/bpf/get_cgroup_id_kern.c
+++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
diff --git a/tools/testing/selftests/bpf/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c
index 9f741e69cebe..9f741e69cebe 100644
--- a/tools/testing/selftests/bpf/netcnt_prog.c
+++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c
diff --git a/tools/testing/selftests/bpf/sample_map_ret0.c b/tools/testing/selftests/bpf/progs/sample_map_ret0.c
index 0756303676ac..0756303676ac 100644
--- a/tools/testing/selftests/bpf/sample_map_ret0.c
+++ b/tools/testing/selftests/bpf/progs/sample_map_ret0.c
diff --git a/tools/testing/selftests/bpf/sample_ret0.c b/tools/testing/selftests/bpf/progs/sample_ret0.c
index fec99750d6ea..fec99750d6ea 100644
--- a/tools/testing/selftests/bpf/sample_ret0.c
+++ b/tools/testing/selftests/bpf/progs/sample_ret0.c
diff --git a/tools/testing/selftests/bpf/sendmsg4_prog.c b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
index a91536b1c47e..a91536b1c47e 100644
--- a/tools/testing/selftests/bpf/sendmsg4_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
diff --git a/tools/testing/selftests/bpf/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
index 5aeaa284fc47..5aeaa284fc47 100644
--- a/tools/testing/selftests/bpf/sendmsg6_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
diff --git a/tools/testing/selftests/bpf/socket_cookie_prog.c b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
index 9ff8ac4b0bf6..9ff8ac4b0bf6 100644
--- a/tools/testing/selftests/bpf/socket_cookie_prog.c
+++ b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
index 0f92858f6226..0f92858f6226 100644
--- a/tools/testing/selftests/bpf/sockmap_parse_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
index 12a7b5c82ed6..12a7b5c82ed6 100644
--- a/tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
index 2ce7634a4012..2ce7634a4012 100644
--- a/tools/testing/selftests/bpf/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
diff --git a/tools/testing/selftests/bpf/test_adjust_tail.c b/tools/testing/selftests/bpf/progs/test_adjust_tail.c
index 4cd5e860c903..4cd5e860c903 100644
--- a/tools/testing/selftests/bpf/test_adjust_tail.c
+++ b/tools/testing/selftests/bpf/progs/test_adjust_tail.c
diff --git a/tools/testing/selftests/bpf/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
index e5c79fe0ffdb..e5c79fe0ffdb 100644
--- a/tools/testing/selftests/bpf/test_btf_haskv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
diff --git a/tools/testing/selftests/bpf/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
index 434188c37774..434188c37774 100644
--- a/tools/testing/selftests/bpf/test_btf_nokv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
diff --git a/tools/testing/selftests/bpf/test_get_stack_rawtp.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
index f6d9f238e00a..f6d9f238e00a 100644
--- a/tools/testing/selftests/bpf/test_get_stack_rawtp.c
+++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
diff --git a/tools/testing/selftests/bpf/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c
index 1e10c9590991..1e10c9590991 100644
--- a/tools/testing/selftests/bpf/test_l4lb.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb.c
diff --git a/tools/testing/selftests/bpf/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index ba44a14e6dc4..ba44a14e6dc4 100644
--- a/tools/testing/selftests/bpf/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c b/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c
index 4147130cc3b7..4147130cc3b7 100644
--- a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c b/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c
new file mode 100644
index 000000000000..c957d6dfe6d7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+struct grehdr {
+ __be16 flags;
+ __be16 protocol;
+};
+
+SEC("encap_gre")
+int bpf_lwt_encap_gre(struct __sk_buff *skb)
+{
+ struct encap_hdr {
+ struct iphdr iph;
+ struct grehdr greh;
+ } hdr;
+ int err;
+
+ memset(&hdr, 0, sizeof(struct encap_hdr));
+
+ hdr.iph.ihl = 5;
+ hdr.iph.version = 4;
+ hdr.iph.ttl = 0x40;
+ hdr.iph.protocol = 47; /* IPPROTO_GRE */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ hdr.iph.saddr = 0x640110ac; /* 172.16.1.100 */
+ hdr.iph.daddr = 0x641010ac; /* 172.16.16.100 */
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ hdr.iph.saddr = 0xac100164; /* 172.16.1.100 */
+ hdr.iph.daddr = 0xac101064; /* 172.16.16.100 */
+#else
+#error "Fix your compiler's __BYTE_ORDER__?!"
+#endif
+ hdr.iph.tot_len = bpf_htons(skb->len + sizeof(struct encap_hdr));
+
+ hdr.greh.protocol = skb->protocol;
+
+ err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
+ sizeof(struct encap_hdr));
+ if (err)
+ return BPF_DROP;
+
+ return BPF_LWT_REROUTE;
+}
+
+SEC("encap_gre6")
+int bpf_lwt_encap_gre6(struct __sk_buff *skb)
+{
+ struct encap_hdr {
+ struct ipv6hdr ip6hdr;
+ struct grehdr greh;
+ } hdr;
+ int err;
+
+ memset(&hdr, 0, sizeof(struct encap_hdr));
+
+ hdr.ip6hdr.version = 6;
+ hdr.ip6hdr.payload_len = bpf_htons(skb->len + sizeof(struct grehdr));
+ hdr.ip6hdr.nexthdr = 47; /* IPPROTO_GRE */
+ hdr.ip6hdr.hop_limit = 0x40;
+ /* fb01::1 */
+ hdr.ip6hdr.saddr.s6_addr[0] = 0xfb;
+ hdr.ip6hdr.saddr.s6_addr[1] = 1;
+ hdr.ip6hdr.saddr.s6_addr[15] = 1;
+ /* fb10::1 */
+ hdr.ip6hdr.daddr.s6_addr[0] = 0xfb;
+ hdr.ip6hdr.daddr.s6_addr[1] = 0x10;
+ hdr.ip6hdr.daddr.s6_addr[15] = 1;
+
+ hdr.greh.protocol = skb->protocol;
+
+ err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
+ sizeof(struct encap_hdr));
+ if (err)
+ return BPF_DROP;
+
+ return BPF_LWT_REROUTE;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index 0575751bc1bc..0575751bc1bc 100644
--- a/tools/testing/selftests/bpf/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
diff --git a/tools/testing/selftests/bpf/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index ce923e67e08e..ce923e67e08e 100644
--- a/tools/testing/selftests/bpf/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
diff --git a/tools/testing/selftests/bpf/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c
index af8cc68ed2f9..af8cc68ed2f9 100644
--- a/tools/testing/selftests/bpf/test_map_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_map_lock.c
diff --git a/tools/testing/selftests/bpf/test_obj_id.c b/tools/testing/selftests/bpf/progs/test_obj_id.c
index 880d2963b472..880d2963b472 100644
--- a/tools/testing/selftests/bpf/test_obj_id.c
+++ b/tools/testing/selftests/bpf/progs/test_obj_id.c
diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 6e11ba11709e..6e11ba11709e 100644
--- a/tools/testing/selftests/bpf/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
index 7956302ecdf2..7956302ecdf2 100644
--- a/tools/testing/selftests/bpf/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
diff --git a/tools/testing/selftests/bpf/test_queue_map.c b/tools/testing/selftests/bpf/progs/test_queue_map.c
index 87db1f9da33d..87db1f9da33d 100644
--- a/tools/testing/selftests/bpf/test_queue_map.c
+++ b/tools/testing/selftests/bpf/progs/test_queue_map.c
diff --git a/tools/testing/selftests/bpf/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
index 5b54ec637ada..5b54ec637ada 100644
--- a/tools/testing/selftests/bpf/test_select_reuseport_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
diff --git a/tools/testing/selftests/bpf/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index e21cd736c196..e21cd736c196 100644
--- a/tools/testing/selftests/bpf/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
index 68cf9829f5a7..68cf9829f5a7 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
new file mode 100644
index 000000000000..de1a43e8f610
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <linux/bpf.h>
+#include <netinet/in.h>
+#include <stdbool.h>
+
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+enum bpf_array_idx {
+ SRV_IDX,
+ CLI_IDX,
+ __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") addr_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct sockaddr_in6),
+ .max_entries = __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") sock_result_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct bpf_sock),
+ .max_entries = __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") tcp_sock_result_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct bpf_tcp_sock),
+ .max_entries = __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") linum_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 1,
+};
+
+static bool is_loopback6(__u32 *a6)
+{
+ return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1);
+}
+
+static void skcpy(struct bpf_sock *dst,
+ const struct bpf_sock *src)
+{
+ dst->bound_dev_if = src->bound_dev_if;
+ dst->family = src->family;
+ dst->type = src->type;
+ dst->protocol = src->protocol;
+ dst->mark = src->mark;
+ dst->priority = src->priority;
+ dst->src_ip4 = src->src_ip4;
+ dst->src_ip6[0] = src->src_ip6[0];
+ dst->src_ip6[1] = src->src_ip6[1];
+ dst->src_ip6[2] = src->src_ip6[2];
+ dst->src_ip6[3] = src->src_ip6[3];
+ dst->src_port = src->src_port;
+ dst->dst_ip4 = src->dst_ip4;
+ dst->dst_ip6[0] = src->dst_ip6[0];
+ dst->dst_ip6[1] = src->dst_ip6[1];
+ dst->dst_ip6[2] = src->dst_ip6[2];
+ dst->dst_ip6[3] = src->dst_ip6[3];
+ dst->dst_port = src->dst_port;
+ dst->state = src->state;
+}
+
+static void tpcpy(struct bpf_tcp_sock *dst,
+ const struct bpf_tcp_sock *src)
+{
+ dst->snd_cwnd = src->snd_cwnd;
+ dst->srtt_us = src->srtt_us;
+ dst->rtt_min = src->rtt_min;
+ dst->snd_ssthresh = src->snd_ssthresh;
+ dst->rcv_nxt = src->rcv_nxt;
+ dst->snd_nxt = src->snd_nxt;
+ dst->snd_una = src->snd_una;
+ dst->mss_cache = src->mss_cache;
+ dst->ecn_flags = src->ecn_flags;
+ dst->rate_delivered = src->rate_delivered;
+ dst->rate_interval_us = src->rate_interval_us;
+ dst->packets_out = src->packets_out;
+ dst->retrans_out = src->retrans_out;
+ dst->total_retrans = src->total_retrans;
+ dst->segs_in = src->segs_in;
+ dst->data_segs_in = src->data_segs_in;
+ dst->segs_out = src->segs_out;
+ dst->data_segs_out = src->data_segs_out;
+ dst->lost_out = src->lost_out;
+ dst->sacked_out = src->sacked_out;
+ dst->bytes_received = src->bytes_received;
+ dst->bytes_acked = src->bytes_acked;
+}
+
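+/* Record the current source line in linum_map (so the userspace test can
+ * report where the program bailed out) and return 1 to let the skb pass.
+ */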
+#define RETURN { \
+ linum = __LINE__; \
+ bpf_map_update_elem(&linum_map, &idx0, &linum, 0); \
+ return 1; \
+}
+
+SEC("cgroup_skb/egress")
+int read_sock_fields(struct __sk_buff *skb)
+{
+ __u32 srv_idx = SRV_IDX, cli_idx = CLI_IDX, idx;
+ struct sockaddr_in6 *srv_sa6, *cli_sa6;
+ struct bpf_tcp_sock *tp, *tp_ret;
+ struct bpf_sock *sk, *sk_ret;
+ __u32 linum, idx0 = 0;
+
+ sk = skb->sk;
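+	/* state 10 is BPF_TCP_LISTEN: ignore the listening socket */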
+ if (!sk || sk->state == 10)
+ RETURN;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk || sk->family != AF_INET6 || sk->protocol != IPPROTO_TCP ||
+ !is_loopback6(sk->src_ip6))
+ RETURN;
+
+ tp = bpf_tcp_sock(sk);
+ if (!tp)
+ RETURN;
+
+ srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
+ cli_sa6 = bpf_map_lookup_elem(&addr_map, &cli_idx);
+ if (!srv_sa6 || !cli_sa6)
+ RETURN;
+
+ if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port))
+ idx = srv_idx;
+ else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
+ idx = cli_idx;
+ else
+ RETURN;
+
+ sk_ret = bpf_map_lookup_elem(&sock_result_map, &idx);
+ tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &idx);
+ if (!sk_ret || !tp_ret)
+ RETURN;
+
+ skcpy(sk_ret, sk);
+ tpcpy(tp_ret, tp);
+
+ RETURN;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_sockhash_kern.c b/tools/testing/selftests/bpf/progs/test_sockhash_kern.c
index e6755916442a..e6755916442a 100644
--- a/tools/testing/selftests/bpf/test_sockhash_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sockhash_kern.c
diff --git a/tools/testing/selftests/bpf/test_sockmap_kern.c b/tools/testing/selftests/bpf/progs/test_sockmap_kern.c
index 677b2ed1cc1e..677b2ed1cc1e 100644
--- a/tools/testing/selftests/bpf/test_sockmap_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.c
diff --git a/tools/testing/selftests/bpf/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c
index 40f904312090..40f904312090 100644
--- a/tools/testing/selftests/bpf/test_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c
diff --git a/tools/testing/selftests/bpf/test_stack_map.c b/tools/testing/selftests/bpf/progs/test_stack_map.c
index 31c3880e6da0..31c3880e6da0 100644
--- a/tools/testing/selftests/bpf/test_stack_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stack_map.c
diff --git a/tools/testing/selftests/bpf/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
index d86c281e957f..d86c281e957f 100644
--- a/tools/testing/selftests/bpf/test_stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
diff --git a/tools/testing/selftests/bpf/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
index af111af7ca1a..af111af7ca1a 100644
--- a/tools/testing/selftests/bpf/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
diff --git a/tools/testing/selftests/bpf/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
index bee3bbecc0c4..bee3bbecc0c4 100644
--- a/tools/testing/selftests/bpf/test_tcp_estats.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
index 74f73b33a7b0..74f73b33a7b0 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
index edbca203ce2d..edbca203ce2d 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
diff --git a/tools/testing/selftests/bpf/test_tracepoint.c b/tools/testing/selftests/bpf/progs/test_tracepoint.c
index 04bf084517e0..04bf084517e0 100644
--- a/tools/testing/selftests/bpf/test_tracepoint.c
+++ b/tools/testing/selftests/bpf/progs/test_tracepoint.c
diff --git a/tools/testing/selftests/bpf/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
index 504df69c83df..504df69c83df 100644
--- a/tools/testing/selftests/bpf/test_tunnel_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
diff --git a/tools/testing/selftests/bpf/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c
index 5e7df8bb5b5d..5e7df8bb5b5d 100644
--- a/tools/testing/selftests/bpf/test_xdp.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp.c
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
index 8d0182650653..8d0182650653 100644
--- a/tools/testing/selftests/bpf/test_xdp_meta.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
diff --git a/tools/testing/selftests/bpf/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 5e4aac74f9d0..5e4aac74f9d0 100644
--- a/tools/testing/selftests/bpf/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
index ef9e704be140..ef9e704be140 100644
--- a/tools/testing/selftests/bpf/test_xdp_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
index 365a7d2d9f5c..365a7d2d9f5c 100644
--- a/tools/testing/selftests/bpf/test_xdp_vlan.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
diff --git a/tools/testing/selftests/bpf/xdp_dummy.c b/tools/testing/selftests/bpf/progs/xdp_dummy.c
index 43b0ef1001ed..43b0ef1001ed 100644
--- a/tools/testing/selftests/bpf/xdp_dummy.c
+++ b/tools/testing/selftests/bpf/progs/xdp_dummy.c
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index ee723774015a..02d314383a9c 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -5879,15 +5879,17 @@ static void dump_btf_strings(const char *strs, __u32 len)
static int do_test_dedup(unsigned int test_num)
{
const struct btf_dedup_test *test = &dedup_tests[test_num - 1];
- int err = 0, i;
- __u32 test_nr_types, expect_nr_types, test_str_len, expect_str_len;
- void *raw_btf;
- unsigned int raw_btf_size;
+ __u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size;
+ const struct btf_header *test_hdr, *expect_hdr;
struct btf *test_btf = NULL, *expect_btf = NULL;
+ const void *test_btf_data, *expect_btf_data;
const char *ret_test_next_str, *ret_expect_next_str;
const char *test_strs, *expect_strs;
const char *test_str_cur, *test_str_end;
const char *expect_str_cur, *expect_str_end;
+ unsigned int raw_btf_size;
+ void *raw_btf;
+ int err = 0, i;
fprintf(stderr, "BTF dedup test[%u] (%s):", test_num, test->descr);
@@ -5924,23 +5926,34 @@ static int do_test_dedup(unsigned int test_num)
goto done;
}
- btf__get_strings(test_btf, &test_strs, &test_str_len);
- btf__get_strings(expect_btf, &expect_strs, &expect_str_len);
- if (CHECK(test_str_len != expect_str_len,
- "test_str_len:%u != expect_str_len:%u",
- test_str_len, expect_str_len)) {
+ test_btf_data = btf__get_raw_data(test_btf, &test_btf_size);
+ expect_btf_data = btf__get_raw_data(expect_btf, &expect_btf_size);
+ if (CHECK(test_btf_size != expect_btf_size,
+ "test_btf_size:%u != expect_btf_size:%u",
+ test_btf_size, expect_btf_size)) {
+ err = -1;
+ goto done;
+ }
+
+ test_hdr = test_btf_data;
+ test_strs = test_btf_data + test_hdr->str_off;
+ expect_hdr = expect_btf_data;
+ expect_strs = expect_btf_data + expect_hdr->str_off;
+ if (CHECK(test_hdr->str_len != expect_hdr->str_len,
+ "test_hdr->str_len:%u != expect_hdr->str_len:%u",
+ test_hdr->str_len, expect_hdr->str_len)) {
fprintf(stderr, "\ntest strings:\n");
- dump_btf_strings(test_strs, test_str_len);
+ dump_btf_strings(test_strs, test_hdr->str_len);
fprintf(stderr, "\nexpected strings:\n");
- dump_btf_strings(expect_strs, expect_str_len);
+ dump_btf_strings(expect_strs, expect_hdr->str_len);
err = -1;
goto done;
}
test_str_cur = test_strs;
- test_str_end = test_strs + test_str_len;
+ test_str_end = test_strs + test_hdr->str_len;
expect_str_cur = expect_strs;
- expect_str_end = expect_strs + expect_str_len;
+ expect_str_end = expect_strs + expect_hdr->str_len;
while (test_str_cur < test_str_end && expect_str_cur < expect_str_end) {
size_t test_len, expect_len;
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
new file mode 100755
index 000000000000..612632c1425f
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -0,0 +1,376 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Setup/topology:
+#
+# NS1 NS2 NS3
+# veth1 <---> veth2 veth3 <---> veth4 (the top route)
+# veth5 <---> veth6 veth7 <---> veth8 (the bottom route)
+#
+# each vethN gets the corresponding IPv[4|6]_N address
+#
+# IPv*_SRC = IPv*_1
+# IPv*_DST = IPv*_4
+#
+# every test pings from IPv*_SRC to IPv*_DST
+#
+# by default, routes are configured to allow packets to go
+# IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route)
+#
+# a GRE device is installed in NS3 with IPv*_GRE, and
+# NS1/NS2 are configured to route packets to IPv*_GRE via the
+# bottom (veth5..veth8) route
+#
+# Tests:
+#
+# 1. routes NS2->IPv*_DST are brought down, so the only way a ping
+# from IP*_SRC to IP*_DST can work is via IPv*_GRE
+#
+# 2a. in an egress test, a bpf LWT_XMIT program is installed on veth1
+# that encaps the packets with an IP/GRE header to route to IPv*_GRE
+#
+# ping: SRC->[encap at veth1:egress]->GRE:decap->DST
+# ping replies go DST->SRC directly
+#
+# 2b. in an ingress test, a bpf LWT_IN program is installed on veth2
+# that encaps the packets with an IP/GRE header to route to IPv*_GRE
+#
+# ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
+# ping replies go DST->SRC directly
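+#
+# for reference, the LWT/eBPF replacement route used in test 2a is
+# installed in NS1 roughly as:
+#   ip route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o \
+#       sec encap_gre dev veth1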
+
+if [[ $EUID -ne 0 ]]; then
+ echo "This script must be run as root"
+ echo "FAIL"
+ exit 1
+fi
+
+readonly NS1="ns1-$(mktemp -u XXXXXX)"
+readonly NS2="ns2-$(mktemp -u XXXXXX)"
+readonly NS3="ns3-$(mktemp -u XXXXXX)"
+
+readonly IPv4_1="172.16.1.100"
+readonly IPv4_2="172.16.2.100"
+readonly IPv4_3="172.16.3.100"
+readonly IPv4_4="172.16.4.100"
+readonly IPv4_5="172.16.5.100"
+readonly IPv4_6="172.16.6.100"
+readonly IPv4_7="172.16.7.100"
+readonly IPv4_8="172.16.8.100"
+readonly IPv4_GRE="172.16.16.100"
+
+readonly IPv4_SRC=$IPv4_1
+readonly IPv4_DST=$IPv4_4
+
+readonly IPv6_1="fb01::1"
+readonly IPv6_2="fb02::1"
+readonly IPv6_3="fb03::1"
+readonly IPv6_4="fb04::1"
+readonly IPv6_5="fb05::1"
+readonly IPv6_6="fb06::1"
+readonly IPv6_7="fb07::1"
+readonly IPv6_8="fb08::1"
+readonly IPv6_GRE="fb10::1"
+
+readonly IPv6_SRC=$IPv6_1
+readonly IPv6_DST=$IPv6_4
+
+TEST_STATUS=0
+TESTS_SUCCEEDED=0
+TESTS_FAILED=0
+
+process_test_results()
+{
+ if [[ "${TEST_STATUS}" -eq 0 ]] ; then
+ echo "PASS"
+ TESTS_SUCCEEDED=$((TESTS_SUCCEEDED+1))
+ else
+ echo "FAIL"
+ TESTS_FAILED=$((TESTS_FAILED+1))
+ fi
+}
+
+print_test_summary_and_exit()
+{
+ echo "passed tests: ${TESTS_SUCCEEDED}"
+ echo "failed tests: ${TESTS_FAILED}"
+ if [ "${TESTS_FAILED}" -eq "0" ] ; then
+ exit 0
+ else
+ exit 1
+ fi
+}
+
+setup()
+{
+ set -e # exit on error
+ TEST_STATUS=0
+
+ # create devices and namespaces
+ ip netns add "${NS1}"
+ ip netns add "${NS2}"
+ ip netns add "${NS3}"
+
+ ip link add veth1 type veth peer name veth2
+ ip link add veth3 type veth peer name veth4
+ ip link add veth5 type veth peer name veth6
+ ip link add veth7 type veth peer name veth8
+
+ ip netns exec ${NS2} sysctl -wq net.ipv4.ip_forward=1
+ ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.forwarding=1
+
+ ip link set veth1 netns ${NS1}
+ ip link set veth2 netns ${NS2}
+ ip link set veth3 netns ${NS2}
+ ip link set veth4 netns ${NS3}
+ ip link set veth5 netns ${NS1}
+ ip link set veth6 netns ${NS2}
+ ip link set veth7 netns ${NS2}
+ ip link set veth8 netns ${NS3}
+
+	# configure addresses: the top route (1-2-3-4)
+ ip -netns ${NS1} addr add ${IPv4_1}/24 dev veth1
+ ip -netns ${NS2} addr add ${IPv4_2}/24 dev veth2
+ ip -netns ${NS2} addr add ${IPv4_3}/24 dev veth3
+ ip -netns ${NS3} addr add ${IPv4_4}/24 dev veth4
+ ip -netns ${NS1} -6 addr add ${IPv6_1}/128 nodad dev veth1
+ ip -netns ${NS2} -6 addr add ${IPv6_2}/128 nodad dev veth2
+ ip -netns ${NS2} -6 addr add ${IPv6_3}/128 nodad dev veth3
+ ip -netns ${NS3} -6 addr add ${IPv6_4}/128 nodad dev veth4
+
+ # configure addresses: the bottom route (5-6-7-8)
+ ip -netns ${NS1} addr add ${IPv4_5}/24 dev veth5
+ ip -netns ${NS2} addr add ${IPv4_6}/24 dev veth6
+ ip -netns ${NS2} addr add ${IPv4_7}/24 dev veth7
+ ip -netns ${NS3} addr add ${IPv4_8}/24 dev veth8
+ ip -netns ${NS1} -6 addr add ${IPv6_5}/128 nodad dev veth5
+ ip -netns ${NS2} -6 addr add ${IPv6_6}/128 nodad dev veth6
+ ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7
+ ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8
+
+
+ ip -netns ${NS1} link set dev veth1 up
+ ip -netns ${NS2} link set dev veth2 up
+ ip -netns ${NS2} link set dev veth3 up
+ ip -netns ${NS3} link set dev veth4 up
+ ip -netns ${NS1} link set dev veth5 up
+ ip -netns ${NS2} link set dev veth6 up
+ ip -netns ${NS2} link set dev veth7 up
+ ip -netns ${NS3} link set dev veth8 up
+
+ # configure routes: IP*_SRC -> veth1/IP*_2 (= top route) default;
+ # the bottom route to specific bottom addresses
+
+ # NS1
+ # top route
+ ip -netns ${NS1} route add ${IPv4_2}/32 dev veth1
+ ip -netns ${NS1} route add default dev veth1 via ${IPv4_2} # go top by default
+ ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1
+ ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2} # go top by default
+ # bottom route
+ ip -netns ${NS1} route add ${IPv4_6}/32 dev veth5
+ ip -netns ${NS1} route add ${IPv4_7}/32 dev veth5 via ${IPv4_6}
+ ip -netns ${NS1} route add ${IPv4_8}/32 dev veth5 via ${IPv4_6}
+ ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5
+ ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6}
+ ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6}
+
+ # NS2
+ # top route
+ ip -netns ${NS2} route add ${IPv4_1}/32 dev veth2
+ ip -netns ${NS2} route add ${IPv4_4}/32 dev veth3
+ ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2
+ ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3
+ # bottom route
+ ip -netns ${NS2} route add ${IPv4_5}/32 dev veth6
+ ip -netns ${NS2} route add ${IPv4_8}/32 dev veth7
+ ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6
+ ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7
+
+ # NS3
+ # top route
+ ip -netns ${NS3} route add ${IPv4_3}/32 dev veth4
+ ip -netns ${NS3} route add ${IPv4_1}/32 dev veth4 via ${IPv4_3}
+ ip -netns ${NS3} route add ${IPv4_2}/32 dev veth4 via ${IPv4_3}
+ ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4
+ ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3}
+ ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3}
+ # bottom route
+ ip -netns ${NS3} route add ${IPv4_7}/32 dev veth8
+ ip -netns ${NS3} route add ${IPv4_5}/32 dev veth8 via ${IPv4_7}
+ ip -netns ${NS3} route add ${IPv4_6}/32 dev veth8 via ${IPv4_7}
+ ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8
+ ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7}
+ ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7}
+
+ # configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
+ ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
+ ip -netns ${NS3} link set gre_dev up
+ ip -netns ${NS3} addr add ${IPv4_GRE} nodad dev gre_dev
+ ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6}
+ ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8}
+
+
+ # configure IPv6 GRE device in NS3, and a route to it via the "bottom" route
+ ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255
+ ip -netns ${NS3} link set gre6_dev up
+ ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev
+ ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6}
+ ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8}
+
+ # rp_filter gets confused by what these tests are doing, so disable it
+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
+
+ sleep 1 # reduce flakiness
+ set +e
+}
+
+cleanup()
+{
+ ip netns del ${NS1} 2> /dev/null
+ ip netns del ${NS2} 2> /dev/null
+ ip netns del ${NS3} 2> /dev/null
+}
+
+trap cleanup EXIT
+
+remove_routes_to_gredev()
+{
+ ip -netns ${NS1} route del ${IPv4_GRE} dev veth5
+ ip -netns ${NS2} route del ${IPv4_GRE} dev veth7
+ ip -netns ${NS1} -6 route del ${IPv6_GRE}/128 dev veth5
+ ip -netns ${NS2} -6 route del ${IPv6_GRE}/128 dev veth7
+}
+
+add_unreachable_routes_to_gredev()
+{
+ ip -netns ${NS1} route add unreachable ${IPv4_GRE}/32
+ ip -netns ${NS2} route add unreachable ${IPv4_GRE}/32
+ ip -netns ${NS1} -6 route add unreachable ${IPv6_GRE}/128
+ ip -netns ${NS2} -6 route add unreachable ${IPv6_GRE}/128
+}
+
+test_ping()
+{
+	local -r PROTO=$1
+	local -r EXPECTED=$2
+ local RET=0
+
+ if [ "${PROTO}" == "IPv4" ] ; then
+ ip netns exec ${NS1} ping -c 1 -W 1 -I ${IPv4_SRC} ${IPv4_DST} 2>&1 > /dev/null
+ RET=$?
+ elif [ "${PROTO}" == "IPv6" ] ; then
+ ip netns exec ${NS1} ping6 -c 1 -W 6 -I ${IPv6_SRC} ${IPv6_DST} 2>&1 > /dev/null
+ RET=$?
+ else
+ echo " test_ping: unknown PROTO: ${PROTO}"
+ TEST_STATUS=1
+ fi
+
+ if [ "0" != "${RET}" ]; then
+ RET=1
+ fi
+
+ if [ "${EXPECTED}" != "${RET}" ] ; then
+ echo " test_ping failed: expected: ${EXPECTED}; got ${RET}"
+ TEST_STATUS=1
+ fi
+}
+
+test_egress()
+{
+	local -r ENCAP=$1
+ echo "starting egress ${ENCAP} encap test"
+ setup
+
+ # by default, pings work
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # remove NS2->DST routes, ping fails
+ ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3
+ ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # install replacement routes (LWT/eBPF), pings succeed
+ if [ "${ENCAP}" == "IPv4" ] ; then
+ ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
+ ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
+ elif [ "${ENCAP}" == "IPv6" ] ; then
+ ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
+ ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
+ else
+ echo " unknown encap ${ENCAP}"
+ TEST_STATUS=1
+ fi
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # a negative test: remove routes to GRE devices: ping fails
+ remove_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # another negative test
+ add_unreachable_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ cleanup
+ process_test_results
+}
+
+test_ingress()
+{
+	local -r ENCAP=$1
+ echo "starting ingress ${ENCAP} encap test"
+ setup
+
+ # need to wait a bit for IPv6 to autoconf, otherwise
+ # ping6 sometimes fails with "unable to bind to address"
+
+ # by default, pings work
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # remove NS2->DST routes, pings fail
+ ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3
+ ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # install replacement routes (LWT/eBPF), pings succeed
+ if [ "${ENCAP}" == "IPv4" ] ; then
+ ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
+ ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
+ elif [ "${ENCAP}" == "IPv6" ] ; then
+ ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
+ ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
+ else
+		echo "FAIL: unknown encap ${ENCAP}"
+		TEST_STATUS=1
+ fi
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # a negative test: remove routes to GRE devices: ping fails
+ remove_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # another negative test
+ add_unreachable_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ cleanup
+ process_test_results
+}
+
+test_egress IPv4
+test_egress IPv6
+test_ingress IPv4
+test_ingress IPv6
+
+print_test_summary_and_exit
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index 561ffb6d6433..fb679ac3d4b0 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -20,6 +20,7 @@
#define MAX_INSNS 512
char bpf_log_buf[BPF_LOG_BUF_SIZE];
+static bool verbose = false;
struct sock_test {
const char *descr;
@@ -325,6 +326,7 @@ static int load_sock_prog(const struct bpf_insn *prog,
enum bpf_attach_type attach_type)
{
struct bpf_load_program_attr attr;
+ int ret;
memset(&attr, 0, sizeof(struct bpf_load_program_attr));
attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
@@ -332,8 +334,13 @@ static int load_sock_prog(const struct bpf_insn *prog,
attr.insns = prog;
attr.insns_cnt = probe_prog_length(attr.insns);
attr.license = "GPL";
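+	/* log_level 2 requests the verifier's verbose log so that load
+	 * failures can be dumped below when verbose is set
+	 */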
+ attr.log_level = 2;
- return bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ ret = bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ if (verbose && ret < 0)
+ fprintf(stderr, "%s\n", bpf_log_buf);
+
+ return ret;
}
static int attach_sock_prog(int cgfd, int progfd,
diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c
new file mode 100644
index 000000000000..9bb58369b481
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_sock_fields.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "cgroup_helpers.h"
+
+enum bpf_array_idx {
+ SRV_IDX,
+ CLI_IDX,
+ __NR_BPF_ARRAY_IDX,
+};
+
+#define CHECK(condition, tag, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
+ printf(format); \
+ printf("\n"); \
+ exit(-1); \
+ } \
+})
+
+#define TEST_CGROUP "/test-bpf-sock-fields"
+#define DATA "Hello BPF!"
+#define DATA_LEN sizeof(DATA)
+
+static struct sockaddr_in6 srv_sa6, cli_sa6;
+static int linum_map_fd;
+static int addr_map_fd;
+static int tp_map_fd;
+static int sk_map_fd;
+static __u32 srv_idx = SRV_IDX;
+static __u32 cli_idx = CLI_IDX;
+
+static void init_loopback6(struct sockaddr_in6 *sa6)
+{
+ memset(sa6, 0, sizeof(*sa6));
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = in6addr_loopback;
+}
+
+static void print_sk(const struct bpf_sock *sk)
+{
+ char src_ip4[24], dst_ip4[24];
+ char src_ip6[64], dst_ip6[64];
+
+ inet_ntop(AF_INET, &sk->src_ip4, src_ip4, sizeof(src_ip4));
+ inet_ntop(AF_INET6, &sk->src_ip6, src_ip6, sizeof(src_ip6));
+ inet_ntop(AF_INET, &sk->dst_ip4, dst_ip4, sizeof(dst_ip4));
+ inet_ntop(AF_INET6, &sk->dst_ip6, dst_ip6, sizeof(dst_ip6));
+
+ printf("state:%u bound_dev_if:%u family:%u type:%u protocol:%u mark:%u priority:%u "
+ "src_ip4:%x(%s) src_ip6:%x:%x:%x:%x(%s) src_port:%u "
+ "dst_ip4:%x(%s) dst_ip6:%x:%x:%x:%x(%s) dst_port:%u\n",
+ sk->state, sk->bound_dev_if, sk->family, sk->type, sk->protocol,
+ sk->mark, sk->priority,
+ sk->src_ip4, src_ip4,
+ sk->src_ip6[0], sk->src_ip6[1], sk->src_ip6[2], sk->src_ip6[3],
+ src_ip6, sk->src_port,
+ sk->dst_ip4, dst_ip4,
+ sk->dst_ip6[0], sk->dst_ip6[1], sk->dst_ip6[2], sk->dst_ip6[3],
+ dst_ip6, ntohs(sk->dst_port));
+}
+
+static void print_tp(const struct bpf_tcp_sock *tp)
+{
+ printf("snd_cwnd:%u srtt_us:%u rtt_min:%u snd_ssthresh:%u rcv_nxt:%u "
+	       "snd_nxt:%u snd_una:%u mss_cache:%u ecn_flags:%u "
+ "rate_delivered:%u rate_interval_us:%u packets_out:%u "
+ "retrans_out:%u total_retrans:%u segs_in:%u data_segs_in:%u "
+ "segs_out:%u data_segs_out:%u lost_out:%u sacked_out:%u "
+ "bytes_received:%llu bytes_acked:%llu\n",
+ tp->snd_cwnd, tp->srtt_us, tp->rtt_min, tp->snd_ssthresh,
+ tp->rcv_nxt, tp->snd_nxt, tp->snd_una, tp->mss_cache,
+ tp->ecn_flags, tp->rate_delivered, tp->rate_interval_us,
+ tp->packets_out, tp->retrans_out, tp->total_retrans,
+ tp->segs_in, tp->data_segs_in, tp->segs_out,
+ tp->data_segs_out, tp->lost_out, tp->sacked_out,
+ tp->bytes_received, tp->bytes_acked);
+}
+
+static void check_result(void)
+{
+ struct bpf_tcp_sock srv_tp, cli_tp;
+ struct bpf_sock srv_sk, cli_sk;
+ __u32 linum, idx0 = 0;
+ int err;
+
+ err = bpf_map_lookup_elem(linum_map_fd, &idx0, &linum);
+ CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(sk_map_fd, &srv_idx, &srv_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &srv_idx)",
+ "err:%d errno:%d", err, errno);
+ err = bpf_map_lookup_elem(tp_map_fd, &srv_idx, &srv_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &srv_idx)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(sk_map_fd, &cli_idx, &cli_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &cli_idx)",
+ "err:%d errno:%d", err, errno);
+ err = bpf_map_lookup_elem(tp_map_fd, &cli_idx, &cli_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &cli_idx)",
+ "err:%d errno:%d", err, errno);
+
+ printf("srv_sk: ");
+ print_sk(&srv_sk);
+ printf("\n");
+
+ printf("cli_sk: ");
+ print_sk(&cli_sk);
+ printf("\n");
+
+ printf("srv_tp: ");
+ print_tp(&srv_tp);
+ printf("\n");
+
+ printf("cli_tp: ");
+ print_tp(&cli_tp);
+ printf("\n");
+
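+	/* Both snapshots must describe TCP/IPv6 loopback sockets that are
+	 * neither unset nor listening (state 10), with the expected
+	 * src/dst port pairing.
+	 */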
+ CHECK(srv_sk.state == 10 ||
+ !srv_sk.state ||
+ srv_sk.family != AF_INET6 ||
+ srv_sk.protocol != IPPROTO_TCP ||
+ memcmp(srv_sk.src_ip6, &in6addr_loopback,
+ sizeof(srv_sk.src_ip6)) ||
+ memcmp(srv_sk.dst_ip6, &in6addr_loopback,
+ sizeof(srv_sk.dst_ip6)) ||
+ srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
+ srv_sk.dst_port != cli_sa6.sin6_port,
+ "Unexpected srv_sk", "Check srv_sk output. linum:%u", linum);
+
+ CHECK(cli_sk.state == 10 ||
+ !cli_sk.state ||
+ cli_sk.family != AF_INET6 ||
+ cli_sk.protocol != IPPROTO_TCP ||
+ memcmp(cli_sk.src_ip6, &in6addr_loopback,
+ sizeof(cli_sk.src_ip6)) ||
+ memcmp(cli_sk.dst_ip6, &in6addr_loopback,
+ sizeof(cli_sk.dst_ip6)) ||
+ cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
+ cli_sk.dst_port != srv_sa6.sin6_port,
+ "Unexpected cli_sk", "Check cli_sk output. linum:%u", linum);
+
+ CHECK(srv_tp.data_segs_out != 1 ||
+ srv_tp.data_segs_in ||
+ srv_tp.snd_cwnd != 10 ||
+ srv_tp.total_retrans ||
+ srv_tp.bytes_acked != DATA_LEN,
+ "Unexpected srv_tp", "Check srv_tp output. linum:%u", linum);
+
+ CHECK(cli_tp.data_segs_out ||
+ cli_tp.data_segs_in != 1 ||
+ cli_tp.snd_cwnd != 10 ||
+ cli_tp.total_retrans ||
+ cli_tp.bytes_received != DATA_LEN,
+ "Unexpected cli_tp", "Check cli_tp output. linum:%u", linum);
+}
+
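+/* Establish an AF_INET6 loopback TCP connection, have the server send
+ * DATA to the client, then verify the socket snapshots the egress BPF
+ * program recorded along the way.
+ */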
+static void test(void)
+{
+ int listen_fd, cli_fd, accept_fd, epfd, err;
+ struct epoll_event ev;
+ socklen_t addrlen;
+
+ addrlen = sizeof(struct sockaddr_in6);
+ ev.events = EPOLLIN;
+
+ epfd = epoll_create(1);
+ CHECK(epfd == -1, "epoll_create()", "epfd:%d errno:%d", epfd, errno);
+
+ /* Prepare listen_fd */
+ listen_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
+ CHECK(listen_fd == -1, "socket()", "listen_fd:%d errno:%d",
+ listen_fd, errno);
+
+ init_loopback6(&srv_sa6);
+ err = bind(listen_fd, (struct sockaddr *)&srv_sa6, sizeof(srv_sa6));
+ CHECK(err, "bind(listen_fd)", "err:%d errno:%d", err, errno);
+
+ err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
+ CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d", err, errno);
+
+ err = listen(listen_fd, 1);
+ CHECK(err, "listen(listen_fd)", "err:%d errno:%d", err, errno);
+
+ /* Prepare cli_fd */
+ cli_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
+ CHECK(cli_fd == -1, "socket()", "cli_fd:%d errno:%d", cli_fd, errno);
+
+ init_loopback6(&cli_sa6);
+ err = bind(cli_fd, (struct sockaddr *)&cli_sa6, sizeof(cli_sa6));
+ CHECK(err, "bind(cli_fd)", "err:%d errno:%d", err, errno);
+
+ err = getsockname(cli_fd, (struct sockaddr *)&cli_sa6, &addrlen);
+ CHECK(err, "getsockname(cli_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Update addr_map with srv_sa6 and cli_sa6 */
+ err = bpf_map_update_elem(addr_map_fd, &srv_idx, &srv_sa6, 0);
+ CHECK(err, "map_update", "err:%d errno:%d", err, errno);
+
+ err = bpf_map_update_elem(addr_map_fd, &cli_idx, &cli_sa6, 0);
+ CHECK(err, "map_update", "err:%d errno:%d", err, errno);
+
+ /* Connect from cli_sa6 to srv_sa6 */
+ err = connect(cli_fd, (struct sockaddr *)&srv_sa6, addrlen);
+ printf("srv_sa6.sin6_port:%u cli_sa6.sin6_port:%u\n\n",
+ ntohs(srv_sa6.sin6_port), ntohs(cli_sa6.sin6_port));
+ CHECK(err && errno != EINPROGRESS,
+ "connect(cli_fd)", "err:%d errno:%d", err, errno);
+
+ ev.data.fd = listen_fd;
+ err = epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
+ CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, listen_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Accept the connection */
+ /* Have some timeout in accept(listen_fd). Just in case. */
+ err = epoll_wait(epfd, &ev, 1, 1000);
+ CHECK(err != 1 || ev.data.fd != listen_fd,
+ "epoll_wait(listen_fd)",
+ "err:%d errno:%d ev.data.fd:%d listen_fd:%d",
+ err, errno, ev.data.fd, listen_fd);
+
+ accept_fd = accept(listen_fd, NULL, NULL);
+ CHECK(accept_fd == -1, "accept(listen_fd)", "accept_fd:%d errno:%d",
+ accept_fd, errno);
+ close(listen_fd);
+
+ /* Send some data from accept_fd to cli_fd */
+ err = send(accept_fd, DATA, DATA_LEN, 0);
+ CHECK(err != DATA_LEN, "send(accept_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Have some timeout in recv(cli_fd). Just in case. */
+ ev.data.fd = cli_fd;
+ err = epoll_ctl(epfd, EPOLL_CTL_ADD, cli_fd, &ev);
+ CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, cli_fd)", "err:%d errno:%d",
+ err, errno);
+
+ err = epoll_wait(epfd, &ev, 1, 1000);
+ CHECK(err != 1 || ev.data.fd != cli_fd,
+ "epoll_wait(cli_fd)", "err:%d errno:%d ev.data.fd:%d cli_fd:%d",
+ err, errno, ev.data.fd, cli_fd);
+
+ err = recv(cli_fd, NULL, 0, MSG_TRUNC);
+ CHECK(err, "recv(cli_fd)", "err:%d errno:%d", err, errno);
+
+ close(epfd);
+ close(accept_fd);
+ close(cli_fd);
+
+ check_result();
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_prog_load_attr attr = {
+ .file = "test_sock_fields_kern.o",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .expected_attach_type = BPF_CGROUP_INET_EGRESS,
+ };
+ int cgroup_fd, prog_fd, err;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+
+ err = setup_cgroup_environment();
+ CHECK(err, "setup_cgroup_environment()", "err:%d errno:%d",
+ err, errno);
+
+ atexit(cleanup_cgroup_environment);
+
+ /* Create a cgroup, get fd, and join it */
+ cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
+ CHECK(cgroup_fd == -1, "create_and_get_cgroup()",
+ "cgroup_fd:%d errno:%d", cgroup_fd, errno);
+
+ err = join_cgroup(TEST_CGROUP);
+ CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno);
+
+ err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+ CHECK(err, "bpf_prog_load_xattr()", "err:%d", err);
+
+ err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
+	CHECK(err == -1, "bpf_prog_attach(BPF_CGROUP_INET_EGRESS)",
+	      "err:%d errno:%d", err, errno);
+ close(cgroup_fd);
+
+ map = bpf_object__find_map_by_name(obj, "addr_map");
+ CHECK(!map, "cannot find addr_map", "(null)");
+ addr_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "sock_result_map");
+ CHECK(!map, "cannot find sock_result_map", "(null)");
+ sk_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "tcp_sock_result_map");
+ CHECK(!map, "cannot find tcp_sock_result_map", "(null)");
+ tp_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "linum_map");
+ CHECK(!map, "cannot find linum_map", "(null)");
+ linum_map_fd = bpf_map__fd(map);
+
+ test();
+
+ bpf_object__close(obj);
+ cleanup_cgroup_environment();
+
+ printf("PASS\n");
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index dc2cc823df2b..3ed3593bd8b6 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -547,7 +547,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "cannot write into socket",
+ .errstr = "cannot write into sock",
.result = REJECT,
},
{
@@ -562,7 +562,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid bpf_sock access off=0 size=8",
+ .errstr = "invalid sock access off=0 size=8",
.result = REJECT,
},
{
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
new file mode 100644
index 000000000000..0ddfdf76aba5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -0,0 +1,384 @@
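+/* Each block below is a struct bpf_test initializer; test_verifier.c pulls
+ * the verifier/*.c fragments into its tests[] array, so an entry only needs
+ * the insns, the prog_type and the expected verdict/errstr.
+ */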
+{
+ "skb->sk: no NULL check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'sock_common_or_null'",
+},
+{
+ "skb->sk: sk->family [non fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, family)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "skb->sk: sk->type [fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock_common access",
+},
+{
+ "bpf_sk_fullsock(skb->sk): no !skb->sk check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "type=sock_common_or_null expected=sock_common",
+},
+{
+ "sk_fullsock(skb->sk): no NULL check on ret",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'sock_or_null'",
+},
+{
+ "sk_fullsock(skb->sk): sk->type [fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->family [non fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, family)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->state [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, state)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [load 2nd byte]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->type [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->protocol [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, protocol)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): beyond last field",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, state)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "bpf_tcp_sock(skb->sk): no !skb->sk check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "type=sock_common_or_null expected=sock_common",
+},
+{
+ "bpf_tcp_sock(skb->sk): no NULL check on ret",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'tcp_sock_or_null'",
+},
+{
+ "bpf_tcp_sock(skb->sk): tp->snd_cwnd",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_tcp_sock(skb->sk): tp->bytes_acked",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, bytes_acked)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_tcp_sock(skb->sk): beyond last field",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_tcp_sock, bytes_acked)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid tcp_sock access",
+},
+{
+ "bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_sk_release(skb->sk)",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "type=sock_common expected=sock",
+},
+{
+ "bpf_sk_release(bpf_sk_fullsock(skb->sk))",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "reference has not been acquired before",
+},
+{
+ "bpf_sk_release(bpf_tcp_sock(skb->sk))",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "type=tcp_sock expected=sock",
+},
diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
index 3e046695fad7..dbaf5be947b2 100644
--- a/tools/testing/selftests/bpf/verifier/unpriv.c
+++ b/tools/testing/selftests/bpf/verifier/unpriv.c
@@ -365,7 +365,7 @@
},
.result = REJECT,
//.errstr = "same insn cannot be used with different pointers",
- .errstr = "cannot write into socket",
+ .errstr = "cannot write into sock",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{