Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-map.rst | 8
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst | 11
-rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool | 7
-rw-r--r--  tools/bpf/bpftool/common.c | 9
-rw-r--r--  tools/bpf/bpftool/feature.c | 24
-rw-r--r--  tools/bpf/bpftool/iter.c | 2
-rw-r--r--  tools/bpf/bpftool/link.c | 16
-rw-r--r--  tools/bpf/bpftool/main.h | 2
-rw-r--r--  tools/bpf/bpftool/map.c | 19
-rw-r--r--  tools/bpf/bpftool/prog.c | 53
-rw-r--r--  tools/bpf/bpftool/struct_ops.c | 2
-rw-r--r--  tools/bpf/resolve_btfids/Makefile | 4
-rw-r--r--  tools/include/uapi/asm-generic/socket.h | 3
-rw-r--r--  tools/include/uapi/linux/bpf.h | 31
-rw-r--r--  tools/lib/bpf/bpf.c | 17
-rw-r--r--  tools/lib/bpf/bpf.h | 18
-rw-r--r--  tools/lib/bpf/bpf_helpers.h | 15
-rw-r--r--  tools/lib/bpf/bpf_tracing.h | 3
-rw-r--r--  tools/lib/bpf/btf.c | 2
-rw-r--r--  tools/lib/bpf/btf_dump.c | 22
-rw-r--r--  tools/lib/bpf/gen_loader.c | 14
-rw-r--r--  tools/lib/bpf/libbpf.c | 154
-rw-r--r--  tools/lib/bpf/libbpf.h | 18
-rw-r--r--  tools/lib/bpf/libbpf.map | 5
-rw-r--r--  tools/lib/bpf/libbpf_probes.c | 2
-rw-r--r--  tools/lib/bpf/libbpf_version.h | 2
-rw-r--r--  tools/lib/bpf/usdt.c | 5
-rw-r--r--  tools/net/ynl/Makefile | 19
-rw-r--r--  tools/net/ynl/Makefile.deps | 20
-rw-r--r--  tools/net/ynl/generated/Makefile | 50
-rw-r--r--  tools/net/ynl/generated/devlink-user.c | 721
-rw-r--r--  tools/net/ynl/generated/devlink-user.h | 210
-rw-r--r--  tools/net/ynl/generated/ethtool-user.c | 6353
-rw-r--r--  tools/net/ynl/generated/ethtool-user.h | 5531
-rw-r--r--  tools/net/ynl/generated/fou-user.c | 328
-rw-r--r--  tools/net/ynl/generated/fou-user.h | 337
-rw-r--r--  tools/net/ynl/generated/handshake-user.c | 331
-rw-r--r--  tools/net/ynl/generated/handshake-user.h | 145
-rw-r--r--  tools/net/ynl/generated/netdev-user.c | 200
-rw-r--r--  tools/net/ynl/generated/netdev-user.h | 85
-rw-r--r--  tools/net/ynl/lib/Makefile | 28
-rw-r--r--  tools/net/ynl/lib/nlspec.py | 46
-rw-r--r--  tools/net/ynl/lib/ynl.c | 901
-rw-r--r--  tools/net/ynl/lib/ynl.h | 237
-rw-r--r--  tools/net/ynl/lib/ynl.py | 137
-rw-r--r--  tools/net/ynl/samples/.gitignore | 3
-rw-r--r--  tools/net/ynl/samples/Makefile | 30
-rw-r--r--  tools/net/ynl/samples/devlink.c | 60
-rw-r--r--  tools/net/ynl/samples/ethtool.c | 65
-rw-r--r--  tools/net/ynl/samples/netdev.c | 108
-rwxr-xr-x  tools/net/ynl/ynl-gen-c.py | 745
-rwxr-xr-x  tools/net/ynl/ynl-regen.sh | 6
-rw-r--r--  tools/perf/trace/beauty/include/linux/socket.h | 1
-rw-r--r--  tools/perf/trace/beauty/msg_flags.c | 6
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST.aarch64 | 83
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST.s390x | 1
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 3
-rw-r--r--  tools/testing/selftests/bpf/bench.c | 15
-rw-r--r--  tools/testing/selftests/bpf/bench.h | 1
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c | 14
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c | 10
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c | 10
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_bpf_loop.c | 10
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_count.c | 14
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_local_storage.c | 12
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_local_storage_create.c | 8
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c | 10
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_rename.c | 15
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_ringbufs.c | 2
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_strncmp.c | 11
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_trigger.c | 21
-rwxr-xr-x  tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh | 26
-rw-r--r--  tools/testing/selftests/bpf/bpf_kfuncs.h | 6
-rw-r--r--  tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 182
-rw-r--r--  tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h | 107
-rw-r--r--  tools/testing/selftests/bpf/config | 4
-rw-r--r--  tools/testing/selftests/bpf/network_helpers.c | 23
-rw-r--r--  tools/testing/selftests/bpf/network_helpers.h | 1
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/arg_parsing.c | 68
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c | 34
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c | 268
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf.c | 40
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c | 20
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/check_mtu.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cpumask.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/dynptr.c | 8
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fib_lookup.c | 61
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/global_map_resize.c | 227
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/module_attach.c | 12
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/netcnt.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sock_destroy.c | 221
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockopt.c | 100
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c | 59
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockopt_multi.c | 108
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c | 53
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/verifier.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c | 312
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_bonding.c | 121
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_misc.h | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/cb_refs.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c | 13
-rw-r--r--  tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c | 17
-rw-r--r--  tools/testing/selftests/bpf/progs/cpumask_common.h | 6
-rw-r--r--  tools/testing/selftests/bpf/progs/cpumask_success.c | 64
-rw-r--r--  tools/testing/selftests/bpf/progs/dynptr_fail.c | 308
-rw-r--r--  tools/testing/selftests/bpf/progs/dynptr_success.c | 337
-rw-r--r--  tools/testing/selftests/bpf/progs/iters.c | 26
-rw-r--r--  tools/testing/selftests/bpf/progs/jit_probe_mem.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/kfunc_call_destructive.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/kfunc_call_fail.c | 9
-rw-r--r--  tools/testing/selftests/bpf/progs/kfunc_call_race.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/kfunc_call_test.c | 17
-rw-r--r--  tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c | 9
-rw-r--r--  tools/testing/selftests/bpf/progs/local_kptr_stash.c | 5
-rw-r--r--  tools/testing/selftests/bpf/progs/map_kptr.c | 5
-rw-r--r--  tools/testing/selftests/bpf/progs/map_kptr_fail.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/refcounted_kptr.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/sock_destroy_prog.c | 145
-rw-r--r--  tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c | 22
-rw-r--r--  tools/testing/selftests/bpf/progs/sockopt_inherit.c | 18
-rw-r--r--  tools/testing/selftests/bpf/progs/sockopt_multi.c | 26
-rw-r--r--  tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c | 10
-rw-r--r--  tools/testing/selftests/bpf/progs/sockopt_sk.c | 25
-rw-r--r--  tools/testing/selftests/bpf/progs/test_global_func1.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_global_map_resize.c | 58
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sock_fields.c | 5
-rw-r--r--  tools/testing/selftests/bpf/progs/test_task_under_cgroup.c | 51
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_dynptr.c | 1
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_scalar_ids.c | 659
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_subprog_precision.c | 536
-rw-r--r--  tools/testing/selftests/bpf/progs/vrf_socket_lookup.c | 89
-rw-r--r--  tools/testing/selftests/bpf/progs/xdp_hw_metadata.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 113
-rw-r--r--  tools/testing/selftests/bpf/test_progs.h | 1
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 194
-rwxr-xr-x  tools/testing/selftests/bpf/test_xsk.sh | 10
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.c | 268
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.h | 12
-rw-r--r--  tools/testing/selftests/bpf/verifier/precise.c | 143
-rw-r--r--  tools/testing/selftests/bpf/veristat.c | 9
-rw-r--r--  tools/testing/selftests/bpf/xdp_hw_metadata.c | 47
-rw-r--r--  tools/testing/selftests/bpf/xdp_metadata.h | 1
-rw-r--r--  tools/testing/selftests/bpf/xsk.h | 5
-rw-r--r--  tools/testing/selftests/bpf/xskxceiver.c | 771
-rw-r--r--  tools/testing/selftests/bpf/xskxceiver.h | 31
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh | 1
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/egress_vid_classification.sh | 5
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/extack.sh | 24
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/ingress_rif_conf_1d.sh | 5
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh | 1
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/one_armed_router.sh | 3
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/q_in_q_veto.sh | 8
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh | 1
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh | 8
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh | 2
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/spectrum/q_in_vni_veto.sh | 1
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/vxlan.sh | 41
-rw-r--r--  tools/testing/selftests/net/.gitignore | 1
-rw-r--r--  tools/testing/selftests/net/Makefile | 1
-rw-r--r--  tools/testing/selftests/net/af_unix/Makefile | 3
-rw-r--r--  tools/testing/selftests/net/af_unix/scm_pidfd.c | 430
-rwxr-xr-x  tools/testing/selftests/net/fcnal-test.sh | 87
-rw-r--r--  tools/testing/selftests/net/forwarding/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/net/forwarding/dual_vxlan_bridge.sh | 1
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_bound.sh | 1
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh | 3
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh | 3
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh | 3
-rw-r--r--  tools/testing/selftests/net/forwarding/mirror_topo_lib.sh | 1
-rwxr-xr-x  tools/testing/selftests/net/forwarding/pedit_dsfield.sh | 4
-rwxr-xr-x  tools/testing/selftests/net/forwarding/q_in_vni.sh | 1
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router_bridge.sh | 3
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router_bridge_vlan.sh | 24
-rwxr-xr-x  tools/testing/selftests/net/forwarding/skbedit_priority.sh | 4
-rwxr-xr-x  tools/testing/selftests/net/forwarding/tc_flower_cfm.sh | 206
-rwxr-xr-x  tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh | 350
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_connect.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh | 741
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_sockopt.c | 120
-rw-r--r--  tools/testing/selftests/net/nettest.c | 46
-rwxr-xr-x  tools/testing/selftests/net/rtnetlink.sh | 1
-rwxr-xr-x  tools/testing/selftests/net/test_vxlan_nolocalbypass.sh | 240
-rw-r--r--  tools/testing/selftests/net/tls.c | 131
-rw-r--r--  tools/testing/selftests/ptp/testptp.c | 29
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/infra/filter.json | 25
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json | 25
189 files changed, 24252 insertions, 1928 deletions
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 11250c4734fe..3b7ba037af95 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -28,7 +28,7 @@ MAP COMMANDS
| **bpftool** **map** { **show** | **list** } [*MAP*]
| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \
| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] \
-| [**dev** *NAME*]
+| [**offload_dev** *NAME*]
| **bpftool** **map dump** *MAP*
| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
| **bpftool** **map lookup** *MAP* [**key** *DATA*]
@@ -73,7 +73,7 @@ DESCRIPTION
maps. On such kernels bpftool will automatically emit this
information as well.
- **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] [**dev** *NAME*]
+ **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] [**offload_dev** *NAME*]
Create a new map with given parameters and pin it to *bpffs*
as *FILE*.
@@ -86,8 +86,8 @@ DESCRIPTION
kernel needs it to collect metadata related to the inner maps
that the new map will work with.
- Keyword **dev** expects a network interface name, and is used
- to request hardware offload for the map.
+ Keyword **offload_dev** expects a network interface name,
+ and is used to request hardware offload for the map.
**bpftool map dump** *MAP*
Dump all entries in a given *MAP*. In case of **name**,
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 9443c524bb76..dcae81bd27ed 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -31,7 +31,7 @@ PROG COMMANDS
| **bpftool** **prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }]
| **bpftool** **prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }]
| **bpftool** **prog pin** *PROG* *FILE*
-| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
+| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** { **idx** *IDX* | **name** *NAME* } *MAP*] [{ **offload_dev** | **xdpmeta_dev** } *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog tracelog**
@@ -129,7 +129,7 @@ DESCRIPTION
contain a dot character ('.'), which is reserved for future
extensions of *bpffs*.
- **bpftool prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
+ **bpftool prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** { **idx** *IDX* | **name** *NAME* } *MAP*] [{ **offload_dev** | **xdpmeta_dev** } *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
Load bpf program(s) from binary *OBJ* and pin as *PATH*.
**bpftool prog load** pins only the first program from the
*OBJ* as *PATH*. **bpftool prog loadall** pins all programs
@@ -143,8 +143,11 @@ DESCRIPTION
to be replaced in the ELF file counting from 0, while *NAME*
allows to replace a map by name. *MAP* specifies the map to
use, referring to it by **id** or through a **pinned** file.
- If **dev** *NAME* is specified program will be loaded onto
- given networking device (offload).
+ If **offload_dev** *NAME* is specified program will be loaded
+ onto given networking device (offload).
+ If **xdpmeta_dev** *NAME* is specified program will become
+ device-bound without offloading, this facilitates access
+ to XDP metadata.
Optional **pinmaps** argument can be provided to pin all
maps under *MAP_DIR* directory.
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index e7234d1a5306..085bf18f3659 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -278,7 +278,7 @@ _bpftool()
_bpftool_get_prog_tags
return 0
;;
- dev)
+ dev|offload_dev|xdpmeta_dev)
_sysfs_get_netdevs
return 0
;;
@@ -508,7 +508,8 @@ _bpftool()
;;
*)
COMPREPLY=( $( compgen -W "map" -- "$cur" ) )
- _bpftool_once_attr 'type dev pinmaps autoattach'
+ _bpftool_once_attr 'type pinmaps autoattach'
+ _bpftool_one_of_list 'offload_dev xdpmeta_dev'
return 0
;;
esac
@@ -733,7 +734,7 @@ _bpftool()
esac
;;
*)
- _bpftool_once_attr 'type key value entries name flags dev'
+ _bpftool_once_attr 'type key value entries name flags offload_dev'
if _bpftool_search_list 'array_of_maps' 'hash_of_maps'; then
_bpftool_once_attr 'inner_map'
fi
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 1360c82ae732..cc6e6aae2447 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -68,7 +68,7 @@ void p_info(const char *fmt, ...)
va_end(ap);
}
-static bool is_bpffs(char *path)
+static bool is_bpffs(const char *path)
{
struct statfs st_fs;
@@ -244,13 +244,16 @@ int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
return fd;
}
-int mount_bpffs_for_pin(const char *name)
+int mount_bpffs_for_pin(const char *name, bool is_dir)
{
char err_str[ERR_MAX_LEN];
char *file;
char *dir;
int err = 0;
+ if (is_dir && is_bpffs(name))
+ return err;
+
file = malloc(strlen(name) + 1);
if (!file) {
p_err("mem alloc failed");
@@ -286,7 +289,7 @@ int do_pin_fd(int fd, const char *name)
{
int err;
- err = mount_bpffs_for_pin(name);
+ err = mount_bpffs_for_pin(name, false);
if (err)
return err;
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index da16e6a27ccc..0675d6a46413 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -167,12 +167,12 @@ static int get_vendor_id(int ifindex)
return strtol(buf, NULL, 0);
}
-static int read_procfs(const char *path)
+static long read_procfs(const char *path)
{
char *endptr, *line = NULL;
size_t len = 0;
FILE *fd;
- int res;
+ long res;
fd = fopen(path, "r");
if (!fd)
@@ -194,7 +194,7 @@ static int read_procfs(const char *path)
static void probe_unprivileged_disabled(void)
{
- int res;
+ long res;
/* No support for C-style ouptut */
@@ -216,14 +216,14 @@ static void probe_unprivileged_disabled(void)
printf("Unable to retrieve required privileges for bpf() syscall\n");
break;
default:
- printf("bpf() syscall restriction has unknown value %d\n", res);
+ printf("bpf() syscall restriction has unknown value %ld\n", res);
}
}
}
static void probe_jit_enable(void)
{
- int res;
+ long res;
/* No support for C-style ouptut */
@@ -245,7 +245,7 @@ static void probe_jit_enable(void)
printf("Unable to retrieve JIT-compiler status\n");
break;
default:
- printf("JIT-compiler status has unknown value %d\n",
+ printf("JIT-compiler status has unknown value %ld\n",
res);
}
}
@@ -253,7 +253,7 @@ static void probe_jit_enable(void)
static void probe_jit_harden(void)
{
- int res;
+ long res;
/* No support for C-style ouptut */
@@ -275,7 +275,7 @@ static void probe_jit_harden(void)
printf("Unable to retrieve JIT hardening status\n");
break;
default:
- printf("JIT hardening status has unknown value %d\n",
+ printf("JIT hardening status has unknown value %ld\n",
res);
}
}
@@ -283,7 +283,7 @@ static void probe_jit_harden(void)
static void probe_jit_kallsyms(void)
{
- int res;
+ long res;
/* No support for C-style ouptut */
@@ -302,14 +302,14 @@ static void probe_jit_kallsyms(void)
printf("Unable to retrieve JIT kallsyms export status\n");
break;
default:
- printf("JIT kallsyms exports status has unknown value %d\n", res);
+ printf("JIT kallsyms exports status has unknown value %ld\n", res);
}
}
}
static void probe_jit_limit(void)
{
- int res;
+ long res;
/* No support for C-style ouptut */
@@ -322,7 +322,7 @@ static void probe_jit_limit(void)
printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n");
break;
default:
- printf("Global memory limit for JIT compiler for unprivileged users is %d bytes\n", res);
+ printf("Global memory limit for JIT compiler for unprivileged users is %ld bytes\n", res);
}
}
}
diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c
index 9a1d2365a297..6b0e5202ca7a 100644
--- a/tools/bpf/bpftool/iter.c
+++ b/tools/bpf/bpftool/iter.c
@@ -76,7 +76,7 @@ static int do_pin(int argc, char **argv)
goto close_obj;
}
- err = mount_bpffs_for_pin(path);
+ err = mount_bpffs_for_pin(path, false);
if (err)
goto close_link;
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index d98dbc50cf4c..2d786072ed0d 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -195,6 +195,8 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
show_link_attach_type_json(info->tracing.attach_type,
json_wtr);
+ jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id);
+ jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id);
break;
case BPF_LINK_TYPE_CGROUP:
jsonw_lluint_field(json_wtr, "cgroup_id",
@@ -212,7 +214,10 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
case BPF_LINK_TYPE_NETFILTER:
netfilter_dump_json(info, json_wtr);
break;
-
+ case BPF_LINK_TYPE_STRUCT_OPS:
+ jsonw_uint_field(json_wtr, "map_id",
+ info->struct_ops.map_id);
+ break;
default:
break;
}
@@ -245,7 +250,10 @@ static void show_link_header_plain(struct bpf_link_info *info)
else
printf("type %u ", info->type);
- printf("prog %u ", info->prog_id);
+ if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
+ printf("map %u ", info->struct_ops.map_id);
+ else
+ printf("prog %u ", info->prog_id);
}
static void show_link_attach_type_plain(__u32 attach_type)
@@ -369,6 +377,10 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
printf("\n\tprog_type %u ", prog_info.type);
show_link_attach_type_plain(info->tracing.attach_type);
+ if (info->tracing.target_obj_id || info->tracing.target_btf_id)
+ printf("\n\ttarget_obj_id %u target_btf_id %u ",
+ info->tracing.target_obj_id,
+ info->tracing.target_btf_id);
break;
case BPF_LINK_TYPE_CGROUP:
printf("\n\tcgroup_id %zu ", (size_t)info->cgroup.cgroup_id);
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index a49534d7eafa..b8bb08d10dec 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -142,7 +142,7 @@ const char *get_fd_type_name(enum bpf_obj_type type);
char *get_fdinfo(int fd, const char *key);
int open_obj_pinned(const char *path, bool quiet);
int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type);
-int mount_bpffs_for_pin(const char *name);
+int mount_bpffs_for_pin(const char *name, bool is_dir);
int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
int do_pin_fd(int fd, const char *name);
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index aaeb8939e137..f98f7bbea2b1 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -139,6 +139,9 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
print_hex_data_json(key, info->key_size);
jsonw_name(json_wtr, "value");
print_hex_data_json(value, info->value_size);
+ if (map_is_map_of_maps(info->type))
+ jsonw_uint_field(json_wtr, "inner_map_id",
+ *(unsigned int *)value);
if (btf) {
struct btf_dumper d = {
.btf = btf,
@@ -259,8 +262,13 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
}
if (info->value_size) {
- printf("value:%c", break_names ? '\n' : ' ');
- fprint_hex(stdout, value, info->value_size, " ");
+ if (map_is_map_of_maps(info->type)) {
+ printf("inner_map_id:%c", break_names ? '\n' : ' ');
+ printf("%u ", *(unsigned int *)value);
+ } else {
+ printf("value:%c", break_names ? '\n' : ' ');
+ fprint_hex(stdout, value, info->value_size, " ");
+ }
}
printf("\n");
@@ -1279,6 +1287,11 @@ static int do_create(int argc, char **argv)
"flags"))
goto exit;
} else if (is_prefix(*argv, "dev")) {
+ p_info("Warning: 'bpftool map create [...] dev <ifname>' syntax is deprecated.\n"
+ "Going further, please use 'offload_dev <ifname>' to request hardware offload for the map.");
+ goto offload_dev;
+ } else if (is_prefix(*argv, "offload_dev")) {
+offload_dev:
NEXT_ARG();
if (attr.map_ifindex) {
@@ -1423,7 +1436,7 @@ static int do_help(int argc, char **argv)
"Usage: %1$s %2$s { show | list } [MAP]\n"
" %1$s %2$s create FILE type TYPE key KEY_SIZE value VALUE_SIZE \\\n"
" entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
- " [inner_map MAP] [dev NAME]\n"
+ " [inner_map MAP] [offload_dev NAME]\n"
" %1$s %2$s dump MAP\n"
" %1$s %2$s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
" %1$s %2$s lookup MAP [key DATA]\n"
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 91b6075b2db3..8443a149dd17 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1517,12 +1517,13 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
struct bpf_program *prog = NULL, *pos;
unsigned int old_map_fds = 0;
const char *pinmaps = NULL;
+ __u32 xdpmeta_ifindex = 0;
+ __u32 offload_ifindex = 0;
bool auto_attach = false;
struct bpf_object *obj;
struct bpf_map *map;
const char *pinfile;
unsigned int i, j;
- __u32 ifindex = 0;
const char *file;
int idx, err;
@@ -1614,17 +1615,46 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
map_replace[old_map_fds].fd = fd;
old_map_fds++;
} else if (is_prefix(*argv, "dev")) {
+ p_info("Warning: 'bpftool prog load [...] dev <ifname>' syntax is deprecated.\n"
+ "Going further, please use 'offload_dev <ifname>' to offload program to device.\n"
+ "For applications using XDP hints only, use 'xdpmeta_dev <ifname>'.");
+ goto offload_dev;
+ } else if (is_prefix(*argv, "offload_dev")) {
+offload_dev:
NEXT_ARG();
- if (ifindex) {
- p_err("offload device already specified");
+ if (offload_ifindex) {
+ p_err("offload_dev already specified");
+ goto err_free_reuse_maps;
+ } else if (xdpmeta_ifindex) {
+ p_err("xdpmeta_dev and offload_dev are mutually exclusive");
+ goto err_free_reuse_maps;
+ }
+ if (!REQ_ARGS(1))
+ goto err_free_reuse_maps;
+
+ offload_ifindex = if_nametoindex(*argv);
+ if (!offload_ifindex) {
+ p_err("unrecognized netdevice '%s': %s",
+ *argv, strerror(errno));
+ goto err_free_reuse_maps;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "xdpmeta_dev")) {
+ NEXT_ARG();
+
+ if (xdpmeta_ifindex) {
+ p_err("xdpmeta_dev already specified");
+ goto err_free_reuse_maps;
+ } else if (offload_ifindex) {
+ p_err("xdpmeta_dev and offload_dev are mutually exclusive");
goto err_free_reuse_maps;
}
if (!REQ_ARGS(1))
goto err_free_reuse_maps;
- ifindex = if_nametoindex(*argv);
- if (!ifindex) {
+ xdpmeta_ifindex = if_nametoindex(*argv);
+ if (!xdpmeta_ifindex) {
p_err("unrecognized netdevice '%s': %s",
*argv, strerror(errno));
goto err_free_reuse_maps;
@@ -1671,7 +1701,12 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
goto err_close_obj;
}
- bpf_program__set_ifindex(pos, ifindex);
+ if (prog_type == BPF_PROG_TYPE_XDP && xdpmeta_ifindex) {
+ bpf_program__set_flags(pos, BPF_F_XDP_DEV_BOUND_ONLY);
+ bpf_program__set_ifindex(pos, xdpmeta_ifindex);
+ } else {
+ bpf_program__set_ifindex(pos, offload_ifindex);
+ }
if (bpf_program__type(pos) != prog_type)
bpf_program__set_type(pos, prog_type);
bpf_program__set_expected_attach_type(pos, expected_attach_type);
@@ -1709,7 +1744,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
idx = 0;
bpf_object__for_each_map(map, obj) {
if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
- bpf_map__set_ifindex(map, ifindex);
+ bpf_map__set_ifindex(map, offload_ifindex);
if (j < old_map_fds && idx == map_replace[j].idx) {
err = bpf_map__reuse_fd(map, map_replace[j++].fd);
@@ -1739,7 +1774,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
goto err_close_obj;
}
- err = mount_bpffs_for_pin(pinfile);
+ err = mount_bpffs_for_pin(pinfile, !first_prog_only);
if (err)
goto err_close_obj;
@@ -2416,7 +2451,7 @@ static int do_help(int argc, char **argv)
" %1$s %2$s dump jited PROG [{ file FILE | [opcodes] [linum] }]\n"
" %1$s %2$s pin PROG FILE\n"
" %1$s %2$s { load | loadall } OBJ PATH \\\n"
- " [type TYPE] [dev NAME] \\\n"
+ " [type TYPE] [{ offload_dev | xdpmeta_dev } NAME] \\\n"
" [map { idx IDX | name NAME } MAP]\\\n"
" [pinmaps MAP_DIR]\n"
" [autoattach]\n"
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
index 57c3da70aa31..3ebc9fe91e0e 100644
--- a/tools/bpf/bpftool/struct_ops.c
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -509,7 +509,7 @@ static int do_register(int argc, char **argv)
if (argc == 1)
linkdir = GET_ARG();
- if (linkdir && mount_bpffs_for_pin(linkdir)) {
+ if (linkdir && mount_bpffs_for_pin(linkdir, true)) {
p_err("can't mount bpffs for pinning");
return -1;
}
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index ac548a7baa73..4b8079f294f6 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -67,7 +67,7 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OU
LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
-HOSTCFLAGS += -g \
+HOSTCFLAGS_resolve_btfids += -g \
-I$(srctree)/tools/include \
-I$(srctree)/tools/include/uapi \
-I$(LIBBPF_INCLUDE) \
@@ -76,7 +76,7 @@ HOSTCFLAGS += -g \
LIBS = $(LIBELF_LIBS) -lz
-export srctree OUTPUT HOSTCFLAGS Q HOSTCC HOSTLD HOSTAR
+export srctree OUTPUT HOSTCFLAGS_resolve_btfids Q HOSTCC HOSTLD HOSTAR
include $(srctree)/tools/build/Makefile.include
$(BINARY_IN): fixdep FORCE prepare | $(OUTPUT)
diff --git a/tools/include/uapi/asm-generic/socket.h b/tools/include/uapi/asm-generic/socket.h
index 8756df13be50..54d9c8bf7c55 100644
--- a/tools/include/uapi/asm-generic/socket.h
+++ b/tools/include/uapi/asm-generic/socket.h
@@ -121,6 +121,9 @@
#define SO_RCVMARK 75
+#define SO_PASSPIDFD 76
+#define SO_PEERPIDFD 77
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
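
[Editor's note] A minimal userspace sketch (assumed usage, not part of this patch) of how the new AF_UNIX socket options might be exercised: SO_PEERPIDFD returns a pidfd for the connected peer via getsockopt(), while SO_PASSPIDFD requests SCM_PIDFD control messages on received data.

#include <errno.h>
#include <sys/socket.h>

#ifndef SO_PEERPIDFD
#define SO_PEERPIDFD 77		/* matches the define added above */
#endif

/* Return a pidfd referring to the peer of a connected AF_UNIX socket,
 * or a negative errno (e.g. -ENOPROTOOPT on kernels without support).
 */
static int get_peer_pidfd(int sock)
{
	int pidfd = -1;
	socklen_t len = sizeof(pidfd);

	if (getsockopt(sock, SOL_SOCKET, SO_PEERPIDFD, &pidfd, &len))
		return -errno;
	return pidfd;	/* caller close()s it when done */
}
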
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index c994ff5b157c..60a9d59beeab 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1273,6 +1273,9 @@ enum {
/* Create a map that will be registered/unregesitered by the backed bpf_link */
BPF_F_LINK = (1U << 13),
+
+/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
+ BPF_F_PATH_FD = (1U << 14),
};
/* Flags for BPF_PROG_QUERY. */
@@ -1421,6 +1424,13 @@ union bpf_attr {
__aligned_u64 pathname;
__u32 bpf_fd;
__u32 file_flags;
+ /* Same as dirfd in openat() syscall; see openat(2)
+ * manpage for details of path FD and pathname semantics;
+ * path_fd should accompanied by BPF_F_PATH_FD flag set in
+ * file_flags field, otherwise it should be set to zero;
+ * if BPF_F_PATH_FD flag is not set, AT_FDCWD is assumed.
+ */
+ __s32 path_fd;
};
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
@@ -3168,6 +3178,10 @@ union bpf_attr {
* **BPF_FIB_LOOKUP_DIRECT**
* Do a direct table lookup vs full lookup using FIB
* rules.
+ * **BPF_FIB_LOOKUP_TBID**
+ * Used with BPF_FIB_LOOKUP_DIRECT.
+ * Use the routing table ID present in *params*->tbid
+ * for the fib lookup.
* **BPF_FIB_LOOKUP_OUTPUT**
* Perform lookup from an egress perspective (default is
* ingress).
@@ -6822,6 +6836,7 @@ enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
+ BPF_FIB_LOOKUP_TBID = (1U << 3),
};
enum {
@@ -6882,9 +6897,19 @@ struct bpf_fib_lookup {
__u32 ipv6_dst[4]; /* in6_addr; network order */
};
- /* output */
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
+ union {
+ struct {
+ /* output */
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ };
+ /* input: when accompanied with the
+ * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a
+ * specific routing table to use for the fib lookup.
+ */
+ __u32 tbid;
+ };
+
__u8 smac[6]; /* ETH_ALEN */
__u8 dmac[6]; /* ETH_ALEN */
};
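
[Editor's note] A BPF-side sketch (assumed program layout, not taken from this patch) of how the new BPF_FIB_LOOKUP_TBID flag and tbid field might be used to pin the lookup to one routing table:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#ifndef AF_INET
#define AF_INET 2
#endif

SEC("xdp")
int fib_lookup_tbid(struct xdp_md *ctx)
{
	struct bpf_fib_lookup params = {};
	long rc;

	params.family = AF_INET;
	params.ifindex = ctx->ingress_ifindex;
	/* ipv4_src/ipv4_dst would normally be filled from the parsed packet */
	params.tbid = 100;	/* assumed table id, e.g. a VRF's table */

	rc = bpf_fib_lookup(ctx, &params, sizeof(params),
			    BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID);
	/* on success, params.smac/params.dmac hold the next-hop addresses;
	 * the drop-on-miss policy below is purely illustrative
	 */
	return rc == BPF_FIB_LKUP_RET_SUCCESS ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";
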
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 128ac723c4ea..ed86b37d8024 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -572,20 +572,30 @@ int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *co
(void *)keys, (void *)values, count, opts);
}
-int bpf_obj_pin(int fd, const char *pathname)
+int bpf_obj_pin_opts(int fd, const char *pathname, const struct bpf_obj_pin_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
+ const size_t attr_sz = offsetofend(union bpf_attr, path_fd);
union bpf_attr attr;
int ret;
+ if (!OPTS_VALID(opts, bpf_obj_pin_opts))
+ return libbpf_err(-EINVAL);
+
memset(&attr, 0, attr_sz);
+ attr.path_fd = OPTS_GET(opts, path_fd, 0);
attr.pathname = ptr_to_u64((void *)pathname);
+ attr.file_flags = OPTS_GET(opts, file_flags, 0);
attr.bpf_fd = fd;
ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz);
return libbpf_err_errno(ret);
}
+int bpf_obj_pin(int fd, const char *pathname)
+{
+ return bpf_obj_pin_opts(fd, pathname, NULL);
+}
+
int bpf_obj_get(const char *pathname)
{
return bpf_obj_get_opts(pathname, NULL);
@@ -593,7 +603,7 @@ int bpf_obj_get(const char *pathname)
int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
+ const size_t attr_sz = offsetofend(union bpf_attr, path_fd);
union bpf_attr attr;
int fd;
@@ -601,6 +611,7 @@ int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
return libbpf_err(-EINVAL);
memset(&attr, 0, attr_sz);
+ attr.path_fd = OPTS_GET(opts, path_fd, 0);
attr.pathname = ptr_to_u64((void *)pathname);
attr.file_flags = OPTS_GET(opts, file_flags, 0);
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index a2c091389b18..9aa0ee473754 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -284,16 +284,30 @@ LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values
__u32 *count,
const struct bpf_map_batch_opts *opts);
-struct bpf_obj_get_opts {
+struct bpf_obj_pin_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
__u32 file_flags;
+ int path_fd;
size_t :0;
};
-#define bpf_obj_get_opts__last_field file_flags
+#define bpf_obj_pin_opts__last_field path_fd
LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
+LIBBPF_API int bpf_obj_pin_opts(int fd, const char *pathname,
+ const struct bpf_obj_pin_opts *opts);
+
+struct bpf_obj_get_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+
+ __u32 file_flags;
+ int path_fd;
+
+ size_t :0;
+};
+#define bpf_obj_get_opts__last_field path_fd
+
LIBBPF_API int bpf_obj_get(const char *pathname);
LIBBPF_API int bpf_obj_get_opts(const char *pathname,
const struct bpf_obj_get_opts *opts);
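
[Editor's note] A minimal sketch (assumed helper name, not part of this patch) of the new opts-based pinning API: pass a pre-opened directory FD plus BPF_F_PATH_FD so the pathname is resolved relative to that FD, openat()-style.

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int pin_in_dir(int prog_fd, const char *bpffs_dir, const char *name)
{
	LIBBPF_OPTS(bpf_obj_pin_opts, opts);
	int dir_fd, err;

	dir_fd = open(bpffs_dir, O_RDONLY | O_DIRECTORY | O_CLOEXEC);
	if (dir_fd < 0)
		return -errno;

	opts.path_fd = dir_fd;
	opts.file_flags = BPF_F_PATH_FD;	/* 'name' is relative to dir_fd */
	err = bpf_obj_pin_opts(prog_fd, name, &opts);

	close(dir_fd);
	return err;
}
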
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 929a3baca8ef..bbab9ad9dc5a 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -77,16 +77,21 @@
/*
* Helper macros to manipulate data structures
*/
-#ifndef offsetof
-#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
-#endif
-#ifndef container_of
+
+/* offsetof() definition that uses __builtin_offset() might not preserve field
+ * offset CO-RE relocation properly, so force-redefine offsetof() using
+ * old-school approach which works with CO-RE correctly
+ */
+#undef offsetof
+#define offsetof(type, member) ((unsigned long)&((type *)0)->member)
+
+/* redefined container_of() to ensure we use the above offsetof() macro */
+#undef container_of
#define container_of(ptr, type, member) \
({ \
void *__mptr = (void *)(ptr); \
((type *)(__mptr - offsetof(type, member))); \
})
-#endif
/*
* Compiler (optimization) barrier.
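
[Editor's note] Illustration only (assumes vmlinux.h-style kernel types, not part of this patch): with the force-redefined offsetof(), container_of() keeps emitting proper field-offset CO-RE relocations when used on kernel structs.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Recover the task_struct from a pointer to its embedded thread_struct;
 * the offsetof() expanded inside container_of() is what must stay
 * CO-RE-relocatable.
 */
static __always_inline struct task_struct *
task_of_thread(struct thread_struct *th)
{
	return container_of(th, struct task_struct, thread);
}
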
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index 6fb3d0f9af17..be076a4041ab 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -351,6 +351,7 @@ struct pt_regs___arm64 {
* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
*/
+/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG a0
#define __PT_PARM2_REG a1
@@ -383,7 +384,7 @@ struct pt_regs___arm64 {
* https://raw.githubusercontent.com/wiki/foss-for-synopsys-dwc-arc-processors/toolchain/files/ARCv2_ABI.pdf
*/
-/* arc provides struct user_pt_regs instead of struct pt_regs to userspace */
+/* arc provides struct user_regs_struct instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG scratch.r0
#define __PT_PARM2_REG scratch.r1
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 0a2c079244b6..8484b563b53d 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1064,7 +1064,7 @@ static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
int err = 0;
long sz;
- f = fopen(path, "rb");
+ f = fopen(path, "rbe");
if (!f) {
err = -errno;
goto err_out;
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 580985ee5545..4d9f30bf7f01 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -2250,9 +2250,25 @@ static int btf_dump_type_data_check_overflow(struct btf_dump *d,
const struct btf_type *t,
__u32 id,
const void *data,
- __u8 bits_offset)
+ __u8 bits_offset,
+ __u8 bit_sz)
{
- __s64 size = btf__resolve_size(d->btf, id);
+ __s64 size;
+
+ if (bit_sz) {
+ /* bits_offset is at most 7. bit_sz is at most 128. */
+ __u8 nr_bytes = (bits_offset + bit_sz + 7) / 8;
+
+ /* When bit_sz is non zero, it is called from
+ * btf_dump_struct_data() where it only cares about
+ * negative error value.
+ * Return nr_bytes in success case to make it
+ * consistent as the regular integer case below.
+ */
+ return data + nr_bytes > d->typed_dump->data_end ? -E2BIG : nr_bytes;
+ }
+
+ size = btf__resolve_size(d->btf, id);
if (size < 0 || size >= INT_MAX) {
pr_warn("unexpected size [%zu] for id [%u]\n",
@@ -2407,7 +2423,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
{
int size, err = 0;
- size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset);
+ size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset, bit_sz);
if (size < 0)
return size;
err = btf_dump_type_data_check_zero(d, t, id, data, bits_offset, bit_sz);
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index 83e8e3bfd8ff..cf3323fd47b8 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -703,17 +703,17 @@ static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo
/* obtain fd in BPF_REG_9 */
emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
- /* jump to fd_array store if fd denotes module BTF */
- emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
- /* set the default value for off */
- emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
- /* skip BTF fd store for vmlinux BTF */
- emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
/* load fd_array slot pointer */
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
- /* store BTF fd in slot */
+ /* store BTF fd in slot, 0 for vmlinux */
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
+ /* jump to insn[insn_idx].off store if fd denotes module BTF */
+ emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
+ /* set the default value for off */
+ emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
+ /* skip BTF fd store for vmlinux BTF */
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
/* store index into insn[insn_idx].off */
emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index a27f6e9ccce7..214f828ece6b 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1501,16 +1501,36 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
return map;
}
-static size_t bpf_map_mmap_sz(const struct bpf_map *map)
+static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
{
- long page_sz = sysconf(_SC_PAGE_SIZE);
+ const long page_sz = sysconf(_SC_PAGE_SIZE);
size_t map_sz;
- map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
+ map_sz = (size_t)roundup(value_sz, 8) * max_entries;
map_sz = roundup(map_sz, page_sz);
return map_sz;
}
+static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
+{
+ void *mmaped;
+
+ if (!map->mmaped)
+ return -EINVAL;
+
+ if (old_sz == new_sz)
+ return 0;
+
+ mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (mmaped == MAP_FAILED)
+ return -errno;
+
+ memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
+ munmap(map->mmaped, old_sz);
+ map->mmaped = mmaped;
+ return 0;
+}
+
static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
char map_name[BPF_OBJ_NAME_LEN], *p;
@@ -1609,6 +1629,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
{
struct bpf_map_def *def;
struct bpf_map *map;
+ size_t mmap_sz;
int err;
map = bpf_object__add_map(obj);
@@ -1643,7 +1664,8 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
map->name, map->sec_idx, map->sec_offset, def->map_flags);
- map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
+ mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (map->mmaped == MAP_FAILED) {
err = -errno;
@@ -4330,7 +4352,7 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
memset(info, 0, sizeof(*info));
- fp = fopen(file, "r");
+ fp = fopen(file, "re");
if (!fp) {
err = -errno;
pr_warn("failed to open %s: %d. No procfs support?\n", file,
@@ -4393,18 +4415,17 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
if (!new_name)
return libbpf_err(-errno);
- new_fd = open("/", O_RDONLY | O_CLOEXEC);
+ /*
+ * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set.
+ * This is similar to what we do in ensure_good_fd(), but without
+ * closing original FD.
+ */
+ new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
if (new_fd < 0) {
err = -errno;
goto err_free_new_name;
}
- new_fd = dup3(fd, new_fd, O_CLOEXEC);
- if (new_fd < 0) {
- err = -errno;
- goto err_close_new_fd;
- }
-
err = zclose(map->fd);
if (err) {
err = -errno;
@@ -7434,7 +7455,7 @@ int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
int ret, err = 0;
FILE *f;
- f = fopen("/proc/kallsyms", "r");
+ f = fopen("/proc/kallsyms", "re");
if (!f) {
err = -errno;
pr_warn("failed to open /proc/kallsyms: %d\n", err);
@@ -8295,7 +8316,10 @@ static void bpf_map__destroy(struct bpf_map *map)
map->init_slots_sz = 0;
if (map->mmaped) {
- munmap(map->mmaped, bpf_map_mmap_sz(map));
+ size_t mmap_sz;
+
+ mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ munmap(map->mmaped, mmap_sz);
map->mmaped = NULL;
}
@@ -9413,10 +9437,103 @@ __u32 bpf_map__value_size(const struct bpf_map *map)
return map->def.value_size;
}
+static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
+{
+ struct btf *btf;
+ struct btf_type *datasec_type, *var_type;
+ struct btf_var_secinfo *var;
+ const struct btf_type *array_type;
+ const struct btf_array *array;
+ int vlen, element_sz, new_array_id;
+ __u32 nr_elements;
+
+ /* check btf existence */
+ btf = bpf_object__btf(map->obj);
+ if (!btf)
+ return -ENOENT;
+
+ /* verify map is datasec */
+ datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map));
+ if (!btf_is_datasec(datasec_type)) {
+ pr_warn("map '%s': cannot be resized, map value type is not a datasec\n",
+ bpf_map__name(map));
+ return -EINVAL;
+ }
+
+ /* verify datasec has at least one var */
+ vlen = btf_vlen(datasec_type);
+ if (vlen == 0) {
+ pr_warn("map '%s': cannot be resized, map value datasec is empty\n",
+ bpf_map__name(map));
+ return -EINVAL;
+ }
+
+ /* verify last var in the datasec is an array */
+ var = &btf_var_secinfos(datasec_type)[vlen - 1];
+ var_type = btf_type_by_id(btf, var->type);
+ array_type = skip_mods_and_typedefs(btf, var_type->type, NULL);
+ if (!btf_is_array(array_type)) {
+ pr_warn("map '%s': cannot be resized, last var must be an array\n",
+ bpf_map__name(map));
+ return -EINVAL;
+ }
+
+ /* verify request size aligns with array */
+ array = btf_array(array_type);
+ element_sz = btf__resolve_size(btf, array->type);
+ if (element_sz <= 0 || (size - var->offset) % element_sz != 0) {
+ pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n",
+ bpf_map__name(map), element_sz, size);
+ return -EINVAL;
+ }
+
+ /* create a new array based on the existing array, but with new length */
+ nr_elements = (size - var->offset) / element_sz;
+ new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements);
+ if (new_array_id < 0)
+ return new_array_id;
+
+ /* adding a new btf type invalidates existing pointers to btf objects,
+ * so refresh pointers before proceeding
+ */
+ datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
+ var = &btf_var_secinfos(datasec_type)[vlen - 1];
+ var_type = btf_type_by_id(btf, var->type);
+
+ /* finally update btf info */
+ datasec_type->size = size;
+ var->size = size - var->offset;
+ var_type->type = new_array_id;
+
+ return 0;
+}
+
int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
{
if (map->fd >= 0)
return libbpf_err(-EBUSY);
+
+ if (map->mmaped) {
+ int err;
+ size_t mmap_old_sz, mmap_new_sz;
+
+ mmap_old_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ mmap_new_sz = bpf_map_mmap_sz(size, map->def.max_entries);
+ err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
+ if (err) {
+ pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
+ bpf_map__name(map), err);
+ return err;
+ }
+ err = map_btf_datasec_resize(map, size);
+ if (err && err != -ENOENT) {
+ pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n",
+ bpf_map__name(map), err);
+ map->btf_value_type_id = 0;
+ map->btf_key_type_id = 0;
+ }
+ }
+
map->def.value_size = size;
return 0;
}
@@ -9442,7 +9559,7 @@ int bpf_map__set_initial_value(struct bpf_map *map,
return 0;
}
-const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
+void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
{
if (!map->mmaped)
return NULL;
@@ -9958,7 +10075,7 @@ static int parse_uint_from_file(const char *file, const char *fmt)
int err, ret;
FILE *f;
- f = fopen(file, "r");
+ f = fopen(file, "re");
if (!f) {
err = -errno;
pr_debug("failed to open '%s': %s\n", file,
@@ -12694,7 +12811,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map *map = *s->maps[i].map;
- size_t mmap_sz = bpf_map_mmap_sz(map);
+ size_t mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
int prot, map_fd = bpf_map__fd(map);
void **mmaped = s->maps[i].mmaped;
@@ -12721,8 +12838,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
* as per normal clean up procedure, so we don't need to worry
* about it from skeleton's clean up perspective.
*/
- *mmaped = mmap(map->mmaped, mmap_sz, prot,
- MAP_SHARED | MAP_FIXED, map_fd, 0);
+ *mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
if (*mmaped == MAP_FAILED) {
err = -errno;
*mmaped = NULL;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 0b7362397ea3..754da73c643b 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -869,8 +869,22 @@ LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node);
/* get/set map key size */
LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size);
-/* get/set map value size */
+/* get map value size */
LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map);
+/**
+ * @brief **bpf_map__set_value_size()** sets map value size.
+ * @param map the BPF map instance
+ * @return 0, on success; negative error, otherwise
+ *
+ * There is a special case for maps with associated memory-mapped regions, like
+ * the global data section maps (bss, data, rodata). When this function is used
+ * on such a map, the mapped region is resized. Afterward, an attempt is made to
+ * adjust the corresponding BTF info. This attempt is best-effort and can only
+ * succeed if the last variable of the data section map is an array. The array
+ * BTF type is replaced by a new BTF array type with a different length.
+ * Any previously existing pointers returned from bpf_map__initial_value() or
+ * corresponding data section skeleton pointer must be reinitialized.
+ */
LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size);
/* get map key/value BTF type IDs */
LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
@@ -884,7 +898,7 @@ LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size);
-LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
+LIBBPF_API void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
/**
* @brief **bpf_map__is_internal()** tells the caller whether or not the
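
[Editor's note] A usage sketch (assumed map name, not part of this patch) of the resize behaviour documented above: grow a global data map before load, then re-fetch the memory-mapped region, since previously obtained pointers become stale.

#include <errno.h>
#include <bpf/libbpf.h>

static int grow_data_map(struct bpf_object *obj, __u32 new_size)
{
	struct bpf_map *map;
	size_t sz;
	void *data;
	int err;

	map = bpf_object__find_map_by_name(obj, ".data");	/* assumed section */
	if (!map)
		return -ENOENT;

	/* resizes the mmap()ed region and, best effort, the datasec BTF */
	err = bpf_map__set_value_size(map, new_size);
	if (err)
		return err;

	/* old pointers into the region are invalid now; re-fetch */
	data = bpf_map__initial_value(map, &sz);
	if (!data)
		return -EINVAL;

	return bpf_object__load(obj);
}
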
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index a5aa3a383d69..7521a2fb7626 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -391,3 +391,8 @@ LIBBPF_1.2.0 {
bpf_map_get_info_by_fd;
bpf_prog_get_info_by_fd;
} LIBBPF_1.1.0;
+
+LIBBPF_1.3.0 {
+ global:
+ bpf_obj_pin_opts;
+} LIBBPF_1.2.0;
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index b7d443129f1c..9c4db90b92b6 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -38,7 +38,7 @@ static __u32 get_ubuntu_kernel_version(void)
if (faccessat(AT_FDCWD, ubuntu_kver_file, R_OK, AT_EACCESS) != 0)
return 0;
- f = fopen(ubuntu_kver_file, "r");
+ f = fopen(ubuntu_kver_file, "re");
if (!f)
return 0;
diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h
index 1fd2eeac5cfc..290411ddb39e 100644
--- a/tools/lib/bpf/libbpf_version.h
+++ b/tools/lib/bpf/libbpf_version.h
@@ -4,6 +4,6 @@
#define __LIBBPF_VERSION_H
#define LIBBPF_MAJOR_VERSION 1
-#define LIBBPF_MINOR_VERSION 2
+#define LIBBPF_MINOR_VERSION 3
#endif /* __LIBBPF_VERSION_H */
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
index 086eef355ab3..f1a141555f08 100644
--- a/tools/lib/bpf/usdt.c
+++ b/tools/lib/bpf/usdt.c
@@ -466,7 +466,7 @@ static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs,
proceed:
sprintf(line, "/proc/%d/maps", pid);
- f = fopen(line, "r");
+ f = fopen(line, "re");
if (!f) {
err = -errno;
pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n",
@@ -954,8 +954,7 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
spec_map_fd = bpf_map__fd(man->specs_map);
ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);
- /* TODO: perform path resolution similar to uprobe's */
- fd = open(path, O_RDONLY);
+ fd = open(path, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
err = -errno;
pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err);
diff --git a/tools/net/ynl/Makefile b/tools/net/ynl/Makefile
new file mode 100644
index 000000000000..d664b36deb5b
--- /dev/null
+++ b/tools/net/ynl/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+SUBDIRS = lib generated samples
+
+all: $(SUBDIRS)
+
+$(SUBDIRS):
+ @if [ -f "$@/Makefile" ] ; then \
+ $(MAKE) -C $@ ; \
+ fi
+
+clean hardclean:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -f "$$dir/Makefile" ] ; then \
+ $(MAKE) -C $$dir $@; \
+ fi \
+ done
+
+.PHONY: clean all $(SUBDIRS)
diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps
new file mode 100644
index 000000000000..f842bc66b967
--- /dev/null
+++ b/tools/net/ynl/Makefile.deps
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Try to include uAPI headers from the kernel uapi/ path.
+# Most code under tools/ requires the respective kernel uAPI headers
+# to be copied to tools/include. The duplication is annoying.
+# All the family headers should be self-contained. We avoid the copying
+# by selectively including just the uAPI header of the family directly
+# from the kernel sources.
+
+UAPI_PATH:=../../../../include/uapi/
+
+# scripts/headers_install.sh strips "_UAPI" from header guards so we
+# need the explicit -D matching what's in /usr, to avoid multiple definitions.
+
+get_hdr_inc=-D$(1) -include $(UAPI_PATH)/linux/$(2)
+
+CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h)
+CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h)
+CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h)
+CFLAGS_netdev:=$(call get_hdr_inc,_LINUX_NETDEV_H,netdev.h)
diff --git a/tools/net/ynl/generated/Makefile b/tools/net/ynl/generated/Makefile
new file mode 100644
index 000000000000..f8817d2e56e4
--- /dev/null
+++ b/tools/net/ynl/generated/Makefile
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CC=gcc
+CFLAGS=-std=gnu11 -O2 -W -Wall -Wextra -Wno-unused-parameter -Wshadow \
+ -I../lib/ -idirafter $(UAPI_PATH)
+ifeq ("$(DEBUG)","1")
+ CFLAGS += -g -fsanitize=address -fsanitize=leak -static-libasan
+endif
+
+include ../Makefile.deps
+
+YNL_GEN_ARG_ethtool:=--user-header linux/ethtool_netlink.h \
+ --exclude-op stats-get
+
+TOOL:=../ynl-gen-c.py
+
+GENS:=ethtool devlink handshake fou netdev
+SRCS=$(patsubst %,%-user.c,${GENS})
+HDRS=$(patsubst %,%-user.h,${GENS})
+OBJS=$(patsubst %,%-user.o,${GENS})
+
+all: protos.a $(HDRS) $(SRCS) $(KHDRS) $(KSRCS) $(UAPI) regen
+
+protos.a: $(OBJS)
+ @echo -e "\tAR $@"
+ @ar rcs $@ $(OBJS)
+
+%-user.h: ../../../../Documentation/netlink/specs/%.yaml $(TOOL)
+ @echo -e "\tGEN $@"
+ @$(TOOL) --mode user --header --spec $< $(YNL_GEN_ARG_$*) > $@
+
+%-user.c: ../../../../Documentation/netlink/specs/%.yaml $(TOOL)
+ @echo -e "\tGEN $@"
+ @$(TOOL) --mode user --source --spec $< $(YNL_GEN_ARG_$*) > $@
+
+%-user.o: %-user.c %-user.h
+ @echo -e "\tCC $@"
+ @$(COMPILE.c) $(CFLAGS_$*) -o $@ $<
+
+clean:
+ rm -f *.o
+
+hardclean: clean
+ rm -f *.c *.h *.a
+
+regen:
+ @../ynl-regen.sh
+
+.PHONY: all clean hardclean regen
+.DEFAULT_GOAL: all
diff --git a/tools/net/ynl/generated/devlink-user.c b/tools/net/ynl/generated/devlink-user.c
new file mode 100644
index 000000000000..939bd45feaca
--- /dev/null
+++ b/tools/net/ynl/generated/devlink-user.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/devlink.yaml */
+/* YNL-GEN user source */
+
+#include <stdlib.h>
+#include <string.h>
+#include "devlink-user.h"
+#include "ynl.h"
+#include <linux/devlink.h>
+
+#include <libmnl/libmnl.h>
+#include <linux/genetlink.h>
+
+/* Enums */
+static const char * const devlink_op_strmap[] = {
+ [3] = "get",
+ [DEVLINK_CMD_INFO_GET] = "info-get",
+};
+
+const char *devlink_op_str(int op)
+{
+ if (op < 0 || op >= (int)MNL_ARRAY_SIZE(devlink_op_strmap))
+ return NULL;
+ return devlink_op_strmap[op];
+}
+
+/* Policies */
+struct ynl_policy_attr devlink_dl_info_version_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_INFO_VERSION_NAME] = { .name = "info-version-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_INFO_VERSION_VALUE] = { .name = "info-version-value", .type = YNL_PT_NUL_STR, },
+};
+
+struct ynl_policy_nest devlink_dl_info_version_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_info_version_policy,
+};
+
+struct ynl_policy_attr devlink_dl_reload_stats_entry_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_RELOAD_STATS_LIMIT] = { .name = "reload-stats-limit", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_RELOAD_STATS_VALUE] = { .name = "reload-stats-value", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest devlink_dl_reload_stats_entry_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_reload_stats_entry_policy,
+};
+
+struct ynl_policy_attr devlink_dl_reload_act_stats_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_RELOAD_STATS_ENTRY] = { .name = "reload-stats-entry", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_entry_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_reload_act_stats_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_reload_act_stats_policy,
+};
+
+struct ynl_policy_attr devlink_dl_reload_act_info_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_RELOAD_ACTION] = { .name = "reload-action", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_RELOAD_ACTION_STATS] = { .name = "reload-action-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_stats_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_reload_act_info_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_reload_act_info_policy,
+};
+
+struct ynl_policy_attr devlink_dl_reload_stats_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_RELOAD_ACTION_INFO] = { .name = "reload-action-info", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_info_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_reload_stats_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_reload_stats_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dev_stats_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_RELOAD_STATS] = { .name = "reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, },
+ [DEVLINK_ATTR_REMOTE_RELOAD_STATS] = { .name = "remote-reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_dev_stats_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dev_stats_policy,
+};
+
+struct ynl_policy_attr devlink_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .name = "bus-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_DEV_NAME] = { .name = "dev-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .name = "port-index", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_INFO_DRIVER_NAME] = { .name = "info-driver-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_INFO_SERIAL_NUMBER] = { .name = "info-serial-number", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_INFO_VERSION_FIXED] = { .name = "info-version-fixed", .type = YNL_PT_NEST, .nest = &devlink_dl_info_version_nest, },
+ [DEVLINK_ATTR_INFO_VERSION_RUNNING] = { .name = "info-version-running", .type = YNL_PT_NEST, .nest = &devlink_dl_info_version_nest, },
+ [DEVLINK_ATTR_INFO_VERSION_STORED] = { .name = "info-version-stored", .type = YNL_PT_NEST, .nest = &devlink_dl_info_version_nest, },
+ [DEVLINK_ATTR_INFO_VERSION_NAME] = { .name = "info-version-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_INFO_VERSION_VALUE] = { .name = "info-version-value", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_RELOAD_FAILED] = { .name = "reload-failed", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_RELOAD_ACTION] = { .name = "reload-action", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_DEV_STATS] = { .name = "dev-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_dev_stats_nest, },
+ [DEVLINK_ATTR_RELOAD_STATS] = { .name = "reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, },
+ [DEVLINK_ATTR_RELOAD_STATS_ENTRY] = { .name = "reload-stats-entry", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_entry_nest, },
+ [DEVLINK_ATTR_RELOAD_STATS_LIMIT] = { .name = "reload-stats-limit", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_RELOAD_STATS_VALUE] = { .name = "reload-stats-value", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_REMOTE_RELOAD_STATS] = { .name = "remote-reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, },
+ [DEVLINK_ATTR_RELOAD_ACTION_INFO] = { .name = "reload-action-info", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_info_nest, },
+ [DEVLINK_ATTR_RELOAD_ACTION_STATS] = { .name = "reload-action-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_stats_nest, },
+};
+
+struct ynl_policy_nest devlink_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_policy,
+};
+
+/* Common nested types */
+void devlink_dl_info_version_free(struct devlink_dl_info_version *obj)
+{
+ free(obj->info_version_name);
+ free(obj->info_version_value);
+}
+
+int devlink_dl_info_version_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_info_version *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_INFO_VERSION_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.info_version_name_len = len;
+ dst->info_version_name = malloc(len + 1);
+ memcpy(dst->info_version_name, mnl_attr_get_str(attr), len);
+ dst->info_version_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_INFO_VERSION_VALUE) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.info_version_value_len = len;
+ dst->info_version_value = malloc(len + 1);
+ memcpy(dst->info_version_value, mnl_attr_get_str(attr), len);
+ dst->info_version_value[len] = 0;
+ }
+ }
+
+ return 0;
+}
+
+void
+devlink_dl_reload_stats_entry_free(struct devlink_dl_reload_stats_entry *obj)
+{
+}
+
+int devlink_dl_reload_stats_entry_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_reload_stats_entry *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_RELOAD_STATS_LIMIT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.reload_stats_limit = 1;
+ dst->reload_stats_limit = mnl_attr_get_u8(attr);
+ } else if (type == DEVLINK_ATTR_RELOAD_STATS_VALUE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.reload_stats_value = 1;
+ dst->reload_stats_value = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_reload_act_stats_free(struct devlink_dl_reload_act_stats *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_reload_stats_entry; i++)
+ devlink_dl_reload_stats_entry_free(&obj->reload_stats_entry[i]);
+ free(obj->reload_stats_entry);
+}
+
+int devlink_dl_reload_act_stats_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_reload_act_stats *dst = yarg->data;
+ unsigned int n_reload_stats_entry = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->reload_stats_entry)
+ return ynl_error_parse(yarg, "attribute already present (dl-reload-act-stats.reload-stats-entry)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_RELOAD_STATS_ENTRY) {
+ n_reload_stats_entry++;
+ }
+ }
+
+ if (n_reload_stats_entry) {
+ dst->reload_stats_entry = calloc(n_reload_stats_entry, sizeof(*dst->reload_stats_entry));
+ dst->n_reload_stats_entry = n_reload_stats_entry;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_reload_stats_entry_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_RELOAD_STATS_ENTRY) {
+ parg.data = &dst->reload_stats_entry[i];
+ if (devlink_dl_reload_stats_entry_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_reload_act_info_free(struct devlink_dl_reload_act_info *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_reload_action_stats; i++)
+ devlink_dl_reload_act_stats_free(&obj->reload_action_stats[i]);
+ free(obj->reload_action_stats);
+}
+
+int devlink_dl_reload_act_info_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_reload_act_info *dst = yarg->data;
+ unsigned int n_reload_action_stats = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->reload_action_stats)
+ return ynl_error_parse(yarg, "attribute already present (dl-reload-act-info.reload-action-stats)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_RELOAD_ACTION) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.reload_action = 1;
+ dst->reload_action = mnl_attr_get_u8(attr);
+ } else if (type == DEVLINK_ATTR_RELOAD_ACTION_STATS) {
+ n_reload_action_stats++;
+ }
+ }
+
+ if (n_reload_action_stats) {
+ dst->reload_action_stats = calloc(n_reload_action_stats, sizeof(*dst->reload_action_stats));
+ dst->n_reload_action_stats = n_reload_action_stats;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_reload_act_stats_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_RELOAD_ACTION_STATS) {
+ parg.data = &dst->reload_action_stats[i];
+ if (devlink_dl_reload_act_stats_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_reload_stats_free(struct devlink_dl_reload_stats *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_reload_action_info; i++)
+ devlink_dl_reload_act_info_free(&obj->reload_action_info[i]);
+ free(obj->reload_action_info);
+}
+
+int devlink_dl_reload_stats_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_reload_stats *dst = yarg->data;
+ unsigned int n_reload_action_info = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->reload_action_info)
+ return ynl_error_parse(yarg, "attribute already present (dl-reload-stats.reload-action-info)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_RELOAD_ACTION_INFO) {
+ n_reload_action_info++;
+ }
+ }
+
+ if (n_reload_action_info) {
+ dst->reload_action_info = calloc(n_reload_action_info, sizeof(*dst->reload_action_info));
+ dst->n_reload_action_info = n_reload_action_info;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_reload_act_info_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_RELOAD_ACTION_INFO) {
+ parg.data = &dst->reload_action_info[i];
+ if (devlink_dl_reload_act_info_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_dev_stats_free(struct devlink_dl_dev_stats *obj)
+{
+ devlink_dl_reload_stats_free(&obj->reload_stats);
+ devlink_dl_reload_stats_free(&obj->remote_reload_stats);
+}
+
+int devlink_dl_dev_stats_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dev_stats *dst = yarg->data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_RELOAD_STATS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.reload_stats = 1;
+
+ parg.rsp_policy = &devlink_dl_reload_stats_nest;
+ parg.data = &dst->reload_stats;
+ if (devlink_dl_reload_stats_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == DEVLINK_ATTR_REMOTE_RELOAD_STATS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.remote_reload_stats = 1;
+
+ parg.rsp_policy = &devlink_dl_reload_stats_nest;
+ parg.data = &dst->remote_reload_stats;
+ if (devlink_dl_reload_stats_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_GET ============== */
+/* DEVLINK_CMD_GET - do */
+void devlink_get_req_free(struct devlink_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_get_rsp_free(struct devlink_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ devlink_dl_dev_stats_free(&rsp->dev_stats);
+ free(rsp);
+}
+
+int devlink_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct devlink_get_rsp *dst;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_RELOAD_FAILED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.reload_failed = 1;
+ dst->reload_failed = mnl_attr_get_u8(attr);
+ } else if (type == DEVLINK_ATTR_RELOAD_ACTION) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.reload_action = 1;
+ dst->reload_action = mnl_attr_get_u8(attr);
+ } else if (type == DEVLINK_ATTR_DEV_STATS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dev_stats = 1;
+
+ parg.rsp_policy = &devlink_dl_dev_stats_nest;
+ parg.data = &dst->dev_stats;
+ if (devlink_dl_dev_stats_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_get_rsp *
+devlink_get(struct ynl_sock *ys, struct devlink_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_get_rsp_parse;
+ yrs.rsp_cmd = 3;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_GET - dump */
+void devlink_get_list_free(struct devlink_get_list *rsp)
+{
+ struct devlink_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ devlink_dl_dev_stats_free(&rsp->obj.dev_stats);
+ free(rsp);
+ }
+}
+
+struct devlink_get_list *devlink_get_dump(struct ynl_sock *ys)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_get_list);
+ yds.cb = devlink_get_rsp_parse;
+ yds.rsp_cmd = 3;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_GET, 1);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_INFO_GET ============== */
+/* DEVLINK_CMD_INFO_GET - do */
+void devlink_info_get_req_free(struct devlink_info_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_info_get_rsp_free(struct devlink_info_get_rsp *rsp)
+{
+ unsigned int i;
+
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp->info_driver_name);
+ free(rsp->info_serial_number);
+ for (i = 0; i < rsp->n_info_version_fixed; i++)
+ devlink_dl_info_version_free(&rsp->info_version_fixed[i]);
+ free(rsp->info_version_fixed);
+ for (i = 0; i < rsp->n_info_version_running; i++)
+ devlink_dl_info_version_free(&rsp->info_version_running[i]);
+ free(rsp->info_version_running);
+ for (i = 0; i < rsp->n_info_version_stored; i++)
+ devlink_dl_info_version_free(&rsp->info_version_stored[i]);
+ free(rsp->info_version_stored);
+ free(rsp);
+}
+
+int devlink_info_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ unsigned int n_info_version_running = 0;
+ unsigned int n_info_version_stored = 0;
+ unsigned int n_info_version_fixed = 0;
+ struct ynl_parse_arg *yarg = data;
+ struct devlink_info_get_rsp *dst;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ if (dst->info_version_fixed)
+ return ynl_error_parse(yarg, "attribute already present (devlink.info-version-fixed)");
+ if (dst->info_version_running)
+ return ynl_error_parse(yarg, "attribute already present (devlink.info-version-running)");
+ if (dst->info_version_stored)
+ return ynl_error_parse(yarg, "attribute already present (devlink.info-version-stored)");
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_INFO_DRIVER_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.info_driver_name_len = len;
+ dst->info_driver_name = malloc(len + 1);
+ memcpy(dst->info_driver_name, mnl_attr_get_str(attr), len);
+ dst->info_driver_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_INFO_SERIAL_NUMBER) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.info_serial_number_len = len;
+ dst->info_serial_number = malloc(len + 1);
+ memcpy(dst->info_serial_number, mnl_attr_get_str(attr), len);
+ dst->info_serial_number[len] = 0;
+ } else if (type == DEVLINK_ATTR_INFO_VERSION_FIXED) {
+ n_info_version_fixed++;
+ } else if (type == DEVLINK_ATTR_INFO_VERSION_RUNNING) {
+ n_info_version_running++;
+ } else if (type == DEVLINK_ATTR_INFO_VERSION_STORED) {
+ n_info_version_stored++;
+ }
+ }
+
+ if (n_info_version_fixed) {
+ dst->info_version_fixed = calloc(n_info_version_fixed, sizeof(*dst->info_version_fixed));
+ dst->n_info_version_fixed = n_info_version_fixed;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_info_version_nest;
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_INFO_VERSION_FIXED) {
+ parg.data = &dst->info_version_fixed[i];
+ if (devlink_dl_info_version_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+ if (n_info_version_running) {
+ dst->info_version_running = calloc(n_info_version_running, sizeof(*dst->info_version_running));
+ dst->n_info_version_running = n_info_version_running;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_info_version_nest;
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_INFO_VERSION_RUNNING) {
+ parg.data = &dst->info_version_running[i];
+ if (devlink_dl_info_version_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+ if (n_info_version_stored) {
+ dst->info_version_stored = calloc(n_info_version_stored, sizeof(*dst->info_version_stored));
+ dst->n_info_version_stored = n_info_version_stored;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_info_version_nest;
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_INFO_VERSION_STORED) {
+ parg.data = &dst->info_version_stored[i];
+ if (devlink_dl_info_version_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_info_get_rsp *
+devlink_info_get(struct ynl_sock *ys, struct devlink_info_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_info_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_INFO_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_info_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_INFO_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_info_get_rsp_free(rsp);
+ return NULL;
+}
+
+const struct ynl_family ynl_devlink_family = {
+ .name = "devlink",
+};
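
As devlink_get_list_free() above shows, devlink_get_dump() returns a singly linked list terminated by YNL_LIST_END rather than NULL (NULL is only returned when the dump itself fails). Below is a minimal sketch of walking that list; the function name print_devlink_instances is hypothetical and "ys" is assumed to be a socket already created for &ynl_devlink_family.

/* Illustrative only: iterate a dump result the same way the free routine does. */
#include <stdio.h>

#include "devlink-user.h"
#include "ynl.h"

static void print_devlink_instances(struct ynl_sock *ys)
{
	struct devlink_get_list *devs, *d;

	devs = devlink_get_dump(ys);
	if (!devs)	/* NULL means the dump request failed */
		return;

	for (d = devs; (void *)d != YNL_LIST_END; d = d->next)
		if (d->obj.bus_name && d->obj.dev_name)
			printf("%s/%s\n", d->obj.bus_name, d->obj.dev_name);

	devlink_get_list_free(devs);
}
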
diff --git a/tools/net/ynl/generated/devlink-user.h b/tools/net/ynl/generated/devlink-user.h
new file mode 100644
index 000000000000..a008b99b6e24
--- /dev/null
+++ b/tools/net/ynl/generated/devlink-user.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/devlink.yaml */
+/* YNL-GEN user header */
+
+#ifndef _LINUX_DEVLINK_GEN_H
+#define _LINUX_DEVLINK_GEN_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <linux/types.h>
+#include <linux/devlink.h>
+
+struct ynl_sock;
+
+extern const struct ynl_family ynl_devlink_family;
+
+/* Enums */
+const char *devlink_op_str(int op);
+
+/* Common nested types */
+struct devlink_dl_info_version {
+ struct {
+ __u32 info_version_name_len;
+ __u32 info_version_value_len;
+ } _present;
+
+ char *info_version_name;
+ char *info_version_value;
+};
+
+struct devlink_dl_reload_stats_entry {
+ struct {
+ __u32 reload_stats_limit:1;
+ __u32 reload_stats_value:1;
+ } _present;
+
+ __u8 reload_stats_limit;
+ __u32 reload_stats_value;
+};
+
+struct devlink_dl_reload_act_stats {
+ unsigned int n_reload_stats_entry;
+ struct devlink_dl_reload_stats_entry *reload_stats_entry;
+};
+
+struct devlink_dl_reload_act_info {
+ struct {
+ __u32 reload_action:1;
+ } _present;
+
+ __u8 reload_action;
+ unsigned int n_reload_action_stats;
+ struct devlink_dl_reload_act_stats *reload_action_stats;
+};
+
+struct devlink_dl_reload_stats {
+ unsigned int n_reload_action_info;
+ struct devlink_dl_reload_act_info *reload_action_info;
+};
+
+struct devlink_dl_dev_stats {
+ struct {
+ __u32 reload_stats:1;
+ __u32 remote_reload_stats:1;
+ } _present;
+
+ struct devlink_dl_reload_stats reload_stats;
+ struct devlink_dl_reload_stats remote_reload_stats;
+};
+
+/* ============== DEVLINK_CMD_GET ============== */
+/* DEVLINK_CMD_GET - do */
+struct devlink_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_get_req *devlink_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_get_req));
+}
+void devlink_get_req_free(struct devlink_get_req *req);
+
+static inline void
+devlink_get_req_set_bus_name(struct devlink_get_req *req, const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_get_req_set_dev_name(struct devlink_get_req *req, const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 reload_failed:1;
+ __u32 reload_action:1;
+ __u32 dev_stats:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u8 reload_failed;
+ __u8 reload_action;
+ struct devlink_dl_dev_stats dev_stats;
+};
+
+void devlink_get_rsp_free(struct devlink_get_rsp *rsp);
+
+/*
+ * Get devlink instances.
+ */
+struct devlink_get_rsp *
+devlink_get(struct ynl_sock *ys, struct devlink_get_req *req);
+
+/* DEVLINK_CMD_GET - dump */
+struct devlink_get_list {
+ struct devlink_get_list *next;
+ struct devlink_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void devlink_get_list_free(struct devlink_get_list *rsp);
+
+struct devlink_get_list *devlink_get_dump(struct ynl_sock *ys);
+
+/* ============== DEVLINK_CMD_INFO_GET ============== */
+/* DEVLINK_CMD_INFO_GET - do */
+struct devlink_info_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_info_get_req *devlink_info_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_info_get_req));
+}
+void devlink_info_get_req_free(struct devlink_info_get_req *req);
+
+static inline void
+devlink_info_get_req_set_bus_name(struct devlink_info_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_info_get_req_set_dev_name(struct devlink_info_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_info_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 info_driver_name_len;
+ __u32 info_serial_number_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *info_driver_name;
+ char *info_serial_number;
+ unsigned int n_info_version_fixed;
+ struct devlink_dl_info_version *info_version_fixed;
+ unsigned int n_info_version_running;
+ struct devlink_dl_info_version *info_version_running;
+ unsigned int n_info_version_stored;
+ struct devlink_dl_info_version *info_version_stored;
+};
+
+void devlink_info_get_rsp_free(struct devlink_info_get_rsp *rsp);
+
+/*
+ * Get device information, such as the driver name and the hardware and firmware versions.
+ */
+struct devlink_info_get_rsp *
+devlink_info_get(struct ynl_sock *ys, struct devlink_info_get_req *req);
+
+#endif /* _LINUX_DEVLINK_GEN_H */
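
The do-request helpers declared above follow an alloc / set / execute / free pattern: allocate a request, fill it via the setters, call the command function, then free both request and response. A minimal sketch follows; the function name show_driver_name is hypothetical and "ys" is assumed to be a socket created elsewhere for &ynl_devlink_family.

/* Illustrative request/response flow for DEVLINK_CMD_INFO_GET. */
#include <stdio.h>

#include "devlink-user.h"

static void show_driver_name(struct ynl_sock *ys,
			     const char *bus, const char *dev)
{
	struct devlink_info_get_req *req;
	struct devlink_info_get_rsp *rsp;

	req = devlink_info_get_req_alloc();
	if (!req)
		return;
	devlink_info_get_req_set_bus_name(req, bus);
	devlink_info_get_req_set_dev_name(req, dev);

	rsp = devlink_info_get(ys, req);	/* NULL on error */
	devlink_info_get_req_free(req);
	if (!rsp)
		return;

	if (rsp->info_driver_name)
		printf("%s/%s driver: %s\n", bus, dev, rsp->info_driver_name);

	devlink_info_get_rsp_free(rsp);
}
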
diff --git a/tools/net/ynl/generated/ethtool-user.c b/tools/net/ynl/generated/ethtool-user.c
new file mode 100644
index 000000000000..74b883a14958
--- /dev/null
+++ b/tools/net/ynl/generated/ethtool-user.c
@@ -0,0 +1,6353 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/ethtool.yaml */
+/* YNL-GEN user source */
+/* YNL-ARG --user-header linux/ethtool_netlink.h --exclude-op stats-get */
+
+#include <stdlib.h>
+#include <string.h>
+#include "ethtool-user.h"
+#include "ynl.h"
+#include <linux/ethtool.h>
+
+#include <libmnl/libmnl.h>
+#include <linux/genetlink.h>
+
+#include "linux/ethtool_netlink.h"
+
+/* Enums */
+static const char * const ethtool_op_strmap[] = {
+ [ETHTOOL_MSG_STRSET_GET] = "strset-get",
+ [ETHTOOL_MSG_LINKINFO_GET] = "linkinfo-get",
+ [3] = "linkinfo-ntf",
+ [ETHTOOL_MSG_LINKMODES_GET] = "linkmodes-get",
+ [5] = "linkmodes-ntf",
+ [ETHTOOL_MSG_LINKSTATE_GET] = "linkstate-get",
+ [ETHTOOL_MSG_DEBUG_GET] = "debug-get",
+ [8] = "debug-ntf",
+ [ETHTOOL_MSG_WOL_GET] = "wol-get",
+ [10] = "wol-ntf",
+ [ETHTOOL_MSG_FEATURES_GET] = "features-get",
+ [ETHTOOL_MSG_FEATURES_SET] = "features-set",
+ [13] = "features-ntf",
+ [14] = "privflags-get",
+ [15] = "privflags-ntf",
+ [16] = "rings-get",
+ [17] = "rings-ntf",
+ [18] = "channels-get",
+ [19] = "channels-ntf",
+ [20] = "coalesce-get",
+ [21] = "coalesce-ntf",
+ [22] = "pause-get",
+ [23] = "pause-ntf",
+ [24] = "eee-get",
+ [25] = "eee-ntf",
+ [26] = "tsinfo-get",
+ [27] = "cable-test-ntf",
+ [28] = "cable-test-tdr-ntf",
+ [29] = "tunnel-info-get",
+ [30] = "fec-get",
+ [31] = "fec-ntf",
+ [32] = "module-eeprom-get",
+ [34] = "phc-vclocks-get",
+ [35] = "module-get",
+ [36] = "module-ntf",
+ [37] = "pse-get",
+ [ETHTOOL_MSG_RSS_GET] = "rss-get",
+ [ETHTOOL_MSG_PLCA_GET_CFG] = "plca-get-cfg",
+ [40] = "plca-get-status",
+ [41] = "plca-ntf",
+ [ETHTOOL_MSG_MM_GET] = "mm-get",
+ [43] = "mm-ntf",
+};
+
+const char *ethtool_op_str(int op)
+{
+ if (op < 0 || op >= (int)MNL_ARRAY_SIZE(ethtool_op_strmap))
+ return NULL;
+ return ethtool_op_strmap[op];
+}
+
+static const char * const ethtool_udp_tunnel_type_strmap[] = {
+ [0] = "vxlan",
+ [1] = "geneve",
+ [2] = "vxlan-gpe",
+};
+
+const char *ethtool_udp_tunnel_type_str(int value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(ethtool_udp_tunnel_type_strmap))
+ return NULL;
+ return ethtool_udp_tunnel_type_strmap[value];
+}
+
+static const char * const ethtool_stringset_strmap[] = {
+};
+
+const char *ethtool_stringset_str(enum ethtool_stringset value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(ethtool_stringset_strmap))
+ return NULL;
+ return ethtool_stringset_strmap[value];
+}
+
+/* Policies */
+struct ynl_policy_attr ethtool_header_policy[ETHTOOL_A_HEADER_MAX + 1] = {
+ [ETHTOOL_A_HEADER_DEV_INDEX] = { .name = "dev-index", .type = YNL_PT_U32, },
+ [ETHTOOL_A_HEADER_DEV_NAME] = { .name = "dev-name", .type = YNL_PT_NUL_STR, },
+ [ETHTOOL_A_HEADER_FLAGS] = { .name = "flags", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_header_nest = {
+ .max_attr = ETHTOOL_A_HEADER_MAX,
+ .table = ethtool_header_policy,
+};
+
+struct ynl_policy_attr ethtool_pause_stat_policy[ETHTOOL_A_PAUSE_STAT_MAX + 1] = {
+ [ETHTOOL_A_PAUSE_STAT_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
+ [ETHTOOL_A_PAUSE_STAT_TX_FRAMES] = { .name = "tx-frames", .type = YNL_PT_U64, },
+ [ETHTOOL_A_PAUSE_STAT_RX_FRAMES] = { .name = "rx-frames", .type = YNL_PT_U64, },
+};
+
+struct ynl_policy_nest ethtool_pause_stat_nest = {
+ .max_attr = ETHTOOL_A_PAUSE_STAT_MAX,
+ .table = ethtool_pause_stat_policy,
+};
+
+struct ynl_policy_attr ethtool_cable_test_tdr_cfg_policy[ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX + 1] = {
+ [ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST] = { .name = "first", .type = YNL_PT_U32, },
+ [ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST] = { .name = "last", .type = YNL_PT_U32, },
+ [ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP] = { .name = "step", .type = YNL_PT_U32, },
+ [ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR] = { .name = "pair", .type = YNL_PT_U8, },
+};
+
+struct ynl_policy_nest ethtool_cable_test_tdr_cfg_nest = {
+ .max_attr = ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX,
+ .table = ethtool_cable_test_tdr_cfg_policy,
+};
+
+struct ynl_policy_attr ethtool_fec_stat_policy[ETHTOOL_A_FEC_STAT_MAX + 1] = {
+ [ETHTOOL_A_FEC_STAT_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
+ [ETHTOOL_A_FEC_STAT_CORRECTED] = { .name = "corrected", .type = YNL_PT_BINARY,},
+ [ETHTOOL_A_FEC_STAT_UNCORR] = { .name = "uncorr", .type = YNL_PT_BINARY,},
+ [ETHTOOL_A_FEC_STAT_CORR_BITS] = { .name = "corr-bits", .type = YNL_PT_BINARY,},
+};
+
+struct ynl_policy_nest ethtool_fec_stat_nest = {
+ .max_attr = ETHTOOL_A_FEC_STAT_MAX,
+ .table = ethtool_fec_stat_policy,
+};
+
+struct ynl_policy_attr ethtool_mm_stat_policy[ETHTOOL_A_MM_STAT_MAX + 1] = {
+ [ETHTOOL_A_MM_STAT_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
+ [ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS] = { .name = "reassembly-errors", .type = YNL_PT_U64, },
+ [ETHTOOL_A_MM_STAT_SMD_ERRORS] = { .name = "smd-errors", .type = YNL_PT_U64, },
+ [ETHTOOL_A_MM_STAT_REASSEMBLY_OK] = { .name = "reassembly-ok", .type = YNL_PT_U64, },
+ [ETHTOOL_A_MM_STAT_RX_FRAG_COUNT] = { .name = "rx-frag-count", .type = YNL_PT_U64, },
+ [ETHTOOL_A_MM_STAT_TX_FRAG_COUNT] = { .name = "tx-frag-count", .type = YNL_PT_U64, },
+ [ETHTOOL_A_MM_STAT_HOLD_COUNT] = { .name = "hold-count", .type = YNL_PT_U64, },
+};
+
+struct ynl_policy_nest ethtool_mm_stat_nest = {
+ .max_attr = ETHTOOL_A_MM_STAT_MAX,
+ .table = ethtool_mm_stat_policy,
+};
+
+struct ynl_policy_attr ethtool_cable_result_policy[ETHTOOL_A_CABLE_RESULT_MAX + 1] = {
+ [ETHTOOL_A_CABLE_RESULT_PAIR] = { .name = "pair", .type = YNL_PT_U8, },
+ [ETHTOOL_A_CABLE_RESULT_CODE] = { .name = "code", .type = YNL_PT_U8, },
+};
+
+struct ynl_policy_nest ethtool_cable_result_nest = {
+ .max_attr = ETHTOOL_A_CABLE_RESULT_MAX,
+ .table = ethtool_cable_result_policy,
+};
+
+struct ynl_policy_attr ethtool_cable_fault_length_policy[ETHTOOL_A_CABLE_FAULT_LENGTH_MAX + 1] = {
+ [ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR] = { .name = "pair", .type = YNL_PT_U8, },
+ [ETHTOOL_A_CABLE_FAULT_LENGTH_CM] = { .name = "cm", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_cable_fault_length_nest = {
+ .max_attr = ETHTOOL_A_CABLE_FAULT_LENGTH_MAX,
+ .table = ethtool_cable_fault_length_policy,
+};
+
+struct ynl_policy_attr ethtool_bitset_bit_policy[ETHTOOL_A_BITSET_BIT_MAX + 1] = {
+ [ETHTOOL_A_BITSET_BIT_INDEX] = { .name = "index", .type = YNL_PT_U32, },
+ [ETHTOOL_A_BITSET_BIT_NAME] = { .name = "name", .type = YNL_PT_NUL_STR, },
+ [ETHTOOL_A_BITSET_BIT_VALUE] = { .name = "value", .type = YNL_PT_FLAG, },
+};
+
+struct ynl_policy_nest ethtool_bitset_bit_nest = {
+ .max_attr = ETHTOOL_A_BITSET_BIT_MAX,
+ .table = ethtool_bitset_bit_policy,
+};
+
+struct ynl_policy_attr ethtool_tunnel_udp_entry_policy[ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX + 1] = {
+ [ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT] = { .name = "port", .type = YNL_PT_U16, },
+ [ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE] = { .name = "type", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_tunnel_udp_entry_nest = {
+ .max_attr = ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX,
+ .table = ethtool_tunnel_udp_entry_policy,
+};
+
+struct ynl_policy_attr ethtool_string_policy[ETHTOOL_A_STRING_MAX + 1] = {
+ [ETHTOOL_A_STRING_INDEX] = { .name = "index", .type = YNL_PT_U32, },
+ [ETHTOOL_A_STRING_VALUE] = { .name = "value", .type = YNL_PT_NUL_STR, },
+};
+
+struct ynl_policy_nest ethtool_string_nest = {
+ .max_attr = ETHTOOL_A_STRING_MAX,
+ .table = ethtool_string_policy,
+};
+
+struct ynl_policy_attr ethtool_cable_nest_policy[ETHTOOL_A_CABLE_NEST_MAX + 1] = {
+ [ETHTOOL_A_CABLE_NEST_RESULT] = { .name = "result", .type = YNL_PT_NEST, .nest = &ethtool_cable_result_nest, },
+ [ETHTOOL_A_CABLE_NEST_FAULT_LENGTH] = { .name = "fault-length", .type = YNL_PT_NEST, .nest = &ethtool_cable_fault_length_nest, },
+};
+
+struct ynl_policy_nest ethtool_cable_nest_nest = {
+ .max_attr = ETHTOOL_A_CABLE_NEST_MAX,
+ .table = ethtool_cable_nest_policy,
+};
+
+struct ynl_policy_attr ethtool_bitset_bits_policy[ETHTOOL_A_BITSET_BITS_MAX + 1] = {
+ [ETHTOOL_A_BITSET_BITS_BIT] = { .name = "bit", .type = YNL_PT_NEST, .nest = &ethtool_bitset_bit_nest, },
+};
+
+struct ynl_policy_nest ethtool_bitset_bits_nest = {
+ .max_attr = ETHTOOL_A_BITSET_BITS_MAX,
+ .table = ethtool_bitset_bits_policy,
+};
+
+struct ynl_policy_attr ethtool_strings_policy[ETHTOOL_A_STRINGS_MAX + 1] = {
+ [ETHTOOL_A_STRINGS_STRING] = { .name = "string", .type = YNL_PT_NEST, .nest = &ethtool_string_nest, },
+};
+
+struct ynl_policy_nest ethtool_strings_nest = {
+ .max_attr = ETHTOOL_A_STRINGS_MAX,
+ .table = ethtool_strings_policy,
+};
+
+struct ynl_policy_attr ethtool_bitset_policy[ETHTOOL_A_BITSET_MAX + 1] = {
+ [ETHTOOL_A_BITSET_NOMASK] = { .name = "nomask", .type = YNL_PT_FLAG, },
+ [ETHTOOL_A_BITSET_SIZE] = { .name = "size", .type = YNL_PT_U32, },
+ [ETHTOOL_A_BITSET_BITS] = { .name = "bits", .type = YNL_PT_NEST, .nest = &ethtool_bitset_bits_nest, },
+};
+
+struct ynl_policy_nest ethtool_bitset_nest = {
+ .max_attr = ETHTOOL_A_BITSET_MAX,
+ .table = ethtool_bitset_policy,
+};
+
+struct ynl_policy_attr ethtool_stringset_policy[ETHTOOL_A_STRINGSET_MAX + 1] = {
+ [ETHTOOL_A_STRINGSET_ID] = { .name = "id", .type = YNL_PT_U32, },
+ [ETHTOOL_A_STRINGSET_COUNT] = { .name = "count", .type = YNL_PT_U32, },
+ [ETHTOOL_A_STRINGSET_STRINGS] = { .name = "strings", .type = YNL_PT_NEST, .nest = &ethtool_strings_nest, },
+};
+
+struct ynl_policy_nest ethtool_stringset_nest = {
+ .max_attr = ETHTOOL_A_STRINGSET_MAX,
+ .table = ethtool_stringset_policy,
+};
+
+struct ynl_policy_attr ethtool_tunnel_udp_table_policy[ETHTOOL_A_TUNNEL_UDP_TABLE_MAX + 1] = {
+ [ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE] = { .name = "size", .type = YNL_PT_U32, },
+ [ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES] = { .name = "types", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY] = { .name = "entry", .type = YNL_PT_NEST, .nest = &ethtool_tunnel_udp_entry_nest, },
+};
+
+struct ynl_policy_nest ethtool_tunnel_udp_table_nest = {
+ .max_attr = ETHTOOL_A_TUNNEL_UDP_TABLE_MAX,
+ .table = ethtool_tunnel_udp_table_policy,
+};
+
+struct ynl_policy_attr ethtool_stringsets_policy[ETHTOOL_A_STRINGSETS_MAX + 1] = {
+ [ETHTOOL_A_STRINGSETS_STRINGSET] = { .name = "stringset", .type = YNL_PT_NEST, .nest = &ethtool_stringset_nest, },
+};
+
+struct ynl_policy_nest ethtool_stringsets_nest = {
+ .max_attr = ETHTOOL_A_STRINGSETS_MAX,
+ .table = ethtool_stringsets_policy,
+};
+
+struct ynl_policy_attr ethtool_tunnel_udp_policy[ETHTOOL_A_TUNNEL_UDP_MAX + 1] = {
+ [ETHTOOL_A_TUNNEL_UDP_TABLE] = { .name = "table", .type = YNL_PT_NEST, .nest = &ethtool_tunnel_udp_table_nest, },
+};
+
+struct ynl_policy_nest ethtool_tunnel_udp_nest = {
+ .max_attr = ETHTOOL_A_TUNNEL_UDP_MAX,
+ .table = ethtool_tunnel_udp_policy,
+};
+
+struct ynl_policy_attr ethtool_strset_policy[ETHTOOL_A_STRSET_MAX + 1] = {
+ [ETHTOOL_A_STRSET_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_STRSET_STRINGSETS] = { .name = "stringsets", .type = YNL_PT_NEST, .nest = &ethtool_stringsets_nest, },
+ [ETHTOOL_A_STRSET_COUNTS_ONLY] = { .name = "counts-only", .type = YNL_PT_FLAG, },
+};
+
+struct ynl_policy_nest ethtool_strset_nest = {
+ .max_attr = ETHTOOL_A_STRSET_MAX,
+ .table = ethtool_strset_policy,
+};
+
+struct ynl_policy_attr ethtool_linkinfo_policy[ETHTOOL_A_LINKINFO_MAX + 1] = {
+ [ETHTOOL_A_LINKINFO_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_LINKINFO_PORT] = { .name = "port", .type = YNL_PT_U8, },
+ [ETHTOOL_A_LINKINFO_PHYADDR] = { .name = "phyaddr", .type = YNL_PT_U8, },
+ [ETHTOOL_A_LINKINFO_TP_MDIX] = { .name = "tp-mdix", .type = YNL_PT_U8, },
+ [ETHTOOL_A_LINKINFO_TP_MDIX_CTRL] = { .name = "tp-mdix-ctrl", .type = YNL_PT_U8, },
+ [ETHTOOL_A_LINKINFO_TRANSCEIVER] = { .name = "transceiver", .type = YNL_PT_U8, },
+};
+
+struct ynl_policy_nest ethtool_linkinfo_nest = {
+ .max_attr = ETHTOOL_A_LINKINFO_MAX,
+ .table = ethtool_linkinfo_policy,
+};
+
+struct ynl_policy_attr ethtool_linkmodes_policy[ETHTOOL_A_LINKMODES_MAX + 1] = {
+ [ETHTOOL_A_LINKMODES_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_LINKMODES_AUTONEG] = { .name = "autoneg", .type = YNL_PT_U8, },
+ [ETHTOOL_A_LINKMODES_OURS] = { .name = "ours", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_LINKMODES_PEER] = { .name = "peer", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_LINKMODES_SPEED] = { .name = "speed", .type = YNL_PT_U32, },
+ [ETHTOOL_A_LINKMODES_DUPLEX] = { .name = "duplex", .type = YNL_PT_U8, },
+ [ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG] = { .name = "master-slave-cfg", .type = YNL_PT_U8, },
+ [ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE] = { .name = "master-slave-state", .type = YNL_PT_U8, },
+ [ETHTOOL_A_LINKMODES_LANES] = { .name = "lanes", .type = YNL_PT_U32, },
+ [ETHTOOL_A_LINKMODES_RATE_MATCHING] = { .name = "rate-matching", .type = YNL_PT_U8, },
+};
+
+struct ynl_policy_nest ethtool_linkmodes_nest = {
+ .max_attr = ETHTOOL_A_LINKMODES_MAX,
+ .table = ethtool_linkmodes_policy,
+};
+
+struct ynl_policy_attr ethtool_linkstate_policy[ETHTOOL_A_LINKSTATE_MAX + 1] = {
+ [ETHTOOL_A_LINKSTATE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_LINKSTATE_LINK] = { .name = "link", .type = YNL_PT_U8, },
+ [ETHTOOL_A_LINKSTATE_SQI] = { .name = "sqi", .type = YNL_PT_U32, },
+ [ETHTOOL_A_LINKSTATE_SQI_MAX] = { .name = "sqi-max", .type = YNL_PT_U32, },
+ [ETHTOOL_A_LINKSTATE_EXT_STATE] = { .name = "ext-state", .type = YNL_PT_U8, },
+ [ETHTOOL_A_LINKSTATE_EXT_SUBSTATE] = { .name = "ext-substate", .type = YNL_PT_U8, },
+ [ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT] = { .name = "ext-down-cnt", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_linkstate_nest = {
+ .max_attr = ETHTOOL_A_LINKSTATE_MAX,
+ .table = ethtool_linkstate_policy,
+};
+
+struct ynl_policy_attr ethtool_debug_policy[ETHTOOL_A_DEBUG_MAX + 1] = {
+ [ETHTOOL_A_DEBUG_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_DEBUG_MSGMASK] = { .name = "msgmask", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+};
+
+struct ynl_policy_nest ethtool_debug_nest = {
+ .max_attr = ETHTOOL_A_DEBUG_MAX,
+ .table = ethtool_debug_policy,
+};
+
+struct ynl_policy_attr ethtool_wol_policy[ETHTOOL_A_WOL_MAX + 1] = {
+ [ETHTOOL_A_WOL_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_WOL_MODES] = { .name = "modes", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_WOL_SOPASS] = { .name = "sopass", .type = YNL_PT_BINARY,},
+};
+
+struct ynl_policy_nest ethtool_wol_nest = {
+ .max_attr = ETHTOOL_A_WOL_MAX,
+ .table = ethtool_wol_policy,
+};
+
+struct ynl_policy_attr ethtool_features_policy[ETHTOOL_A_FEATURES_MAX + 1] = {
+ [ETHTOOL_A_FEATURES_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_FEATURES_HW] = { .name = "hw", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_FEATURES_WANTED] = { .name = "wanted", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_FEATURES_ACTIVE] = { .name = "active", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_FEATURES_NOCHANGE] = { .name = "nochange", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+};
+
+struct ynl_policy_nest ethtool_features_nest = {
+ .max_attr = ETHTOOL_A_FEATURES_MAX,
+ .table = ethtool_features_policy,
+};
+
+struct ynl_policy_attr ethtool_privflags_policy[ETHTOOL_A_PRIVFLAGS_MAX + 1] = {
+ [ETHTOOL_A_PRIVFLAGS_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_PRIVFLAGS_FLAGS] = { .name = "flags", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+};
+
+struct ynl_policy_nest ethtool_privflags_nest = {
+ .max_attr = ETHTOOL_A_PRIVFLAGS_MAX,
+ .table = ethtool_privflags_policy,
+};
+
+struct ynl_policy_attr ethtool_rings_policy[ETHTOOL_A_RINGS_MAX + 1] = {
+ [ETHTOOL_A_RINGS_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_RINGS_RX_MAX] = { .name = "rx-max", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RINGS_RX_MINI_MAX] = { .name = "rx-mini-max", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RINGS_RX_JUMBO_MAX] = { .name = "rx-jumbo-max", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RINGS_TX_MAX] = { .name = "tx-max", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RINGS_RX] = { .name = "rx", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RINGS_RX_MINI] = { .name = "rx-mini", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RINGS_RX_JUMBO] = { .name = "rx-jumbo", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RINGS_TX] = { .name = "tx", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RINGS_RX_BUF_LEN] = { .name = "rx-buf-len", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RINGS_TCP_DATA_SPLIT] = { .name = "tcp-data-split", .type = YNL_PT_U8, },
+ [ETHTOOL_A_RINGS_CQE_SIZE] = { .name = "cqe-size", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RINGS_TX_PUSH] = { .name = "tx-push", .type = YNL_PT_U8, },
+ [ETHTOOL_A_RINGS_RX_PUSH] = { .name = "rx-push", .type = YNL_PT_U8, },
+ [ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] = { .name = "tx-push-buf-len", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX] = { .name = "tx-push-buf-len-max", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_rings_nest = {
+ .max_attr = ETHTOOL_A_RINGS_MAX,
+ .table = ethtool_rings_policy,
+};
+
+struct ynl_policy_attr ethtool_channels_policy[ETHTOOL_A_CHANNELS_MAX + 1] = {
+ [ETHTOOL_A_CHANNELS_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_CHANNELS_RX_MAX] = { .name = "rx-max", .type = YNL_PT_U32, },
+ [ETHTOOL_A_CHANNELS_TX_MAX] = { .name = "tx-max", .type = YNL_PT_U32, },
+ [ETHTOOL_A_CHANNELS_OTHER_MAX] = { .name = "other-max", .type = YNL_PT_U32, },
+ [ETHTOOL_A_CHANNELS_COMBINED_MAX] = { .name = "combined-max", .type = YNL_PT_U32, },
+ [ETHTOOL_A_CHANNELS_RX_COUNT] = { .name = "rx-count", .type = YNL_PT_U32, },
+ [ETHTOOL_A_CHANNELS_TX_COUNT] = { .name = "tx-count", .type = YNL_PT_U32, },
+ [ETHTOOL_A_CHANNELS_OTHER_COUNT] = { .name = "other-count", .type = YNL_PT_U32, },
+ [ETHTOOL_A_CHANNELS_COMBINED_COUNT] = { .name = "combined-count", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_channels_nest = {
+ .max_attr = ETHTOOL_A_CHANNELS_MAX,
+ .table = ethtool_channels_policy,
+};
+
+struct ynl_policy_attr ethtool_coalesce_policy[ETHTOOL_A_COALESCE_MAX + 1] = {
+ [ETHTOOL_A_COALESCE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_COALESCE_RX_USECS] = { .name = "rx-usecs", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES] = { .name = "rx-max-frames", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_RX_USECS_IRQ] = { .name = "rx-usecs-irq", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ] = { .name = "rx-max-frames-irq", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_TX_USECS] = { .name = "tx-usecs", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES] = { .name = "tx-max-frames", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_TX_USECS_IRQ] = { .name = "tx-usecs-irq", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ] = { .name = "tx-max-frames-irq", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_STATS_BLOCK_USECS] = { .name = "stats-block-usecs", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX] = { .name = "use-adaptive-rx", .type = YNL_PT_U8, },
+ [ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX] = { .name = "use-adaptive-tx", .type = YNL_PT_U8, },
+ [ETHTOOL_A_COALESCE_PKT_RATE_LOW] = { .name = "pkt-rate-low", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_RX_USECS_LOW] = { .name = "rx-usecs-low", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW] = { .name = "rx-max-frames-low", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_TX_USECS_LOW] = { .name = "tx-usecs-low", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW] = { .name = "tx-max-frames-low", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_PKT_RATE_HIGH] = { .name = "pkt-rate-high", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_RX_USECS_HIGH] = { .name = "rx-usecs-high", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH] = { .name = "rx-max-frames-high", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_TX_USECS_HIGH] = { .name = "tx-usecs-high", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH] = { .name = "tx-max-frames-high", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL] = { .name = "rate-sample-interval", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_USE_CQE_MODE_TX] = { .name = "use-cqe-mode-tx", .type = YNL_PT_U8, },
+ [ETHTOOL_A_COALESCE_USE_CQE_MODE_RX] = { .name = "use-cqe-mode-rx", .type = YNL_PT_U8, },
+ [ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES] = { .name = "tx-aggr-max-bytes", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES] = { .name = "tx-aggr-max-frames", .type = YNL_PT_U32, },
+ [ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS] = { .name = "tx-aggr-time-usecs", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_coalesce_nest = {
+ .max_attr = ETHTOOL_A_COALESCE_MAX,
+ .table = ethtool_coalesce_policy,
+};
+
+struct ynl_policy_attr ethtool_pause_policy[ETHTOOL_A_PAUSE_MAX + 1] = {
+ [ETHTOOL_A_PAUSE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_PAUSE_AUTONEG] = { .name = "autoneg", .type = YNL_PT_U8, },
+ [ETHTOOL_A_PAUSE_RX] = { .name = "rx", .type = YNL_PT_U8, },
+ [ETHTOOL_A_PAUSE_TX] = { .name = "tx", .type = YNL_PT_U8, },
+ [ETHTOOL_A_PAUSE_STATS] = { .name = "stats", .type = YNL_PT_NEST, .nest = &ethtool_pause_stat_nest, },
+ [ETHTOOL_A_PAUSE_STATS_SRC] = { .name = "stats-src", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_pause_nest = {
+ .max_attr = ETHTOOL_A_PAUSE_MAX,
+ .table = ethtool_pause_policy,
+};
+
+struct ynl_policy_attr ethtool_eee_policy[ETHTOOL_A_EEE_MAX + 1] = {
+ [ETHTOOL_A_EEE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_EEE_MODES_OURS] = { .name = "modes-ours", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_EEE_MODES_PEER] = { .name = "modes-peer", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_EEE_ACTIVE] = { .name = "active", .type = YNL_PT_U8, },
+ [ETHTOOL_A_EEE_ENABLED] = { .name = "enabled", .type = YNL_PT_U8, },
+ [ETHTOOL_A_EEE_TX_LPI_ENABLED] = { .name = "tx-lpi-enabled", .type = YNL_PT_U8, },
+ [ETHTOOL_A_EEE_TX_LPI_TIMER] = { .name = "tx-lpi-timer", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_eee_nest = {
+ .max_attr = ETHTOOL_A_EEE_MAX,
+ .table = ethtool_eee_policy,
+};
+
+struct ynl_policy_attr ethtool_tsinfo_policy[ETHTOOL_A_TSINFO_MAX + 1] = {
+ [ETHTOOL_A_TSINFO_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_TSINFO_TIMESTAMPING] = { .name = "timestamping", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_TSINFO_TX_TYPES] = { .name = "tx-types", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_TSINFO_RX_FILTERS] = { .name = "rx-filters", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_TSINFO_PHC_INDEX] = { .name = "phc-index", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_tsinfo_nest = {
+ .max_attr = ETHTOOL_A_TSINFO_MAX,
+ .table = ethtool_tsinfo_policy,
+};
+
+struct ynl_policy_attr ethtool_cable_test_policy[ETHTOOL_A_CABLE_TEST_MAX + 1] = {
+ [ETHTOOL_A_CABLE_TEST_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+};
+
+struct ynl_policy_nest ethtool_cable_test_nest = {
+ .max_attr = ETHTOOL_A_CABLE_TEST_MAX,
+ .table = ethtool_cable_test_policy,
+};
+
+struct ynl_policy_attr ethtool_cable_test_ntf_policy[ETHTOOL_A_CABLE_TEST_NTF_MAX + 1] = {
+ [ETHTOOL_A_CABLE_TEST_NTF_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_CABLE_TEST_NTF_STATUS] = { .name = "status", .type = YNL_PT_U8, },
+ [ETHTOOL_A_CABLE_TEST_NTF_NEST] = { .name = "nest", .type = YNL_PT_NEST, .nest = &ethtool_cable_nest_nest, },
+};
+
+struct ynl_policy_nest ethtool_cable_test_ntf_nest = {
+ .max_attr = ETHTOOL_A_CABLE_TEST_NTF_MAX,
+ .table = ethtool_cable_test_ntf_policy,
+};
+
+struct ynl_policy_attr ethtool_cable_test_tdr_policy[ETHTOOL_A_CABLE_TEST_TDR_MAX + 1] = {
+ [ETHTOOL_A_CABLE_TEST_TDR_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_CABLE_TEST_TDR_CFG] = { .name = "cfg", .type = YNL_PT_NEST, .nest = &ethtool_cable_test_tdr_cfg_nest, },
+};
+
+struct ynl_policy_nest ethtool_cable_test_tdr_nest = {
+ .max_attr = ETHTOOL_A_CABLE_TEST_TDR_MAX,
+ .table = ethtool_cable_test_tdr_policy,
+};
+
+struct ynl_policy_attr ethtool_cable_test_tdr_ntf_policy[ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX + 1] = {
+ [ETHTOOL_A_CABLE_TEST_TDR_NTF_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_CABLE_TEST_TDR_NTF_STATUS] = { .name = "status", .type = YNL_PT_U8, },
+ [ETHTOOL_A_CABLE_TEST_TDR_NTF_NEST] = { .name = "nest", .type = YNL_PT_NEST, .nest = &ethtool_cable_nest_nest, },
+};
+
+struct ynl_policy_nest ethtool_cable_test_tdr_ntf_nest = {
+ .max_attr = ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX,
+ .table = ethtool_cable_test_tdr_ntf_policy,
+};
+
+struct ynl_policy_attr ethtool_tunnel_info_policy[ETHTOOL_A_TUNNEL_INFO_MAX + 1] = {
+ [ETHTOOL_A_TUNNEL_INFO_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_TUNNEL_INFO_UDP_PORTS] = { .name = "udp-ports", .type = YNL_PT_NEST, .nest = &ethtool_tunnel_udp_nest, },
+};
+
+struct ynl_policy_nest ethtool_tunnel_info_nest = {
+ .max_attr = ETHTOOL_A_TUNNEL_INFO_MAX,
+ .table = ethtool_tunnel_info_policy,
+};
+
+struct ynl_policy_attr ethtool_fec_policy[ETHTOOL_A_FEC_MAX + 1] = {
+ [ETHTOOL_A_FEC_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_FEC_MODES] = { .name = "modes", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, },
+ [ETHTOOL_A_FEC_AUTO] = { .name = "auto", .type = YNL_PT_U8, },
+ [ETHTOOL_A_FEC_ACTIVE] = { .name = "active", .type = YNL_PT_U32, },
+ [ETHTOOL_A_FEC_STATS] = { .name = "stats", .type = YNL_PT_NEST, .nest = &ethtool_fec_stat_nest, },
+};
+
+struct ynl_policy_nest ethtool_fec_nest = {
+ .max_attr = ETHTOOL_A_FEC_MAX,
+ .table = ethtool_fec_policy,
+};
+
+struct ynl_policy_attr ethtool_module_eeprom_policy[ETHTOOL_A_MODULE_EEPROM_MAX + 1] = {
+ [ETHTOOL_A_MODULE_EEPROM_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_MODULE_EEPROM_OFFSET] = { .name = "offset", .type = YNL_PT_U32, },
+ [ETHTOOL_A_MODULE_EEPROM_LENGTH] = { .name = "length", .type = YNL_PT_U32, },
+ [ETHTOOL_A_MODULE_EEPROM_PAGE] = { .name = "page", .type = YNL_PT_U8, },
+ [ETHTOOL_A_MODULE_EEPROM_BANK] = { .name = "bank", .type = YNL_PT_U8, },
+ [ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS] = { .name = "i2c-address", .type = YNL_PT_U8, },
+ [ETHTOOL_A_MODULE_EEPROM_DATA] = { .name = "data", .type = YNL_PT_BINARY,},
+};
+
+struct ynl_policy_nest ethtool_module_eeprom_nest = {
+ .max_attr = ETHTOOL_A_MODULE_EEPROM_MAX,
+ .table = ethtool_module_eeprom_policy,
+};
+
+struct ynl_policy_attr ethtool_phc_vclocks_policy[ETHTOOL_A_PHC_VCLOCKS_MAX + 1] = {
+ [ETHTOOL_A_PHC_VCLOCKS_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_PHC_VCLOCKS_NUM] = { .name = "num", .type = YNL_PT_U32, },
+ [ETHTOOL_A_PHC_VCLOCKS_INDEX] = { .name = "index", .type = YNL_PT_BINARY,},
+};
+
+struct ynl_policy_nest ethtool_phc_vclocks_nest = {
+ .max_attr = ETHTOOL_A_PHC_VCLOCKS_MAX,
+ .table = ethtool_phc_vclocks_policy,
+};
+
+struct ynl_policy_attr ethtool_module_policy[ETHTOOL_A_MODULE_MAX + 1] = {
+ [ETHTOOL_A_MODULE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_MODULE_POWER_MODE_POLICY] = { .name = "power-mode-policy", .type = YNL_PT_U8, },
+ [ETHTOOL_A_MODULE_POWER_MODE] = { .name = "power-mode", .type = YNL_PT_U8, },
+};
+
+struct ynl_policy_nest ethtool_module_nest = {
+ .max_attr = ETHTOOL_A_MODULE_MAX,
+ .table = ethtool_module_policy,
+};
+
+struct ynl_policy_attr ethtool_pse_policy[ETHTOOL_A_PSE_MAX + 1] = {
+ [ETHTOOL_A_PSE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_PODL_PSE_ADMIN_STATE] = { .name = "admin-state", .type = YNL_PT_U32, },
+ [ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] = { .name = "admin-control", .type = YNL_PT_U32, },
+ [ETHTOOL_A_PODL_PSE_PW_D_STATUS] = { .name = "pw-d-status", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_pse_nest = {
+ .max_attr = ETHTOOL_A_PSE_MAX,
+ .table = ethtool_pse_policy,
+};
+
+struct ynl_policy_attr ethtool_rss_policy[ETHTOOL_A_RSS_MAX + 1] = {
+ [ETHTOOL_A_RSS_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_RSS_CONTEXT] = { .name = "context", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RSS_HFUNC] = { .name = "hfunc", .type = YNL_PT_U32, },
+ [ETHTOOL_A_RSS_INDIR] = { .name = "indir", .type = YNL_PT_BINARY,},
+ [ETHTOOL_A_RSS_HKEY] = { .name = "hkey", .type = YNL_PT_BINARY,},
+};
+
+struct ynl_policy_nest ethtool_rss_nest = {
+ .max_attr = ETHTOOL_A_RSS_MAX,
+ .table = ethtool_rss_policy,
+};
+
+struct ynl_policy_attr ethtool_plca_policy[ETHTOOL_A_PLCA_MAX + 1] = {
+ [ETHTOOL_A_PLCA_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_PLCA_VERSION] = { .name = "version", .type = YNL_PT_U16, },
+ [ETHTOOL_A_PLCA_ENABLED] = { .name = "enabled", .type = YNL_PT_U8, },
+ [ETHTOOL_A_PLCA_STATUS] = { .name = "status", .type = YNL_PT_U8, },
+ [ETHTOOL_A_PLCA_NODE_CNT] = { .name = "node-cnt", .type = YNL_PT_U32, },
+ [ETHTOOL_A_PLCA_NODE_ID] = { .name = "node-id", .type = YNL_PT_U32, },
+ [ETHTOOL_A_PLCA_TO_TMR] = { .name = "to-tmr", .type = YNL_PT_U32, },
+ [ETHTOOL_A_PLCA_BURST_CNT] = { .name = "burst-cnt", .type = YNL_PT_U32, },
+ [ETHTOOL_A_PLCA_BURST_TMR] = { .name = "burst-tmr", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest ethtool_plca_nest = {
+ .max_attr = ETHTOOL_A_PLCA_MAX,
+ .table = ethtool_plca_policy,
+};
+
+struct ynl_policy_attr ethtool_mm_policy[ETHTOOL_A_MM_MAX + 1] = {
+ [ETHTOOL_A_MM_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, },
+ [ETHTOOL_A_MM_PMAC_ENABLED] = { .name = "pmac-enabled", .type = YNL_PT_U8, },
+ [ETHTOOL_A_MM_TX_ENABLED] = { .name = "tx-enabled", .type = YNL_PT_U8, },
+ [ETHTOOL_A_MM_TX_ACTIVE] = { .name = "tx-active", .type = YNL_PT_U8, },
+ [ETHTOOL_A_MM_TX_MIN_FRAG_SIZE] = { .name = "tx-min-frag-size", .type = YNL_PT_U32, },
+ [ETHTOOL_A_MM_RX_MIN_FRAG_SIZE] = { .name = "rx-min-frag-size", .type = YNL_PT_U32, },
+ [ETHTOOL_A_MM_VERIFY_ENABLED] = { .name = "verify-enabled", .type = YNL_PT_U8, },
+ [ETHTOOL_A_MM_VERIFY_STATUS] = { .name = "verify-status", .type = YNL_PT_U8, },
+ [ETHTOOL_A_MM_VERIFY_TIME] = { .name = "verify-time", .type = YNL_PT_U32, },
+ [ETHTOOL_A_MM_MAX_VERIFY_TIME] = { .name = "max-verify-time", .type = YNL_PT_U32, },
+ [ETHTOOL_A_MM_STATS] = { .name = "stats", .type = YNL_PT_NEST, .nest = &ethtool_mm_stat_nest, },
+};
+
+struct ynl_policy_nest ethtool_mm_nest = {
+ .max_attr = ETHTOOL_A_MM_MAX,
+ .table = ethtool_mm_policy,
+};
+
+/* Common nested types */
+void ethtool_header_free(struct ethtool_header *obj)
+{
+ free(obj->dev_name);
+}
+
+int ethtool_header_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct ethtool_header *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ if (obj->_present.dev_index)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_HEADER_DEV_INDEX, obj->dev_index);
+ if (obj->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, ETHTOOL_A_HEADER_DEV_NAME, obj->dev_name);
+ if (obj->_present.flags)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_HEADER_FLAGS, obj->flags);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+int ethtool_header_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_header *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_HEADER_DEV_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dev_index = 1;
+ dst->dev_index = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_HEADER_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == ETHTOOL_A_HEADER_FLAGS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.flags = 1;
+ dst->flags = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_pause_stat_free(struct ethtool_pause_stat *obj)
+{
+}
+
+int ethtool_pause_stat_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct ethtool_pause_stat *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ if (obj->_present.tx_frames)
+ mnl_attr_put_u64(nlh, ETHTOOL_A_PAUSE_STAT_TX_FRAMES, obj->tx_frames);
+ if (obj->_present.rx_frames)
+ mnl_attr_put_u64(nlh, ETHTOOL_A_PAUSE_STAT_RX_FRAMES, obj->rx_frames);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+int ethtool_pause_stat_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_pause_stat *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_PAUSE_STAT_TX_FRAMES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_frames = 1;
+ dst->tx_frames = mnl_attr_get_u64(attr);
+ } else if (type == ETHTOOL_A_PAUSE_STAT_RX_FRAMES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_frames = 1;
+ dst->rx_frames = mnl_attr_get_u64(attr);
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_cable_test_tdr_cfg_free(struct ethtool_cable_test_tdr_cfg *obj)
+{
+}
+
+void ethtool_fec_stat_free(struct ethtool_fec_stat *obj)
+{
+ free(obj->corrected);
+ free(obj->uncorr);
+ free(obj->corr_bits);
+}
+
+int ethtool_fec_stat_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct ethtool_fec_stat *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ if (obj->_present.corrected_len)
+ mnl_attr_put(nlh, ETHTOOL_A_FEC_STAT_CORRECTED, obj->_present.corrected_len, obj->corrected);
+ if (obj->_present.uncorr_len)
+ mnl_attr_put(nlh, ETHTOOL_A_FEC_STAT_UNCORR, obj->_present.uncorr_len, obj->uncorr);
+ if (obj->_present.corr_bits_len)
+ mnl_attr_put(nlh, ETHTOOL_A_FEC_STAT_CORR_BITS, obj->_present.corr_bits_len, obj->corr_bits);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+int ethtool_fec_stat_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_fec_stat *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_FEC_STAT_CORRECTED) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.corrected_len = len;
+ dst->corrected = malloc(len);
+ memcpy(dst->corrected, mnl_attr_get_payload(attr), len);
+ } else if (type == ETHTOOL_A_FEC_STAT_UNCORR) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.uncorr_len = len;
+ dst->uncorr = malloc(len);
+ memcpy(dst->uncorr, mnl_attr_get_payload(attr), len);
+ } else if (type == ETHTOOL_A_FEC_STAT_CORR_BITS) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.corr_bits_len = len;
+ dst->corr_bits = malloc(len);
+ memcpy(dst->corr_bits, mnl_attr_get_payload(attr), len);
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_mm_stat_free(struct ethtool_mm_stat *obj)
+{
+}
+
+int ethtool_mm_stat_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_mm_stat *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.reassembly_errors = 1;
+ dst->reassembly_errors = mnl_attr_get_u64(attr);
+ } else if (type == ETHTOOL_A_MM_STAT_SMD_ERRORS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.smd_errors = 1;
+ dst->smd_errors = mnl_attr_get_u64(attr);
+ } else if (type == ETHTOOL_A_MM_STAT_REASSEMBLY_OK) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.reassembly_ok = 1;
+ dst->reassembly_ok = mnl_attr_get_u64(attr);
+ } else if (type == ETHTOOL_A_MM_STAT_RX_FRAG_COUNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_frag_count = 1;
+ dst->rx_frag_count = mnl_attr_get_u64(attr);
+ } else if (type == ETHTOOL_A_MM_STAT_TX_FRAG_COUNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_frag_count = 1;
+ dst->tx_frag_count = mnl_attr_get_u64(attr);
+ } else if (type == ETHTOOL_A_MM_STAT_HOLD_COUNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.hold_count = 1;
+ dst->hold_count = mnl_attr_get_u64(attr);
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_cable_result_free(struct ethtool_cable_result *obj)
+{
+}
+
+int ethtool_cable_result_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_cable_result *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_CABLE_RESULT_PAIR) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.pair = 1;
+ dst->pair = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_CABLE_RESULT_CODE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.code = 1;
+ dst->code = mnl_attr_get_u8(attr);
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_cable_fault_length_free(struct ethtool_cable_fault_length *obj)
+{
+}
+
+int ethtool_cable_fault_length_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_cable_fault_length *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.pair = 1;
+ dst->pair = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_CABLE_FAULT_LENGTH_CM) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.cm = 1;
+ dst->cm = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_bitset_bit_free(struct ethtool_bitset_bit *obj)
+{
+ free(obj->name);
+}
+
+int ethtool_bitset_bit_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct ethtool_bitset_bit *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ if (obj->_present.index)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_BITSET_BIT_INDEX, obj->index);
+ if (obj->_present.name_len)
+ mnl_attr_put_strz(nlh, ETHTOOL_A_BITSET_BIT_NAME, obj->name);
+ if (obj->_present.value)
+ mnl_attr_put(nlh, ETHTOOL_A_BITSET_BIT_VALUE, 0, NULL);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+int ethtool_bitset_bit_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_bitset_bit *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_BITSET_BIT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.index = 1;
+ dst->index = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_BITSET_BIT_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.name_len = len;
+ dst->name = malloc(len + 1);
+ memcpy(dst->name, mnl_attr_get_str(attr), len);
+ dst->name[len] = 0;
+ } else if (type == ETHTOOL_A_BITSET_BIT_VALUE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.value = 1;
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_tunnel_udp_entry_free(struct ethtool_tunnel_udp_entry *obj)
+{
+}
+
+int ethtool_tunnel_udp_entry_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_tunnel_udp_entry *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port = 1;
+ dst->port = mnl_attr_get_u16(attr);
+ } else if (type == ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.type = 1;
+ dst->type = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_string_free(struct ethtool_string *obj)
+{
+ free(obj->value);
+}
+
+int ethtool_string_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct ethtool_string *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ if (obj->_present.index)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_STRING_INDEX, obj->index);
+ if (obj->_present.value_len)
+ mnl_attr_put_strz(nlh, ETHTOOL_A_STRING_VALUE, obj->value);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+int ethtool_string_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_string *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_STRING_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.index = 1;
+ dst->index = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_STRING_VALUE) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.value_len = len;
+ dst->value = malloc(len + 1);
+ memcpy(dst->value, mnl_attr_get_str(attr), len);
+ dst->value[len] = 0;
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_cable_nest_free(struct ethtool_cable_nest *obj)
+{
+ ethtool_cable_result_free(&obj->result);
+ ethtool_cable_fault_length_free(&obj->fault_length);
+}
+
+int ethtool_cable_nest_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_cable_nest *dst = yarg->data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_CABLE_NEST_RESULT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.result = 1;
+
+ parg.rsp_policy = &ethtool_cable_result_nest;
+ parg.data = &dst->result;
+ if (ethtool_cable_result_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_CABLE_NEST_FAULT_LENGTH) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.fault_length = 1;
+
+ parg.rsp_policy = &ethtool_cable_fault_length_nest;
+ parg.data = &dst->fault_length;
+ if (ethtool_cable_fault_length_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_bitset_bits_free(struct ethtool_bitset_bits *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_bit; i++)
+ ethtool_bitset_bit_free(&obj->bit[i]);
+ free(obj->bit);
+}
+
+int ethtool_bitset_bits_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct ethtool_bitset_bits *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ for (unsigned int i = 0; i < obj->n_bit; i++)
+ ethtool_bitset_bit_put(nlh, ETHTOOL_A_BITSET_BITS_BIT, &obj->bit[i]);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+int ethtool_bitset_bits_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_bitset_bits *dst = yarg->data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ unsigned int n_bit = 0;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->bit)
+ return ynl_error_parse(yarg, "attribute already present (bitset-bits.bit)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_BITSET_BITS_BIT) {
+ n_bit++;
+ }
+ }
+
+ if (n_bit) {
+ dst->bit = calloc(n_bit, sizeof(*dst->bit));
+ dst->n_bit = n_bit;
+ i = 0;
+ parg.rsp_policy = &ethtool_bitset_bit_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == ETHTOOL_A_BITSET_BITS_BIT) {
+ parg.data = &dst->bit[i];
+ if (ethtool_bitset_bit_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_strings_free(struct ethtool_strings *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_string; i++)
+ ethtool_string_free(&obj->string[i]);
+ free(obj->string);
+}
+
+int ethtool_strings_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct ethtool_strings *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ for (unsigned int i = 0; i < obj->n_string; i++)
+ ethtool_string_put(nlh, ETHTOOL_A_STRINGS_STRING, &obj->string[i]);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+int ethtool_strings_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_strings *dst = yarg->data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ unsigned int n_string = 0;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->string)
+ return ynl_error_parse(yarg, "attribute already present (strings.string)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_STRINGS_STRING) {
+ n_string++;
+ }
+ }
+
+ if (n_string) {
+ dst->string = calloc(n_string, sizeof(*dst->string));
+ dst->n_string = n_string;
+ i = 0;
+ parg.rsp_policy = &ethtool_string_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == ETHTOOL_A_STRINGS_STRING) {
+ parg.data = &dst->string[i];
+ if (ethtool_string_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_bitset_free(struct ethtool_bitset *obj)
+{
+ ethtool_bitset_bits_free(&obj->bits);
+}
+
+int ethtool_bitset_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct ethtool_bitset *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ if (obj->_present.nomask)
+ mnl_attr_put(nlh, ETHTOOL_A_BITSET_NOMASK, 0, NULL);
+ if (obj->_present.size)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_BITSET_SIZE, obj->size);
+ if (obj->_present.bits)
+ ethtool_bitset_bits_put(nlh, ETHTOOL_A_BITSET_BITS, &obj->bits);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+int ethtool_bitset_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_bitset *dst = yarg->data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_BITSET_NOMASK) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.nomask = 1;
+ } else if (type == ETHTOOL_A_BITSET_SIZE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.size = 1;
+ dst->size = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_BITSET_BITS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.bits = 1;
+
+ parg.rsp_policy = &ethtool_bitset_bits_nest;
+ parg.data = &dst->bits;
+ if (ethtool_bitset_bits_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return 0;
+}
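
/*
 * Illustrative sketch only, not part of the generated file: how a caller
 * might fill the reusable struct ethtool_bitset for a request (for example
 * ETHTOOL_A_LINKMODES_OURS in ethtool_linkmodes_set() further down).  Field
 * names follow the put/parse helpers above.  Note that the generated
 * *_free() helpers free() their members, so a request populated with
 * borrowed storage like this must not be handed to them.
 */
static void ethtool_bitset_fill_one_bit(struct ethtool_bitset *bitset,
					struct ethtool_bitset_bit *bit,
					char *bit_name)
{
	/* a single named bit; VALUE is a flag attribute, presence only */
	bit->_present.name_len = strlen(bit_name);
	bit->name = bit_name;
	bit->_present.value = 1;

	/* NOMASK: send a plain list of bits instead of a value/mask pair */
	bitset->_present.nomask = 1;
	bitset->_present.bits = 1;
	bitset->bits.n_bit = 1;
	bitset->bits.bit = bit;
}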
+
+void ethtool_stringset_free(struct ethtool_stringset_ *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_strings; i++)
+ ethtool_strings_free(&obj->strings[i]);
+ free(obj->strings);
+}
+
+int ethtool_stringset_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct ethtool_stringset_ *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ if (obj->_present.id)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_STRINGSET_ID, obj->id);
+ if (obj->_present.count)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_STRINGSET_COUNT, obj->count);
+ for (unsigned int i = 0; i < obj->n_strings; i++)
+ ethtool_strings_put(nlh, ETHTOOL_A_STRINGSET_STRINGS, &obj->strings[i]);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+int ethtool_stringset_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_stringset_ *dst = yarg->data;
+ unsigned int n_strings = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->strings)
+ return ynl_error_parse(yarg, "attribute already present (stringset.strings)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_STRINGSET_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.id = 1;
+ dst->id = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_STRINGSET_COUNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.count = 1;
+ dst->count = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_STRINGSET_STRINGS) {
+ n_strings++;
+ }
+ }
+
+ if (n_strings) {
+ dst->strings = calloc(n_strings, sizeof(*dst->strings));
+ dst->n_strings = n_strings;
+ i = 0;
+ parg.rsp_policy = &ethtool_strings_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == ETHTOOL_A_STRINGSET_STRINGS) {
+ parg.data = &dst->strings[i];
+ if (ethtool_strings_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_tunnel_udp_table_free(struct ethtool_tunnel_udp_table *obj)
+{
+ unsigned int i;
+
+ ethtool_bitset_free(&obj->types);
+ for (i = 0; i < obj->n_entry; i++)
+ ethtool_tunnel_udp_entry_free(&obj->entry[i]);
+ free(obj->entry);
+}
+
+int ethtool_tunnel_udp_table_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_tunnel_udp_table *dst = yarg->data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ unsigned int n_entry = 0;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->entry)
+ return ynl_error_parse(yarg, "attribute already present (tunnel-udp-table.entry)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.size = 1;
+ dst->size = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.types = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->types;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY) {
+ n_entry++;
+ }
+ }
+
+ if (n_entry) {
+ dst->entry = calloc(n_entry, sizeof(*dst->entry));
+ dst->n_entry = n_entry;
+ i = 0;
+ parg.rsp_policy = &ethtool_tunnel_udp_entry_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY) {
+ parg.data = &dst->entry[i];
+ if (ethtool_tunnel_udp_entry_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_stringsets_free(struct ethtool_stringsets *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_stringset; i++)
+ ethtool_stringset_free(&obj->stringset[i]);
+ free(obj->stringset);
+}
+
+int ethtool_stringsets_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct ethtool_stringsets *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ for (unsigned int i = 0; i < obj->n_stringset; i++)
+ ethtool_stringset_put(nlh, ETHTOOL_A_STRINGSETS_STRINGSET, &obj->stringset[i]);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+int ethtool_stringsets_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_stringsets *dst = yarg->data;
+ unsigned int n_stringset = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->stringset)
+ return ynl_error_parse(yarg, "attribute already present (stringsets.stringset)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_STRINGSETS_STRINGSET) {
+ n_stringset++;
+ }
+ }
+
+ if (n_stringset) {
+ dst->stringset = calloc(n_stringset, sizeof(*dst->stringset));
+ dst->n_stringset = n_stringset;
+ i = 0;
+ parg.rsp_policy = &ethtool_stringset_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == ETHTOOL_A_STRINGSETS_STRINGSET) {
+ parg.data = &dst->stringset[i];
+ if (ethtool_stringset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void ethtool_tunnel_udp_free(struct ethtool_tunnel_udp *obj)
+{
+ ethtool_tunnel_udp_table_free(&obj->table);
+}
+
+int ethtool_tunnel_udp_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct ethtool_tunnel_udp *dst = yarg->data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_TUNNEL_UDP_TABLE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.table = 1;
+
+ parg.rsp_policy = &ethtool_tunnel_udp_table_nest;
+ parg.data = &dst->table;
+ if (ethtool_tunnel_udp_table_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_STRSET_GET ============== */
+/* ETHTOOL_MSG_STRSET_GET - do */
+void ethtool_strset_get_req_free(struct ethtool_strset_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ ethtool_stringsets_free(&req->stringsets);
+ free(req);
+}
+
+void ethtool_strset_get_rsp_free(struct ethtool_strset_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_stringsets_free(&rsp->stringsets);
+ free(rsp);
+}
+
+int ethtool_strset_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_strset_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_STRSET_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_STRSET_STRINGSETS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.stringsets = 1;
+
+ parg.rsp_policy = &ethtool_stringsets_nest;
+ parg.data = &dst->stringsets;
+ if (ethtool_stringsets_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_strset_get_rsp *
+ethtool_strset_get(struct ynl_sock *ys, struct ethtool_strset_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_strset_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_STRSET_GET, 1);
+ ys->req_policy = &ethtool_strset_nest;
+ yrs.yarg.rsp_policy = &ethtool_strset_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_STRSET_HEADER, &req->header);
+ if (req->_present.stringsets)
+ ethtool_stringsets_put(nlh, ETHTOOL_A_STRSET_STRINGSETS, &req->stringsets);
+ if (req->_present.counts_only)
+ mnl_attr_put(nlh, ETHTOOL_A_STRSET_COUNTS_ONLY, 0, NULL);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_strset_get_rsp_parse;
+ yrs.rsp_cmd = ETHTOOL_MSG_STRSET_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_strset_get_rsp_free(rsp);
+ return NULL;
+}
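
/*
 * Illustrative sketch only, not part of the generated file: a round trip
 * through the "do" request above.  ynl_sock_create(), ynl_sock_destroy()
 * and ynl_ethtool_family are assumed to come from ynl.h and the matching
 * ethtool-user.h.  The request lives on the stack, so the heap-oriented
 * ethtool_strset_get_req_free() is deliberately not used.
 */
static void ethtool_strset_get_example(unsigned int ifindex)
{
	struct ethtool_strset_get_req req = {};
	struct ethtool_strset_get_rsp *rsp;
	struct ynl_error yerr;
	struct ynl_sock *ys;

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys)
		return;

	/* select the device through the nested header; _present gates each attr */
	req._present.header = 1;
	req.header._present.dev_index = 1;
	req.header.dev_index = ifindex;

	rsp = ethtool_strset_get(ys, &req);
	if (rsp) {
		/* rsp->stringsets.stringset[0..n_stringset-1] hold the parsed nests */
		ethtool_strset_get_rsp_free(rsp);
	}

	ynl_sock_destroy(ys);
}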
+
+/* ETHTOOL_MSG_STRSET_GET - dump */
+void ethtool_strset_get_list_free(struct ethtool_strset_get_list *rsp)
+{
+ struct ethtool_strset_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_stringsets_free(&rsp->obj.stringsets);
+ free(rsp);
+ }
+}
+
+struct ethtool_strset_get_list *
+ethtool_strset_get_dump(struct ynl_sock *ys,
+ struct ethtool_strset_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_strset_get_list);
+ yds.cb = ethtool_strset_get_rsp_parse;
+ yds.rsp_cmd = ETHTOOL_MSG_STRSET_GET;
+ yds.rsp_policy = &ethtool_strset_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_STRSET_GET, 1);
+ ys->req_policy = &ethtool_strset_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_STRSET_HEADER, &req->header);
+ if (req->_present.stringsets)
+ ethtool_stringsets_put(nlh, ETHTOOL_A_STRSET_STRINGSETS, &req->stringsets);
+ if (req->_present.counts_only)
+ mnl_attr_put(nlh, ETHTOOL_A_STRSET_COUNTS_ONLY, 0, NULL);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_strset_get_list_free(yds.first);
+ return NULL;
+}
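
/*
 * Illustrative sketch only, not part of the generated file: consuming the
 * YNL_LIST_END-terminated list returned by ethtool_strset_get_dump(),
 * using the same walk as ethtool_strset_get_list_free() above.  Assumes an
 * open socket from ynl_sock_create(&ynl_ethtool_family, ...) and <stdio.h>.
 */
static void ethtool_strset_dump_example(struct ynl_sock *ys)
{
	struct ethtool_strset_get_req_dump req = {};
	struct ethtool_strset_get_list *list, *pos;

	/* ethtool expects the request header nest even for dumps; send it empty */
	req._present.header = 1;

	list = ethtool_strset_get_dump(ys, &req);
	if (!list)
		return;

	for (pos = list; (void *)pos != YNL_LIST_END; pos = pos->next)
		printf("ifindex %u: %u string set(s)\n",
		       pos->obj.header.dev_index,
		       pos->obj.stringsets.n_stringset);

	ethtool_strset_get_list_free(list);
}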
+
+/* ============== ETHTOOL_MSG_LINKINFO_GET ============== */
+/* ETHTOOL_MSG_LINKINFO_GET - do */
+void ethtool_linkinfo_get_req_free(struct ethtool_linkinfo_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_linkinfo_get_rsp_free(struct ethtool_linkinfo_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp);
+}
+
+int ethtool_linkinfo_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_linkinfo_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_LINKINFO_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_LINKINFO_PORT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port = 1;
+ dst->port = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_LINKINFO_PHYADDR) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.phyaddr = 1;
+ dst->phyaddr = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_LINKINFO_TP_MDIX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tp_mdix = 1;
+ dst->tp_mdix = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_LINKINFO_TP_MDIX_CTRL) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tp_mdix_ctrl = 1;
+ dst->tp_mdix_ctrl = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_LINKINFO_TRANSCEIVER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.transceiver = 1;
+ dst->transceiver = mnl_attr_get_u8(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_linkinfo_get_rsp *
+ethtool_linkinfo_get(struct ynl_sock *ys, struct ethtool_linkinfo_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_linkinfo_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_LINKINFO_GET, 1);
+ ys->req_policy = &ethtool_linkinfo_nest;
+ yrs.yarg.rsp_policy = &ethtool_linkinfo_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_LINKINFO_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_linkinfo_get_rsp_parse;
+ yrs.rsp_cmd = ETHTOOL_MSG_LINKINFO_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_linkinfo_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_LINKINFO_GET - dump */
+void ethtool_linkinfo_get_list_free(struct ethtool_linkinfo_get_list *rsp)
+{
+ struct ethtool_linkinfo_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+ }
+}
+
+struct ethtool_linkinfo_get_list *
+ethtool_linkinfo_get_dump(struct ynl_sock *ys,
+ struct ethtool_linkinfo_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_linkinfo_get_list);
+ yds.cb = ethtool_linkinfo_get_rsp_parse;
+ yds.rsp_cmd = ETHTOOL_MSG_LINKINFO_GET;
+ yds.rsp_policy = &ethtool_linkinfo_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_LINKINFO_GET, 1);
+ ys->req_policy = &ethtool_linkinfo_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_LINKINFO_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_linkinfo_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_LINKINFO_GET - notify */
+void ethtool_linkinfo_get_ntf_free(struct ethtool_linkinfo_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_LINKINFO_SET ============== */
+/* ETHTOOL_MSG_LINKINFO_SET - do */
+void ethtool_linkinfo_set_req_free(struct ethtool_linkinfo_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+int ethtool_linkinfo_set(struct ynl_sock *ys,
+ struct ethtool_linkinfo_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_LINKINFO_SET, 1);
+ ys->req_policy = &ethtool_linkinfo_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_LINKINFO_HEADER, &req->header);
+ if (req->_present.port)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_PORT, req->port);
+ if (req->_present.phyaddr)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_PHYADDR, req->phyaddr);
+ if (req->_present.tp_mdix)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_TP_MDIX, req->tp_mdix);
+ if (req->_present.tp_mdix_ctrl)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_TP_MDIX_CTRL, req->tp_mdix_ctrl);
+ if (req->_present.transceiver)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_TRANSCEIVER, req->transceiver);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
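
/*
 * Illustrative sketch only, not part of the generated file: a SET request
 * built on the stack.  Only attributes with their _present bit set are
 * emitted, so untouched settings are left as-is.  ys is an open socket as
 * in the earlier sketches; printing ys->err.msg assumes the ynl_error
 * member of struct ynl_sock and <stdio.h>.
 */
static int ethtool_linkinfo_set_example(struct ynl_sock *ys, char *dev_name,
					__u8 tp_mdix_ctrl)
{
	struct ethtool_linkinfo_set_req req = {};

	req._present.header = 1;
	req.header._present.dev_name_len = strlen(dev_name);
	req.header.dev_name = dev_name;

	req._present.tp_mdix_ctrl = 1;
	req.tp_mdix_ctrl = tp_mdix_ctrl;

	if (ethtool_linkinfo_set(ys, &req)) {
		fprintf(stderr, "LINKINFO_SET failed: %s\n", ys->err.msg);
		return -1;
	}
	return 0;
}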
+
+/* ============== ETHTOOL_MSG_LINKMODES_GET ============== */
+/* ETHTOOL_MSG_LINKMODES_GET - do */
+void ethtool_linkmodes_get_req_free(struct ethtool_linkmodes_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_linkmodes_get_rsp_free(struct ethtool_linkmodes_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_bitset_free(&rsp->ours);
+ ethtool_bitset_free(&rsp->peer);
+ free(rsp);
+}
+
+int ethtool_linkmodes_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_linkmodes_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_LINKMODES_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_LINKMODES_AUTONEG) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.autoneg = 1;
+ dst->autoneg = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_LINKMODES_OURS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.ours = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->ours;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_LINKMODES_PEER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.peer = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->peer;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_LINKMODES_SPEED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.speed = 1;
+ dst->speed = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_LINKMODES_DUPLEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.duplex = 1;
+ dst->duplex = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.master_slave_cfg = 1;
+ dst->master_slave_cfg = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.master_slave_state = 1;
+ dst->master_slave_state = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_LINKMODES_LANES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.lanes = 1;
+ dst->lanes = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_LINKMODES_RATE_MATCHING) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rate_matching = 1;
+ dst->rate_matching = mnl_attr_get_u8(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_linkmodes_get_rsp *
+ethtool_linkmodes_get(struct ynl_sock *ys,
+ struct ethtool_linkmodes_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_linkmodes_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_LINKMODES_GET, 1);
+ ys->req_policy = &ethtool_linkmodes_nest;
+ yrs.yarg.rsp_policy = &ethtool_linkmodes_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_LINKMODES_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_linkmodes_get_rsp_parse;
+ yrs.rsp_cmd = ETHTOOL_MSG_LINKMODES_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_linkmodes_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_LINKMODES_GET - dump */
+void ethtool_linkmodes_get_list_free(struct ethtool_linkmodes_get_list *rsp)
+{
+ struct ethtool_linkmodes_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.ours);
+ ethtool_bitset_free(&rsp->obj.peer);
+ free(rsp);
+ }
+}
+
+struct ethtool_linkmodes_get_list *
+ethtool_linkmodes_get_dump(struct ynl_sock *ys,
+ struct ethtool_linkmodes_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_linkmodes_get_list);
+ yds.cb = ethtool_linkmodes_get_rsp_parse;
+ yds.rsp_cmd = ETHTOOL_MSG_LINKMODES_GET;
+ yds.rsp_policy = &ethtool_linkmodes_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_LINKMODES_GET, 1);
+ ys->req_policy = &ethtool_linkmodes_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_LINKMODES_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_linkmodes_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_LINKMODES_GET - notify */
+void ethtool_linkmodes_get_ntf_free(struct ethtool_linkmodes_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.ours);
+ ethtool_bitset_free(&rsp->obj.peer);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_LINKMODES_SET ============== */
+/* ETHTOOL_MSG_LINKMODES_SET - do */
+void ethtool_linkmodes_set_req_free(struct ethtool_linkmodes_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ ethtool_bitset_free(&req->ours);
+ ethtool_bitset_free(&req->peer);
+ free(req);
+}
+
+int ethtool_linkmodes_set(struct ynl_sock *ys,
+ struct ethtool_linkmodes_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_LINKMODES_SET, 1);
+ ys->req_policy = &ethtool_linkmodes_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_LINKMODES_HEADER, &req->header);
+ if (req->_present.autoneg)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_LINKMODES_AUTONEG, req->autoneg);
+ if (req->_present.ours)
+ ethtool_bitset_put(nlh, ETHTOOL_A_LINKMODES_OURS, &req->ours);
+ if (req->_present.peer)
+ ethtool_bitset_put(nlh, ETHTOOL_A_LINKMODES_PEER, &req->peer);
+ if (req->_present.speed)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_LINKMODES_SPEED, req->speed);
+ if (req->_present.duplex)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_LINKMODES_DUPLEX, req->duplex);
+ if (req->_present.master_slave_cfg)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG, req->master_slave_cfg);
+ if (req->_present.master_slave_state)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE, req->master_slave_state);
+ if (req->_present.lanes)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_LINKMODES_LANES, req->lanes);
+ if (req->_present.rate_matching)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_LINKMODES_RATE_MATCHING, req->rate_matching);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_LINKSTATE_GET ============== */
+/* ETHTOOL_MSG_LINKSTATE_GET - do */
+void ethtool_linkstate_get_req_free(struct ethtool_linkstate_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_linkstate_get_rsp_free(struct ethtool_linkstate_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp);
+}
+
+int ethtool_linkstate_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_linkstate_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_LINKSTATE_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_LINKSTATE_LINK) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.link = 1;
+ dst->link = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_LINKSTATE_SQI) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sqi = 1;
+ dst->sqi = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_LINKSTATE_SQI_MAX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sqi_max = 1;
+ dst->sqi_max = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_LINKSTATE_EXT_STATE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.ext_state = 1;
+ dst->ext_state = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_LINKSTATE_EXT_SUBSTATE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.ext_substate = 1;
+ dst->ext_substate = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.ext_down_cnt = 1;
+ dst->ext_down_cnt = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_linkstate_get_rsp *
+ethtool_linkstate_get(struct ynl_sock *ys,
+ struct ethtool_linkstate_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_linkstate_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_LINKSTATE_GET, 1);
+ ys->req_policy = &ethtool_linkstate_nest;
+ yrs.yarg.rsp_policy = &ethtool_linkstate_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_LINKSTATE_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_linkstate_get_rsp_parse;
+ yrs.rsp_cmd = ETHTOOL_MSG_LINKSTATE_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_linkstate_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_LINKSTATE_GET - dump */
+void ethtool_linkstate_get_list_free(struct ethtool_linkstate_get_list *rsp)
+{
+ struct ethtool_linkstate_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+ }
+}
+
+struct ethtool_linkstate_get_list *
+ethtool_linkstate_get_dump(struct ynl_sock *ys,
+ struct ethtool_linkstate_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_linkstate_get_list);
+ yds.cb = ethtool_linkstate_get_rsp_parse;
+ yds.rsp_cmd = ETHTOOL_MSG_LINKSTATE_GET;
+ yds.rsp_policy = &ethtool_linkstate_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_LINKSTATE_GET, 1);
+ ys->req_policy = &ethtool_linkstate_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_LINKSTATE_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_linkstate_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== ETHTOOL_MSG_DEBUG_GET ============== */
+/* ETHTOOL_MSG_DEBUG_GET - do */
+void ethtool_debug_get_req_free(struct ethtool_debug_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_debug_get_rsp_free(struct ethtool_debug_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_bitset_free(&rsp->msgmask);
+ free(rsp);
+}
+
+int ethtool_debug_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_debug_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_DEBUG_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_DEBUG_MSGMASK) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.msgmask = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->msgmask;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_debug_get_rsp *
+ethtool_debug_get(struct ynl_sock *ys, struct ethtool_debug_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_debug_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_DEBUG_GET, 1);
+ ys->req_policy = &ethtool_debug_nest;
+ yrs.yarg.rsp_policy = &ethtool_debug_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_DEBUG_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_debug_get_rsp_parse;
+ yrs.rsp_cmd = ETHTOOL_MSG_DEBUG_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_debug_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_DEBUG_GET - dump */
+void ethtool_debug_get_list_free(struct ethtool_debug_get_list *rsp)
+{
+ struct ethtool_debug_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.msgmask);
+ free(rsp);
+ }
+}
+
+struct ethtool_debug_get_list *
+ethtool_debug_get_dump(struct ynl_sock *ys,
+ struct ethtool_debug_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_debug_get_list);
+ yds.cb = ethtool_debug_get_rsp_parse;
+ yds.rsp_cmd = ETHTOOL_MSG_DEBUG_GET;
+ yds.rsp_policy = &ethtool_debug_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_DEBUG_GET, 1);
+ ys->req_policy = &ethtool_debug_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_DEBUG_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_debug_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_DEBUG_GET - notify */
+void ethtool_debug_get_ntf_free(struct ethtool_debug_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.msgmask);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_DEBUG_SET ============== */
+/* ETHTOOL_MSG_DEBUG_SET - do */
+void ethtool_debug_set_req_free(struct ethtool_debug_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ ethtool_bitset_free(&req->msgmask);
+ free(req);
+}
+
+int ethtool_debug_set(struct ynl_sock *ys, struct ethtool_debug_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_DEBUG_SET, 1);
+ ys->req_policy = &ethtool_debug_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_DEBUG_HEADER, &req->header);
+ if (req->_present.msgmask)
+ ethtool_bitset_put(nlh, ETHTOOL_A_DEBUG_MSGMASK, &req->msgmask);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_WOL_GET ============== */
+/* ETHTOOL_MSG_WOL_GET - do */
+void ethtool_wol_get_req_free(struct ethtool_wol_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_wol_get_rsp_free(struct ethtool_wol_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_bitset_free(&rsp->modes);
+ free(rsp->sopass);
+ free(rsp);
+}
+
+int ethtool_wol_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct ethtool_wol_get_rsp *dst;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_WOL_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_WOL_MODES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.modes = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->modes;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_WOL_SOPASS) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.sopass_len = len;
+ dst->sopass = malloc(len);
+ memcpy(dst->sopass, mnl_attr_get_payload(attr), len);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_wol_get_rsp *
+ethtool_wol_get(struct ynl_sock *ys, struct ethtool_wol_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_wol_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_WOL_GET, 1);
+ ys->req_policy = &ethtool_wol_nest;
+ yrs.yarg.rsp_policy = &ethtool_wol_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_WOL_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_wol_get_rsp_parse;
+ yrs.rsp_cmd = ETHTOOL_MSG_WOL_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_wol_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_WOL_GET - dump */
+void ethtool_wol_get_list_free(struct ethtool_wol_get_list *rsp)
+{
+ struct ethtool_wol_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.modes);
+ free(rsp->obj.sopass);
+ free(rsp);
+ }
+}
+
+struct ethtool_wol_get_list *
+ethtool_wol_get_dump(struct ynl_sock *ys, struct ethtool_wol_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_wol_get_list);
+ yds.cb = ethtool_wol_get_rsp_parse;
+ yds.rsp_cmd = ETHTOOL_MSG_WOL_GET;
+ yds.rsp_policy = &ethtool_wol_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_WOL_GET, 1);
+ ys->req_policy = &ethtool_wol_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_WOL_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_wol_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_WOL_GET - notify */
+void ethtool_wol_get_ntf_free(struct ethtool_wol_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.modes);
+ free(rsp->obj.sopass);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_WOL_SET ============== */
+/* ETHTOOL_MSG_WOL_SET - do */
+void ethtool_wol_set_req_free(struct ethtool_wol_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ ethtool_bitset_free(&req->modes);
+ free(req->sopass);
+ free(req);
+}
+
+int ethtool_wol_set(struct ynl_sock *ys, struct ethtool_wol_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_WOL_SET, 1);
+ ys->req_policy = &ethtool_wol_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_WOL_HEADER, &req->header);
+ if (req->_present.modes)
+ ethtool_bitset_put(nlh, ETHTOOL_A_WOL_MODES, &req->modes);
+ if (req->_present.sopass_len)
+ mnl_attr_put(nlh, ETHTOOL_A_WOL_SOPASS, req->_present.sopass_len, req->sopass);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
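
/*
 * Illustrative sketch only, not part of the generated file: setting the
 * Wake-on-LAN SecureOn password.  For binary attributes such as sopass the
 * _present.*_len field doubles as presence flag and byte count, matching
 * ethtool_wol_set() above.  Storage is borrowed from the caller, so
 * ethtool_wol_set_req_free() is not used here.
 */
static int ethtool_wol_sopass_example(struct ynl_sock *ys, char *dev_name,
				      unsigned char sopass[6])
{
	struct ethtool_wol_set_req req = {};

	req._present.header = 1;
	req.header._present.dev_name_len = strlen(dev_name);
	req.header.dev_name = dev_name;

	/* length is both the presence flag and the byte count */
	req._present.sopass_len = 6;
	req.sopass = sopass;

	return ethtool_wol_set(ys, &req);
}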
+
+/* ============== ETHTOOL_MSG_FEATURES_GET ============== */
+/* ETHTOOL_MSG_FEATURES_GET - do */
+void ethtool_features_get_req_free(struct ethtool_features_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_features_get_rsp_free(struct ethtool_features_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_bitset_free(&rsp->hw);
+ ethtool_bitset_free(&rsp->wanted);
+ ethtool_bitset_free(&rsp->active);
+ ethtool_bitset_free(&rsp->nochange);
+ free(rsp);
+}
+
+int ethtool_features_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_features_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_FEATURES_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_FEATURES_HW) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.hw = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->hw;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_FEATURES_WANTED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.wanted = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->wanted;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_FEATURES_ACTIVE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.active = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->active;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_FEATURES_NOCHANGE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.nochange = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->nochange;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_features_get_rsp *
+ethtool_features_get(struct ynl_sock *ys, struct ethtool_features_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_features_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_FEATURES_GET, 1);
+ ys->req_policy = &ethtool_features_nest;
+ yrs.yarg.rsp_policy = &ethtool_features_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_FEATURES_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_features_get_rsp_parse;
+ yrs.rsp_cmd = ETHTOOL_MSG_FEATURES_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_features_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_FEATURES_GET - dump */
+void ethtool_features_get_list_free(struct ethtool_features_get_list *rsp)
+{
+ struct ethtool_features_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.hw);
+ ethtool_bitset_free(&rsp->obj.wanted);
+ ethtool_bitset_free(&rsp->obj.active);
+ ethtool_bitset_free(&rsp->obj.nochange);
+ free(rsp);
+ }
+}
+
+struct ethtool_features_get_list *
+ethtool_features_get_dump(struct ynl_sock *ys,
+ struct ethtool_features_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_features_get_list);
+ yds.cb = ethtool_features_get_rsp_parse;
+ yds.rsp_cmd = ETHTOOL_MSG_FEATURES_GET;
+ yds.rsp_policy = &ethtool_features_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_FEATURES_GET, 1);
+ ys->req_policy = &ethtool_features_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_FEATURES_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_features_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_FEATURES_GET - notify */
+void ethtool_features_get_ntf_free(struct ethtool_features_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.hw);
+ ethtool_bitset_free(&rsp->obj.wanted);
+ ethtool_bitset_free(&rsp->obj.active);
+ ethtool_bitset_free(&rsp->obj.nochange);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_FEATURES_SET ============== */
+/* ETHTOOL_MSG_FEATURES_SET - do */
+void ethtool_features_set_req_free(struct ethtool_features_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ ethtool_bitset_free(&req->hw);
+ ethtool_bitset_free(&req->wanted);
+ ethtool_bitset_free(&req->active);
+ ethtool_bitset_free(&req->nochange);
+ free(req);
+}
+
+void ethtool_features_set_rsp_free(struct ethtool_features_set_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_bitset_free(&rsp->hw);
+ ethtool_bitset_free(&rsp->wanted);
+ ethtool_bitset_free(&rsp->active);
+ ethtool_bitset_free(&rsp->nochange);
+ free(rsp);
+}
+
+int ethtool_features_set_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_features_set_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
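+	/* iterate the netlink attributes, skipping the genetlink header
+	 * that sits at the start of the message payload
+	 */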
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_FEATURES_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_FEATURES_HW) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.hw = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->hw;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_FEATURES_WANTED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.wanted = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->wanted;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_FEATURES_ACTIVE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.active = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->active;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_FEATURES_NOCHANGE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.nochange = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->nochange;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_features_set_rsp *
+ethtool_features_set(struct ynl_sock *ys, struct ethtool_features_set_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_features_set_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_FEATURES_SET, 1);
+ ys->req_policy = &ethtool_features_nest;
+ yrs.yarg.rsp_policy = &ethtool_features_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_FEATURES_HEADER, &req->header);
+ if (req->_present.hw)
+ ethtool_bitset_put(nlh, ETHTOOL_A_FEATURES_HW, &req->hw);
+ if (req->_present.wanted)
+ ethtool_bitset_put(nlh, ETHTOOL_A_FEATURES_WANTED, &req->wanted);
+ if (req->_present.active)
+ ethtool_bitset_put(nlh, ETHTOOL_A_FEATURES_ACTIVE, &req->active);
+ if (req->_present.nochange)
+ ethtool_bitset_put(nlh, ETHTOOL_A_FEATURES_NOCHANGE, &req->nochange);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_features_set_rsp_parse;
+ yrs.rsp_cmd = ETHTOOL_MSG_FEATURES_SET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_features_set_rsp_free(rsp);
+ return NULL;
+}
+
+/* ============== ETHTOOL_MSG_PRIVFLAGS_GET ============== */
+/* ETHTOOL_MSG_PRIVFLAGS_GET - do */
+void ethtool_privflags_get_req_free(struct ethtool_privflags_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_privflags_get_rsp_free(struct ethtool_privflags_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_bitset_free(&rsp->flags);
+ free(rsp);
+}
+
+int ethtool_privflags_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_privflags_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_PRIVFLAGS_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_PRIVFLAGS_FLAGS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.flags = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->flags;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_privflags_get_rsp *
+ethtool_privflags_get(struct ynl_sock *ys,
+ struct ethtool_privflags_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_privflags_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PRIVFLAGS_GET, 1);
+ ys->req_policy = &ethtool_privflags_nest;
+ yrs.yarg.rsp_policy = &ethtool_privflags_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PRIVFLAGS_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_privflags_get_rsp_parse;
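+	/* 14 == ETHTOOL_MSG_PRIVFLAGS_GET_REPLY; from PRIVFLAGS onward the
+	 * kernel's reply command IDs diverge from the request IDs, so the
+	 * generator emits the numeric reply value instead of a name
+	 */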
+ yrs.rsp_cmd = 14;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_privflags_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_PRIVFLAGS_GET - dump */
+void ethtool_privflags_get_list_free(struct ethtool_privflags_get_list *rsp)
+{
+ struct ethtool_privflags_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.flags);
+ free(rsp);
+ }
+}
+
+struct ethtool_privflags_get_list *
+ethtool_privflags_get_dump(struct ynl_sock *ys,
+ struct ethtool_privflags_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_privflags_get_list);
+ yds.cb = ethtool_privflags_get_rsp_parse;
+ yds.rsp_cmd = 14;
+ yds.rsp_policy = &ethtool_privflags_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PRIVFLAGS_GET, 1);
+ ys->req_policy = &ethtool_privflags_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PRIVFLAGS_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_privflags_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_PRIVFLAGS_GET - notify */
+void ethtool_privflags_get_ntf_free(struct ethtool_privflags_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.flags);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_PRIVFLAGS_SET ============== */
+/* ETHTOOL_MSG_PRIVFLAGS_SET - do */
+void ethtool_privflags_set_req_free(struct ethtool_privflags_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ ethtool_bitset_free(&req->flags);
+ free(req);
+}
+
+int ethtool_privflags_set(struct ynl_sock *ys,
+ struct ethtool_privflags_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PRIVFLAGS_SET, 1);
+ ys->req_policy = &ethtool_privflags_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PRIVFLAGS_HEADER, &req->header);
+ if (req->_present.flags)
+ ethtool_bitset_put(nlh, ETHTOOL_A_PRIVFLAGS_FLAGS, &req->flags);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_RINGS_GET ============== */
+/* ETHTOOL_MSG_RINGS_GET - do */
+void ethtool_rings_get_req_free(struct ethtool_rings_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_rings_get_rsp_free(struct ethtool_rings_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp);
+}
+
+int ethtool_rings_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_rings_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_RINGS_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_RINGS_RX_MAX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_max = 1;
+ dst->rx_max = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RINGS_RX_MINI_MAX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_mini_max = 1;
+ dst->rx_mini_max = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RINGS_RX_JUMBO_MAX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_jumbo_max = 1;
+ dst->rx_jumbo_max = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RINGS_TX_MAX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_max = 1;
+ dst->tx_max = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RINGS_RX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx = 1;
+ dst->rx = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RINGS_RX_MINI) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_mini = 1;
+ dst->rx_mini = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RINGS_RX_JUMBO) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_jumbo = 1;
+ dst->rx_jumbo = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RINGS_TX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx = 1;
+ dst->tx = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RINGS_RX_BUF_LEN) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_buf_len = 1;
+ dst->rx_buf_len = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RINGS_TCP_DATA_SPLIT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tcp_data_split = 1;
+ dst->tcp_data_split = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_RINGS_CQE_SIZE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.cqe_size = 1;
+ dst->cqe_size = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RINGS_TX_PUSH) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_push = 1;
+ dst->tx_push = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_RINGS_RX_PUSH) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_push = 1;
+ dst->rx_push = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_push_buf_len = 1;
+ dst->tx_push_buf_len = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_push_buf_len_max = 1;
+ dst->tx_push_buf_len_max = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_rings_get_rsp *
+ethtool_rings_get(struct ynl_sock *ys, struct ethtool_rings_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_rings_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_RINGS_GET, 1);
+ ys->req_policy = &ethtool_rings_nest;
+ yrs.yarg.rsp_policy = &ethtool_rings_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_RINGS_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_rings_get_rsp_parse;
+ yrs.rsp_cmd = 16;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_rings_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_RINGS_GET - dump */
+void ethtool_rings_get_list_free(struct ethtool_rings_get_list *rsp)
+{
+ struct ethtool_rings_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+ }
+}
+
+struct ethtool_rings_get_list *
+ethtool_rings_get_dump(struct ynl_sock *ys,
+ struct ethtool_rings_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_rings_get_list);
+ yds.cb = ethtool_rings_get_rsp_parse;
+ yds.rsp_cmd = 16;
+ yds.rsp_policy = &ethtool_rings_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_RINGS_GET, 1);
+ ys->req_policy = &ethtool_rings_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_RINGS_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_rings_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_RINGS_GET - notify */
+void ethtool_rings_get_ntf_free(struct ethtool_rings_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_RINGS_SET ============== */
+/* ETHTOOL_MSG_RINGS_SET - do */
+void ethtool_rings_set_req_free(struct ethtool_rings_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+int ethtool_rings_set(struct ynl_sock *ys, struct ethtool_rings_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_RINGS_SET, 1);
+ ys->req_policy = &ethtool_rings_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_RINGS_HEADER, &req->header);
+ if (req->_present.rx_max)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_MAX, req->rx_max);
+ if (req->_present.rx_mini_max)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_MINI_MAX, req->rx_mini_max);
+ if (req->_present.rx_jumbo_max)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_JUMBO_MAX, req->rx_jumbo_max);
+ if (req->_present.tx_max)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_TX_MAX, req->tx_max);
+ if (req->_present.rx)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX, req->rx);
+ if (req->_present.rx_mini)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_MINI, req->rx_mini);
+ if (req->_present.rx_jumbo)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_JUMBO, req->rx_jumbo);
+ if (req->_present.tx)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_TX, req->tx);
+ if (req->_present.rx_buf_len)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_BUF_LEN, req->rx_buf_len);
+ if (req->_present.tcp_data_split)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_RINGS_TCP_DATA_SPLIT, req->tcp_data_split);
+ if (req->_present.cqe_size)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_CQE_SIZE, req->cqe_size);
+ if (req->_present.tx_push)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_RINGS_TX_PUSH, req->tx_push);
+ if (req->_present.rx_push)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_RINGS_RX_PUSH, req->rx_push);
+ if (req->_present.tx_push_buf_len)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN, req->tx_push_buf_len);
+ if (req->_present.tx_push_buf_len_max)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, req->tx_push_buf_len_max);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
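The generated "set" helpers serialize only the request fields whose _present bit is set, as the if-ladder in ethtool_rings_set() above shows. A hedged sketch resizing the RX/TX rings of a hypothetical "eth0"; the _req_alloc() and header setter are assumed from ethtool-user.h, and ys->err from the YNL library, as in the earlier sketches:

	static int resize_rings(struct ynl_sock *ys)
	{
		struct ethtool_rings_set_req *req;
		int ret;

		req = ethtool_rings_set_req_alloc();			 /* assumed generated helper */
		ethtool_rings_set_req_set_header_dev_name(req, "eth0"); /* assumed; "eth0" is illustrative */

		/* only attributes with the _present bit set are put in the message */
		req->_present.rx = 1;
		req->rx = 1024;
		req->_present.tx = 1;
		req->tx = 1024;

		ret = ethtool_rings_set(ys, req);
		if (ret)
			fprintf(stderr, "RINGS_SET: %s\n", ys->err.msg);
		ethtool_rings_set_req_free(req);
		return ret;
	}
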
+/* ============== ETHTOOL_MSG_CHANNELS_GET ============== */
+/* ETHTOOL_MSG_CHANNELS_GET - do */
+void ethtool_channels_get_req_free(struct ethtool_channels_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_channels_get_rsp_free(struct ethtool_channels_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp);
+}
+
+int ethtool_channels_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_channels_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_CHANNELS_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_CHANNELS_RX_MAX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_max = 1;
+ dst->rx_max = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_CHANNELS_TX_MAX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_max = 1;
+ dst->tx_max = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_CHANNELS_OTHER_MAX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.other_max = 1;
+ dst->other_max = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_CHANNELS_COMBINED_MAX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.combined_max = 1;
+ dst->combined_max = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_CHANNELS_RX_COUNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_count = 1;
+ dst->rx_count = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_CHANNELS_TX_COUNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_count = 1;
+ dst->tx_count = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_CHANNELS_OTHER_COUNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.other_count = 1;
+ dst->other_count = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_CHANNELS_COMBINED_COUNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.combined_count = 1;
+ dst->combined_count = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_channels_get_rsp *
+ethtool_channels_get(struct ynl_sock *ys, struct ethtool_channels_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_channels_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_CHANNELS_GET, 1);
+ ys->req_policy = &ethtool_channels_nest;
+ yrs.yarg.rsp_policy = &ethtool_channels_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_CHANNELS_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_channels_get_rsp_parse;
+ yrs.rsp_cmd = 18;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_channels_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_CHANNELS_GET - dump */
+void ethtool_channels_get_list_free(struct ethtool_channels_get_list *rsp)
+{
+ struct ethtool_channels_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+ }
+}
+
+struct ethtool_channels_get_list *
+ethtool_channels_get_dump(struct ynl_sock *ys,
+ struct ethtool_channels_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_channels_get_list);
+ yds.cb = ethtool_channels_get_rsp_parse;
+ yds.rsp_cmd = 18;
+ yds.rsp_policy = &ethtool_channels_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_CHANNELS_GET, 1);
+ ys->req_policy = &ethtool_channels_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_CHANNELS_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_channels_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_CHANNELS_GET - notify */
+void ethtool_channels_get_ntf_free(struct ethtool_channels_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_CHANNELS_SET ============== */
+/* ETHTOOL_MSG_CHANNELS_SET - do */
+void ethtool_channels_set_req_free(struct ethtool_channels_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+int ethtool_channels_set(struct ynl_sock *ys,
+ struct ethtool_channels_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_CHANNELS_SET, 1);
+ ys->req_policy = &ethtool_channels_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_CHANNELS_HEADER, &req->header);
+ if (req->_present.rx_max)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_RX_MAX, req->rx_max);
+ if (req->_present.tx_max)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_TX_MAX, req->tx_max);
+ if (req->_present.other_max)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_OTHER_MAX, req->other_max);
+ if (req->_present.combined_max)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_COMBINED_MAX, req->combined_max);
+ if (req->_present.rx_count)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_RX_COUNT, req->rx_count);
+ if (req->_present.tx_count)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_TX_COUNT, req->tx_count);
+ if (req->_present.other_count)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_OTHER_COUNT, req->other_count);
+ if (req->_present.combined_count)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_COMBINED_COUNT, req->combined_count);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_COALESCE_GET ============== */
+/* ETHTOOL_MSG_COALESCE_GET - do */
+void ethtool_coalesce_get_req_free(struct ethtool_coalesce_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_coalesce_get_rsp_free(struct ethtool_coalesce_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp);
+}
+
+int ethtool_coalesce_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_coalesce_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_COALESCE_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_COALESCE_RX_USECS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_usecs = 1;
+ dst->rx_usecs = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_RX_MAX_FRAMES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_max_frames = 1;
+ dst->rx_max_frames = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_RX_USECS_IRQ) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_usecs_irq = 1;
+ dst->rx_usecs_irq = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_max_frames_irq = 1;
+ dst->rx_max_frames_irq = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_TX_USECS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_usecs = 1;
+ dst->tx_usecs = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_TX_MAX_FRAMES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_max_frames = 1;
+ dst->tx_max_frames = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_TX_USECS_IRQ) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_usecs_irq = 1;
+ dst->tx_usecs_irq = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_max_frames_irq = 1;
+ dst->tx_max_frames_irq = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_STATS_BLOCK_USECS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.stats_block_usecs = 1;
+ dst->stats_block_usecs = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.use_adaptive_rx = 1;
+ dst->use_adaptive_rx = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.use_adaptive_tx = 1;
+ dst->use_adaptive_tx = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_COALESCE_PKT_RATE_LOW) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.pkt_rate_low = 1;
+ dst->pkt_rate_low = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_RX_USECS_LOW) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_usecs_low = 1;
+ dst->rx_usecs_low = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_max_frames_low = 1;
+ dst->rx_max_frames_low = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_TX_USECS_LOW) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_usecs_low = 1;
+ dst->tx_usecs_low = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_max_frames_low = 1;
+ dst->tx_max_frames_low = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_PKT_RATE_HIGH) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.pkt_rate_high = 1;
+ dst->pkt_rate_high = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_RX_USECS_HIGH) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_usecs_high = 1;
+ dst->rx_usecs_high = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_max_frames_high = 1;
+ dst->rx_max_frames_high = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_TX_USECS_HIGH) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_usecs_high = 1;
+ dst->tx_usecs_high = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_max_frames_high = 1;
+ dst->tx_max_frames_high = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rate_sample_interval = 1;
+ dst->rate_sample_interval = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_USE_CQE_MODE_TX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.use_cqe_mode_tx = 1;
+ dst->use_cqe_mode_tx = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_COALESCE_USE_CQE_MODE_RX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.use_cqe_mode_rx = 1;
+ dst->use_cqe_mode_rx = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_aggr_max_bytes = 1;
+ dst->tx_aggr_max_bytes = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_aggr_max_frames = 1;
+ dst->tx_aggr_max_frames = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_aggr_time_usecs = 1;
+ dst->tx_aggr_time_usecs = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_coalesce_get_rsp *
+ethtool_coalesce_get(struct ynl_sock *ys, struct ethtool_coalesce_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_coalesce_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_COALESCE_GET, 1);
+ ys->req_policy = &ethtool_coalesce_nest;
+ yrs.yarg.rsp_policy = &ethtool_coalesce_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_COALESCE_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_coalesce_get_rsp_parse;
+ yrs.rsp_cmd = 20;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_coalesce_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_COALESCE_GET - dump */
+void ethtool_coalesce_get_list_free(struct ethtool_coalesce_get_list *rsp)
+{
+ struct ethtool_coalesce_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+ }
+}
+
+struct ethtool_coalesce_get_list *
+ethtool_coalesce_get_dump(struct ynl_sock *ys,
+ struct ethtool_coalesce_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_coalesce_get_list);
+ yds.cb = ethtool_coalesce_get_rsp_parse;
+ yds.rsp_cmd = 20;
+ yds.rsp_policy = &ethtool_coalesce_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_COALESCE_GET, 1);
+ ys->req_policy = &ethtool_coalesce_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_COALESCE_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_coalesce_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_COALESCE_GET - notify */
+void ethtool_coalesce_get_ntf_free(struct ethtool_coalesce_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_COALESCE_SET ============== */
+/* ETHTOOL_MSG_COALESCE_SET - do */
+void ethtool_coalesce_set_req_free(struct ethtool_coalesce_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+int ethtool_coalesce_set(struct ynl_sock *ys,
+ struct ethtool_coalesce_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_COALESCE_SET, 1);
+ ys->req_policy = &ethtool_coalesce_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_COALESCE_HEADER, &req->header);
+ if (req->_present.rx_usecs)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_USECS, req->rx_usecs);
+ if (req->_present.rx_max_frames)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_MAX_FRAMES, req->rx_max_frames);
+ if (req->_present.rx_usecs_irq)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_USECS_IRQ, req->rx_usecs_irq);
+ if (req->_present.rx_max_frames_irq)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ, req->rx_max_frames_irq);
+ if (req->_present.tx_usecs)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_USECS, req->tx_usecs);
+ if (req->_present.tx_max_frames)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_MAX_FRAMES, req->tx_max_frames);
+ if (req->_present.tx_usecs_irq)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_USECS_IRQ, req->tx_usecs_irq);
+ if (req->_present.tx_max_frames_irq)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ, req->tx_max_frames_irq);
+ if (req->_present.stats_block_usecs)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_STATS_BLOCK_USECS, req->stats_block_usecs);
+ if (req->_present.use_adaptive_rx)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX, req->use_adaptive_rx);
+ if (req->_present.use_adaptive_tx)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX, req->use_adaptive_tx);
+ if (req->_present.pkt_rate_low)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_PKT_RATE_LOW, req->pkt_rate_low);
+ if (req->_present.rx_usecs_low)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_USECS_LOW, req->rx_usecs_low);
+ if (req->_present.rx_max_frames_low)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW, req->rx_max_frames_low);
+ if (req->_present.tx_usecs_low)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_USECS_LOW, req->tx_usecs_low);
+ if (req->_present.tx_max_frames_low)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW, req->tx_max_frames_low);
+ if (req->_present.pkt_rate_high)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_PKT_RATE_HIGH, req->pkt_rate_high);
+ if (req->_present.rx_usecs_high)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_USECS_HIGH, req->rx_usecs_high);
+ if (req->_present.rx_max_frames_high)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH, req->rx_max_frames_high);
+ if (req->_present.tx_usecs_high)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_USECS_HIGH, req->tx_usecs_high);
+ if (req->_present.tx_max_frames_high)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH, req->tx_max_frames_high);
+ if (req->_present.rate_sample_interval)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, req->rate_sample_interval);
+ if (req->_present.use_cqe_mode_tx)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_COALESCE_USE_CQE_MODE_TX, req->use_cqe_mode_tx);
+ if (req->_present.use_cqe_mode_rx)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_COALESCE_USE_CQE_MODE_RX, req->use_cqe_mode_rx);
+ if (req->_present.tx_aggr_max_bytes)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES, req->tx_aggr_max_bytes);
+ if (req->_present.tx_aggr_max_frames)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES, req->tx_aggr_max_frames);
+ if (req->_present.tx_aggr_time_usecs)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, req->tx_aggr_time_usecs);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_PAUSE_GET ============== */
+/* ETHTOOL_MSG_PAUSE_GET - do */
+void ethtool_pause_get_req_free(struct ethtool_pause_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_pause_get_rsp_free(struct ethtool_pause_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_pause_stat_free(&rsp->stats);
+ free(rsp);
+}
+
+int ethtool_pause_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_pause_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_PAUSE_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_PAUSE_AUTONEG) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.autoneg = 1;
+ dst->autoneg = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_PAUSE_RX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx = 1;
+ dst->rx = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_PAUSE_TX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx = 1;
+ dst->tx = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_PAUSE_STATS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.stats = 1;
+
+ parg.rsp_policy = &ethtool_pause_stat_nest;
+ parg.data = &dst->stats;
+ if (ethtool_pause_stat_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_PAUSE_STATS_SRC) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.stats_src = 1;
+ dst->stats_src = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_pause_get_rsp *
+ethtool_pause_get(struct ynl_sock *ys, struct ethtool_pause_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_pause_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PAUSE_GET, 1);
+ ys->req_policy = &ethtool_pause_nest;
+ yrs.yarg.rsp_policy = &ethtool_pause_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PAUSE_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_pause_get_rsp_parse;
+ yrs.rsp_cmd = 22;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_pause_get_rsp_free(rsp);
+ return NULL;
+}
+
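Reading a "do" response means checking the _present bits the parser above fills in before touching the corresponding fields. A short sketch, with req prepared as in the earlier sketches; the note that the stats nest is only returned when the request header carries ETHTOOL_FLAG_STATS comes from the ethtool netlink uAPI, not from this hunk:

	static int show_pause(struct ynl_sock *ys, struct ethtool_pause_get_req *req)
	{
		struct ethtool_pause_get_rsp *rsp;

		rsp = ethtool_pause_get(ys, req);
		if (!rsp)
			return -1;

		if (rsp->_present.autoneg)
			printf("pause autoneg: %u\n", rsp->autoneg);
		if (rsp->_present.rx && rsp->_present.tx)
			printf("pause rx/tx: %u/%u\n", rsp->rx, rsp->tx);
		if (rsp->_present.stats)
			printf("pause stats nest present\n");

		ethtool_pause_get_rsp_free(rsp);
		return 0;
	}
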
+/* ETHTOOL_MSG_PAUSE_GET - dump */
+void ethtool_pause_get_list_free(struct ethtool_pause_get_list *rsp)
+{
+ struct ethtool_pause_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_pause_stat_free(&rsp->obj.stats);
+ free(rsp);
+ }
+}
+
+struct ethtool_pause_get_list *
+ethtool_pause_get_dump(struct ynl_sock *ys,
+ struct ethtool_pause_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_pause_get_list);
+ yds.cb = ethtool_pause_get_rsp_parse;
+ yds.rsp_cmd = 22;
+ yds.rsp_policy = &ethtool_pause_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PAUSE_GET, 1);
+ ys->req_policy = &ethtool_pause_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PAUSE_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_pause_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_PAUSE_GET - notify */
+void ethtool_pause_get_ntf_free(struct ethtool_pause_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_pause_stat_free(&rsp->obj.stats);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_PAUSE_SET ============== */
+/* ETHTOOL_MSG_PAUSE_SET - do */
+void ethtool_pause_set_req_free(struct ethtool_pause_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ ethtool_pause_stat_free(&req->stats);
+ free(req);
+}
+
+int ethtool_pause_set(struct ynl_sock *ys, struct ethtool_pause_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PAUSE_SET, 1);
+ ys->req_policy = &ethtool_pause_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PAUSE_HEADER, &req->header);
+ if (req->_present.autoneg)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_PAUSE_AUTONEG, req->autoneg);
+ if (req->_present.rx)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_PAUSE_RX, req->rx);
+ if (req->_present.tx)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_PAUSE_TX, req->tx);
+ if (req->_present.stats)
+ ethtool_pause_stat_put(nlh, ETHTOOL_A_PAUSE_STATS, &req->stats);
+ if (req->_present.stats_src)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_PAUSE_STATS_SRC, req->stats_src);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_EEE_GET ============== */
+/* ETHTOOL_MSG_EEE_GET - do */
+void ethtool_eee_get_req_free(struct ethtool_eee_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_eee_get_rsp_free(struct ethtool_eee_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_bitset_free(&rsp->modes_ours);
+ ethtool_bitset_free(&rsp->modes_peer);
+ free(rsp);
+}
+
+int ethtool_eee_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct ethtool_eee_get_rsp *dst;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_EEE_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_EEE_MODES_OURS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.modes_ours = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->modes_ours;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_EEE_MODES_PEER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.modes_peer = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->modes_peer;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_EEE_ACTIVE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.active = 1;
+ dst->active = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_EEE_ENABLED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.enabled = 1;
+ dst->enabled = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_EEE_TX_LPI_ENABLED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_lpi_enabled = 1;
+ dst->tx_lpi_enabled = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_EEE_TX_LPI_TIMER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_lpi_timer = 1;
+ dst->tx_lpi_timer = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_eee_get_rsp *
+ethtool_eee_get(struct ynl_sock *ys, struct ethtool_eee_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_eee_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_EEE_GET, 1);
+ ys->req_policy = &ethtool_eee_nest;
+ yrs.yarg.rsp_policy = &ethtool_eee_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_EEE_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_eee_get_rsp_parse;
+ yrs.rsp_cmd = 24;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_eee_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_EEE_GET - dump */
+void ethtool_eee_get_list_free(struct ethtool_eee_get_list *rsp)
+{
+ struct ethtool_eee_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.modes_ours);
+ ethtool_bitset_free(&rsp->obj.modes_peer);
+ free(rsp);
+ }
+}
+
+struct ethtool_eee_get_list *
+ethtool_eee_get_dump(struct ynl_sock *ys, struct ethtool_eee_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_eee_get_list);
+ yds.cb = ethtool_eee_get_rsp_parse;
+ yds.rsp_cmd = 24;
+ yds.rsp_policy = &ethtool_eee_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_EEE_GET, 1);
+ ys->req_policy = &ethtool_eee_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_EEE_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_eee_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_EEE_GET - notify */
+void ethtool_eee_get_ntf_free(struct ethtool_eee_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.modes_ours);
+ ethtool_bitset_free(&rsp->obj.modes_peer);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_EEE_SET ============== */
+/* ETHTOOL_MSG_EEE_SET - do */
+void ethtool_eee_set_req_free(struct ethtool_eee_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ ethtool_bitset_free(&req->modes_ours);
+ ethtool_bitset_free(&req->modes_peer);
+ free(req);
+}
+
+int ethtool_eee_set(struct ynl_sock *ys, struct ethtool_eee_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_EEE_SET, 1);
+ ys->req_policy = &ethtool_eee_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_EEE_HEADER, &req->header);
+ if (req->_present.modes_ours)
+ ethtool_bitset_put(nlh, ETHTOOL_A_EEE_MODES_OURS, &req->modes_ours);
+ if (req->_present.modes_peer)
+ ethtool_bitset_put(nlh, ETHTOOL_A_EEE_MODES_PEER, &req->modes_peer);
+ if (req->_present.active)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_EEE_ACTIVE, req->active);
+ if (req->_present.enabled)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_EEE_ENABLED, req->enabled);
+ if (req->_present.tx_lpi_enabled)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_EEE_TX_LPI_ENABLED, req->tx_lpi_enabled);
+ if (req->_present.tx_lpi_timer)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_EEE_TX_LPI_TIMER, req->tx_lpi_timer);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_TSINFO_GET ============== */
+/* ETHTOOL_MSG_TSINFO_GET - do */
+void ethtool_tsinfo_get_req_free(struct ethtool_tsinfo_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_tsinfo_get_rsp_free(struct ethtool_tsinfo_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_bitset_free(&rsp->timestamping);
+ ethtool_bitset_free(&rsp->tx_types);
+ ethtool_bitset_free(&rsp->rx_filters);
+ free(rsp);
+}
+
+int ethtool_tsinfo_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_tsinfo_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_TSINFO_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_TSINFO_TIMESTAMPING) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.timestamping = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->timestamping;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_TSINFO_TX_TYPES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_types = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->tx_types;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_TSINFO_RX_FILTERS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_filters = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->rx_filters;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_TSINFO_PHC_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.phc_index = 1;
+ dst->phc_index = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_tsinfo_get_rsp *
+ethtool_tsinfo_get(struct ynl_sock *ys, struct ethtool_tsinfo_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_tsinfo_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_TSINFO_GET, 1);
+ ys->req_policy = &ethtool_tsinfo_nest;
+ yrs.yarg.rsp_policy = &ethtool_tsinfo_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_TSINFO_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_tsinfo_get_rsp_parse;
+ yrs.rsp_cmd = 26;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_tsinfo_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_TSINFO_GET - dump */
+void ethtool_tsinfo_get_list_free(struct ethtool_tsinfo_get_list *rsp)
+{
+ struct ethtool_tsinfo_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.timestamping);
+ ethtool_bitset_free(&rsp->obj.tx_types);
+ ethtool_bitset_free(&rsp->obj.rx_filters);
+ free(rsp);
+ }
+}
+
+struct ethtool_tsinfo_get_list *
+ethtool_tsinfo_get_dump(struct ynl_sock *ys,
+ struct ethtool_tsinfo_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_tsinfo_get_list);
+ yds.cb = ethtool_tsinfo_get_rsp_parse;
+ yds.rsp_cmd = 26;
+ yds.rsp_policy = &ethtool_tsinfo_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_TSINFO_GET, 1);
+ ys->req_policy = &ethtool_tsinfo_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_TSINFO_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_tsinfo_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== ETHTOOL_MSG_CABLE_TEST_ACT ============== */
+/* ETHTOOL_MSG_CABLE_TEST_ACT - do */
+void ethtool_cable_test_act_req_free(struct ethtool_cable_test_act_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+int ethtool_cable_test_act(struct ynl_sock *ys,
+ struct ethtool_cable_test_act_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_CABLE_TEST_ACT, 1);
+ ys->req_policy = &ethtool_cable_test_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_CABLE_TEST_HEADER, &req->header);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_CABLE_TEST_TDR_ACT ============== */
+/* ETHTOOL_MSG_CABLE_TEST_TDR_ACT - do */
+void
+ethtool_cable_test_tdr_act_req_free(struct ethtool_cable_test_tdr_act_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+int ethtool_cable_test_tdr_act(struct ynl_sock *ys,
+ struct ethtool_cable_test_tdr_act_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_CABLE_TEST_TDR_ACT, 1);
+ ys->req_policy = &ethtool_cable_test_tdr_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_CABLE_TEST_TDR_HEADER, &req->header);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_TUNNEL_INFO_GET ============== */
+/* ETHTOOL_MSG_TUNNEL_INFO_GET - do */
+void ethtool_tunnel_info_get_req_free(struct ethtool_tunnel_info_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_tunnel_info_get_rsp_free(struct ethtool_tunnel_info_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_tunnel_udp_free(&rsp->udp_ports);
+ free(rsp);
+}
+
+int ethtool_tunnel_info_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_tunnel_info_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_TUNNEL_INFO_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_TUNNEL_INFO_UDP_PORTS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.udp_ports = 1;
+
+ parg.rsp_policy = &ethtool_tunnel_udp_nest;
+ parg.data = &dst->udp_ports;
+ if (ethtool_tunnel_udp_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_tunnel_info_get_rsp *
+ethtool_tunnel_info_get(struct ynl_sock *ys,
+ struct ethtool_tunnel_info_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_tunnel_info_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_TUNNEL_INFO_GET, 1);
+ ys->req_policy = &ethtool_tunnel_info_nest;
+ yrs.yarg.rsp_policy = &ethtool_tunnel_info_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_TUNNEL_INFO_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_tunnel_info_get_rsp_parse;
+ yrs.rsp_cmd = 29;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_tunnel_info_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_TUNNEL_INFO_GET - dump */
+void
+ethtool_tunnel_info_get_list_free(struct ethtool_tunnel_info_get_list *rsp)
+{
+ struct ethtool_tunnel_info_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_tunnel_udp_free(&rsp->obj.udp_ports);
+ free(rsp);
+ }
+}
+
+struct ethtool_tunnel_info_get_list *
+ethtool_tunnel_info_get_dump(struct ynl_sock *ys,
+ struct ethtool_tunnel_info_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_tunnel_info_get_list);
+ yds.cb = ethtool_tunnel_info_get_rsp_parse;
+ yds.rsp_cmd = 29;
+ yds.rsp_policy = &ethtool_tunnel_info_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_TUNNEL_INFO_GET, 1);
+ ys->req_policy = &ethtool_tunnel_info_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_TUNNEL_INFO_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_tunnel_info_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== ETHTOOL_MSG_FEC_GET ============== */
+/* ETHTOOL_MSG_FEC_GET - do */
+void ethtool_fec_get_req_free(struct ethtool_fec_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_fec_get_rsp_free(struct ethtool_fec_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_bitset_free(&rsp->modes);
+ ethtool_fec_stat_free(&rsp->stats);
+ free(rsp);
+}
+
+int ethtool_fec_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct ethtool_fec_get_rsp *dst;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_FEC_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_FEC_MODES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.modes = 1;
+
+ parg.rsp_policy = &ethtool_bitset_nest;
+ parg.data = &dst->modes;
+ if (ethtool_bitset_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_FEC_AUTO) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.auto_ = 1;
+ dst->auto_ = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_FEC_ACTIVE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.active = 1;
+ dst->active = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_FEC_STATS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.stats = 1;
+
+ parg.rsp_policy = &ethtool_fec_stat_nest;
+ parg.data = &dst->stats;
+ if (ethtool_fec_stat_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_fec_get_rsp *
+ethtool_fec_get(struct ynl_sock *ys, struct ethtool_fec_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_fec_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_FEC_GET, 1);
+ ys->req_policy = &ethtool_fec_nest;
+ yrs.yarg.rsp_policy = &ethtool_fec_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_FEC_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_fec_get_rsp_parse;
+ yrs.rsp_cmd = 30;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_fec_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_FEC_GET - dump */
+void ethtool_fec_get_list_free(struct ethtool_fec_get_list *rsp)
+{
+ struct ethtool_fec_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.modes);
+ ethtool_fec_stat_free(&rsp->obj.stats);
+ free(rsp);
+ }
+}
+
+struct ethtool_fec_get_list *
+ethtool_fec_get_dump(struct ynl_sock *ys, struct ethtool_fec_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_fec_get_list);
+ yds.cb = ethtool_fec_get_rsp_parse;
+ yds.rsp_cmd = 30;
+ yds.rsp_policy = &ethtool_fec_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_FEC_GET, 1);
+ ys->req_policy = &ethtool_fec_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_FEC_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_fec_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_FEC_GET - notify */
+void ethtool_fec_get_ntf_free(struct ethtool_fec_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_bitset_free(&rsp->obj.modes);
+ ethtool_fec_stat_free(&rsp->obj.stats);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_FEC_SET ============== */
+/* ETHTOOL_MSG_FEC_SET - do */
+void ethtool_fec_set_req_free(struct ethtool_fec_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ ethtool_bitset_free(&req->modes);
+ ethtool_fec_stat_free(&req->stats);
+ free(req);
+}
+
+int ethtool_fec_set(struct ynl_sock *ys, struct ethtool_fec_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_FEC_SET, 1);
+ ys->req_policy = &ethtool_fec_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_FEC_HEADER, &req->header);
+ if (req->_present.modes)
+ ethtool_bitset_put(nlh, ETHTOOL_A_FEC_MODES, &req->modes);
+ if (req->_present.auto_)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_FEC_AUTO, req->auto_);
+ if (req->_present.active)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_FEC_ACTIVE, req->active);
+ if (req->_present.stats)
+ ethtool_fec_stat_put(nlh, ETHTOOL_A_FEC_STATS, &req->stats);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_MODULE_EEPROM_GET ============== */
+/* ETHTOOL_MSG_MODULE_EEPROM_GET - do */
+void
+ethtool_module_eeprom_get_req_free(struct ethtool_module_eeprom_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void
+ethtool_module_eeprom_get_rsp_free(struct ethtool_module_eeprom_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp->data);
+ free(rsp);
+}
+
+int ethtool_module_eeprom_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_module_eeprom_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_MODULE_EEPROM_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_MODULE_EEPROM_OFFSET) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.offset = 1;
+ dst->offset = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_MODULE_EEPROM_LENGTH) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.length = 1;
+ dst->length = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_MODULE_EEPROM_PAGE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.page = 1;
+ dst->page = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_MODULE_EEPROM_BANK) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.bank = 1;
+ dst->bank = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.i2c_address = 1;
+ dst->i2c_address = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_MODULE_EEPROM_DATA) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.data_len = len;
+ dst->data = malloc(len);
+ memcpy(dst->data, mnl_attr_get_payload(attr), len);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_module_eeprom_get_rsp *
+ethtool_module_eeprom_get(struct ynl_sock *ys,
+ struct ethtool_module_eeprom_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_module_eeprom_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_MODULE_EEPROM_GET, 1);
+ ys->req_policy = &ethtool_module_eeprom_nest;
+ yrs.yarg.rsp_policy = &ethtool_module_eeprom_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_MODULE_EEPROM_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_module_eeprom_get_rsp_parse;
+ yrs.rsp_cmd = 32;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_module_eeprom_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_MODULE_EEPROM_GET - dump */
+void
+ethtool_module_eeprom_get_list_free(struct ethtool_module_eeprom_get_list *rsp)
+{
+ struct ethtool_module_eeprom_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp->obj.data);
+ free(rsp);
+ }
+}
+
+struct ethtool_module_eeprom_get_list *
+ethtool_module_eeprom_get_dump(struct ynl_sock *ys,
+ struct ethtool_module_eeprom_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_module_eeprom_get_list);
+ yds.cb = ethtool_module_eeprom_get_rsp_parse;
+ yds.rsp_cmd = 32;
+ yds.rsp_policy = &ethtool_module_eeprom_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_MODULE_EEPROM_GET, 1);
+ ys->req_policy = &ethtool_module_eeprom_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_MODULE_EEPROM_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_module_eeprom_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== ETHTOOL_MSG_PHC_VCLOCKS_GET ============== */
+/* ETHTOOL_MSG_PHC_VCLOCKS_GET - do */
+void ethtool_phc_vclocks_get_req_free(struct ethtool_phc_vclocks_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_phc_vclocks_get_rsp_free(struct ethtool_phc_vclocks_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp);
+}
+
+int ethtool_phc_vclocks_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_phc_vclocks_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_PHC_VCLOCKS_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_PHC_VCLOCKS_NUM) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.num = 1;
+ dst->num = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_phc_vclocks_get_rsp *
+ethtool_phc_vclocks_get(struct ynl_sock *ys,
+ struct ethtool_phc_vclocks_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_phc_vclocks_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PHC_VCLOCKS_GET, 1);
+ ys->req_policy = &ethtool_phc_vclocks_nest;
+ yrs.yarg.rsp_policy = &ethtool_phc_vclocks_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PHC_VCLOCKS_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_phc_vclocks_get_rsp_parse;
+ yrs.rsp_cmd = 34;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_phc_vclocks_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_PHC_VCLOCKS_GET - dump */
+void
+ethtool_phc_vclocks_get_list_free(struct ethtool_phc_vclocks_get_list *rsp)
+{
+ struct ethtool_phc_vclocks_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+ }
+}
+
+struct ethtool_phc_vclocks_get_list *
+ethtool_phc_vclocks_get_dump(struct ynl_sock *ys,
+ struct ethtool_phc_vclocks_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_phc_vclocks_get_list);
+ yds.cb = ethtool_phc_vclocks_get_rsp_parse;
+ yds.rsp_cmd = 34;
+ yds.rsp_policy = &ethtool_phc_vclocks_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PHC_VCLOCKS_GET, 1);
+ ys->req_policy = &ethtool_phc_vclocks_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PHC_VCLOCKS_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_phc_vclocks_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== ETHTOOL_MSG_MODULE_GET ============== */
+/* ETHTOOL_MSG_MODULE_GET - do */
+void ethtool_module_get_req_free(struct ethtool_module_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_module_get_rsp_free(struct ethtool_module_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp);
+}
+
+int ethtool_module_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_module_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_MODULE_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_MODULE_POWER_MODE_POLICY) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.power_mode_policy = 1;
+ dst->power_mode_policy = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_MODULE_POWER_MODE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.power_mode = 1;
+ dst->power_mode = mnl_attr_get_u8(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_module_get_rsp *
+ethtool_module_get(struct ynl_sock *ys, struct ethtool_module_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_module_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_MODULE_GET, 1);
+ ys->req_policy = &ethtool_module_nest;
+ yrs.yarg.rsp_policy = &ethtool_module_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_MODULE_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_module_get_rsp_parse;
+ yrs.rsp_cmd = 35;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_module_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_MODULE_GET - dump */
+void ethtool_module_get_list_free(struct ethtool_module_get_list *rsp)
+{
+ struct ethtool_module_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+ }
+}
+
+struct ethtool_module_get_list *
+ethtool_module_get_dump(struct ynl_sock *ys,
+ struct ethtool_module_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_module_get_list);
+ yds.cb = ethtool_module_get_rsp_parse;
+ yds.rsp_cmd = 35;
+ yds.rsp_policy = &ethtool_module_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_MODULE_GET, 1);
+ ys->req_policy = &ethtool_module_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_MODULE_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_module_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_MODULE_GET - notify */
+void ethtool_module_get_ntf_free(struct ethtool_module_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_MODULE_SET ============== */
+/* ETHTOOL_MSG_MODULE_SET - do */
+void ethtool_module_set_req_free(struct ethtool_module_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+int ethtool_module_set(struct ynl_sock *ys, struct ethtool_module_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_MODULE_SET, 1);
+ ys->req_policy = &ethtool_module_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_MODULE_HEADER, &req->header);
+ if (req->_present.power_mode_policy)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_MODULE_POWER_MODE_POLICY, req->power_mode_policy);
+ if (req->_present.power_mode)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_MODULE_POWER_MODE, req->power_mode);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_PSE_GET ============== */
+/* ETHTOOL_MSG_PSE_GET - do */
+void ethtool_pse_get_req_free(struct ethtool_pse_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_pse_get_rsp_free(struct ethtool_pse_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp);
+}
+
+int ethtool_pse_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct ethtool_pse_get_rsp *dst;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_PSE_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_PODL_PSE_ADMIN_STATE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.admin_state = 1;
+ dst->admin_state = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_PODL_PSE_ADMIN_CONTROL) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.admin_control = 1;
+ dst->admin_control = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_PODL_PSE_PW_D_STATUS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.pw_d_status = 1;
+ dst->pw_d_status = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_pse_get_rsp *
+ethtool_pse_get(struct ynl_sock *ys, struct ethtool_pse_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_pse_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PSE_GET, 1);
+ ys->req_policy = &ethtool_pse_nest;
+ yrs.yarg.rsp_policy = &ethtool_pse_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PSE_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_pse_get_rsp_parse;
+ yrs.rsp_cmd = 37;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_pse_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_PSE_GET - dump */
+void ethtool_pse_get_list_free(struct ethtool_pse_get_list *rsp)
+{
+ struct ethtool_pse_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+ }
+}
+
+struct ethtool_pse_get_list *
+ethtool_pse_get_dump(struct ynl_sock *ys, struct ethtool_pse_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_pse_get_list);
+ yds.cb = ethtool_pse_get_rsp_parse;
+ yds.rsp_cmd = 37;
+ yds.rsp_policy = &ethtool_pse_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PSE_GET, 1);
+ ys->req_policy = &ethtool_pse_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PSE_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_pse_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== ETHTOOL_MSG_PSE_SET ============== */
+/* ETHTOOL_MSG_PSE_SET - do */
+void ethtool_pse_set_req_free(struct ethtool_pse_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+int ethtool_pse_set(struct ynl_sock *ys, struct ethtool_pse_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PSE_SET, 1);
+ ys->req_policy = &ethtool_pse_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PSE_HEADER, &req->header);
+ if (req->_present.admin_state)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_PODL_PSE_ADMIN_STATE, req->admin_state);
+ if (req->_present.admin_control)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_PODL_PSE_ADMIN_CONTROL, req->admin_control);
+ if (req->_present.pw_d_status)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_PODL_PSE_PW_D_STATUS, req->pw_d_status);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_RSS_GET ============== */
+/* ETHTOOL_MSG_RSS_GET - do */
+void ethtool_rss_get_req_free(struct ethtool_rss_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_rss_get_rsp_free(struct ethtool_rss_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp->indir);
+ free(rsp->hkey);
+ free(rsp);
+}
+
+int ethtool_rss_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct ethtool_rss_get_rsp *dst;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_RSS_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_RSS_CONTEXT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.context = 1;
+ dst->context = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RSS_HFUNC) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.hfunc = 1;
+ dst->hfunc = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_RSS_INDIR) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.indir_len = len;
+ dst->indir = malloc(len);
+ memcpy(dst->indir, mnl_attr_get_payload(attr), len);
+ } else if (type == ETHTOOL_A_RSS_HKEY) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.hkey_len = len;
+ dst->hkey = malloc(len);
+ memcpy(dst->hkey, mnl_attr_get_payload(attr), len);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_rss_get_rsp *
+ethtool_rss_get(struct ynl_sock *ys, struct ethtool_rss_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_rss_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_RSS_GET, 1);
+ ys->req_policy = &ethtool_rss_nest;
+ yrs.yarg.rsp_policy = &ethtool_rss_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_RSS_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_rss_get_rsp_parse;
+ yrs.rsp_cmd = ETHTOOL_MSG_RSS_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_rss_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_RSS_GET - dump */
+void ethtool_rss_get_list_free(struct ethtool_rss_get_list *rsp)
+{
+ struct ethtool_rss_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp->obj.indir);
+ free(rsp->obj.hkey);
+ free(rsp);
+ }
+}
+
+struct ethtool_rss_get_list *
+ethtool_rss_get_dump(struct ynl_sock *ys, struct ethtool_rss_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_rss_get_list);
+ yds.cb = ethtool_rss_get_rsp_parse;
+ yds.rsp_cmd = ETHTOOL_MSG_RSS_GET;
+ yds.rsp_policy = &ethtool_rss_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_RSS_GET, 1);
+ ys->req_policy = &ethtool_rss_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_RSS_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_rss_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== ETHTOOL_MSG_PLCA_GET_CFG ============== */
+/* ETHTOOL_MSG_PLCA_GET_CFG - do */
+void ethtool_plca_get_cfg_req_free(struct ethtool_plca_get_cfg_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_plca_get_cfg_rsp_free(struct ethtool_plca_get_cfg_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp);
+}
+
+int ethtool_plca_get_cfg_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_plca_get_cfg_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_PLCA_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_PLCA_VERSION) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.version = 1;
+ dst->version = mnl_attr_get_u16(attr);
+ } else if (type == ETHTOOL_A_PLCA_ENABLED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.enabled = 1;
+ dst->enabled = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_PLCA_STATUS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.status = 1;
+ dst->status = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_PLCA_NODE_CNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.node_cnt = 1;
+ dst->node_cnt = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_PLCA_NODE_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.node_id = 1;
+ dst->node_id = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_PLCA_TO_TMR) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.to_tmr = 1;
+ dst->to_tmr = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_PLCA_BURST_CNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.burst_cnt = 1;
+ dst->burst_cnt = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_PLCA_BURST_TMR) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.burst_tmr = 1;
+ dst->burst_tmr = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_plca_get_cfg_rsp *
+ethtool_plca_get_cfg(struct ynl_sock *ys, struct ethtool_plca_get_cfg_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_plca_get_cfg_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PLCA_GET_CFG, 1);
+ ys->req_policy = &ethtool_plca_nest;
+ yrs.yarg.rsp_policy = &ethtool_plca_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PLCA_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_plca_get_cfg_rsp_parse;
+ yrs.rsp_cmd = ETHTOOL_MSG_PLCA_GET_CFG;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_plca_get_cfg_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_PLCA_GET_CFG - dump */
+void ethtool_plca_get_cfg_list_free(struct ethtool_plca_get_cfg_list *rsp)
+{
+ struct ethtool_plca_get_cfg_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+ }
+}
+
+struct ethtool_plca_get_cfg_list *
+ethtool_plca_get_cfg_dump(struct ynl_sock *ys,
+ struct ethtool_plca_get_cfg_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_plca_get_cfg_list);
+ yds.cb = ethtool_plca_get_cfg_rsp_parse;
+ yds.rsp_cmd = ETHTOOL_MSG_PLCA_GET_CFG;
+ yds.rsp_policy = &ethtool_plca_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PLCA_GET_CFG, 1);
+ ys->req_policy = &ethtool_plca_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PLCA_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_plca_get_cfg_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_PLCA_GET_CFG - notify */
+void ethtool_plca_get_cfg_ntf_free(struct ethtool_plca_get_cfg_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_PLCA_SET_CFG ============== */
+/* ETHTOOL_MSG_PLCA_SET_CFG - do */
+void ethtool_plca_set_cfg_req_free(struct ethtool_plca_set_cfg_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+int ethtool_plca_set_cfg(struct ynl_sock *ys,
+ struct ethtool_plca_set_cfg_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PLCA_SET_CFG, 1);
+ ys->req_policy = &ethtool_plca_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PLCA_HEADER, &req->header);
+ if (req->_present.version)
+ mnl_attr_put_u16(nlh, ETHTOOL_A_PLCA_VERSION, req->version);
+ if (req->_present.enabled)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_PLCA_ENABLED, req->enabled);
+ if (req->_present.status)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_PLCA_STATUS, req->status);
+ if (req->_present.node_cnt)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_NODE_CNT, req->node_cnt);
+ if (req->_present.node_id)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_NODE_ID, req->node_id);
+ if (req->_present.to_tmr)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_TO_TMR, req->to_tmr);
+ if (req->_present.burst_cnt)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_BURST_CNT, req->burst_cnt);
+ if (req->_present.burst_tmr)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_BURST_TMR, req->burst_tmr);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== ETHTOOL_MSG_PLCA_GET_STATUS ============== */
+/* ETHTOOL_MSG_PLCA_GET_STATUS - do */
+void ethtool_plca_get_status_req_free(struct ethtool_plca_get_status_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_plca_get_status_rsp_free(struct ethtool_plca_get_status_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ free(rsp);
+}
+
+int ethtool_plca_get_status_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_plca_get_status_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_PLCA_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_PLCA_VERSION) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.version = 1;
+ dst->version = mnl_attr_get_u16(attr);
+ } else if (type == ETHTOOL_A_PLCA_ENABLED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.enabled = 1;
+ dst->enabled = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_PLCA_STATUS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.status = 1;
+ dst->status = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_PLCA_NODE_CNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.node_cnt = 1;
+ dst->node_cnt = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_PLCA_NODE_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.node_id = 1;
+ dst->node_id = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_PLCA_TO_TMR) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.to_tmr = 1;
+ dst->to_tmr = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_PLCA_BURST_CNT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.burst_cnt = 1;
+ dst->burst_cnt = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_PLCA_BURST_TMR) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.burst_tmr = 1;
+ dst->burst_tmr = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_plca_get_status_rsp *
+ethtool_plca_get_status(struct ynl_sock *ys,
+ struct ethtool_plca_get_status_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_plca_get_status_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PLCA_GET_STATUS, 1);
+ ys->req_policy = &ethtool_plca_nest;
+ yrs.yarg.rsp_policy = &ethtool_plca_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PLCA_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_plca_get_status_rsp_parse;
+ yrs.rsp_cmd = 40;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_plca_get_status_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_PLCA_GET_STATUS - dump */
+void
+ethtool_plca_get_status_list_free(struct ethtool_plca_get_status_list *rsp)
+{
+ struct ethtool_plca_get_status_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+ }
+}
+
+struct ethtool_plca_get_status_list *
+ethtool_plca_get_status_dump(struct ynl_sock *ys,
+ struct ethtool_plca_get_status_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_plca_get_status_list);
+ yds.cb = ethtool_plca_get_status_rsp_parse;
+ yds.rsp_cmd = 40;
+ yds.rsp_policy = &ethtool_plca_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PLCA_GET_STATUS, 1);
+ ys->req_policy = &ethtool_plca_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_PLCA_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_plca_get_status_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== ETHTOOL_MSG_MM_GET ============== */
+/* ETHTOOL_MSG_MM_GET - do */
+void ethtool_mm_get_req_free(struct ethtool_mm_get_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+void ethtool_mm_get_rsp_free(struct ethtool_mm_get_rsp *rsp)
+{
+ ethtool_header_free(&rsp->header);
+ ethtool_mm_stat_free(&rsp->stats);
+ free(rsp);
+}
+
+int ethtool_mm_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct ethtool_mm_get_rsp *dst;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_MM_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_MM_PMAC_ENABLED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.pmac_enabled = 1;
+ dst->pmac_enabled = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_MM_TX_ENABLED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_enabled = 1;
+ dst->tx_enabled = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_MM_TX_ACTIVE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_active = 1;
+ dst->tx_active = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_MM_TX_MIN_FRAG_SIZE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.tx_min_frag_size = 1;
+ dst->tx_min_frag_size = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_MM_RX_MIN_FRAG_SIZE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.rx_min_frag_size = 1;
+ dst->rx_min_frag_size = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_MM_VERIFY_ENABLED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.verify_enabled = 1;
+ dst->verify_enabled = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_MM_VERIFY_TIME) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.verify_time = 1;
+ dst->verify_time = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_MM_MAX_VERIFY_TIME) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.max_verify_time = 1;
+ dst->max_verify_time = mnl_attr_get_u32(attr);
+ } else if (type == ETHTOOL_A_MM_STATS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.stats = 1;
+
+ parg.rsp_policy = &ethtool_mm_stat_nest;
+ parg.data = &dst->stats;
+ if (ethtool_mm_stat_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct ethtool_mm_get_rsp *
+ethtool_mm_get(struct ynl_sock *ys, struct ethtool_mm_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct ethtool_mm_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_MM_GET, 1);
+ ys->req_policy = &ethtool_mm_nest;
+ yrs.yarg.rsp_policy = &ethtool_mm_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_MM_HEADER, &req->header);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = ethtool_mm_get_rsp_parse;
+ yrs.rsp_cmd = ETHTOOL_MSG_MM_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ ethtool_mm_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_MM_GET - dump */
+void ethtool_mm_get_list_free(struct ethtool_mm_get_list *rsp)
+{
+ struct ethtool_mm_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_mm_stat_free(&rsp->obj.stats);
+ free(rsp);
+ }
+}
+
+struct ethtool_mm_get_list *
+ethtool_mm_get_dump(struct ynl_sock *ys, struct ethtool_mm_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct ethtool_mm_get_list);
+ yds.cb = ethtool_mm_get_rsp_parse;
+ yds.rsp_cmd = ETHTOOL_MSG_MM_GET;
+ yds.rsp_policy = &ethtool_mm_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_MM_GET, 1);
+ ys->req_policy = &ethtool_mm_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_MM_HEADER, &req->header);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ ethtool_mm_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ETHTOOL_MSG_MM_GET - notify */
+void ethtool_mm_get_ntf_free(struct ethtool_mm_get_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_mm_stat_free(&rsp->obj.stats);
+ free(rsp);
+}
+
+/* ============== ETHTOOL_MSG_MM_SET ============== */
+/* ETHTOOL_MSG_MM_SET - do */
+void ethtool_mm_set_req_free(struct ethtool_mm_set_req *req)
+{
+ ethtool_header_free(&req->header);
+ free(req);
+}
+
+int ethtool_mm_set(struct ynl_sock *ys, struct ethtool_mm_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_MM_SET, 1);
+ ys->req_policy = &ethtool_mm_nest;
+
+ if (req->_present.header)
+ ethtool_header_put(nlh, ETHTOOL_A_MM_HEADER, &req->header);
+ if (req->_present.verify_enabled)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_MM_VERIFY_ENABLED, req->verify_enabled);
+ if (req->_present.verify_time)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_MM_VERIFY_TIME, req->verify_time);
+ if (req->_present.tx_enabled)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_MM_TX_ENABLED, req->tx_enabled);
+ if (req->_present.pmac_enabled)
+ mnl_attr_put_u8(nlh, ETHTOOL_A_MM_PMAC_ENABLED, req->pmac_enabled);
+ if (req->_present.tx_min_frag_size)
+ mnl_attr_put_u32(nlh, ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, req->tx_min_frag_size);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ETHTOOL_MSG_CABLE_TEST_NTF - event */
+int ethtool_cable_test_ntf_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ethtool_cable_test_ntf_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_CABLE_TEST_NTF_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_CABLE_TEST_NTF_STATUS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.status = 1;
+ dst->status = mnl_attr_get_u8(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+void ethtool_cable_test_ntf_free(struct ethtool_cable_test_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ free(rsp);
+}
+
+/* ETHTOOL_MSG_CABLE_TEST_TDR_NTF - event */
+int ethtool_cable_test_tdr_ntf_rsp_parse(const struct nlmsghdr *nlh,
+ void *data)
+{
+ struct ethtool_cable_test_tdr_ntf_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == ETHTOOL_A_CABLE_TEST_TDR_NTF_HEADER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.header = 1;
+
+ parg.rsp_policy = &ethtool_header_nest;
+ parg.data = &dst->header;
+ if (ethtool_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == ETHTOOL_A_CABLE_TEST_TDR_NTF_STATUS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.status = 1;
+ dst->status = mnl_attr_get_u8(attr);
+ } else if (type == ETHTOOL_A_CABLE_TEST_TDR_NTF_NEST) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.nest = 1;
+
+ parg.rsp_policy = &ethtool_cable_nest_nest;
+ parg.data = &dst->nest;
+ if (ethtool_cable_nest_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+void ethtool_cable_test_tdr_ntf_free(struct ethtool_cable_test_tdr_ntf *rsp)
+{
+ ethtool_header_free(&rsp->obj.header);
+ ethtool_cable_nest_free(&rsp->obj.nest);
+ free(rsp);
+}
+
+static const struct ynl_ntf_info ethtool_ntf_info[] = {
+ [ETHTOOL_MSG_LINKINFO_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_linkinfo_get_ntf),
+ .cb = ethtool_linkinfo_get_rsp_parse,
+ .policy = &ethtool_linkinfo_nest,
+ .free = (void *)ethtool_linkinfo_get_ntf_free,
+ },
+ [ETHTOOL_MSG_LINKMODES_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_linkmodes_get_ntf),
+ .cb = ethtool_linkmodes_get_rsp_parse,
+ .policy = &ethtool_linkmodes_nest,
+ .free = (void *)ethtool_linkmodes_get_ntf_free,
+ },
+ [ETHTOOL_MSG_DEBUG_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_debug_get_ntf),
+ .cb = ethtool_debug_get_rsp_parse,
+ .policy = &ethtool_debug_nest,
+ .free = (void *)ethtool_debug_get_ntf_free,
+ },
+ [ETHTOOL_MSG_WOL_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_wol_get_ntf),
+ .cb = ethtool_wol_get_rsp_parse,
+ .policy = &ethtool_wol_nest,
+ .free = (void *)ethtool_wol_get_ntf_free,
+ },
+ [ETHTOOL_MSG_FEATURES_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_features_get_ntf),
+ .cb = ethtool_features_get_rsp_parse,
+ .policy = &ethtool_features_nest,
+ .free = (void *)ethtool_features_get_ntf_free,
+ },
+ [ETHTOOL_MSG_PRIVFLAGS_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_privflags_get_ntf),
+ .cb = ethtool_privflags_get_rsp_parse,
+ .policy = &ethtool_privflags_nest,
+ .free = (void *)ethtool_privflags_get_ntf_free,
+ },
+ [ETHTOOL_MSG_RINGS_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_rings_get_ntf),
+ .cb = ethtool_rings_get_rsp_parse,
+ .policy = &ethtool_rings_nest,
+ .free = (void *)ethtool_rings_get_ntf_free,
+ },
+ [ETHTOOL_MSG_CHANNELS_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_channels_get_ntf),
+ .cb = ethtool_channels_get_rsp_parse,
+ .policy = &ethtool_channels_nest,
+ .free = (void *)ethtool_channels_get_ntf_free,
+ },
+ [ETHTOOL_MSG_COALESCE_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_coalesce_get_ntf),
+ .cb = ethtool_coalesce_get_rsp_parse,
+ .policy = &ethtool_coalesce_nest,
+ .free = (void *)ethtool_coalesce_get_ntf_free,
+ },
+ [ETHTOOL_MSG_PAUSE_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_pause_get_ntf),
+ .cb = ethtool_pause_get_rsp_parse,
+ .policy = &ethtool_pause_nest,
+ .free = (void *)ethtool_pause_get_ntf_free,
+ },
+ [ETHTOOL_MSG_EEE_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_eee_get_ntf),
+ .cb = ethtool_eee_get_rsp_parse,
+ .policy = &ethtool_eee_nest,
+ .free = (void *)ethtool_eee_get_ntf_free,
+ },
+ [ETHTOOL_MSG_CABLE_TEST_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_cable_test_ntf),
+ .cb = ethtool_cable_test_ntf_rsp_parse,
+ .policy = &ethtool_cable_test_ntf_nest,
+ .free = (void *)ethtool_cable_test_ntf_free,
+ },
+ [ETHTOOL_MSG_CABLE_TEST_TDR_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_cable_test_tdr_ntf),
+ .cb = ethtool_cable_test_tdr_ntf_rsp_parse,
+ .policy = &ethtool_cable_test_tdr_ntf_nest,
+ .free = (void *)ethtool_cable_test_tdr_ntf_free,
+ },
+ [ETHTOOL_MSG_FEC_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_fec_get_ntf),
+ .cb = ethtool_fec_get_rsp_parse,
+ .policy = &ethtool_fec_nest,
+ .free = (void *)ethtool_fec_get_ntf_free,
+ },
+ [ETHTOOL_MSG_MODULE_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_module_get_ntf),
+ .cb = ethtool_module_get_rsp_parse,
+ .policy = &ethtool_module_nest,
+ .free = (void *)ethtool_module_get_ntf_free,
+ },
+ [ETHTOOL_MSG_PLCA_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_plca_get_cfg_ntf),
+ .cb = ethtool_plca_get_cfg_rsp_parse,
+ .policy = &ethtool_plca_nest,
+ .free = (void *)ethtool_plca_get_cfg_ntf_free,
+ },
+ [ETHTOOL_MSG_MM_NTF] = {
+ .alloc_sz = sizeof(struct ethtool_mm_get_ntf),
+ .cb = ethtool_mm_get_rsp_parse,
+ .policy = &ethtool_mm_nest,
+ .free = (void *)ethtool_mm_get_ntf_free,
+ },
+};
+
+const struct ynl_family ynl_ethtool_family = {
+ .name = "ethtool",
+ .ntf_info = ethtool_ntf_info,
+ .ntf_info_size = MNL_ARRAY_SIZE(ethtool_ntf_info),
+};
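
A minimal caller-side sketch of the "do" request flow that the generated ethtool-user.c above implements. Socket setup and teardown are not part of this file; the sketch assumes the ynl_sock_create()/ynl_sock_destroy() helpers and struct ynl_error from tools/net/ynl/lib/ynl.h, and uses the ETHTOOL_MSG_LINKINFO_GET accessors declared in the companion header below. The include paths and the ifindex value are illustrative, not taken from this patch.

#include <stdio.h>

#include <ynl.h>		/* assumed: tools/net/ynl/lib/ynl.h */
#include "ethtool-user.h"

int main(void)
{
	struct ethtool_linkinfo_get_req *req;
	struct ethtool_linkinfo_get_rsp *rsp;
	struct ynl_error yerr;
	struct ynl_sock *ys;

	/* Socket plumbing comes from lib/ynl.h, not from the generated code. */
	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return 1;
	}

	req = ethtool_linkinfo_get_req_alloc();
	ethtool_linkinfo_get_req_set_header_dev_index(req, 1 /* illustrative ifindex */);

	rsp = ethtool_linkinfo_get(ys, req);
	ethtool_linkinfo_get_req_free(req);
	if (rsp) {
		/* Every optional attribute is guarded by a _present bit. */
		if (rsp->_present.port)
			printf("port: %d\n", rsp->port);
		ethtool_linkinfo_get_rsp_free(rsp);
	}

	ynl_sock_destroy(ys);
	return 0;
}

The ownership rules follow directly from the generated helpers: the caller frees the request with the matching *_req_free() after the call, and frees a non-NULL response with *_rsp_free().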
diff --git a/tools/net/ynl/generated/ethtool-user.h b/tools/net/ynl/generated/ethtool-user.h
new file mode 100644
index 000000000000..d7d4ba855f43
--- /dev/null
+++ b/tools/net/ynl/generated/ethtool-user.h
@@ -0,0 +1,5531 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/ethtool.yaml */
+/* YNL-GEN user header */
+/* YNL-ARG --user-header linux/ethtool_netlink.h --exclude-op stats-get */
+
+#ifndef _LINUX_ETHTOOL_GEN_H
+#define _LINUX_ETHTOOL_GEN_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <linux/types.h>
+#include <linux/ethtool.h>
+
+struct ynl_sock;
+
+extern const struct ynl_family ynl_ethtool_family;
+
+/* Enums */
+const char *ethtool_op_str(int op);
+const char *ethtool_udp_tunnel_type_str(int value);
+const char *ethtool_stringset_str(enum ethtool_stringset value);
+
+/* Common nested types */
+struct ethtool_header {
+ struct {
+ __u32 dev_index:1;
+ __u32 dev_name_len;
+ __u32 flags:1;
+ } _present;
+
+ __u32 dev_index;
+ char *dev_name;
+ __u32 flags;
+};
+
+struct ethtool_pause_stat {
+ struct {
+ __u32 tx_frames:1;
+ __u32 rx_frames:1;
+ } _present;
+
+ __u64 tx_frames;
+ __u64 rx_frames;
+};
+
+struct ethtool_cable_test_tdr_cfg {
+ struct {
+ __u32 first:1;
+ __u32 last:1;
+ __u32 step:1;
+ __u32 pair:1;
+ } _present;
+
+ __u32 first;
+ __u32 last;
+ __u32 step;
+ __u8 pair;
+};
+
+struct ethtool_fec_stat {
+ struct {
+ __u32 corrected_len;
+ __u32 uncorr_len;
+ __u32 corr_bits_len;
+ } _present;
+
+ void *corrected;
+ void *uncorr;
+ void *corr_bits;
+};
+
+struct ethtool_mm_stat {
+ struct {
+ __u32 reassembly_errors:1;
+ __u32 smd_errors:1;
+ __u32 reassembly_ok:1;
+ __u32 rx_frag_count:1;
+ __u32 tx_frag_count:1;
+ __u32 hold_count:1;
+ } _present;
+
+ __u64 reassembly_errors;
+ __u64 smd_errors;
+ __u64 reassembly_ok;
+ __u64 rx_frag_count;
+ __u64 tx_frag_count;
+ __u64 hold_count;
+};
+
+struct ethtool_cable_result {
+ struct {
+ __u32 pair:1;
+ __u32 code:1;
+ } _present;
+
+ __u8 pair;
+ __u8 code;
+};
+
+struct ethtool_cable_fault_length {
+ struct {
+ __u32 pair:1;
+ __u32 cm:1;
+ } _present;
+
+ __u8 pair;
+ __u32 cm;
+};
+
+struct ethtool_bitset_bit {
+ struct {
+ __u32 index:1;
+ __u32 name_len;
+ __u32 value:1;
+ } _present;
+
+ __u32 index;
+ char *name;
+};
+
+struct ethtool_tunnel_udp_entry {
+ struct {
+ __u32 port:1;
+ __u32 type:1;
+ } _present;
+
+ __u16 port /* big-endian */;
+ __u32 type;
+};
+
+struct ethtool_string {
+ struct {
+ __u32 index:1;
+ __u32 value_len;
+ } _present;
+
+ __u32 index;
+ char *value;
+};
+
+struct ethtool_cable_nest {
+ struct {
+ __u32 result:1;
+ __u32 fault_length:1;
+ } _present;
+
+ struct ethtool_cable_result result;
+ struct ethtool_cable_fault_length fault_length;
+};
+
+struct ethtool_bitset_bits {
+ unsigned int n_bit;
+ struct ethtool_bitset_bit *bit;
+};
+
+struct ethtool_strings {
+ unsigned int n_string;
+ struct ethtool_string *string;
+};
+
+struct ethtool_bitset {
+ struct {
+ __u32 nomask:1;
+ __u32 size:1;
+ __u32 bits:1;
+ } _present;
+
+ __u32 size;
+ struct ethtool_bitset_bits bits;
+};
+
+struct ethtool_stringset_ {
+ struct {
+ __u32 id:1;
+ __u32 count:1;
+ } _present;
+
+ __u32 id;
+ __u32 count;
+ unsigned int n_strings;
+ struct ethtool_strings *strings;
+};
+
+struct ethtool_tunnel_udp_table {
+ struct {
+ __u32 size:1;
+ __u32 types:1;
+ } _present;
+
+ __u32 size;
+ struct ethtool_bitset types;
+ unsigned int n_entry;
+ struct ethtool_tunnel_udp_entry *entry;
+};
+
+struct ethtool_stringsets {
+ unsigned int n_stringset;
+ struct ethtool_stringset_ *stringset;
+};
+
+struct ethtool_tunnel_udp {
+ struct {
+ __u32 table:1;
+ } _present;
+
+ struct ethtool_tunnel_udp_table table;
+};
+
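
The nested containers above (bitsets, string sets, UDP tunnel tables) are how structured data such as link or FEC modes reaches user space. A short sketch of walking a verbose (named) bitset, for example the "modes" member of an FEC response; the assumption that a present per-bit "value" flag marks an active bit follows the usual ethtool netlink bitset convention and should be treated as illustrative.

#include <stdio.h>

#include <ynl.h>		/* assumed: tools/net/ynl/lib/ynl.h */
#include "ethtool-user.h"

static void print_bitset_names(const struct ethtool_bitset *set)
{
	unsigned int i;

	for (i = 0; i < set->bits.n_bit; i++) {
		const struct ethtool_bitset_bit *bit = &set->bits.bit[i];

		/* Names are only present in verbose (non-compact) bitsets. */
		if (bit->name)
			printf("bit %u: %s%s\n", bit->index, bit->name,
			       bit->_present.value ? " (on)" : "");
	}
}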
+/* ============== ETHTOOL_MSG_STRSET_GET ============== */
+/* ETHTOOL_MSG_STRSET_GET - do */
+struct ethtool_strset_get_req {
+ struct {
+ __u32 header:1;
+ __u32 stringsets:1;
+ __u32 counts_only:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_stringsets stringsets;
+};
+
+static inline struct ethtool_strset_get_req *ethtool_strset_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_strset_get_req));
+}
+void ethtool_strset_get_req_free(struct ethtool_strset_get_req *req);
+
+static inline void
+ethtool_strset_get_req_set_header_dev_index(struct ethtool_strset_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_strset_get_req_set_header_dev_name(struct ethtool_strset_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_strset_get_req_set_header_flags(struct ethtool_strset_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+__ethtool_strset_get_req_set_stringsets_stringset(struct ethtool_strset_get_req *req,
+ struct ethtool_stringset_ *stringset,
+ unsigned int n_stringset)
+{
+ free(req->stringsets.stringset);
+ req->stringsets.stringset = stringset;
+ req->stringsets.n_stringset = n_stringset;
+}
+static inline void
+ethtool_strset_get_req_set_counts_only(struct ethtool_strset_get_req *req)
+{
+ req->_present.counts_only = 1;
+}
+
+struct ethtool_strset_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 stringsets:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_stringsets stringsets;
+};
+
+void ethtool_strset_get_rsp_free(struct ethtool_strset_get_rsp *rsp);
+
+/*
+ * Get string set from the kernel.
+ */
+struct ethtool_strset_get_rsp *
+ethtool_strset_get(struct ynl_sock *ys, struct ethtool_strset_get_req *req);
+
+/* ETHTOOL_MSG_STRSET_GET - dump */
+struct ethtool_strset_get_req_dump {
+ struct {
+ __u32 header:1;
+ __u32 stringsets:1;
+ __u32 counts_only:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_stringsets stringsets;
+};
+
+static inline struct ethtool_strset_get_req_dump *
+ethtool_strset_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_strset_get_req_dump));
+}
+void ethtool_strset_get_req_dump_free(struct ethtool_strset_get_req_dump *req);
+
+static inline void
+ethtool_strset_get_req_dump_set_header_dev_index(struct ethtool_strset_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_strset_get_req_dump_set_header_dev_name(struct ethtool_strset_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_strset_get_req_dump_set_header_flags(struct ethtool_strset_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+__ethtool_strset_get_req_dump_set_stringsets_stringset(struct ethtool_strset_get_req_dump *req,
+ struct ethtool_stringset_ *stringset,
+ unsigned int n_stringset)
+{
+ free(req->stringsets.stringset);
+ req->stringsets.stringset = stringset;
+ req->stringsets.n_stringset = n_stringset;
+}
+static inline void
+ethtool_strset_get_req_dump_set_counts_only(struct ethtool_strset_get_req_dump *req)
+{
+ req->_present.counts_only = 1;
+}
+
+struct ethtool_strset_get_list {
+ struct ethtool_strset_get_list *next;
+ struct ethtool_strset_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_strset_get_list_free(struct ethtool_strset_get_list *rsp);
+
+struct ethtool_strset_get_list *
+ethtool_strset_get_dump(struct ynl_sock *ys,
+ struct ethtool_strset_get_req_dump *req);
+
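
A sketch of the dump flow built from the string-set request just declared. Dump results come back as a linked list terminated by YNL_LIST_END, the same convention the generated *_list_free() helpers iterate on; YNL_LIST_END and struct ynl_sock come from the library header in tools/net/ynl/lib. The counts-only request and the printed fields are illustrative.

#include <stdio.h>

#include <ynl.h>		/* assumed: tools/net/ynl/lib/ynl.h */
#include "ethtool-user.h"

/* 'ys' is an already-connected ynl socket. */
void dump_string_set_counts(struct ynl_sock *ys)
{
	struct ethtool_strset_get_req_dump *req;
	struct ethtool_strset_get_list *rsp, *pos;
	unsigned int i;

	req = ethtool_strset_get_req_dump_alloc();
	ethtool_strset_get_req_dump_set_counts_only(req);

	rsp = ethtool_strset_get_dump(ys, req);
	ethtool_strset_get_req_dump_free(req);
	if (!rsp)
		return;

	/* Same termination convention as the generated *_list_free() loops. */
	for (pos = rsp; (void *)pos != YNL_LIST_END; pos = pos->next) {
		struct ethtool_stringsets *sets = &pos->obj.stringsets;

		for (i = 0; i < sets->n_stringset; i++)
			printf("set %u: %u strings\n",
			       sets->stringset[i].id,
			       sets->stringset[i].count);
	}

	ethtool_strset_get_list_free(rsp);
}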
+/* ============== ETHTOOL_MSG_LINKINFO_GET ============== */
+/* ETHTOOL_MSG_LINKINFO_GET - do */
+struct ethtool_linkinfo_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_linkinfo_get_req *
+ethtool_linkinfo_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_linkinfo_get_req));
+}
+void ethtool_linkinfo_get_req_free(struct ethtool_linkinfo_get_req *req);
+
+static inline void
+ethtool_linkinfo_get_req_set_header_dev_index(struct ethtool_linkinfo_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_linkinfo_get_req_set_header_dev_name(struct ethtool_linkinfo_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_linkinfo_get_req_set_header_flags(struct ethtool_linkinfo_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_linkinfo_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 port:1;
+ __u32 phyaddr:1;
+ __u32 tp_mdix:1;
+ __u32 tp_mdix_ctrl:1;
+ __u32 transceiver:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 port;
+ __u8 phyaddr;
+ __u8 tp_mdix;
+ __u8 tp_mdix_ctrl;
+ __u8 transceiver;
+};
+
+void ethtool_linkinfo_get_rsp_free(struct ethtool_linkinfo_get_rsp *rsp);
+
+/*
+ * Get link info.
+ */
+struct ethtool_linkinfo_get_rsp *
+ethtool_linkinfo_get(struct ynl_sock *ys, struct ethtool_linkinfo_get_req *req);
+
+/* ETHTOOL_MSG_LINKINFO_GET - dump */
+struct ethtool_linkinfo_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_linkinfo_get_req_dump *
+ethtool_linkinfo_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_linkinfo_get_req_dump));
+}
+void
+ethtool_linkinfo_get_req_dump_free(struct ethtool_linkinfo_get_req_dump *req);
+
+static inline void
+ethtool_linkinfo_get_req_dump_set_header_dev_index(struct ethtool_linkinfo_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_linkinfo_get_req_dump_set_header_dev_name(struct ethtool_linkinfo_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_linkinfo_get_req_dump_set_header_flags(struct ethtool_linkinfo_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_linkinfo_get_list {
+ struct ethtool_linkinfo_get_list *next;
+ struct ethtool_linkinfo_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_linkinfo_get_list_free(struct ethtool_linkinfo_get_list *rsp);
+
+struct ethtool_linkinfo_get_list *
+ethtool_linkinfo_get_dump(struct ynl_sock *ys,
+ struct ethtool_linkinfo_get_req_dump *req);
+
+/* ETHTOOL_MSG_LINKINFO_GET - notify */
+struct ethtool_linkinfo_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_linkinfo_get_ntf *ntf);
+ struct ethtool_linkinfo_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_linkinfo_get_ntf_free(struct ethtool_linkinfo_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_LINKINFO_SET ============== */
+/* ETHTOOL_MSG_LINKINFO_SET - do */
+struct ethtool_linkinfo_set_req {
+ struct {
+ __u32 header:1;
+ __u32 port:1;
+ __u32 phyaddr:1;
+ __u32 tp_mdix:1;
+ __u32 tp_mdix_ctrl:1;
+ __u32 transceiver:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 port;
+ __u8 phyaddr;
+ __u8 tp_mdix;
+ __u8 tp_mdix_ctrl;
+ __u8 transceiver;
+};
+
+static inline struct ethtool_linkinfo_set_req *
+ethtool_linkinfo_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_linkinfo_set_req));
+}
+void ethtool_linkinfo_set_req_free(struct ethtool_linkinfo_set_req *req);
+
+static inline void
+ethtool_linkinfo_set_req_set_header_dev_index(struct ethtool_linkinfo_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_linkinfo_set_req_set_header_dev_name(struct ethtool_linkinfo_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_linkinfo_set_req_set_header_flags(struct ethtool_linkinfo_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_linkinfo_set_req_set_port(struct ethtool_linkinfo_set_req *req,
+ __u8 port)
+{
+ req->_present.port = 1;
+ req->port = port;
+}
+static inline void
+ethtool_linkinfo_set_req_set_phyaddr(struct ethtool_linkinfo_set_req *req,
+ __u8 phyaddr)
+{
+ req->_present.phyaddr = 1;
+ req->phyaddr = phyaddr;
+}
+static inline void
+ethtool_linkinfo_set_req_set_tp_mdix(struct ethtool_linkinfo_set_req *req,
+ __u8 tp_mdix)
+{
+ req->_present.tp_mdix = 1;
+ req->tp_mdix = tp_mdix;
+}
+static inline void
+ethtool_linkinfo_set_req_set_tp_mdix_ctrl(struct ethtool_linkinfo_set_req *req,
+ __u8 tp_mdix_ctrl)
+{
+ req->_present.tp_mdix_ctrl = 1;
+ req->tp_mdix_ctrl = tp_mdix_ctrl;
+}
+static inline void
+ethtool_linkinfo_set_req_set_transceiver(struct ethtool_linkinfo_set_req *req,
+ __u8 transceiver)
+{
+ req->_present.transceiver = 1;
+ req->transceiver = transceiver;
+}
+
+/*
+ * Set link info.
+ */
+int ethtool_linkinfo_set(struct ynl_sock *ys,
+ struct ethtool_linkinfo_set_req *req);
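+
+/*
+ * Illustrative "set" usage sketch, not part of the generated output. Only the
+ * attributes whose setters were called end up in the request; "ys" and
+ * "ifindex" are assumed to come from the caller, and the tp_mdix_ctrl value
+ * is a placeholder. ethtool_linkinfo_set() returns 0 on success and a
+ * negative value on failure.
+ *
+ *    struct ethtool_linkinfo_set_req *req;
+ *    int ret;
+ *
+ *    req = ethtool_linkinfo_set_req_alloc();
+ *    ethtool_linkinfo_set_req_set_header_dev_index(req, ifindex);
+ *    ethtool_linkinfo_set_req_set_tp_mdix_ctrl(req, 1);
+ *
+ *    ret = ethtool_linkinfo_set(ys, req);
+ *    ethtool_linkinfo_set_req_free(req);
+ *    if (ret < 0)
+ *        fprintf(stderr, "YNL: %s\n", ys->err.msg);
+ */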
+
+/* ============== ETHTOOL_MSG_LINKMODES_GET ============== */
+/* ETHTOOL_MSG_LINKMODES_GET - do */
+struct ethtool_linkmodes_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_linkmodes_get_req *
+ethtool_linkmodes_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_linkmodes_get_req));
+}
+void ethtool_linkmodes_get_req_free(struct ethtool_linkmodes_get_req *req);
+
+static inline void
+ethtool_linkmodes_get_req_set_header_dev_index(struct ethtool_linkmodes_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_linkmodes_get_req_set_header_dev_name(struct ethtool_linkmodes_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_linkmodes_get_req_set_header_flags(struct ethtool_linkmodes_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_linkmodes_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 autoneg:1;
+ __u32 ours:1;
+ __u32 peer:1;
+ __u32 speed:1;
+ __u32 duplex:1;
+ __u32 master_slave_cfg:1;
+ __u32 master_slave_state:1;
+ __u32 lanes:1;
+ __u32 rate_matching:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 autoneg;
+ struct ethtool_bitset ours;
+ struct ethtool_bitset peer;
+ __u32 speed;
+ __u8 duplex;
+ __u8 master_slave_cfg;
+ __u8 master_slave_state;
+ __u32 lanes;
+ __u8 rate_matching;
+};
+
+void ethtool_linkmodes_get_rsp_free(struct ethtool_linkmodes_get_rsp *rsp);
+
+/*
+ * Get link modes.
+ */
+struct ethtool_linkmodes_get_rsp *
+ethtool_linkmodes_get(struct ynl_sock *ys,
+ struct ethtool_linkmodes_get_req *req);
+
+/* ETHTOOL_MSG_LINKMODES_GET - dump */
+struct ethtool_linkmodes_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_linkmodes_get_req_dump *
+ethtool_linkmodes_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_linkmodes_get_req_dump));
+}
+void
+ethtool_linkmodes_get_req_dump_free(struct ethtool_linkmodes_get_req_dump *req);
+
+static inline void
+ethtool_linkmodes_get_req_dump_set_header_dev_index(struct ethtool_linkmodes_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_linkmodes_get_req_dump_set_header_dev_name(struct ethtool_linkmodes_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_linkmodes_get_req_dump_set_header_flags(struct ethtool_linkmodes_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_linkmodes_get_list {
+ struct ethtool_linkmodes_get_list *next;
+ struct ethtool_linkmodes_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_linkmodes_get_list_free(struct ethtool_linkmodes_get_list *rsp);
+
+struct ethtool_linkmodes_get_list *
+ethtool_linkmodes_get_dump(struct ynl_sock *ys,
+ struct ethtool_linkmodes_get_req_dump *req);
+
+/* ETHTOOL_MSG_LINKMODES_GET - notify */
+struct ethtool_linkmodes_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_linkmodes_get_ntf *ntf);
+ struct ethtool_linkmodes_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_linkmodes_get_ntf_free(struct ethtool_linkmodes_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_LINKMODES_SET ============== */
+/* ETHTOOL_MSG_LINKMODES_SET - do */
+struct ethtool_linkmodes_set_req {
+ struct {
+ __u32 header:1;
+ __u32 autoneg:1;
+ __u32 ours:1;
+ __u32 peer:1;
+ __u32 speed:1;
+ __u32 duplex:1;
+ __u32 master_slave_cfg:1;
+ __u32 master_slave_state:1;
+ __u32 lanes:1;
+ __u32 rate_matching:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 autoneg;
+ struct ethtool_bitset ours;
+ struct ethtool_bitset peer;
+ __u32 speed;
+ __u8 duplex;
+ __u8 master_slave_cfg;
+ __u8 master_slave_state;
+ __u32 lanes;
+ __u8 rate_matching;
+};
+
+static inline struct ethtool_linkmodes_set_req *
+ethtool_linkmodes_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_linkmodes_set_req));
+}
+void ethtool_linkmodes_set_req_free(struct ethtool_linkmodes_set_req *req);
+
+static inline void
+ethtool_linkmodes_set_req_set_header_dev_index(struct ethtool_linkmodes_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_linkmodes_set_req_set_header_dev_name(struct ethtool_linkmodes_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_linkmodes_set_req_set_header_flags(struct ethtool_linkmodes_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_linkmodes_set_req_set_autoneg(struct ethtool_linkmodes_set_req *req,
+ __u8 autoneg)
+{
+ req->_present.autoneg = 1;
+ req->autoneg = autoneg;
+}
+static inline void
+ethtool_linkmodes_set_req_set_ours_nomask(struct ethtool_linkmodes_set_req *req)
+{
+ req->_present.ours = 1;
+ req->ours._present.nomask = 1;
+}
+static inline void
+ethtool_linkmodes_set_req_set_ours_size(struct ethtool_linkmodes_set_req *req,
+ __u32 size)
+{
+ req->_present.ours = 1;
+ req->ours._present.size = 1;
+ req->ours.size = size;
+}
+static inline void
+__ethtool_linkmodes_set_req_set_ours_bits_bit(struct ethtool_linkmodes_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->ours.bits.bit);
+ req->ours.bits.bit = bit;
+ req->ours.bits.n_bit = n_bit;
+}
+static inline void
+ethtool_linkmodes_set_req_set_peer_nomask(struct ethtool_linkmodes_set_req *req)
+{
+ req->_present.peer = 1;
+ req->peer._present.nomask = 1;
+}
+static inline void
+ethtool_linkmodes_set_req_set_peer_size(struct ethtool_linkmodes_set_req *req,
+ __u32 size)
+{
+ req->_present.peer = 1;
+ req->peer._present.size = 1;
+ req->peer.size = size;
+}
+static inline void
+__ethtool_linkmodes_set_req_set_peer_bits_bit(struct ethtool_linkmodes_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->peer.bits.bit);
+ req->peer.bits.bit = bit;
+ req->peer.bits.n_bit = n_bit;
+}
+static inline void
+ethtool_linkmodes_set_req_set_speed(struct ethtool_linkmodes_set_req *req,
+ __u32 speed)
+{
+ req->_present.speed = 1;
+ req->speed = speed;
+}
+static inline void
+ethtool_linkmodes_set_req_set_duplex(struct ethtool_linkmodes_set_req *req,
+ __u8 duplex)
+{
+ req->_present.duplex = 1;
+ req->duplex = duplex;
+}
+static inline void
+ethtool_linkmodes_set_req_set_master_slave_cfg(struct ethtool_linkmodes_set_req *req,
+ __u8 master_slave_cfg)
+{
+ req->_present.master_slave_cfg = 1;
+ req->master_slave_cfg = master_slave_cfg;
+}
+static inline void
+ethtool_linkmodes_set_req_set_master_slave_state(struct ethtool_linkmodes_set_req *req,
+ __u8 master_slave_state)
+{
+ req->_present.master_slave_state = 1;
+ req->master_slave_state = master_slave_state;
+}
+static inline void
+ethtool_linkmodes_set_req_set_lanes(struct ethtool_linkmodes_set_req *req,
+ __u32 lanes)
+{
+ req->_present.lanes = 1;
+ req->lanes = lanes;
+}
+static inline void
+ethtool_linkmodes_set_req_set_rate_matching(struct ethtool_linkmodes_set_req *req,
+ __u8 rate_matching)
+{
+ req->_present.rate_matching = 1;
+ req->rate_matching = rate_matching;
+}
+
+/*
+ * Set link modes.
+ */
+int ethtool_linkmodes_set(struct ynl_sock *ys,
+ struct ethtool_linkmodes_set_req *req);
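+
+/*
+ * Illustrative sketch of the bitset helpers used by the LINKMODES set request;
+ * not part of the generated output. The double-underscore
+ * __..._set_ours_bits_bit() helper frees any previously attached array and
+ * takes ownership of the one passed in, so hand it heap memory, not a stack
+ * array; the array is assumed to be released by
+ * ethtool_linkmodes_set_req_free().
+ *
+ *    struct ethtool_linkmodes_set_req *req;
+ *    struct ethtool_bitset_bit *bits;
+ *
+ *    req = ethtool_linkmodes_set_req_alloc();
+ *    ethtool_linkmodes_set_req_set_header_dev_index(req, ifindex);
+ *
+ *    bits = calloc(2, sizeof(*bits));
+ *    // fill bits[0] / bits[1] per struct ethtool_bitset_bit
+ *    __ethtool_linkmodes_set_req_set_ours_bits_bit(req, bits, 2);
+ *
+ *    if (ethtool_linkmodes_set(ys, req))
+ *        fprintf(stderr, "YNL: %s\n", ys->err.msg);
+ *    ethtool_linkmodes_set_req_free(req);
+ */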
+
+/* ============== ETHTOOL_MSG_LINKSTATE_GET ============== */
+/* ETHTOOL_MSG_LINKSTATE_GET - do */
+struct ethtool_linkstate_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_linkstate_get_req *
+ethtool_linkstate_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_linkstate_get_req));
+}
+void ethtool_linkstate_get_req_free(struct ethtool_linkstate_get_req *req);
+
+static inline void
+ethtool_linkstate_get_req_set_header_dev_index(struct ethtool_linkstate_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_linkstate_get_req_set_header_dev_name(struct ethtool_linkstate_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_linkstate_get_req_set_header_flags(struct ethtool_linkstate_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_linkstate_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 link:1;
+ __u32 sqi:1;
+ __u32 sqi_max:1;
+ __u32 ext_state:1;
+ __u32 ext_substate:1;
+ __u32 ext_down_cnt:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 link;
+ __u32 sqi;
+ __u32 sqi_max;
+ __u8 ext_state;
+ __u8 ext_substate;
+ __u32 ext_down_cnt;
+};
+
+void ethtool_linkstate_get_rsp_free(struct ethtool_linkstate_get_rsp *rsp);
+
+/*
+ * Get link state.
+ */
+struct ethtool_linkstate_get_rsp *
+ethtool_linkstate_get(struct ynl_sock *ys,
+ struct ethtool_linkstate_get_req *req);
+
+/* ETHTOOL_MSG_LINKSTATE_GET - dump */
+struct ethtool_linkstate_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_linkstate_get_req_dump *
+ethtool_linkstate_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_linkstate_get_req_dump));
+}
+void
+ethtool_linkstate_get_req_dump_free(struct ethtool_linkstate_get_req_dump *req);
+
+static inline void
+ethtool_linkstate_get_req_dump_set_header_dev_index(struct ethtool_linkstate_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_linkstate_get_req_dump_set_header_dev_name(struct ethtool_linkstate_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_linkstate_get_req_dump_set_header_flags(struct ethtool_linkstate_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_linkstate_get_list {
+ struct ethtool_linkstate_get_list *next;
+ struct ethtool_linkstate_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_linkstate_get_list_free(struct ethtool_linkstate_get_list *rsp);
+
+struct ethtool_linkstate_get_list *
+ethtool_linkstate_get_dump(struct ynl_sock *ys,
+ struct ethtool_linkstate_get_req_dump *req);
+
+/* ============== ETHTOOL_MSG_DEBUG_GET ============== */
+/* ETHTOOL_MSG_DEBUG_GET - do */
+struct ethtool_debug_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_debug_get_req *ethtool_debug_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_debug_get_req));
+}
+void ethtool_debug_get_req_free(struct ethtool_debug_get_req *req);
+
+static inline void
+ethtool_debug_get_req_set_header_dev_index(struct ethtool_debug_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_debug_get_req_set_header_dev_name(struct ethtool_debug_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_debug_get_req_set_header_flags(struct ethtool_debug_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_debug_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 msgmask:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset msgmask;
+};
+
+void ethtool_debug_get_rsp_free(struct ethtool_debug_get_rsp *rsp);
+
+/*
+ * Get debug message mask.
+ */
+struct ethtool_debug_get_rsp *
+ethtool_debug_get(struct ynl_sock *ys, struct ethtool_debug_get_req *req);
+
+/* ETHTOOL_MSG_DEBUG_GET - dump */
+struct ethtool_debug_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_debug_get_req_dump *
+ethtool_debug_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_debug_get_req_dump));
+}
+void ethtool_debug_get_req_dump_free(struct ethtool_debug_get_req_dump *req);
+
+static inline void
+ethtool_debug_get_req_dump_set_header_dev_index(struct ethtool_debug_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_debug_get_req_dump_set_header_dev_name(struct ethtool_debug_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_debug_get_req_dump_set_header_flags(struct ethtool_debug_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_debug_get_list {
+ struct ethtool_debug_get_list *next;
+ struct ethtool_debug_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_debug_get_list_free(struct ethtool_debug_get_list *rsp);
+
+struct ethtool_debug_get_list *
+ethtool_debug_get_dump(struct ynl_sock *ys,
+ struct ethtool_debug_get_req_dump *req);
+
+/* ETHTOOL_MSG_DEBUG_GET - notify */
+struct ethtool_debug_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_debug_get_ntf *ntf);
+ struct ethtool_debug_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_debug_get_ntf_free(struct ethtool_debug_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_DEBUG_SET ============== */
+/* ETHTOOL_MSG_DEBUG_SET - do */
+struct ethtool_debug_set_req {
+ struct {
+ __u32 header:1;
+ __u32 msgmask:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset msgmask;
+};
+
+static inline struct ethtool_debug_set_req *ethtool_debug_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_debug_set_req));
+}
+void ethtool_debug_set_req_free(struct ethtool_debug_set_req *req);
+
+static inline void
+ethtool_debug_set_req_set_header_dev_index(struct ethtool_debug_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_debug_set_req_set_header_dev_name(struct ethtool_debug_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_debug_set_req_set_header_flags(struct ethtool_debug_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_debug_set_req_set_msgmask_nomask(struct ethtool_debug_set_req *req)
+{
+ req->_present.msgmask = 1;
+ req->msgmask._present.nomask = 1;
+}
+static inline void
+ethtool_debug_set_req_set_msgmask_size(struct ethtool_debug_set_req *req,
+ __u32 size)
+{
+ req->_present.msgmask = 1;
+ req->msgmask._present.size = 1;
+ req->msgmask.size = size;
+}
+static inline void
+__ethtool_debug_set_req_set_msgmask_bits_bit(struct ethtool_debug_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->msgmask.bits.bit);
+ req->msgmask.bits.bit = bit;
+ req->msgmask.bits.n_bit = n_bit;
+}
+
+/*
+ * Set debug message mask.
+ */
+int ethtool_debug_set(struct ynl_sock *ys, struct ethtool_debug_set_req *req);
+
+/* ============== ETHTOOL_MSG_WOL_GET ============== */
+/* ETHTOOL_MSG_WOL_GET - do */
+struct ethtool_wol_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_wol_get_req *ethtool_wol_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_wol_get_req));
+}
+void ethtool_wol_get_req_free(struct ethtool_wol_get_req *req);
+
+static inline void
+ethtool_wol_get_req_set_header_dev_index(struct ethtool_wol_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_wol_get_req_set_header_dev_name(struct ethtool_wol_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_wol_get_req_set_header_flags(struct ethtool_wol_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_wol_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 modes:1;
+ __u32 sopass_len;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset modes;
+ void *sopass;
+};
+
+void ethtool_wol_get_rsp_free(struct ethtool_wol_get_rsp *rsp);
+
+/*
+ * Get WOL params.
+ */
+struct ethtool_wol_get_rsp *
+ethtool_wol_get(struct ynl_sock *ys, struct ethtool_wol_get_req *req);
+
+/* ETHTOOL_MSG_WOL_GET - dump */
+struct ethtool_wol_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_wol_get_req_dump *
+ethtool_wol_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_wol_get_req_dump));
+}
+void ethtool_wol_get_req_dump_free(struct ethtool_wol_get_req_dump *req);
+
+static inline void
+ethtool_wol_get_req_dump_set_header_dev_index(struct ethtool_wol_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_wol_get_req_dump_set_header_dev_name(struct ethtool_wol_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_wol_get_req_dump_set_header_flags(struct ethtool_wol_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_wol_get_list {
+ struct ethtool_wol_get_list *next;
+ struct ethtool_wol_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_wol_get_list_free(struct ethtool_wol_get_list *rsp);
+
+struct ethtool_wol_get_list *
+ethtool_wol_get_dump(struct ynl_sock *ys, struct ethtool_wol_get_req_dump *req);
+
+/* ETHTOOL_MSG_WOL_GET - notify */
+struct ethtool_wol_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_wol_get_ntf *ntf);
+ struct ethtool_wol_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_wol_get_ntf_free(struct ethtool_wol_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_WOL_SET ============== */
+/* ETHTOOL_MSG_WOL_SET - do */
+struct ethtool_wol_set_req {
+ struct {
+ __u32 header:1;
+ __u32 modes:1;
+ __u32 sopass_len;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset modes;
+ void *sopass;
+};
+
+static inline struct ethtool_wol_set_req *ethtool_wol_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_wol_set_req));
+}
+void ethtool_wol_set_req_free(struct ethtool_wol_set_req *req);
+
+static inline void
+ethtool_wol_set_req_set_header_dev_index(struct ethtool_wol_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_wol_set_req_set_header_dev_name(struct ethtool_wol_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_wol_set_req_set_header_flags(struct ethtool_wol_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_wol_set_req_set_modes_nomask(struct ethtool_wol_set_req *req)
+{
+ req->_present.modes = 1;
+ req->modes._present.nomask = 1;
+}
+static inline void
+ethtool_wol_set_req_set_modes_size(struct ethtool_wol_set_req *req, __u32 size)
+{
+ req->_present.modes = 1;
+ req->modes._present.size = 1;
+ req->modes.size = size;
+}
+static inline void
+__ethtool_wol_set_req_set_modes_bits_bit(struct ethtool_wol_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->modes.bits.bit);
+ req->modes.bits.bit = bit;
+ req->modes.bits.n_bit = n_bit;
+}
+static inline void
+ethtool_wol_set_req_set_sopass(struct ethtool_wol_set_req *req,
+ const void *sopass, size_t len)
+{
+ free(req->sopass);
+ req->_present.sopass_len = len;
+ req->sopass = malloc(req->_present.sopass_len);
+ memcpy(req->sopass, sopass, req->_present.sopass_len);
+}
+
+/*
+ * Set WOL params.
+ */
+int ethtool_wol_set(struct ynl_sock *ys, struct ethtool_wol_set_req *req);
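+
+/*
+ * Illustrative WOL set sketch, not part of the generated output; shows the
+ * binary sopass setter, which copies len bytes from the caller's buffer. The
+ * password bytes and device name are placeholders.
+ *
+ *    static const unsigned char pw[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
+ *    struct ethtool_wol_set_req *req;
+ *
+ *    req = ethtool_wol_set_req_alloc();
+ *    ethtool_wol_set_req_set_header_dev_name(req, "eth0");
+ *    ethtool_wol_set_req_set_sopass(req, pw, sizeof(pw));
+ *
+ *    if (ethtool_wol_set(ys, req))
+ *        fprintf(stderr, "YNL: %s\n", ys->err.msg);
+ *    ethtool_wol_set_req_free(req);
+ */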
+
+/* ============== ETHTOOL_MSG_FEATURES_GET ============== */
+/* ETHTOOL_MSG_FEATURES_GET - do */
+struct ethtool_features_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_features_get_req *
+ethtool_features_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_features_get_req));
+}
+void ethtool_features_get_req_free(struct ethtool_features_get_req *req);
+
+static inline void
+ethtool_features_get_req_set_header_dev_index(struct ethtool_features_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_features_get_req_set_header_dev_name(struct ethtool_features_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_features_get_req_set_header_flags(struct ethtool_features_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_features_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 hw:1;
+ __u32 wanted:1;
+ __u32 active:1;
+ __u32 nochange:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset hw;
+ struct ethtool_bitset wanted;
+ struct ethtool_bitset active;
+ struct ethtool_bitset nochange;
+};
+
+void ethtool_features_get_rsp_free(struct ethtool_features_get_rsp *rsp);
+
+/*
+ * Get features.
+ */
+struct ethtool_features_get_rsp *
+ethtool_features_get(struct ynl_sock *ys, struct ethtool_features_get_req *req);
+
+/* ETHTOOL_MSG_FEATURES_GET - dump */
+struct ethtool_features_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_features_get_req_dump *
+ethtool_features_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_features_get_req_dump));
+}
+void
+ethtool_features_get_req_dump_free(struct ethtool_features_get_req_dump *req);
+
+static inline void
+ethtool_features_get_req_dump_set_header_dev_index(struct ethtool_features_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_features_get_req_dump_set_header_dev_name(struct ethtool_features_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_features_get_req_dump_set_header_flags(struct ethtool_features_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_features_get_list {
+ struct ethtool_features_get_list *next;
+ struct ethtool_features_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_features_get_list_free(struct ethtool_features_get_list *rsp);
+
+struct ethtool_features_get_list *
+ethtool_features_get_dump(struct ynl_sock *ys,
+ struct ethtool_features_get_req_dump *req);
+
+/* ETHTOOL_MSG_FEATURES_GET - notify */
+struct ethtool_features_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_features_get_ntf *ntf);
+ struct ethtool_features_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_features_get_ntf_free(struct ethtool_features_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_FEATURES_SET ============== */
+/* ETHTOOL_MSG_FEATURES_SET - do */
+struct ethtool_features_set_req {
+ struct {
+ __u32 header:1;
+ __u32 hw:1;
+ __u32 wanted:1;
+ __u32 active:1;
+ __u32 nochange:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset hw;
+ struct ethtool_bitset wanted;
+ struct ethtool_bitset active;
+ struct ethtool_bitset nochange;
+};
+
+static inline struct ethtool_features_set_req *
+ethtool_features_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_features_set_req));
+}
+void ethtool_features_set_req_free(struct ethtool_features_set_req *req);
+
+static inline void
+ethtool_features_set_req_set_header_dev_index(struct ethtool_features_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_features_set_req_set_header_dev_name(struct ethtool_features_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_features_set_req_set_header_flags(struct ethtool_features_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_features_set_req_set_hw_nomask(struct ethtool_features_set_req *req)
+{
+ req->_present.hw = 1;
+ req->hw._present.nomask = 1;
+}
+static inline void
+ethtool_features_set_req_set_hw_size(struct ethtool_features_set_req *req,
+ __u32 size)
+{
+ req->_present.hw = 1;
+ req->hw._present.size = 1;
+ req->hw.size = size;
+}
+static inline void
+__ethtool_features_set_req_set_hw_bits_bit(struct ethtool_features_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->hw.bits.bit);
+ req->hw.bits.bit = bit;
+ req->hw.bits.n_bit = n_bit;
+}
+static inline void
+ethtool_features_set_req_set_wanted_nomask(struct ethtool_features_set_req *req)
+{
+ req->_present.wanted = 1;
+ req->wanted._present.nomask = 1;
+}
+static inline void
+ethtool_features_set_req_set_wanted_size(struct ethtool_features_set_req *req,
+ __u32 size)
+{
+ req->_present.wanted = 1;
+ req->wanted._present.size = 1;
+ req->wanted.size = size;
+}
+static inline void
+__ethtool_features_set_req_set_wanted_bits_bit(struct ethtool_features_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->wanted.bits.bit);
+ req->wanted.bits.bit = bit;
+ req->wanted.bits.n_bit = n_bit;
+}
+static inline void
+ethtool_features_set_req_set_active_nomask(struct ethtool_features_set_req *req)
+{
+ req->_present.active = 1;
+ req->active._present.nomask = 1;
+}
+static inline void
+ethtool_features_set_req_set_active_size(struct ethtool_features_set_req *req,
+ __u32 size)
+{
+ req->_present.active = 1;
+ req->active._present.size = 1;
+ req->active.size = size;
+}
+static inline void
+__ethtool_features_set_req_set_active_bits_bit(struct ethtool_features_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->active.bits.bit);
+ req->active.bits.bit = bit;
+ req->active.bits.n_bit = n_bit;
+}
+static inline void
+ethtool_features_set_req_set_nochange_nomask(struct ethtool_features_set_req *req)
+{
+ req->_present.nochange = 1;
+ req->nochange._present.nomask = 1;
+}
+static inline void
+ethtool_features_set_req_set_nochange_size(struct ethtool_features_set_req *req,
+ __u32 size)
+{
+ req->_present.nochange = 1;
+ req->nochange._present.size = 1;
+ req->nochange.size = size;
+}
+static inline void
+__ethtool_features_set_req_set_nochange_bits_bit(struct ethtool_features_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->nochange.bits.bit);
+ req->nochange.bits.bit = bit;
+ req->nochange.bits.n_bit = n_bit;
+}
+
+struct ethtool_features_set_rsp {
+ struct {
+ __u32 header:1;
+ __u32 hw:1;
+ __u32 wanted:1;
+ __u32 active:1;
+ __u32 nochange:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset hw;
+ struct ethtool_bitset wanted;
+ struct ethtool_bitset active;
+ struct ethtool_bitset nochange;
+};
+
+void ethtool_features_set_rsp_free(struct ethtool_features_set_rsp *rsp);
+
+/*
+ * Set features.
+ */
+struct ethtool_features_set_rsp *
+ethtool_features_set(struct ynl_sock *ys, struct ethtool_features_set_req *req);
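+
+/*
+ * Illustrative sketch, not part of the generated output: unlike the other SET
+ * requests in this file, ethtool_features_set() returns a response object
+ * carrying the resulting feature bitsets, which the caller must free.
+ *
+ *    struct ethtool_features_set_req *req;
+ *    struct ethtool_features_set_rsp *rsp;
+ *
+ *    req = ethtool_features_set_req_alloc();
+ *    ethtool_features_set_req_set_header_dev_index(req, ifindex);
+ *    // request feature changes through the "wanted" bitset setters above
+ *
+ *    rsp = ethtool_features_set(ys, req);
+ *    ethtool_features_set_req_free(req);
+ *    if (rsp)
+ *        ethtool_features_set_rsp_free(rsp);
+ */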
+
+/* ============== ETHTOOL_MSG_PRIVFLAGS_GET ============== */
+/* ETHTOOL_MSG_PRIVFLAGS_GET - do */
+struct ethtool_privflags_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_privflags_get_req *
+ethtool_privflags_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_privflags_get_req));
+}
+void ethtool_privflags_get_req_free(struct ethtool_privflags_get_req *req);
+
+static inline void
+ethtool_privflags_get_req_set_header_dev_index(struct ethtool_privflags_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_privflags_get_req_set_header_dev_name(struct ethtool_privflags_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_privflags_get_req_set_header_flags(struct ethtool_privflags_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_privflags_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 flags:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset flags;
+};
+
+void ethtool_privflags_get_rsp_free(struct ethtool_privflags_get_rsp *rsp);
+
+/*
+ * Get device private flags.
+ */
+struct ethtool_privflags_get_rsp *
+ethtool_privflags_get(struct ynl_sock *ys,
+ struct ethtool_privflags_get_req *req);
+
+/* ETHTOOL_MSG_PRIVFLAGS_GET - dump */
+struct ethtool_privflags_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_privflags_get_req_dump *
+ethtool_privflags_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_privflags_get_req_dump));
+}
+void
+ethtool_privflags_get_req_dump_free(struct ethtool_privflags_get_req_dump *req);
+
+static inline void
+ethtool_privflags_get_req_dump_set_header_dev_index(struct ethtool_privflags_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_privflags_get_req_dump_set_header_dev_name(struct ethtool_privflags_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_privflags_get_req_dump_set_header_flags(struct ethtool_privflags_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_privflags_get_list {
+ struct ethtool_privflags_get_list *next;
+ struct ethtool_privflags_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_privflags_get_list_free(struct ethtool_privflags_get_list *rsp);
+
+struct ethtool_privflags_get_list *
+ethtool_privflags_get_dump(struct ynl_sock *ys,
+ struct ethtool_privflags_get_req_dump *req);
+
+/* ETHTOOL_MSG_PRIVFLAGS_GET - notify */
+struct ethtool_privflags_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_privflags_get_ntf *ntf);
+ struct ethtool_privflags_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_privflags_get_ntf_free(struct ethtool_privflags_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_PRIVFLAGS_SET ============== */
+/* ETHTOOL_MSG_PRIVFLAGS_SET - do */
+struct ethtool_privflags_set_req {
+ struct {
+ __u32 header:1;
+ __u32 flags:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset flags;
+};
+
+static inline struct ethtool_privflags_set_req *
+ethtool_privflags_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_privflags_set_req));
+}
+void ethtool_privflags_set_req_free(struct ethtool_privflags_set_req *req);
+
+static inline void
+ethtool_privflags_set_req_set_header_dev_index(struct ethtool_privflags_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_privflags_set_req_set_header_dev_name(struct ethtool_privflags_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_privflags_set_req_set_header_flags(struct ethtool_privflags_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_privflags_set_req_set_flags_nomask(struct ethtool_privflags_set_req *req)
+{
+ req->_present.flags = 1;
+ req->flags._present.nomask = 1;
+}
+static inline void
+ethtool_privflags_set_req_set_flags_size(struct ethtool_privflags_set_req *req,
+ __u32 size)
+{
+ req->_present.flags = 1;
+ req->flags._present.size = 1;
+ req->flags.size = size;
+}
+static inline void
+__ethtool_privflags_set_req_set_flags_bits_bit(struct ethtool_privflags_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->flags.bits.bit);
+ req->flags.bits.bit = bit;
+ req->flags.bits.n_bit = n_bit;
+}
+
+/*
+ * Set device private flags.
+ */
+int ethtool_privflags_set(struct ynl_sock *ys,
+ struct ethtool_privflags_set_req *req);
+
+/* ============== ETHTOOL_MSG_RINGS_GET ============== */
+/* ETHTOOL_MSG_RINGS_GET - do */
+struct ethtool_rings_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_rings_get_req *ethtool_rings_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_rings_get_req));
+}
+void ethtool_rings_get_req_free(struct ethtool_rings_get_req *req);
+
+static inline void
+ethtool_rings_get_req_set_header_dev_index(struct ethtool_rings_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_rings_get_req_set_header_dev_name(struct ethtool_rings_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_rings_get_req_set_header_flags(struct ethtool_rings_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_rings_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 rx_max:1;
+ __u32 rx_mini_max:1;
+ __u32 rx_jumbo_max:1;
+ __u32 tx_max:1;
+ __u32 rx:1;
+ __u32 rx_mini:1;
+ __u32 rx_jumbo:1;
+ __u32 tx:1;
+ __u32 rx_buf_len:1;
+ __u32 tcp_data_split:1;
+ __u32 cqe_size:1;
+ __u32 tx_push:1;
+ __u32 rx_push:1;
+ __u32 tx_push_buf_len:1;
+ __u32 tx_push_buf_len_max:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u32 rx_max;
+ __u32 rx_mini_max;
+ __u32 rx_jumbo_max;
+ __u32 tx_max;
+ __u32 rx;
+ __u32 rx_mini;
+ __u32 rx_jumbo;
+ __u32 tx;
+ __u32 rx_buf_len;
+ __u8 tcp_data_split;
+ __u32 cqe_size;
+ __u8 tx_push;
+ __u8 rx_push;
+ __u32 tx_push_buf_len;
+ __u32 tx_push_buf_len_max;
+};
+
+void ethtool_rings_get_rsp_free(struct ethtool_rings_get_rsp *rsp);
+
+/*
+ * Get ring params.
+ */
+struct ethtool_rings_get_rsp *
+ethtool_rings_get(struct ynl_sock *ys, struct ethtool_rings_get_req *req);
+
+/* ETHTOOL_MSG_RINGS_GET - dump */
+struct ethtool_rings_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_rings_get_req_dump *
+ethtool_rings_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_rings_get_req_dump));
+}
+void ethtool_rings_get_req_dump_free(struct ethtool_rings_get_req_dump *req);
+
+static inline void
+ethtool_rings_get_req_dump_set_header_dev_index(struct ethtool_rings_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_rings_get_req_dump_set_header_dev_name(struct ethtool_rings_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_rings_get_req_dump_set_header_flags(struct ethtool_rings_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_rings_get_list {
+ struct ethtool_rings_get_list *next;
+ struct ethtool_rings_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_rings_get_list_free(struct ethtool_rings_get_list *rsp);
+
+struct ethtool_rings_get_list *
+ethtool_rings_get_dump(struct ynl_sock *ys,
+ struct ethtool_rings_get_req_dump *req);
+
+/* ETHTOOL_MSG_RINGS_GET - notify */
+struct ethtool_rings_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_rings_get_ntf *ntf);
+ struct ethtool_rings_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_rings_get_ntf_free(struct ethtool_rings_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_RINGS_SET ============== */
+/* ETHTOOL_MSG_RINGS_SET - do */
+struct ethtool_rings_set_req {
+ struct {
+ __u32 header:1;
+ __u32 rx_max:1;
+ __u32 rx_mini_max:1;
+ __u32 rx_jumbo_max:1;
+ __u32 tx_max:1;
+ __u32 rx:1;
+ __u32 rx_mini:1;
+ __u32 rx_jumbo:1;
+ __u32 tx:1;
+ __u32 rx_buf_len:1;
+ __u32 tcp_data_split:1;
+ __u32 cqe_size:1;
+ __u32 tx_push:1;
+ __u32 rx_push:1;
+ __u32 tx_push_buf_len:1;
+ __u32 tx_push_buf_len_max:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u32 rx_max;
+ __u32 rx_mini_max;
+ __u32 rx_jumbo_max;
+ __u32 tx_max;
+ __u32 rx;
+ __u32 rx_mini;
+ __u32 rx_jumbo;
+ __u32 tx;
+ __u32 rx_buf_len;
+ __u8 tcp_data_split;
+ __u32 cqe_size;
+ __u8 tx_push;
+ __u8 rx_push;
+ __u32 tx_push_buf_len;
+ __u32 tx_push_buf_len_max;
+};
+
+static inline struct ethtool_rings_set_req *ethtool_rings_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_rings_set_req));
+}
+void ethtool_rings_set_req_free(struct ethtool_rings_set_req *req);
+
+static inline void
+ethtool_rings_set_req_set_header_dev_index(struct ethtool_rings_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_rings_set_req_set_header_dev_name(struct ethtool_rings_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_rings_set_req_set_header_flags(struct ethtool_rings_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_rings_set_req_set_rx_max(struct ethtool_rings_set_req *req,
+ __u32 rx_max)
+{
+ req->_present.rx_max = 1;
+ req->rx_max = rx_max;
+}
+static inline void
+ethtool_rings_set_req_set_rx_mini_max(struct ethtool_rings_set_req *req,
+ __u32 rx_mini_max)
+{
+ req->_present.rx_mini_max = 1;
+ req->rx_mini_max = rx_mini_max;
+}
+static inline void
+ethtool_rings_set_req_set_rx_jumbo_max(struct ethtool_rings_set_req *req,
+ __u32 rx_jumbo_max)
+{
+ req->_present.rx_jumbo_max = 1;
+ req->rx_jumbo_max = rx_jumbo_max;
+}
+static inline void
+ethtool_rings_set_req_set_tx_max(struct ethtool_rings_set_req *req,
+ __u32 tx_max)
+{
+ req->_present.tx_max = 1;
+ req->tx_max = tx_max;
+}
+static inline void
+ethtool_rings_set_req_set_rx(struct ethtool_rings_set_req *req, __u32 rx)
+{
+ req->_present.rx = 1;
+ req->rx = rx;
+}
+static inline void
+ethtool_rings_set_req_set_rx_mini(struct ethtool_rings_set_req *req,
+ __u32 rx_mini)
+{
+ req->_present.rx_mini = 1;
+ req->rx_mini = rx_mini;
+}
+static inline void
+ethtool_rings_set_req_set_rx_jumbo(struct ethtool_rings_set_req *req,
+ __u32 rx_jumbo)
+{
+ req->_present.rx_jumbo = 1;
+ req->rx_jumbo = rx_jumbo;
+}
+static inline void
+ethtool_rings_set_req_set_tx(struct ethtool_rings_set_req *req, __u32 tx)
+{
+ req->_present.tx = 1;
+ req->tx = tx;
+}
+static inline void
+ethtool_rings_set_req_set_rx_buf_len(struct ethtool_rings_set_req *req,
+ __u32 rx_buf_len)
+{
+ req->_present.rx_buf_len = 1;
+ req->rx_buf_len = rx_buf_len;
+}
+static inline void
+ethtool_rings_set_req_set_tcp_data_split(struct ethtool_rings_set_req *req,
+ __u8 tcp_data_split)
+{
+ req->_present.tcp_data_split = 1;
+ req->tcp_data_split = tcp_data_split;
+}
+static inline void
+ethtool_rings_set_req_set_cqe_size(struct ethtool_rings_set_req *req,
+ __u32 cqe_size)
+{
+ req->_present.cqe_size = 1;
+ req->cqe_size = cqe_size;
+}
+static inline void
+ethtool_rings_set_req_set_tx_push(struct ethtool_rings_set_req *req,
+ __u8 tx_push)
+{
+ req->_present.tx_push = 1;
+ req->tx_push = tx_push;
+}
+static inline void
+ethtool_rings_set_req_set_rx_push(struct ethtool_rings_set_req *req,
+ __u8 rx_push)
+{
+ req->_present.rx_push = 1;
+ req->rx_push = rx_push;
+}
+static inline void
+ethtool_rings_set_req_set_tx_push_buf_len(struct ethtool_rings_set_req *req,
+ __u32 tx_push_buf_len)
+{
+ req->_present.tx_push_buf_len = 1;
+ req->tx_push_buf_len = tx_push_buf_len;
+}
+static inline void
+ethtool_rings_set_req_set_tx_push_buf_len_max(struct ethtool_rings_set_req *req,
+ __u32 tx_push_buf_len_max)
+{
+ req->_present.tx_push_buf_len_max = 1;
+ req->tx_push_buf_len_max = tx_push_buf_len_max;
+}
+
+/*
+ * Set ring params.
+ */
+int ethtool_rings_set(struct ynl_sock *ys, struct ethtool_rings_set_req *req);
+
+/* ============== ETHTOOL_MSG_CHANNELS_GET ============== */
+/* ETHTOOL_MSG_CHANNELS_GET - do */
+struct ethtool_channels_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_channels_get_req *
+ethtool_channels_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_channels_get_req));
+}
+void ethtool_channels_get_req_free(struct ethtool_channels_get_req *req);
+
+static inline void
+ethtool_channels_get_req_set_header_dev_index(struct ethtool_channels_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_channels_get_req_set_header_dev_name(struct ethtool_channels_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_channels_get_req_set_header_flags(struct ethtool_channels_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_channels_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 rx_max:1;
+ __u32 tx_max:1;
+ __u32 other_max:1;
+ __u32 combined_max:1;
+ __u32 rx_count:1;
+ __u32 tx_count:1;
+ __u32 other_count:1;
+ __u32 combined_count:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u32 rx_max;
+ __u32 tx_max;
+ __u32 other_max;
+ __u32 combined_max;
+ __u32 rx_count;
+ __u32 tx_count;
+ __u32 other_count;
+ __u32 combined_count;
+};
+
+void ethtool_channels_get_rsp_free(struct ethtool_channels_get_rsp *rsp);
+
+/*
+ * Get channel params.
+ */
+struct ethtool_channels_get_rsp *
+ethtool_channels_get(struct ynl_sock *ys, struct ethtool_channels_get_req *req);
+
+/* ETHTOOL_MSG_CHANNELS_GET - dump */
+struct ethtool_channels_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_channels_get_req_dump *
+ethtool_channels_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_channels_get_req_dump));
+}
+void
+ethtool_channels_get_req_dump_free(struct ethtool_channels_get_req_dump *req);
+
+static inline void
+ethtool_channels_get_req_dump_set_header_dev_index(struct ethtool_channels_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_channels_get_req_dump_set_header_dev_name(struct ethtool_channels_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_channels_get_req_dump_set_header_flags(struct ethtool_channels_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_channels_get_list {
+ struct ethtool_channels_get_list *next;
+ struct ethtool_channels_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_channels_get_list_free(struct ethtool_channels_get_list *rsp);
+
+struct ethtool_channels_get_list *
+ethtool_channels_get_dump(struct ynl_sock *ys,
+ struct ethtool_channels_get_req_dump *req);
+
+/* ETHTOOL_MSG_CHANNELS_GET - notify */
+struct ethtool_channels_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_channels_get_ntf *ntf);
+ struct ethtool_channels_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_channels_get_ntf_free(struct ethtool_channels_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_CHANNELS_SET ============== */
+/* ETHTOOL_MSG_CHANNELS_SET - do */
+struct ethtool_channels_set_req {
+ struct {
+ __u32 header:1;
+ __u32 rx_max:1;
+ __u32 tx_max:1;
+ __u32 other_max:1;
+ __u32 combined_max:1;
+ __u32 rx_count:1;
+ __u32 tx_count:1;
+ __u32 other_count:1;
+ __u32 combined_count:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u32 rx_max;
+ __u32 tx_max;
+ __u32 other_max;
+ __u32 combined_max;
+ __u32 rx_count;
+ __u32 tx_count;
+ __u32 other_count;
+ __u32 combined_count;
+};
+
+static inline struct ethtool_channels_set_req *
+ethtool_channels_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_channels_set_req));
+}
+void ethtool_channels_set_req_free(struct ethtool_channels_set_req *req);
+
+static inline void
+ethtool_channels_set_req_set_header_dev_index(struct ethtool_channels_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_channels_set_req_set_header_dev_name(struct ethtool_channels_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_channels_set_req_set_header_flags(struct ethtool_channels_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_channels_set_req_set_rx_max(struct ethtool_channels_set_req *req,
+ __u32 rx_max)
+{
+ req->_present.rx_max = 1;
+ req->rx_max = rx_max;
+}
+static inline void
+ethtool_channels_set_req_set_tx_max(struct ethtool_channels_set_req *req,
+ __u32 tx_max)
+{
+ req->_present.tx_max = 1;
+ req->tx_max = tx_max;
+}
+static inline void
+ethtool_channels_set_req_set_other_max(struct ethtool_channels_set_req *req,
+ __u32 other_max)
+{
+ req->_present.other_max = 1;
+ req->other_max = other_max;
+}
+static inline void
+ethtool_channels_set_req_set_combined_max(struct ethtool_channels_set_req *req,
+ __u32 combined_max)
+{
+ req->_present.combined_max = 1;
+ req->combined_max = combined_max;
+}
+static inline void
+ethtool_channels_set_req_set_rx_count(struct ethtool_channels_set_req *req,
+ __u32 rx_count)
+{
+ req->_present.rx_count = 1;
+ req->rx_count = rx_count;
+}
+static inline void
+ethtool_channels_set_req_set_tx_count(struct ethtool_channels_set_req *req,
+ __u32 tx_count)
+{
+ req->_present.tx_count = 1;
+ req->tx_count = tx_count;
+}
+static inline void
+ethtool_channels_set_req_set_other_count(struct ethtool_channels_set_req *req,
+ __u32 other_count)
+{
+ req->_present.other_count = 1;
+ req->other_count = other_count;
+}
+static inline void
+ethtool_channels_set_req_set_combined_count(struct ethtool_channels_set_req *req,
+ __u32 combined_count)
+{
+ req->_present.combined_count = 1;
+ req->combined_count = combined_count;
+}
+
+/*
+ * Set channel params.
+ */
+int ethtool_channels_set(struct ynl_sock *ys,
+ struct ethtool_channels_set_req *req);
+
+/* ============== ETHTOOL_MSG_COALESCE_GET ============== */
+/* ETHTOOL_MSG_COALESCE_GET - do */
+struct ethtool_coalesce_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_coalesce_get_req *
+ethtool_coalesce_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_coalesce_get_req));
+}
+void ethtool_coalesce_get_req_free(struct ethtool_coalesce_get_req *req);
+
+static inline void
+ethtool_coalesce_get_req_set_header_dev_index(struct ethtool_coalesce_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_coalesce_get_req_set_header_dev_name(struct ethtool_coalesce_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_coalesce_get_req_set_header_flags(struct ethtool_coalesce_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_coalesce_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 rx_usecs:1;
+ __u32 rx_max_frames:1;
+ __u32 rx_usecs_irq:1;
+ __u32 rx_max_frames_irq:1;
+ __u32 tx_usecs:1;
+ __u32 tx_max_frames:1;
+ __u32 tx_usecs_irq:1;
+ __u32 tx_max_frames_irq:1;
+ __u32 stats_block_usecs:1;
+ __u32 use_adaptive_rx:1;
+ __u32 use_adaptive_tx:1;
+ __u32 pkt_rate_low:1;
+ __u32 rx_usecs_low:1;
+ __u32 rx_max_frames_low:1;
+ __u32 tx_usecs_low:1;
+ __u32 tx_max_frames_low:1;
+ __u32 pkt_rate_high:1;
+ __u32 rx_usecs_high:1;
+ __u32 rx_max_frames_high:1;
+ __u32 tx_usecs_high:1;
+ __u32 tx_max_frames_high:1;
+ __u32 rate_sample_interval:1;
+ __u32 use_cqe_mode_tx:1;
+ __u32 use_cqe_mode_rx:1;
+ __u32 tx_aggr_max_bytes:1;
+ __u32 tx_aggr_max_frames:1;
+ __u32 tx_aggr_time_usecs:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u32 rx_usecs;
+ __u32 rx_max_frames;
+ __u32 rx_usecs_irq;
+ __u32 rx_max_frames_irq;
+ __u32 tx_usecs;
+ __u32 tx_max_frames;
+ __u32 tx_usecs_irq;
+ __u32 tx_max_frames_irq;
+ __u32 stats_block_usecs;
+ __u8 use_adaptive_rx;
+ __u8 use_adaptive_tx;
+ __u32 pkt_rate_low;
+ __u32 rx_usecs_low;
+ __u32 rx_max_frames_low;
+ __u32 tx_usecs_low;
+ __u32 tx_max_frames_low;
+ __u32 pkt_rate_high;
+ __u32 rx_usecs_high;
+ __u32 rx_max_frames_high;
+ __u32 tx_usecs_high;
+ __u32 tx_max_frames_high;
+ __u32 rate_sample_interval;
+ __u8 use_cqe_mode_tx;
+ __u8 use_cqe_mode_rx;
+ __u32 tx_aggr_max_bytes;
+ __u32 tx_aggr_max_frames;
+ __u32 tx_aggr_time_usecs;
+};
+
+void ethtool_coalesce_get_rsp_free(struct ethtool_coalesce_get_rsp *rsp);
+
+/*
+ * Get coalesce params.
+ */
+struct ethtool_coalesce_get_rsp *
+ethtool_coalesce_get(struct ynl_sock *ys, struct ethtool_coalesce_get_req *req);
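For a "do" getter such as ethtool_coalesce_get() the response is heap allocated and every optional attribute has a matching _present bit that should be tested before the value is read. A minimal editorial sketch, reusing the socket setup from the channels example above (ifindex is just a placeholder parameter):

/* Editorial sketch: print a couple of coalescing settings. */
static int show_coalesce(struct ynl_sock *ys, unsigned int ifindex)
{
	struct ethtool_coalesce_get_req *req;
	struct ethtool_coalesce_get_rsp *rsp;
	int ret = -1;

	req = ethtool_coalesce_get_req_alloc();
	if (!req)
		return -1;
	ethtool_coalesce_get_req_set_header_dev_index(req, ifindex);

	rsp = ethtool_coalesce_get(ys, req);	/* NULL on error */
	if (rsp) {
		if (rsp->_present.rx_usecs)
			printf("rx-usecs: %u\n", rsp->rx_usecs);
		if (rsp->_present.use_adaptive_rx)
			printf("adaptive-rx: %u\n", rsp->use_adaptive_rx);
		ethtool_coalesce_get_rsp_free(rsp);
		ret = 0;
	}
	ethtool_coalesce_get_req_free(req);
	return ret;
}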
+
+/* ETHTOOL_MSG_COALESCE_GET - dump */
+struct ethtool_coalesce_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_coalesce_get_req_dump *
+ethtool_coalesce_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_coalesce_get_req_dump));
+}
+void
+ethtool_coalesce_get_req_dump_free(struct ethtool_coalesce_get_req_dump *req);
+
+static inline void
+ethtool_coalesce_get_req_dump_set_header_dev_index(struct ethtool_coalesce_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_coalesce_get_req_dump_set_header_dev_name(struct ethtool_coalesce_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_coalesce_get_req_dump_set_header_flags(struct ethtool_coalesce_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_coalesce_get_list {
+ struct ethtool_coalesce_get_list *next;
+ struct ethtool_coalesce_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_coalesce_get_list_free(struct ethtool_coalesce_get_list *rsp);
+
+struct ethtool_coalesce_get_list *
+ethtool_coalesce_get_dump(struct ynl_sock *ys,
+ struct ethtool_coalesce_get_req_dump *req);
+
+/* ETHTOOL_MSG_COALESCE_GET - notify */
+struct ethtool_coalesce_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_coalesce_get_ntf *ntf);
+ struct ethtool_coalesce_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_coalesce_get_ntf_free(struct ethtool_coalesce_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_COALESCE_SET ============== */
+/* ETHTOOL_MSG_COALESCE_SET - do */
+struct ethtool_coalesce_set_req {
+ struct {
+ __u32 header:1;
+ __u32 rx_usecs:1;
+ __u32 rx_max_frames:1;
+ __u32 rx_usecs_irq:1;
+ __u32 rx_max_frames_irq:1;
+ __u32 tx_usecs:1;
+ __u32 tx_max_frames:1;
+ __u32 tx_usecs_irq:1;
+ __u32 tx_max_frames_irq:1;
+ __u32 stats_block_usecs:1;
+ __u32 use_adaptive_rx:1;
+ __u32 use_adaptive_tx:1;
+ __u32 pkt_rate_low:1;
+ __u32 rx_usecs_low:1;
+ __u32 rx_max_frames_low:1;
+ __u32 tx_usecs_low:1;
+ __u32 tx_max_frames_low:1;
+ __u32 pkt_rate_high:1;
+ __u32 rx_usecs_high:1;
+ __u32 rx_max_frames_high:1;
+ __u32 tx_usecs_high:1;
+ __u32 tx_max_frames_high:1;
+ __u32 rate_sample_interval:1;
+ __u32 use_cqe_mode_tx:1;
+ __u32 use_cqe_mode_rx:1;
+ __u32 tx_aggr_max_bytes:1;
+ __u32 tx_aggr_max_frames:1;
+ __u32 tx_aggr_time_usecs:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u32 rx_usecs;
+ __u32 rx_max_frames;
+ __u32 rx_usecs_irq;
+ __u32 rx_max_frames_irq;
+ __u32 tx_usecs;
+ __u32 tx_max_frames;
+ __u32 tx_usecs_irq;
+ __u32 tx_max_frames_irq;
+ __u32 stats_block_usecs;
+ __u8 use_adaptive_rx;
+ __u8 use_adaptive_tx;
+ __u32 pkt_rate_low;
+ __u32 rx_usecs_low;
+ __u32 rx_max_frames_low;
+ __u32 tx_usecs_low;
+ __u32 tx_max_frames_low;
+ __u32 pkt_rate_high;
+ __u32 rx_usecs_high;
+ __u32 rx_max_frames_high;
+ __u32 tx_usecs_high;
+ __u32 tx_max_frames_high;
+ __u32 rate_sample_interval;
+ __u8 use_cqe_mode_tx;
+ __u8 use_cqe_mode_rx;
+ __u32 tx_aggr_max_bytes;
+ __u32 tx_aggr_max_frames;
+ __u32 tx_aggr_time_usecs;
+};
+
+static inline struct ethtool_coalesce_set_req *
+ethtool_coalesce_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_coalesce_set_req));
+}
+void ethtool_coalesce_set_req_free(struct ethtool_coalesce_set_req *req);
+
+static inline void
+ethtool_coalesce_set_req_set_header_dev_index(struct ethtool_coalesce_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_coalesce_set_req_set_header_dev_name(struct ethtool_coalesce_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_coalesce_set_req_set_header_flags(struct ethtool_coalesce_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_coalesce_set_req_set_rx_usecs(struct ethtool_coalesce_set_req *req,
+ __u32 rx_usecs)
+{
+ req->_present.rx_usecs = 1;
+ req->rx_usecs = rx_usecs;
+}
+static inline void
+ethtool_coalesce_set_req_set_rx_max_frames(struct ethtool_coalesce_set_req *req,
+ __u32 rx_max_frames)
+{
+ req->_present.rx_max_frames = 1;
+ req->rx_max_frames = rx_max_frames;
+}
+static inline void
+ethtool_coalesce_set_req_set_rx_usecs_irq(struct ethtool_coalesce_set_req *req,
+ __u32 rx_usecs_irq)
+{
+ req->_present.rx_usecs_irq = 1;
+ req->rx_usecs_irq = rx_usecs_irq;
+}
+static inline void
+ethtool_coalesce_set_req_set_rx_max_frames_irq(struct ethtool_coalesce_set_req *req,
+ __u32 rx_max_frames_irq)
+{
+ req->_present.rx_max_frames_irq = 1;
+ req->rx_max_frames_irq = rx_max_frames_irq;
+}
+static inline void
+ethtool_coalesce_set_req_set_tx_usecs(struct ethtool_coalesce_set_req *req,
+ __u32 tx_usecs)
+{
+ req->_present.tx_usecs = 1;
+ req->tx_usecs = tx_usecs;
+}
+static inline void
+ethtool_coalesce_set_req_set_tx_max_frames(struct ethtool_coalesce_set_req *req,
+ __u32 tx_max_frames)
+{
+ req->_present.tx_max_frames = 1;
+ req->tx_max_frames = tx_max_frames;
+}
+static inline void
+ethtool_coalesce_set_req_set_tx_usecs_irq(struct ethtool_coalesce_set_req *req,
+ __u32 tx_usecs_irq)
+{
+ req->_present.tx_usecs_irq = 1;
+ req->tx_usecs_irq = tx_usecs_irq;
+}
+static inline void
+ethtool_coalesce_set_req_set_tx_max_frames_irq(struct ethtool_coalesce_set_req *req,
+ __u32 tx_max_frames_irq)
+{
+ req->_present.tx_max_frames_irq = 1;
+ req->tx_max_frames_irq = tx_max_frames_irq;
+}
+static inline void
+ethtool_coalesce_set_req_set_stats_block_usecs(struct ethtool_coalesce_set_req *req,
+ __u32 stats_block_usecs)
+{
+ req->_present.stats_block_usecs = 1;
+ req->stats_block_usecs = stats_block_usecs;
+}
+static inline void
+ethtool_coalesce_set_req_set_use_adaptive_rx(struct ethtool_coalesce_set_req *req,
+ __u8 use_adaptive_rx)
+{
+ req->_present.use_adaptive_rx = 1;
+ req->use_adaptive_rx = use_adaptive_rx;
+}
+static inline void
+ethtool_coalesce_set_req_set_use_adaptive_tx(struct ethtool_coalesce_set_req *req,
+ __u8 use_adaptive_tx)
+{
+ req->_present.use_adaptive_tx = 1;
+ req->use_adaptive_tx = use_adaptive_tx;
+}
+static inline void
+ethtool_coalesce_set_req_set_pkt_rate_low(struct ethtool_coalesce_set_req *req,
+ __u32 pkt_rate_low)
+{
+ req->_present.pkt_rate_low = 1;
+ req->pkt_rate_low = pkt_rate_low;
+}
+static inline void
+ethtool_coalesce_set_req_set_rx_usecs_low(struct ethtool_coalesce_set_req *req,
+ __u32 rx_usecs_low)
+{
+ req->_present.rx_usecs_low = 1;
+ req->rx_usecs_low = rx_usecs_low;
+}
+static inline void
+ethtool_coalesce_set_req_set_rx_max_frames_low(struct ethtool_coalesce_set_req *req,
+ __u32 rx_max_frames_low)
+{
+ req->_present.rx_max_frames_low = 1;
+ req->rx_max_frames_low = rx_max_frames_low;
+}
+static inline void
+ethtool_coalesce_set_req_set_tx_usecs_low(struct ethtool_coalesce_set_req *req,
+ __u32 tx_usecs_low)
+{
+ req->_present.tx_usecs_low = 1;
+ req->tx_usecs_low = tx_usecs_low;
+}
+static inline void
+ethtool_coalesce_set_req_set_tx_max_frames_low(struct ethtool_coalesce_set_req *req,
+ __u32 tx_max_frames_low)
+{
+ req->_present.tx_max_frames_low = 1;
+ req->tx_max_frames_low = tx_max_frames_low;
+}
+static inline void
+ethtool_coalesce_set_req_set_pkt_rate_high(struct ethtool_coalesce_set_req *req,
+ __u32 pkt_rate_high)
+{
+ req->_present.pkt_rate_high = 1;
+ req->pkt_rate_high = pkt_rate_high;
+}
+static inline void
+ethtool_coalesce_set_req_set_rx_usecs_high(struct ethtool_coalesce_set_req *req,
+ __u32 rx_usecs_high)
+{
+ req->_present.rx_usecs_high = 1;
+ req->rx_usecs_high = rx_usecs_high;
+}
+static inline void
+ethtool_coalesce_set_req_set_rx_max_frames_high(struct ethtool_coalesce_set_req *req,
+ __u32 rx_max_frames_high)
+{
+ req->_present.rx_max_frames_high = 1;
+ req->rx_max_frames_high = rx_max_frames_high;
+}
+static inline void
+ethtool_coalesce_set_req_set_tx_usecs_high(struct ethtool_coalesce_set_req *req,
+ __u32 tx_usecs_high)
+{
+ req->_present.tx_usecs_high = 1;
+ req->tx_usecs_high = tx_usecs_high;
+}
+static inline void
+ethtool_coalesce_set_req_set_tx_max_frames_high(struct ethtool_coalesce_set_req *req,
+ __u32 tx_max_frames_high)
+{
+ req->_present.tx_max_frames_high = 1;
+ req->tx_max_frames_high = tx_max_frames_high;
+}
+static inline void
+ethtool_coalesce_set_req_set_rate_sample_interval(struct ethtool_coalesce_set_req *req,
+ __u32 rate_sample_interval)
+{
+ req->_present.rate_sample_interval = 1;
+ req->rate_sample_interval = rate_sample_interval;
+}
+static inline void
+ethtool_coalesce_set_req_set_use_cqe_mode_tx(struct ethtool_coalesce_set_req *req,
+ __u8 use_cqe_mode_tx)
+{
+ req->_present.use_cqe_mode_tx = 1;
+ req->use_cqe_mode_tx = use_cqe_mode_tx;
+}
+static inline void
+ethtool_coalesce_set_req_set_use_cqe_mode_rx(struct ethtool_coalesce_set_req *req,
+ __u8 use_cqe_mode_rx)
+{
+ req->_present.use_cqe_mode_rx = 1;
+ req->use_cqe_mode_rx = use_cqe_mode_rx;
+}
+static inline void
+ethtool_coalesce_set_req_set_tx_aggr_max_bytes(struct ethtool_coalesce_set_req *req,
+ __u32 tx_aggr_max_bytes)
+{
+ req->_present.tx_aggr_max_bytes = 1;
+ req->tx_aggr_max_bytes = tx_aggr_max_bytes;
+}
+static inline void
+ethtool_coalesce_set_req_set_tx_aggr_max_frames(struct ethtool_coalesce_set_req *req,
+ __u32 tx_aggr_max_frames)
+{
+ req->_present.tx_aggr_max_frames = 1;
+ req->tx_aggr_max_frames = tx_aggr_max_frames;
+}
+static inline void
+ethtool_coalesce_set_req_set_tx_aggr_time_usecs(struct ethtool_coalesce_set_req *req,
+ __u32 tx_aggr_time_usecs)
+{
+ req->_present.tx_aggr_time_usecs = 1;
+ req->tx_aggr_time_usecs = tx_aggr_time_usecs;
+}
+
+/*
+ * Set coalesce params.
+ */
+int ethtool_coalesce_set(struct ynl_sock *ys,
+ struct ethtool_coalesce_set_req *req);
+
+/* ============== ETHTOOL_MSG_PAUSE_GET ============== */
+/* ETHTOOL_MSG_PAUSE_GET - do */
+struct ethtool_pause_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_pause_get_req *ethtool_pause_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_pause_get_req));
+}
+void ethtool_pause_get_req_free(struct ethtool_pause_get_req *req);
+
+static inline void
+ethtool_pause_get_req_set_header_dev_index(struct ethtool_pause_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_pause_get_req_set_header_dev_name(struct ethtool_pause_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_pause_get_req_set_header_flags(struct ethtool_pause_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_pause_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 autoneg:1;
+ __u32 rx:1;
+ __u32 tx:1;
+ __u32 stats:1;
+ __u32 stats_src:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 autoneg;
+ __u8 rx;
+ __u8 tx;
+ struct ethtool_pause_stat stats;
+ __u32 stats_src;
+};
+
+void ethtool_pause_get_rsp_free(struct ethtool_pause_get_rsp *rsp);
+
+/*
+ * Get pause params.
+ */
+struct ethtool_pause_get_rsp *
+ethtool_pause_get(struct ynl_sock *ys, struct ethtool_pause_get_req *req);
+
+/* ETHTOOL_MSG_PAUSE_GET - dump */
+struct ethtool_pause_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_pause_get_req_dump *
+ethtool_pause_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_pause_get_req_dump));
+}
+void ethtool_pause_get_req_dump_free(struct ethtool_pause_get_req_dump *req);
+
+static inline void
+ethtool_pause_get_req_dump_set_header_dev_index(struct ethtool_pause_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_pause_get_req_dump_set_header_dev_name(struct ethtool_pause_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_pause_get_req_dump_set_header_flags(struct ethtool_pause_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_pause_get_list {
+ struct ethtool_pause_get_list *next;
+ struct ethtool_pause_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_pause_get_list_free(struct ethtool_pause_get_list *rsp);
+
+struct ethtool_pause_get_list *
+ethtool_pause_get_dump(struct ynl_sock *ys,
+ struct ethtool_pause_get_req_dump *req);
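Dump results are returned as the linked list type above, one node per interface, each node embedding a full response object. The sketch below is editorial and leans on the ynl_dump_foreach() iterator that the YNL samples in this series use (defined in tools/net/ynl/lib/ynl.h); strictly, each field's _present bit should also be checked as in the "do" case.

/* Editorial sketch: list pause settings for all interfaces. */
static int show_pause_all(struct ynl_sock *ys)
{
	struct ethtool_pause_get_req_dump *req;
	struct ethtool_pause_get_list *list;

	req = ethtool_pause_get_req_dump_alloc();
	if (!req)
		return -1;

	list = ethtool_pause_get_dump(ys, req);
	ethtool_pause_get_req_dump_free(req);
	if (!list)
		return -1;

	ynl_dump_foreach(list, r) {
		printf("%s: autoneg %u rx %u tx %u\n",
		       r->header.dev_name ? r->header.dev_name : "?",
		       r->autoneg, r->rx, r->tx);
	}

	ethtool_pause_get_list_free(list);
	return 0;
}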
+
+/* ETHTOOL_MSG_PAUSE_GET - notify */
+struct ethtool_pause_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_pause_get_ntf *ntf);
+ struct ethtool_pause_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_pause_get_ntf_free(struct ethtool_pause_get_ntf *rsp);
+
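The *_ntf structures above wrap the same response object for asynchronous notifications. A rough editorial sketch of consuming them follows; it assumes the generic YNL notification helpers used by the samples in this series (ynl_subscribe(), ynl_ntf_check(), ynl_has_ntf(), ynl_ntf_dequeue(), ynl_ntf_free()), the ethtool "monitor" multicast group and ETHTOOL_MSG_PAUSE_NTF from the uAPI header; verify the exact names and signatures against tools/net/ynl/lib/ynl.h before relying on them.

/* Editorial sketch: print pause notifications as they arrive. */
static int monitor_pause(struct ynl_sock *ys)
{
	if (ynl_subscribe(ys, "monitor"))
		return -1;

	for (;;) {
		struct ynl_ntf_base_type *ntf;

		if (ynl_ntf_check(ys) < 0)	/* receive and parse pending messages */
			return -1;

		while (ynl_has_ntf(ys)) {
			ntf = ynl_ntf_dequeue(ys);
			if (ntf->cmd == ETHTOOL_MSG_PAUSE_NTF) {
				struct ethtool_pause_get_ntf *pn =
					(struct ethtool_pause_get_ntf *)ntf;

				printf("pause ntf: rx %u tx %u\n",
				       pn->obj.rx, pn->obj.tx);
			}
			ynl_ntf_free(ntf);
		}
	}
}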
+/* ============== ETHTOOL_MSG_PAUSE_SET ============== */
+/* ETHTOOL_MSG_PAUSE_SET - do */
+struct ethtool_pause_set_req {
+ struct {
+ __u32 header:1;
+ __u32 autoneg:1;
+ __u32 rx:1;
+ __u32 tx:1;
+ __u32 stats:1;
+ __u32 stats_src:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 autoneg;
+ __u8 rx;
+ __u8 tx;
+ struct ethtool_pause_stat stats;
+ __u32 stats_src;
+};
+
+static inline struct ethtool_pause_set_req *ethtool_pause_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_pause_set_req));
+}
+void ethtool_pause_set_req_free(struct ethtool_pause_set_req *req);
+
+static inline void
+ethtool_pause_set_req_set_header_dev_index(struct ethtool_pause_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_pause_set_req_set_header_dev_name(struct ethtool_pause_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_pause_set_req_set_header_flags(struct ethtool_pause_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_pause_set_req_set_autoneg(struct ethtool_pause_set_req *req,
+ __u8 autoneg)
+{
+ req->_present.autoneg = 1;
+ req->autoneg = autoneg;
+}
+static inline void
+ethtool_pause_set_req_set_rx(struct ethtool_pause_set_req *req, __u8 rx)
+{
+ req->_present.rx = 1;
+ req->rx = rx;
+}
+static inline void
+ethtool_pause_set_req_set_tx(struct ethtool_pause_set_req *req, __u8 tx)
+{
+ req->_present.tx = 1;
+ req->tx = tx;
+}
+static inline void
+ethtool_pause_set_req_set_stats_tx_frames(struct ethtool_pause_set_req *req,
+ __u64 tx_frames)
+{
+ req->_present.stats = 1;
+ req->stats._present.tx_frames = 1;
+ req->stats.tx_frames = tx_frames;
+}
+static inline void
+ethtool_pause_set_req_set_stats_rx_frames(struct ethtool_pause_set_req *req,
+ __u64 rx_frames)
+{
+ req->_present.stats = 1;
+ req->stats._present.rx_frames = 1;
+ req->stats.rx_frames = rx_frames;
+}
+static inline void
+ethtool_pause_set_req_set_stats_src(struct ethtool_pause_set_req *req,
+ __u32 stats_src)
+{
+ req->_present.stats_src = 1;
+ req->stats_src = stats_src;
+}
+
+/*
+ * Set pause params.
+ */
+int ethtool_pause_set(struct ynl_sock *ys, struct ethtool_pause_set_req *req);
+
+/* ============== ETHTOOL_MSG_EEE_GET ============== */
+/* ETHTOOL_MSG_EEE_GET - do */
+struct ethtool_eee_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_eee_get_req *ethtool_eee_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_eee_get_req));
+}
+void ethtool_eee_get_req_free(struct ethtool_eee_get_req *req);
+
+static inline void
+ethtool_eee_get_req_set_header_dev_index(struct ethtool_eee_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_eee_get_req_set_header_dev_name(struct ethtool_eee_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_eee_get_req_set_header_flags(struct ethtool_eee_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_eee_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 modes_ours:1;
+ __u32 modes_peer:1;
+ __u32 active:1;
+ __u32 enabled:1;
+ __u32 tx_lpi_enabled:1;
+ __u32 tx_lpi_timer:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset modes_ours;
+ struct ethtool_bitset modes_peer;
+ __u8 active;
+ __u8 enabled;
+ __u8 tx_lpi_enabled;
+ __u32 tx_lpi_timer;
+};
+
+void ethtool_eee_get_rsp_free(struct ethtool_eee_get_rsp *rsp);
+
+/*
+ * Get eee params.
+ */
+struct ethtool_eee_get_rsp *
+ethtool_eee_get(struct ynl_sock *ys, struct ethtool_eee_get_req *req);
+
+/* ETHTOOL_MSG_EEE_GET - dump */
+struct ethtool_eee_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_eee_get_req_dump *
+ethtool_eee_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_eee_get_req_dump));
+}
+void ethtool_eee_get_req_dump_free(struct ethtool_eee_get_req_dump *req);
+
+static inline void
+ethtool_eee_get_req_dump_set_header_dev_index(struct ethtool_eee_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_eee_get_req_dump_set_header_dev_name(struct ethtool_eee_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_eee_get_req_dump_set_header_flags(struct ethtool_eee_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_eee_get_list {
+ struct ethtool_eee_get_list *next;
+ struct ethtool_eee_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_eee_get_list_free(struct ethtool_eee_get_list *rsp);
+
+struct ethtool_eee_get_list *
+ethtool_eee_get_dump(struct ynl_sock *ys, struct ethtool_eee_get_req_dump *req);
+
+/* ETHTOOL_MSG_EEE_GET - notify */
+struct ethtool_eee_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_eee_get_ntf *ntf);
+ struct ethtool_eee_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_eee_get_ntf_free(struct ethtool_eee_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_EEE_SET ============== */
+/* ETHTOOL_MSG_EEE_SET - do */
+struct ethtool_eee_set_req {
+ struct {
+ __u32 header:1;
+ __u32 modes_ours:1;
+ __u32 modes_peer:1;
+ __u32 active:1;
+ __u32 enabled:1;
+ __u32 tx_lpi_enabled:1;
+ __u32 tx_lpi_timer:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset modes_ours;
+ struct ethtool_bitset modes_peer;
+ __u8 active;
+ __u8 enabled;
+ __u8 tx_lpi_enabled;
+ __u32 tx_lpi_timer;
+};
+
+static inline struct ethtool_eee_set_req *ethtool_eee_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_eee_set_req));
+}
+void ethtool_eee_set_req_free(struct ethtool_eee_set_req *req);
+
+static inline void
+ethtool_eee_set_req_set_header_dev_index(struct ethtool_eee_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_eee_set_req_set_header_dev_name(struct ethtool_eee_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_eee_set_req_set_header_flags(struct ethtool_eee_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_eee_set_req_set_modes_ours_nomask(struct ethtool_eee_set_req *req)
+{
+ req->_present.modes_ours = 1;
+ req->modes_ours._present.nomask = 1;
+}
+static inline void
+ethtool_eee_set_req_set_modes_ours_size(struct ethtool_eee_set_req *req,
+ __u32 size)
+{
+ req->_present.modes_ours = 1;
+ req->modes_ours._present.size = 1;
+ req->modes_ours.size = size;
+}
+static inline void
+__ethtool_eee_set_req_set_modes_ours_bits_bit(struct ethtool_eee_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->modes_ours.bits.bit);
+ req->modes_ours.bits.bit = bit;
+ req->modes_ours.bits.n_bit = n_bit;
+}
+static inline void
+ethtool_eee_set_req_set_modes_peer_nomask(struct ethtool_eee_set_req *req)
+{
+ req->_present.modes_peer = 1;
+ req->modes_peer._present.nomask = 1;
+}
+static inline void
+ethtool_eee_set_req_set_modes_peer_size(struct ethtool_eee_set_req *req,
+ __u32 size)
+{
+ req->_present.modes_peer = 1;
+ req->modes_peer._present.size = 1;
+ req->modes_peer.size = size;
+}
+static inline void
+__ethtool_eee_set_req_set_modes_peer_bits_bit(struct ethtool_eee_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->modes_peer.bits.bit);
+ req->modes_peer.bits.bit = bit;
+ req->modes_peer.bits.n_bit = n_bit;
+}
+static inline void
+ethtool_eee_set_req_set_active(struct ethtool_eee_set_req *req, __u8 active)
+{
+ req->_present.active = 1;
+ req->active = active;
+}
+static inline void
+ethtool_eee_set_req_set_enabled(struct ethtool_eee_set_req *req, __u8 enabled)
+{
+ req->_present.enabled = 1;
+ req->enabled = enabled;
+}
+static inline void
+ethtool_eee_set_req_set_tx_lpi_enabled(struct ethtool_eee_set_req *req,
+ __u8 tx_lpi_enabled)
+{
+ req->_present.tx_lpi_enabled = 1;
+ req->tx_lpi_enabled = tx_lpi_enabled;
+}
+static inline void
+ethtool_eee_set_req_set_tx_lpi_timer(struct ethtool_eee_set_req *req,
+ __u32 tx_lpi_timer)
+{
+ req->_present.tx_lpi_timer = 1;
+ req->tx_lpi_timer = tx_lpi_timer;
+}
+
+/*
+ * Set eee params.
+ */
+int ethtool_eee_set(struct ynl_sock *ys, struct ethtool_eee_set_req *req);
+
+/* ============== ETHTOOL_MSG_TSINFO_GET ============== */
+/* ETHTOOL_MSG_TSINFO_GET - do */
+struct ethtool_tsinfo_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_tsinfo_get_req *ethtool_tsinfo_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_tsinfo_get_req));
+}
+void ethtool_tsinfo_get_req_free(struct ethtool_tsinfo_get_req *req);
+
+static inline void
+ethtool_tsinfo_get_req_set_header_dev_index(struct ethtool_tsinfo_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_tsinfo_get_req_set_header_dev_name(struct ethtool_tsinfo_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_tsinfo_get_req_set_header_flags(struct ethtool_tsinfo_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_tsinfo_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 timestamping:1;
+ __u32 tx_types:1;
+ __u32 rx_filters:1;
+ __u32 phc_index:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset timestamping;
+ struct ethtool_bitset tx_types;
+ struct ethtool_bitset rx_filters;
+ __u32 phc_index;
+};
+
+void ethtool_tsinfo_get_rsp_free(struct ethtool_tsinfo_get_rsp *rsp);
+
+/*
+ * Get tsinfo params.
+ */
+struct ethtool_tsinfo_get_rsp *
+ethtool_tsinfo_get(struct ynl_sock *ys, struct ethtool_tsinfo_get_req *req);
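The timestamping, tx_types and rx_filters members above are struct ethtool_bitset values; in verbose form they carry the same bits.bit array / bits.n_bit count pair that the bitset setters elsewhere in this header manipulate. An editorial sketch of listing the reported timestamping bits follows; the per-bit member names (index, name) are taken from the generator's usual struct ethtool_bitset_bit layout, defined earlier in this header, and should be double checked there.

/* Editorial sketch: list timestamping capability bits for @ifindex. */
static int show_tsinfo(struct ynl_sock *ys, unsigned int ifindex)
{
	struct ethtool_tsinfo_get_req *req;
	struct ethtool_tsinfo_get_rsp *rsp;
	unsigned int i;

	req = ethtool_tsinfo_get_req_alloc();
	if (!req)
		return -1;
	ethtool_tsinfo_get_req_set_header_dev_index(req, ifindex);

	rsp = ethtool_tsinfo_get(ys, req);
	ethtool_tsinfo_get_req_free(req);
	if (!rsp)
		return -1;

	if (rsp->_present.phc_index)
		printf("PHC index: %u\n", rsp->phc_index);
	if (rsp->_present.timestamping)
		for (i = 0; i < rsp->timestamping.bits.n_bit; i++) {
			const struct ethtool_bitset_bit *b =
				&rsp->timestamping.bits.bit[i];

			printf("  bit %u: %s\n", b->index,
			       b->name ? b->name : "");
		}

	ethtool_tsinfo_get_rsp_free(rsp);
	return 0;
}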
+
+/* ETHTOOL_MSG_TSINFO_GET - dump */
+struct ethtool_tsinfo_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_tsinfo_get_req_dump *
+ethtool_tsinfo_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_tsinfo_get_req_dump));
+}
+void ethtool_tsinfo_get_req_dump_free(struct ethtool_tsinfo_get_req_dump *req);
+
+static inline void
+ethtool_tsinfo_get_req_dump_set_header_dev_index(struct ethtool_tsinfo_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_tsinfo_get_req_dump_set_header_dev_name(struct ethtool_tsinfo_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_tsinfo_get_req_dump_set_header_flags(struct ethtool_tsinfo_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_tsinfo_get_list {
+ struct ethtool_tsinfo_get_list *next;
+ struct ethtool_tsinfo_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_tsinfo_get_list_free(struct ethtool_tsinfo_get_list *rsp);
+
+struct ethtool_tsinfo_get_list *
+ethtool_tsinfo_get_dump(struct ynl_sock *ys,
+ struct ethtool_tsinfo_get_req_dump *req);
+
+/* ============== ETHTOOL_MSG_CABLE_TEST_ACT ============== */
+/* ETHTOOL_MSG_CABLE_TEST_ACT - do */
+struct ethtool_cable_test_act_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_cable_test_act_req *
+ethtool_cable_test_act_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_cable_test_act_req));
+}
+void ethtool_cable_test_act_req_free(struct ethtool_cable_test_act_req *req);
+
+static inline void
+ethtool_cable_test_act_req_set_header_dev_index(struct ethtool_cable_test_act_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_cable_test_act_req_set_header_dev_name(struct ethtool_cable_test_act_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_cable_test_act_req_set_header_flags(struct ethtool_cable_test_act_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+/*
+ * Cable test.
+ */
+int ethtool_cable_test_act(struct ynl_sock *ys,
+ struct ethtool_cable_test_act_req *req);
+
+/* ============== ETHTOOL_MSG_CABLE_TEST_TDR_ACT ============== */
+/* ETHTOOL_MSG_CABLE_TEST_TDR_ACT - do */
+struct ethtool_cable_test_tdr_act_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_cable_test_tdr_act_req *
+ethtool_cable_test_tdr_act_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_cable_test_tdr_act_req));
+}
+void
+ethtool_cable_test_tdr_act_req_free(struct ethtool_cable_test_tdr_act_req *req);
+
+static inline void
+ethtool_cable_test_tdr_act_req_set_header_dev_index(struct ethtool_cable_test_tdr_act_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_cable_test_tdr_act_req_set_header_dev_name(struct ethtool_cable_test_tdr_act_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_cable_test_tdr_act_req_set_header_flags(struct ethtool_cable_test_tdr_act_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+/*
+ * Cable test TDR.
+ */
+int ethtool_cable_test_tdr_act(struct ynl_sock *ys,
+ struct ethtool_cable_test_tdr_act_req *req);
+
+/* ============== ETHTOOL_MSG_TUNNEL_INFO_GET ============== */
+/* ETHTOOL_MSG_TUNNEL_INFO_GET - do */
+struct ethtool_tunnel_info_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_tunnel_info_get_req *
+ethtool_tunnel_info_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_tunnel_info_get_req));
+}
+void ethtool_tunnel_info_get_req_free(struct ethtool_tunnel_info_get_req *req);
+
+static inline void
+ethtool_tunnel_info_get_req_set_header_dev_index(struct ethtool_tunnel_info_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_tunnel_info_get_req_set_header_dev_name(struct ethtool_tunnel_info_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_tunnel_info_get_req_set_header_flags(struct ethtool_tunnel_info_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_tunnel_info_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 udp_ports:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_tunnel_udp udp_ports;
+};
+
+void ethtool_tunnel_info_get_rsp_free(struct ethtool_tunnel_info_get_rsp *rsp);
+
+/*
+ * Get tunnel info params.
+ */
+struct ethtool_tunnel_info_get_rsp *
+ethtool_tunnel_info_get(struct ynl_sock *ys,
+ struct ethtool_tunnel_info_get_req *req);
+
+/* ETHTOOL_MSG_TUNNEL_INFO_GET - dump */
+struct ethtool_tunnel_info_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_tunnel_info_get_req_dump *
+ethtool_tunnel_info_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_tunnel_info_get_req_dump));
+}
+void
+ethtool_tunnel_info_get_req_dump_free(struct ethtool_tunnel_info_get_req_dump *req);
+
+static inline void
+ethtool_tunnel_info_get_req_dump_set_header_dev_index(struct ethtool_tunnel_info_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_tunnel_info_get_req_dump_set_header_dev_name(struct ethtool_tunnel_info_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_tunnel_info_get_req_dump_set_header_flags(struct ethtool_tunnel_info_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_tunnel_info_get_list {
+ struct ethtool_tunnel_info_get_list *next;
+ struct ethtool_tunnel_info_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void
+ethtool_tunnel_info_get_list_free(struct ethtool_tunnel_info_get_list *rsp);
+
+struct ethtool_tunnel_info_get_list *
+ethtool_tunnel_info_get_dump(struct ynl_sock *ys,
+ struct ethtool_tunnel_info_get_req_dump *req);
+
+/* ============== ETHTOOL_MSG_FEC_GET ============== */
+/* ETHTOOL_MSG_FEC_GET - do */
+struct ethtool_fec_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_fec_get_req *ethtool_fec_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_fec_get_req));
+}
+void ethtool_fec_get_req_free(struct ethtool_fec_get_req *req);
+
+static inline void
+ethtool_fec_get_req_set_header_dev_index(struct ethtool_fec_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_fec_get_req_set_header_dev_name(struct ethtool_fec_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_fec_get_req_set_header_flags(struct ethtool_fec_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_fec_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 modes:1;
+ __u32 auto_:1;
+ __u32 active:1;
+ __u32 stats:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset modes;
+ __u8 auto_;
+ __u32 active;
+ struct ethtool_fec_stat stats;
+};
+
+void ethtool_fec_get_rsp_free(struct ethtool_fec_get_rsp *rsp);
+
+/*
+ * Get FEC params.
+ */
+struct ethtool_fec_get_rsp *
+ethtool_fec_get(struct ynl_sock *ys, struct ethtool_fec_get_req *req);
+
+/* ETHTOOL_MSG_FEC_GET - dump */
+struct ethtool_fec_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_fec_get_req_dump *
+ethtool_fec_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_fec_get_req_dump));
+}
+void ethtool_fec_get_req_dump_free(struct ethtool_fec_get_req_dump *req);
+
+static inline void
+ethtool_fec_get_req_dump_set_header_dev_index(struct ethtool_fec_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_fec_get_req_dump_set_header_dev_name(struct ethtool_fec_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_fec_get_req_dump_set_header_flags(struct ethtool_fec_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_fec_get_list {
+ struct ethtool_fec_get_list *next;
+ struct ethtool_fec_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_fec_get_list_free(struct ethtool_fec_get_list *rsp);
+
+struct ethtool_fec_get_list *
+ethtool_fec_get_dump(struct ynl_sock *ys, struct ethtool_fec_get_req_dump *req);
+
+/* ETHTOOL_MSG_FEC_GET - notify */
+struct ethtool_fec_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_fec_get_ntf *ntf);
+ struct ethtool_fec_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_fec_get_ntf_free(struct ethtool_fec_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_FEC_SET ============== */
+/* ETHTOOL_MSG_FEC_SET - do */
+struct ethtool_fec_set_req {
+ struct {
+ __u32 header:1;
+ __u32 modes:1;
+ __u32 auto_:1;
+ __u32 active:1;
+ __u32 stats:1;
+ } _present;
+
+ struct ethtool_header header;
+ struct ethtool_bitset modes;
+ __u8 auto_;
+ __u32 active;
+ struct ethtool_fec_stat stats;
+};
+
+static inline struct ethtool_fec_set_req *ethtool_fec_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_fec_set_req));
+}
+void ethtool_fec_set_req_free(struct ethtool_fec_set_req *req);
+
+static inline void
+ethtool_fec_set_req_set_header_dev_index(struct ethtool_fec_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_fec_set_req_set_header_dev_name(struct ethtool_fec_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_fec_set_req_set_header_flags(struct ethtool_fec_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_fec_set_req_set_modes_nomask(struct ethtool_fec_set_req *req)
+{
+ req->_present.modes = 1;
+ req->modes._present.nomask = 1;
+}
+static inline void
+ethtool_fec_set_req_set_modes_size(struct ethtool_fec_set_req *req, __u32 size)
+{
+ req->_present.modes = 1;
+ req->modes._present.size = 1;
+ req->modes.size = size;
+}
+static inline void
+__ethtool_fec_set_req_set_modes_bits_bit(struct ethtool_fec_set_req *req,
+ struct ethtool_bitset_bit *bit,
+ unsigned int n_bit)
+{
+ free(req->modes.bits.bit);
+ req->modes.bits.bit = bit;
+ req->modes.bits.n_bit = n_bit;
+}
+static inline void
+ethtool_fec_set_req_set_auto_(struct ethtool_fec_set_req *req, __u8 auto_)
+{
+ req->_present.auto_ = 1;
+ req->auto_ = auto_;
+}
+static inline void
+ethtool_fec_set_req_set_active(struct ethtool_fec_set_req *req, __u32 active)
+{
+ req->_present.active = 1;
+ req->active = active;
+}
+static inline void
+ethtool_fec_set_req_set_stats_corrected(struct ethtool_fec_set_req *req,
+ const void *corrected, size_t len)
+{
+ free(req->stats.corrected);
+ req->stats._present.corrected_len = len;
+ req->stats.corrected = malloc(req->stats._present.corrected_len);
+ memcpy(req->stats.corrected, corrected, req->stats._present.corrected_len);
+}
+static inline void
+ethtool_fec_set_req_set_stats_uncorr(struct ethtool_fec_set_req *req,
+ const void *uncorr, size_t len)
+{
+ free(req->stats.uncorr);
+ req->stats._present.uncorr_len = len;
+ req->stats.uncorr = malloc(req->stats._present.uncorr_len);
+ memcpy(req->stats.uncorr, uncorr, req->stats._present.uncorr_len);
+}
+static inline void
+ethtool_fec_set_req_set_stats_corr_bits(struct ethtool_fec_set_req *req,
+ const void *corr_bits, size_t len)
+{
+ free(req->stats.corr_bits);
+ req->stats._present.corr_bits_len = len;
+ req->stats.corr_bits = malloc(req->stats._present.corr_bits_len);
+ memcpy(req->stats.corr_bits, corr_bits, req->stats._present.corr_bits_len);
+}
+
+/*
+ * Set FEC params.
+ */
+int ethtool_fec_set(struct ynl_sock *ys, struct ethtool_fec_set_req *req);
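The double underscore helper __ethtool_fec_set_req_set_modes_bits_bit() above differs from the plain setters: it takes ownership of a caller-allocated array of struct ethtool_bitset_bit (freeing any previous one), and the array is later released by ethtool_fec_set_req_free(). A heavily hedged editorial sketch of the mechanics follows; the per-bit member names and the "rs" mode string are illustrative assumptions, and the exact bitset semantics (nomask, size, per-bit value) follow the ethtool netlink spec rather than anything shown here.

/* Editorial sketch: hand a one-entry bit array over to a FEC set request. */
static int request_fec_mode(struct ynl_sock *ys, unsigned int ifindex)
{
	struct ethtool_bitset_bit *bits;
	struct ethtool_fec_set_req *req;
	int ret;

	req = ethtool_fec_set_req_alloc();
	if (!req)
		return -1;
	bits = calloc(1, sizeof(*bits));
	if (!bits) {
		ethtool_fec_set_req_free(req);
		return -1;
	}

	ethtool_fec_set_req_set_header_dev_index(req, ifindex);
	/* Replace the whole mask with the listed bits; also marks modes present. */
	ethtool_fec_set_req_set_modes_nomask(req);

	bits[0].name = strdup("rs");		/* assumed member name, example mode */
	bits[0]._present.name_len = strlen(bits[0].name);
	__ethtool_fec_set_req_set_modes_bits_bit(req, bits, 1);	/* req owns bits now */

	ret = ethtool_fec_set(ys, req);
	ethtool_fec_set_req_free(req);		/* also releases the bit array */
	return ret;
}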
+
+/* ============== ETHTOOL_MSG_MODULE_EEPROM_GET ============== */
+/* ETHTOOL_MSG_MODULE_EEPROM_GET - do */
+struct ethtool_module_eeprom_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_module_eeprom_get_req *
+ethtool_module_eeprom_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_module_eeprom_get_req));
+}
+void
+ethtool_module_eeprom_get_req_free(struct ethtool_module_eeprom_get_req *req);
+
+static inline void
+ethtool_module_eeprom_get_req_set_header_dev_index(struct ethtool_module_eeprom_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_module_eeprom_get_req_set_header_dev_name(struct ethtool_module_eeprom_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_module_eeprom_get_req_set_header_flags(struct ethtool_module_eeprom_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_module_eeprom_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 offset:1;
+ __u32 length:1;
+ __u32 page:1;
+ __u32 bank:1;
+ __u32 i2c_address:1;
+ __u32 data_len;
+ } _present;
+
+ struct ethtool_header header;
+ __u32 offset;
+ __u32 length;
+ __u8 page;
+ __u8 bank;
+ __u8 i2c_address;
+ void *data;
+};
+
+void
+ethtool_module_eeprom_get_rsp_free(struct ethtool_module_eeprom_get_rsp *rsp);
+
+/*
+ * Get module EEPROM params.
+ */
+struct ethtool_module_eeprom_get_rsp *
+ethtool_module_eeprom_get(struct ynl_sock *ys,
+ struct ethtool_module_eeprom_get_req *req);
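Binary attributes such as the EEPROM payload above are exposed as a raw data pointer whose length is stored in _present.data_len, mirroring the *_len convention this header uses for strings. An editorial sketch of consuming the response follows; whether the kernel needs further request attributes for this command is outside what the header shows, so the request here only fills the header.

/* Editorial sketch: hex dump whatever EEPROM data the kernel returned. */
static int dump_module_eeprom(struct ynl_sock *ys, unsigned int ifindex)
{
	struct ethtool_module_eeprom_get_req *req;
	struct ethtool_module_eeprom_get_rsp *rsp;
	const unsigned char *d;
	unsigned int i;

	req = ethtool_module_eeprom_get_req_alloc();
	if (!req)
		return -1;
	ethtool_module_eeprom_get_req_set_header_dev_index(req, ifindex);

	rsp = ethtool_module_eeprom_get(ys, req);
	ethtool_module_eeprom_get_req_free(req);
	if (!rsp)
		return -1;

	d = rsp->data;
	for (i = 0; i < rsp->_present.data_len; i++)
		printf("%02x%c", d[i], (i % 16 == 15) ? '\n' : ' ');
	printf("\n");

	ethtool_module_eeprom_get_rsp_free(rsp);
	return 0;
}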
+
+/* ETHTOOL_MSG_MODULE_EEPROM_GET - dump */
+struct ethtool_module_eeprom_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_module_eeprom_get_req_dump *
+ethtool_module_eeprom_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_module_eeprom_get_req_dump));
+}
+void
+ethtool_module_eeprom_get_req_dump_free(struct ethtool_module_eeprom_get_req_dump *req);
+
+static inline void
+ethtool_module_eeprom_get_req_dump_set_header_dev_index(struct ethtool_module_eeprom_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_module_eeprom_get_req_dump_set_header_dev_name(struct ethtool_module_eeprom_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_module_eeprom_get_req_dump_set_header_flags(struct ethtool_module_eeprom_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_module_eeprom_get_list {
+ struct ethtool_module_eeprom_get_list *next;
+ struct ethtool_module_eeprom_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void
+ethtool_module_eeprom_get_list_free(struct ethtool_module_eeprom_get_list *rsp);
+
+struct ethtool_module_eeprom_get_list *
+ethtool_module_eeprom_get_dump(struct ynl_sock *ys,
+ struct ethtool_module_eeprom_get_req_dump *req);
+
+/* ============== ETHTOOL_MSG_PHC_VCLOCKS_GET ============== */
+/* ETHTOOL_MSG_PHC_VCLOCKS_GET - do */
+struct ethtool_phc_vclocks_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_phc_vclocks_get_req *
+ethtool_phc_vclocks_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_phc_vclocks_get_req));
+}
+void ethtool_phc_vclocks_get_req_free(struct ethtool_phc_vclocks_get_req *req);
+
+static inline void
+ethtool_phc_vclocks_get_req_set_header_dev_index(struct ethtool_phc_vclocks_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_phc_vclocks_get_req_set_header_dev_name(struct ethtool_phc_vclocks_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_phc_vclocks_get_req_set_header_flags(struct ethtool_phc_vclocks_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_phc_vclocks_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 num:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u32 num;
+};
+
+void ethtool_phc_vclocks_get_rsp_free(struct ethtool_phc_vclocks_get_rsp *rsp);
+
+/*
+ * Get PHC VCLOCKs.
+ */
+struct ethtool_phc_vclocks_get_rsp *
+ethtool_phc_vclocks_get(struct ynl_sock *ys,
+ struct ethtool_phc_vclocks_get_req *req);
+
+/* ETHTOOL_MSG_PHC_VCLOCKS_GET - dump */
+struct ethtool_phc_vclocks_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_phc_vclocks_get_req_dump *
+ethtool_phc_vclocks_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_phc_vclocks_get_req_dump));
+}
+void
+ethtool_phc_vclocks_get_req_dump_free(struct ethtool_phc_vclocks_get_req_dump *req);
+
+static inline void
+ethtool_phc_vclocks_get_req_dump_set_header_dev_index(struct ethtool_phc_vclocks_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_phc_vclocks_get_req_dump_set_header_dev_name(struct ethtool_phc_vclocks_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_phc_vclocks_get_req_dump_set_header_flags(struct ethtool_phc_vclocks_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_phc_vclocks_get_list {
+ struct ethtool_phc_vclocks_get_list *next;
+ struct ethtool_phc_vclocks_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void
+ethtool_phc_vclocks_get_list_free(struct ethtool_phc_vclocks_get_list *rsp);
+
+struct ethtool_phc_vclocks_get_list *
+ethtool_phc_vclocks_get_dump(struct ynl_sock *ys,
+ struct ethtool_phc_vclocks_get_req_dump *req);
+
+/* ============== ETHTOOL_MSG_MODULE_GET ============== */
+/* ETHTOOL_MSG_MODULE_GET - do */
+struct ethtool_module_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_module_get_req *ethtool_module_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_module_get_req));
+}
+void ethtool_module_get_req_free(struct ethtool_module_get_req *req);
+
+static inline void
+ethtool_module_get_req_set_header_dev_index(struct ethtool_module_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_module_get_req_set_header_dev_name(struct ethtool_module_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_module_get_req_set_header_flags(struct ethtool_module_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_module_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 power_mode_policy:1;
+ __u32 power_mode:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 power_mode_policy;
+ __u8 power_mode;
+};
+
+void ethtool_module_get_rsp_free(struct ethtool_module_get_rsp *rsp);
+
+/*
+ * Get module params.
+ */
+struct ethtool_module_get_rsp *
+ethtool_module_get(struct ynl_sock *ys, struct ethtool_module_get_req *req);
+
+/* ETHTOOL_MSG_MODULE_GET - dump */
+struct ethtool_module_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_module_get_req_dump *
+ethtool_module_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_module_get_req_dump));
+}
+void ethtool_module_get_req_dump_free(struct ethtool_module_get_req_dump *req);
+
+static inline void
+ethtool_module_get_req_dump_set_header_dev_index(struct ethtool_module_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_module_get_req_dump_set_header_dev_name(struct ethtool_module_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_module_get_req_dump_set_header_flags(struct ethtool_module_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_module_get_list {
+ struct ethtool_module_get_list *next;
+ struct ethtool_module_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_module_get_list_free(struct ethtool_module_get_list *rsp);
+
+struct ethtool_module_get_list *
+ethtool_module_get_dump(struct ynl_sock *ys,
+ struct ethtool_module_get_req_dump *req);
+
+/* ETHTOOL_MSG_MODULE_GET - notify */
+struct ethtool_module_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_module_get_ntf *ntf);
+ struct ethtool_module_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_module_get_ntf_free(struct ethtool_module_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_MODULE_SET ============== */
+/* ETHTOOL_MSG_MODULE_SET - do */
+struct ethtool_module_set_req {
+ struct {
+ __u32 header:1;
+ __u32 power_mode_policy:1;
+ __u32 power_mode:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 power_mode_policy;
+ __u8 power_mode;
+};
+
+static inline struct ethtool_module_set_req *ethtool_module_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_module_set_req));
+}
+void ethtool_module_set_req_free(struct ethtool_module_set_req *req);
+
+static inline void
+ethtool_module_set_req_set_header_dev_index(struct ethtool_module_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_module_set_req_set_header_dev_name(struct ethtool_module_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_module_set_req_set_header_flags(struct ethtool_module_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_module_set_req_set_power_mode_policy(struct ethtool_module_set_req *req,
+ __u8 power_mode_policy)
+{
+ req->_present.power_mode_policy = 1;
+ req->power_mode_policy = power_mode_policy;
+}
+static inline void
+ethtool_module_set_req_set_power_mode(struct ethtool_module_set_req *req,
+ __u8 power_mode)
+{
+ req->_present.power_mode = 1;
+ req->power_mode = power_mode;
+}
+
+/*
+ * Set module params.
+ */
+int ethtool_module_set(struct ynl_sock *ys, struct ethtool_module_set_req *req);
+
+/* ============== ETHTOOL_MSG_PSE_GET ============== */
+/* ETHTOOL_MSG_PSE_GET - do */
+struct ethtool_pse_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_pse_get_req *ethtool_pse_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_pse_get_req));
+}
+void ethtool_pse_get_req_free(struct ethtool_pse_get_req *req);
+
+static inline void
+ethtool_pse_get_req_set_header_dev_index(struct ethtool_pse_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_pse_get_req_set_header_dev_name(struct ethtool_pse_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_pse_get_req_set_header_flags(struct ethtool_pse_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_pse_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 admin_state:1;
+ __u32 admin_control:1;
+ __u32 pw_d_status:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u32 admin_state;
+ __u32 admin_control;
+ __u32 pw_d_status;
+};
+
+void ethtool_pse_get_rsp_free(struct ethtool_pse_get_rsp *rsp);
+
+/*
+ * Get Power Sourcing Equipment params.
+ */
+struct ethtool_pse_get_rsp *
+ethtool_pse_get(struct ynl_sock *ys, struct ethtool_pse_get_req *req);
+
+/* ETHTOOL_MSG_PSE_GET - dump */
+struct ethtool_pse_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_pse_get_req_dump *
+ethtool_pse_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_pse_get_req_dump));
+}
+void ethtool_pse_get_req_dump_free(struct ethtool_pse_get_req_dump *req);
+
+static inline void
+ethtool_pse_get_req_dump_set_header_dev_index(struct ethtool_pse_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_pse_get_req_dump_set_header_dev_name(struct ethtool_pse_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_pse_get_req_dump_set_header_flags(struct ethtool_pse_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_pse_get_list {
+ struct ethtool_pse_get_list *next;
+ struct ethtool_pse_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_pse_get_list_free(struct ethtool_pse_get_list *rsp);
+
+struct ethtool_pse_get_list *
+ethtool_pse_get_dump(struct ynl_sock *ys, struct ethtool_pse_get_req_dump *req);
+
+/* ============== ETHTOOL_MSG_PSE_SET ============== */
+/* ETHTOOL_MSG_PSE_SET - do */
+struct ethtool_pse_set_req {
+ struct {
+ __u32 header:1;
+ __u32 admin_state:1;
+ __u32 admin_control:1;
+ __u32 pw_d_status:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u32 admin_state;
+ __u32 admin_control;
+ __u32 pw_d_status;
+};
+
+static inline struct ethtool_pse_set_req *ethtool_pse_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_pse_set_req));
+}
+void ethtool_pse_set_req_free(struct ethtool_pse_set_req *req);
+
+static inline void
+ethtool_pse_set_req_set_header_dev_index(struct ethtool_pse_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_pse_set_req_set_header_dev_name(struct ethtool_pse_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_pse_set_req_set_header_flags(struct ethtool_pse_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_pse_set_req_set_admin_state(struct ethtool_pse_set_req *req,
+ __u32 admin_state)
+{
+ req->_present.admin_state = 1;
+ req->admin_state = admin_state;
+}
+static inline void
+ethtool_pse_set_req_set_admin_control(struct ethtool_pse_set_req *req,
+ __u32 admin_control)
+{
+ req->_present.admin_control = 1;
+ req->admin_control = admin_control;
+}
+static inline void
+ethtool_pse_set_req_set_pw_d_status(struct ethtool_pse_set_req *req,
+ __u32 pw_d_status)
+{
+ req->_present.pw_d_status = 1;
+ req->pw_d_status = pw_d_status;
+}
+
+/*
+ * Set Power Sourcing Equipment params.
+ */
+int ethtool_pse_set(struct ynl_sock *ys, struct ethtool_pse_set_req *req);
+
+/* ============== ETHTOOL_MSG_RSS_GET ============== */
+/* ETHTOOL_MSG_RSS_GET - do */
+struct ethtool_rss_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_rss_get_req *ethtool_rss_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_rss_get_req));
+}
+void ethtool_rss_get_req_free(struct ethtool_rss_get_req *req);
+
+static inline void
+ethtool_rss_get_req_set_header_dev_index(struct ethtool_rss_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_rss_get_req_set_header_dev_name(struct ethtool_rss_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_rss_get_req_set_header_flags(struct ethtool_rss_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_rss_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 context:1;
+ __u32 hfunc:1;
+ __u32 indir_len;
+ __u32 hkey_len;
+ } _present;
+
+ struct ethtool_header header;
+ __u32 context;
+ __u32 hfunc;
+ void *indir;
+ void *hkey;
+};
+
+void ethtool_rss_get_rsp_free(struct ethtool_rss_get_rsp *rsp);
+
+/*
+ * Get RSS params.
+ */
+struct ethtool_rss_get_rsp *
+ethtool_rss_get(struct ynl_sock *ys, struct ethtool_rss_get_req *req);
+
+/* ETHTOOL_MSG_RSS_GET - dump */
+struct ethtool_rss_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_rss_get_req_dump *
+ethtool_rss_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_rss_get_req_dump));
+}
+void ethtool_rss_get_req_dump_free(struct ethtool_rss_get_req_dump *req);
+
+static inline void
+ethtool_rss_get_req_dump_set_header_dev_index(struct ethtool_rss_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_rss_get_req_dump_set_header_dev_name(struct ethtool_rss_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_rss_get_req_dump_set_header_flags(struct ethtool_rss_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_rss_get_list {
+ struct ethtool_rss_get_list *next;
+ struct ethtool_rss_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_rss_get_list_free(struct ethtool_rss_get_list *rsp);
+
+struct ethtool_rss_get_list *
+ethtool_rss_get_dump(struct ynl_sock *ys, struct ethtool_rss_get_req_dump *req);
+
+/* ============== ETHTOOL_MSG_PLCA_GET_CFG ============== */
+/* ETHTOOL_MSG_PLCA_GET_CFG - do */
+struct ethtool_plca_get_cfg_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_plca_get_cfg_req *
+ethtool_plca_get_cfg_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_plca_get_cfg_req));
+}
+void ethtool_plca_get_cfg_req_free(struct ethtool_plca_get_cfg_req *req);
+
+static inline void
+ethtool_plca_get_cfg_req_set_header_dev_index(struct ethtool_plca_get_cfg_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_plca_get_cfg_req_set_header_dev_name(struct ethtool_plca_get_cfg_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_plca_get_cfg_req_set_header_flags(struct ethtool_plca_get_cfg_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_plca_get_cfg_rsp {
+ struct {
+ __u32 header:1;
+ __u32 version:1;
+ __u32 enabled:1;
+ __u32 status:1;
+ __u32 node_cnt:1;
+ __u32 node_id:1;
+ __u32 to_tmr:1;
+ __u32 burst_cnt:1;
+ __u32 burst_tmr:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u16 version;
+ __u8 enabled;
+ __u8 status;
+ __u32 node_cnt;
+ __u32 node_id;
+ __u32 to_tmr;
+ __u32 burst_cnt;
+ __u32 burst_tmr;
+};
+
+void ethtool_plca_get_cfg_rsp_free(struct ethtool_plca_get_cfg_rsp *rsp);
+
+/*
+ * Get PLCA params.
+ */
+struct ethtool_plca_get_cfg_rsp *
+ethtool_plca_get_cfg(struct ynl_sock *ys, struct ethtool_plca_get_cfg_req *req);
+
+/* ETHTOOL_MSG_PLCA_GET_CFG - dump */
+struct ethtool_plca_get_cfg_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_plca_get_cfg_req_dump *
+ethtool_plca_get_cfg_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_plca_get_cfg_req_dump));
+}
+void
+ethtool_plca_get_cfg_req_dump_free(struct ethtool_plca_get_cfg_req_dump *req);
+
+static inline void
+ethtool_plca_get_cfg_req_dump_set_header_dev_index(struct ethtool_plca_get_cfg_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_plca_get_cfg_req_dump_set_header_dev_name(struct ethtool_plca_get_cfg_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_plca_get_cfg_req_dump_set_header_flags(struct ethtool_plca_get_cfg_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_plca_get_cfg_list {
+ struct ethtool_plca_get_cfg_list *next;
+ struct ethtool_plca_get_cfg_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_plca_get_cfg_list_free(struct ethtool_plca_get_cfg_list *rsp);
+
+struct ethtool_plca_get_cfg_list *
+ethtool_plca_get_cfg_dump(struct ynl_sock *ys,
+ struct ethtool_plca_get_cfg_req_dump *req);
+
+/* ETHTOOL_MSG_PLCA_GET_CFG - notify */
+struct ethtool_plca_get_cfg_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_plca_get_cfg_ntf *ntf);
+ struct ethtool_plca_get_cfg_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_plca_get_cfg_ntf_free(struct ethtool_plca_get_cfg_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_PLCA_SET_CFG ============== */
+/* ETHTOOL_MSG_PLCA_SET_CFG - do */
+struct ethtool_plca_set_cfg_req {
+ struct {
+ __u32 header:1;
+ __u32 version:1;
+ __u32 enabled:1;
+ __u32 status:1;
+ __u32 node_cnt:1;
+ __u32 node_id:1;
+ __u32 to_tmr:1;
+ __u32 burst_cnt:1;
+ __u32 burst_tmr:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u16 version;
+ __u8 enabled;
+ __u8 status;
+ __u32 node_cnt;
+ __u32 node_id;
+ __u32 to_tmr;
+ __u32 burst_cnt;
+ __u32 burst_tmr;
+};
+
+static inline struct ethtool_plca_set_cfg_req *
+ethtool_plca_set_cfg_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_plca_set_cfg_req));
+}
+void ethtool_plca_set_cfg_req_free(struct ethtool_plca_set_cfg_req *req);
+
+static inline void
+ethtool_plca_set_cfg_req_set_header_dev_index(struct ethtool_plca_set_cfg_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_plca_set_cfg_req_set_header_dev_name(struct ethtool_plca_set_cfg_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_plca_set_cfg_req_set_header_flags(struct ethtool_plca_set_cfg_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_plca_set_cfg_req_set_version(struct ethtool_plca_set_cfg_req *req,
+ __u16 version)
+{
+ req->_present.version = 1;
+ req->version = version;
+}
+static inline void
+ethtool_plca_set_cfg_req_set_enabled(struct ethtool_plca_set_cfg_req *req,
+ __u8 enabled)
+{
+ req->_present.enabled = 1;
+ req->enabled = enabled;
+}
+static inline void
+ethtool_plca_set_cfg_req_set_status(struct ethtool_plca_set_cfg_req *req,
+ __u8 status)
+{
+ req->_present.status = 1;
+ req->status = status;
+}
+static inline void
+ethtool_plca_set_cfg_req_set_node_cnt(struct ethtool_plca_set_cfg_req *req,
+ __u32 node_cnt)
+{
+ req->_present.node_cnt = 1;
+ req->node_cnt = node_cnt;
+}
+static inline void
+ethtool_plca_set_cfg_req_set_node_id(struct ethtool_plca_set_cfg_req *req,
+ __u32 node_id)
+{
+ req->_present.node_id = 1;
+ req->node_id = node_id;
+}
+static inline void
+ethtool_plca_set_cfg_req_set_to_tmr(struct ethtool_plca_set_cfg_req *req,
+ __u32 to_tmr)
+{
+ req->_present.to_tmr = 1;
+ req->to_tmr = to_tmr;
+}
+static inline void
+ethtool_plca_set_cfg_req_set_burst_cnt(struct ethtool_plca_set_cfg_req *req,
+ __u32 burst_cnt)
+{
+ req->_present.burst_cnt = 1;
+ req->burst_cnt = burst_cnt;
+}
+static inline void
+ethtool_plca_set_cfg_req_set_burst_tmr(struct ethtool_plca_set_cfg_req *req,
+ __u32 burst_tmr)
+{
+ req->_present.burst_tmr = 1;
+ req->burst_tmr = burst_tmr;
+}
+
+/*
+ * Set PLCA params.
+ */
+int ethtool_plca_set_cfg(struct ynl_sock *ys,
+ struct ethtool_plca_set_cfg_req *req);
+
+/* ============== ETHTOOL_MSG_PLCA_GET_STATUS ============== */
+/* ETHTOOL_MSG_PLCA_GET_STATUS - do */
+struct ethtool_plca_get_status_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_plca_get_status_req *
+ethtool_plca_get_status_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_plca_get_status_req));
+}
+void ethtool_plca_get_status_req_free(struct ethtool_plca_get_status_req *req);
+
+static inline void
+ethtool_plca_get_status_req_set_header_dev_index(struct ethtool_plca_get_status_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_plca_get_status_req_set_header_dev_name(struct ethtool_plca_get_status_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_plca_get_status_req_set_header_flags(struct ethtool_plca_get_status_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_plca_get_status_rsp {
+ struct {
+ __u32 header:1;
+ __u32 version:1;
+ __u32 enabled:1;
+ __u32 status:1;
+ __u32 node_cnt:1;
+ __u32 node_id:1;
+ __u32 to_tmr:1;
+ __u32 burst_cnt:1;
+ __u32 burst_tmr:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u16 version;
+ __u8 enabled;
+ __u8 status;
+ __u32 node_cnt;
+ __u32 node_id;
+ __u32 to_tmr;
+ __u32 burst_cnt;
+ __u32 burst_tmr;
+};
+
+void ethtool_plca_get_status_rsp_free(struct ethtool_plca_get_status_rsp *rsp);
+
+/*
+ * Get PLCA status params.
+ */
+struct ethtool_plca_get_status_rsp *
+ethtool_plca_get_status(struct ynl_sock *ys,
+ struct ethtool_plca_get_status_req *req);
+
+/* ETHTOOL_MSG_PLCA_GET_STATUS - dump */
+struct ethtool_plca_get_status_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_plca_get_status_req_dump *
+ethtool_plca_get_status_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_plca_get_status_req_dump));
+}
+void
+ethtool_plca_get_status_req_dump_free(struct ethtool_plca_get_status_req_dump *req);
+
+static inline void
+ethtool_plca_get_status_req_dump_set_header_dev_index(struct ethtool_plca_get_status_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_plca_get_status_req_dump_set_header_dev_name(struct ethtool_plca_get_status_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_plca_get_status_req_dump_set_header_flags(struct ethtool_plca_get_status_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_plca_get_status_list {
+ struct ethtool_plca_get_status_list *next;
+ struct ethtool_plca_get_status_rsp obj __attribute__ ((aligned (8)));
+};
+
+void
+ethtool_plca_get_status_list_free(struct ethtool_plca_get_status_list *rsp);
+
+struct ethtool_plca_get_status_list *
+ethtool_plca_get_status_dump(struct ynl_sock *ys,
+ struct ethtool_plca_get_status_req_dump *req);
+
+/* ============== ETHTOOL_MSG_MM_GET ============== */
+/* ETHTOOL_MSG_MM_GET - do */
+struct ethtool_mm_get_req {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_mm_get_req *ethtool_mm_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_mm_get_req));
+}
+void ethtool_mm_get_req_free(struct ethtool_mm_get_req *req);
+
+static inline void
+ethtool_mm_get_req_set_header_dev_index(struct ethtool_mm_get_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_mm_get_req_set_header_dev_name(struct ethtool_mm_get_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_mm_get_req_set_header_flags(struct ethtool_mm_get_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_mm_get_rsp {
+ struct {
+ __u32 header:1;
+ __u32 pmac_enabled:1;
+ __u32 tx_enabled:1;
+ __u32 tx_active:1;
+ __u32 tx_min_frag_size:1;
+ __u32 rx_min_frag_size:1;
+ __u32 verify_enabled:1;
+ __u32 verify_time:1;
+ __u32 max_verify_time:1;
+ __u32 stats:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 pmac_enabled;
+ __u8 tx_enabled;
+ __u8 tx_active;
+ __u32 tx_min_frag_size;
+ __u32 rx_min_frag_size;
+ __u8 verify_enabled;
+ __u32 verify_time;
+ __u32 max_verify_time;
+ struct ethtool_mm_stat stats;
+};
+
+void ethtool_mm_get_rsp_free(struct ethtool_mm_get_rsp *rsp);
+
+/*
+ * Get MAC Merge configuration and state
+ */
+struct ethtool_mm_get_rsp *
+ethtool_mm_get(struct ynl_sock *ys, struct ethtool_mm_get_req *req);
+
+/* ETHTOOL_MSG_MM_GET - dump */
+struct ethtool_mm_get_req_dump {
+ struct {
+ __u32 header:1;
+ } _present;
+
+ struct ethtool_header header;
+};
+
+static inline struct ethtool_mm_get_req_dump *
+ethtool_mm_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_mm_get_req_dump));
+}
+void ethtool_mm_get_req_dump_free(struct ethtool_mm_get_req_dump *req);
+
+static inline void
+ethtool_mm_get_req_dump_set_header_dev_index(struct ethtool_mm_get_req_dump *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_mm_get_req_dump_set_header_dev_name(struct ethtool_mm_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_mm_get_req_dump_set_header_flags(struct ethtool_mm_get_req_dump *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+
+struct ethtool_mm_get_list {
+ struct ethtool_mm_get_list *next;
+ struct ethtool_mm_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_mm_get_list_free(struct ethtool_mm_get_list *rsp);
+
+struct ethtool_mm_get_list *
+ethtool_mm_get_dump(struct ynl_sock *ys, struct ethtool_mm_get_req_dump *req);
+
+/* ETHTOOL_MSG_MM_GET - notify */
+struct ethtool_mm_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_mm_get_ntf *ntf);
+ struct ethtool_mm_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_mm_get_ntf_free(struct ethtool_mm_get_ntf *rsp);
+
+/* ============== ETHTOOL_MSG_MM_SET ============== */
+/* ETHTOOL_MSG_MM_SET - do */
+struct ethtool_mm_set_req {
+ struct {
+ __u32 header:1;
+ __u32 verify_enabled:1;
+ __u32 verify_time:1;
+ __u32 tx_enabled:1;
+ __u32 pmac_enabled:1;
+ __u32 tx_min_frag_size:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 verify_enabled;
+ __u32 verify_time;
+ __u8 tx_enabled;
+ __u8 pmac_enabled;
+ __u32 tx_min_frag_size;
+};
+
+static inline struct ethtool_mm_set_req *ethtool_mm_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct ethtool_mm_set_req));
+}
+void ethtool_mm_set_req_free(struct ethtool_mm_set_req *req);
+
+static inline void
+ethtool_mm_set_req_set_header_dev_index(struct ethtool_mm_set_req *req,
+ __u32 dev_index)
+{
+ req->_present.header = 1;
+ req->header._present.dev_index = 1;
+ req->header.dev_index = dev_index;
+}
+static inline void
+ethtool_mm_set_req_set_header_dev_name(struct ethtool_mm_set_req *req,
+ const char *dev_name)
+{
+ free(req->header.dev_name);
+ req->header._present.dev_name_len = strlen(dev_name);
+ req->header.dev_name = malloc(req->header._present.dev_name_len + 1);
+ memcpy(req->header.dev_name, dev_name, req->header._present.dev_name_len);
+ req->header.dev_name[req->header._present.dev_name_len] = 0;
+}
+static inline void
+ethtool_mm_set_req_set_header_flags(struct ethtool_mm_set_req *req,
+ __u32 flags)
+{
+ req->_present.header = 1;
+ req->header._present.flags = 1;
+ req->header.flags = flags;
+}
+static inline void
+ethtool_mm_set_req_set_verify_enabled(struct ethtool_mm_set_req *req,
+ __u8 verify_enabled)
+{
+ req->_present.verify_enabled = 1;
+ req->verify_enabled = verify_enabled;
+}
+static inline void
+ethtool_mm_set_req_set_verify_time(struct ethtool_mm_set_req *req,
+ __u32 verify_time)
+{
+ req->_present.verify_time = 1;
+ req->verify_time = verify_time;
+}
+static inline void
+ethtool_mm_set_req_set_tx_enabled(struct ethtool_mm_set_req *req,
+ __u8 tx_enabled)
+{
+ req->_present.tx_enabled = 1;
+ req->tx_enabled = tx_enabled;
+}
+static inline void
+ethtool_mm_set_req_set_pmac_enabled(struct ethtool_mm_set_req *req,
+ __u8 pmac_enabled)
+{
+ req->_present.pmac_enabled = 1;
+ req->pmac_enabled = pmac_enabled;
+}
+static inline void
+ethtool_mm_set_req_set_tx_min_frag_size(struct ethtool_mm_set_req *req,
+ __u32 tx_min_frag_size)
+{
+ req->_present.tx_min_frag_size = 1;
+ req->tx_min_frag_size = tx_min_frag_size;
+}
+
+/*
+ * Set MAC Merge configuration
+ */
+int ethtool_mm_set(struct ynl_sock *ys, struct ethtool_mm_set_req *req);
+
+/* ETHTOOL_MSG_CABLE_TEST_NTF - event */
+struct ethtool_cable_test_ntf_rsp {
+ struct {
+ __u32 header:1;
+ __u32 status:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 status;
+};
+
+struct ethtool_cable_test_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_cable_test_ntf *ntf);
+ struct ethtool_cable_test_ntf_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_cable_test_ntf_free(struct ethtool_cable_test_ntf *rsp);
+
+/* ETHTOOL_MSG_CABLE_TEST_TDR_NTF - event */
+struct ethtool_cable_test_tdr_ntf_rsp {
+ struct {
+ __u32 header:1;
+ __u32 status:1;
+ __u32 nest:1;
+ } _present;
+
+ struct ethtool_header header;
+ __u8 status;
+ struct ethtool_cable_nest nest;
+};
+
+struct ethtool_cable_test_tdr_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ethtool_cable_test_tdr_ntf *ntf);
+ struct ethtool_cable_test_tdr_ntf_rsp obj __attribute__ ((aligned (8)));
+};
+
+void ethtool_cable_test_tdr_ntf_free(struct ethtool_cable_test_tdr_ntf *rsp);
+
+#endif /* _LINUX_ETHTOOL_GEN_H */
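
For orientation, every generated ethtool helper above follows the same alloc/set/do/free pattern. Below is a minimal caller sketch, not part of the generated file; ynl_sock_create()/ynl_sock_destroy(), struct ynl_error and ynl_ethtool_family come from the ynl library and the earlier part of this header, so they are assumptions here rather than something shown in this hunk.

#include <stdio.h>
#include "ethtool-user.h"
#include "ynl.h"

/* Hedged sketch: read the RSS config of ifindex 1 via the generated API. */
static int rss_example(void)
{
	struct ethtool_rss_get_req *req;
	struct ethtool_rss_get_rsp *rsp;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret = -1;

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);	/* assumed ynl API */
	if (!ys)
		return -1;

	req = ethtool_rss_get_req_alloc();
	ethtool_rss_get_req_set_header_dev_index(req, 1);

	rsp = ethtool_rss_get(ys, req);
	ethtool_rss_get_req_free(req);
	if (rsp) {
		if (rsp->_present.hfunc)
			printf("hfunc: %u\n", rsp->hfunc);
		ethtool_rss_get_rsp_free(rsp);
		ret = 0;
	}

	ynl_sock_destroy(ys);	/* assumed ynl API */
	return ret;
}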
diff --git a/tools/net/ynl/generated/fou-user.c b/tools/net/ynl/generated/fou-user.c
new file mode 100644
index 000000000000..4271b5d43c58
--- /dev/null
+++ b/tools/net/ynl/generated/fou-user.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/fou.yaml */
+/* YNL-GEN user source */
+
+#include <stdlib.h>
+#include <string.h>
+#include "fou-user.h"
+#include "ynl.h"
+#include <linux/fou.h>
+
+#include <libmnl/libmnl.h>
+#include <linux/genetlink.h>
+
+/* Enums */
+static const char * const fou_op_strmap[] = {
+ [FOU_CMD_ADD] = "add",
+ [FOU_CMD_DEL] = "del",
+ [FOU_CMD_GET] = "get",
+};
+
+const char *fou_op_str(int op)
+{
+ if (op < 0 || op >= (int)MNL_ARRAY_SIZE(fou_op_strmap))
+ return NULL;
+ return fou_op_strmap[op];
+}
+
+static const char * const fou_encap_type_strmap[] = {
+ [0] = "unspec",
+ [1] = "direct",
+ [2] = "gue",
+};
+
+const char *fou_encap_type_str(int value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(fou_encap_type_strmap))
+ return NULL;
+ return fou_encap_type_strmap[value];
+}
+
+/* Policies */
+struct ynl_policy_attr fou_policy[FOU_ATTR_MAX + 1] = {
+ [FOU_ATTR_UNSPEC] = { .name = "unspec", .type = YNL_PT_REJECT, },
+ [FOU_ATTR_PORT] = { .name = "port", .type = YNL_PT_U16, },
+ [FOU_ATTR_AF] = { .name = "af", .type = YNL_PT_U8, },
+ [FOU_ATTR_IPPROTO] = { .name = "ipproto", .type = YNL_PT_U8, },
+ [FOU_ATTR_TYPE] = { .name = "type", .type = YNL_PT_U8, },
+ [FOU_ATTR_REMCSUM_NOPARTIAL] = { .name = "remcsum_nopartial", .type = YNL_PT_FLAG, },
+ [FOU_ATTR_LOCAL_V4] = { .name = "local_v4", .type = YNL_PT_U32, },
+ [FOU_ATTR_LOCAL_V6] = { .name = "local_v6", .type = YNL_PT_BINARY,},
+ [FOU_ATTR_PEER_V4] = { .name = "peer_v4", .type = YNL_PT_U32, },
+ [FOU_ATTR_PEER_V6] = { .name = "peer_v6", .type = YNL_PT_BINARY,},
+ [FOU_ATTR_PEER_PORT] = { .name = "peer_port", .type = YNL_PT_U16, },
+ [FOU_ATTR_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest fou_nest = {
+ .max_attr = FOU_ATTR_MAX,
+ .table = fou_policy,
+};
+
+/* Common nested types */
+/* ============== FOU_CMD_ADD ============== */
+/* FOU_CMD_ADD - do */
+void fou_add_req_free(struct fou_add_req *req)
+{
+ free(req->local_v6);
+ free(req->peer_v6);
+ free(req);
+}
+
+int fou_add(struct ynl_sock *ys, struct fou_add_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, FOU_CMD_ADD, 1);
+ ys->req_policy = &fou_nest;
+
+ if (req->_present.port)
+ mnl_attr_put_u16(nlh, FOU_ATTR_PORT, req->port);
+ if (req->_present.ipproto)
+ mnl_attr_put_u8(nlh, FOU_ATTR_IPPROTO, req->ipproto);
+ if (req->_present.type)
+ mnl_attr_put_u8(nlh, FOU_ATTR_TYPE, req->type);
+ if (req->_present.remcsum_nopartial)
+ mnl_attr_put(nlh, FOU_ATTR_REMCSUM_NOPARTIAL, 0, NULL);
+ if (req->_present.local_v4)
+ mnl_attr_put_u32(nlh, FOU_ATTR_LOCAL_V4, req->local_v4);
+ if (req->_present.peer_v4)
+ mnl_attr_put_u32(nlh, FOU_ATTR_PEER_V4, req->peer_v4);
+ if (req->_present.local_v6_len)
+ mnl_attr_put(nlh, FOU_ATTR_LOCAL_V6, req->_present.local_v6_len, req->local_v6);
+ if (req->_present.peer_v6_len)
+ mnl_attr_put(nlh, FOU_ATTR_PEER_V6, req->_present.peer_v6_len, req->peer_v6);
+ if (req->_present.peer_port)
+ mnl_attr_put_u16(nlh, FOU_ATTR_PEER_PORT, req->peer_port);
+ if (req->_present.ifindex)
+ mnl_attr_put_u32(nlh, FOU_ATTR_IFINDEX, req->ifindex);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== FOU_CMD_DEL ============== */
+/* FOU_CMD_DEL - do */
+void fou_del_req_free(struct fou_del_req *req)
+{
+ free(req->local_v6);
+ free(req->peer_v6);
+ free(req);
+}
+
+int fou_del(struct ynl_sock *ys, struct fou_del_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, FOU_CMD_DEL, 1);
+ ys->req_policy = &fou_nest;
+
+ if (req->_present.af)
+ mnl_attr_put_u8(nlh, FOU_ATTR_AF, req->af);
+ if (req->_present.ifindex)
+ mnl_attr_put_u32(nlh, FOU_ATTR_IFINDEX, req->ifindex);
+ if (req->_present.port)
+ mnl_attr_put_u16(nlh, FOU_ATTR_PORT, req->port);
+ if (req->_present.peer_port)
+ mnl_attr_put_u16(nlh, FOU_ATTR_PEER_PORT, req->peer_port);
+ if (req->_present.local_v4)
+ mnl_attr_put_u32(nlh, FOU_ATTR_LOCAL_V4, req->local_v4);
+ if (req->_present.peer_v4)
+ mnl_attr_put_u32(nlh, FOU_ATTR_PEER_V4, req->peer_v4);
+ if (req->_present.local_v6_len)
+ mnl_attr_put(nlh, FOU_ATTR_LOCAL_V6, req->_present.local_v6_len, req->local_v6);
+ if (req->_present.peer_v6_len)
+ mnl_attr_put(nlh, FOU_ATTR_PEER_V6, req->_present.peer_v6_len, req->peer_v6);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== FOU_CMD_GET ============== */
+/* FOU_CMD_GET - do */
+void fou_get_req_free(struct fou_get_req *req)
+{
+ free(req->local_v6);
+ free(req->peer_v6);
+ free(req);
+}
+
+void fou_get_rsp_free(struct fou_get_rsp *rsp)
+{
+ free(rsp->local_v6);
+ free(rsp->peer_v6);
+ free(rsp);
+}
+
+int fou_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct fou_get_rsp *dst;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == FOU_ATTR_PORT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port = 1;
+ dst->port = mnl_attr_get_u16(attr);
+ } else if (type == FOU_ATTR_IPPROTO) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.ipproto = 1;
+ dst->ipproto = mnl_attr_get_u8(attr);
+ } else if (type == FOU_ATTR_TYPE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.type = 1;
+ dst->type = mnl_attr_get_u8(attr);
+ } else if (type == FOU_ATTR_REMCSUM_NOPARTIAL) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.remcsum_nopartial = 1;
+ } else if (type == FOU_ATTR_LOCAL_V4) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.local_v4 = 1;
+ dst->local_v4 = mnl_attr_get_u32(attr);
+ } else if (type == FOU_ATTR_PEER_V4) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.peer_v4 = 1;
+ dst->peer_v4 = mnl_attr_get_u32(attr);
+ } else if (type == FOU_ATTR_LOCAL_V6) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.local_v6_len = len;
+ dst->local_v6 = malloc(len);
+ memcpy(dst->local_v6, mnl_attr_get_payload(attr), len);
+ } else if (type == FOU_ATTR_PEER_V6) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.peer_v6_len = len;
+ dst->peer_v6 = malloc(len);
+ memcpy(dst->peer_v6, mnl_attr_get_payload(attr), len);
+ } else if (type == FOU_ATTR_PEER_PORT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.peer_port = 1;
+ dst->peer_port = mnl_attr_get_u16(attr);
+ } else if (type == FOU_ATTR_IFINDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.ifindex = 1;
+ dst->ifindex = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct fou_get_rsp *fou_get(struct ynl_sock *ys, struct fou_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct fou_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, FOU_CMD_GET, 1);
+ ys->req_policy = &fou_nest;
+ yrs.yarg.rsp_policy = &fou_nest;
+
+ if (req->_present.af)
+ mnl_attr_put_u8(nlh, FOU_ATTR_AF, req->af);
+ if (req->_present.ifindex)
+ mnl_attr_put_u32(nlh, FOU_ATTR_IFINDEX, req->ifindex);
+ if (req->_present.port)
+ mnl_attr_put_u16(nlh, FOU_ATTR_PORT, req->port);
+ if (req->_present.peer_port)
+ mnl_attr_put_u16(nlh, FOU_ATTR_PEER_PORT, req->peer_port);
+ if (req->_present.local_v4)
+ mnl_attr_put_u32(nlh, FOU_ATTR_LOCAL_V4, req->local_v4);
+ if (req->_present.peer_v4)
+ mnl_attr_put_u32(nlh, FOU_ATTR_PEER_V4, req->peer_v4);
+ if (req->_present.local_v6_len)
+ mnl_attr_put(nlh, FOU_ATTR_LOCAL_V6, req->_present.local_v6_len, req->local_v6);
+ if (req->_present.peer_v6_len)
+ mnl_attr_put(nlh, FOU_ATTR_PEER_V6, req->_present.peer_v6_len, req->peer_v6);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = fou_get_rsp_parse;
+ yrs.rsp_cmd = FOU_CMD_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ fou_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* FOU_CMD_GET - dump */
+void fou_get_list_free(struct fou_get_list *rsp)
+{
+ struct fou_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.local_v6);
+ free(rsp->obj.peer_v6);
+ free(rsp);
+ }
+}
+
+struct fou_get_list *fou_get_dump(struct ynl_sock *ys)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct fou_get_list);
+ yds.cb = fou_get_rsp_parse;
+ yds.rsp_cmd = FOU_CMD_GET;
+ yds.rsp_policy = &fou_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, FOU_CMD_GET, 1);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ fou_get_list_free(yds.first);
+ return NULL;
+}
+
+const struct ynl_family ynl_fou_family = {
+ .name = "fou",
+};
diff --git a/tools/net/ynl/generated/fou-user.h b/tools/net/ynl/generated/fou-user.h
new file mode 100644
index 000000000000..d8ab50579cd1
--- /dev/null
+++ b/tools/net/ynl/generated/fou-user.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/fou.yaml */
+/* YNL-GEN user header */
+
+#ifndef _LINUX_FOU_GEN_H
+#define _LINUX_FOU_GEN_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <linux/types.h>
+#include <linux/fou.h>
+
+struct ynl_sock;
+
+extern const struct ynl_family ynl_fou_family;
+
+/* Enums */
+const char *fou_op_str(int op);
+const char *fou_encap_type_str(int value);
+
+/* Common nested types */
+/* ============== FOU_CMD_ADD ============== */
+/* FOU_CMD_ADD - do */
+struct fou_add_req {
+ struct {
+ __u32 port:1;
+ __u32 ipproto:1;
+ __u32 type:1;
+ __u32 remcsum_nopartial:1;
+ __u32 local_v4:1;
+ __u32 peer_v4:1;
+ __u32 local_v6_len;
+ __u32 peer_v6_len;
+ __u32 peer_port:1;
+ __u32 ifindex:1;
+ } _present;
+
+ __u16 port /* big-endian */;
+ __u8 ipproto;
+ __u8 type;
+ __u32 local_v4;
+ __u32 peer_v4;
+ void *local_v6;
+ void *peer_v6;
+ __u16 peer_port /* big-endian */;
+ __s32 ifindex;
+};
+
+static inline struct fou_add_req *fou_add_req_alloc(void)
+{
+ return calloc(1, sizeof(struct fou_add_req));
+}
+void fou_add_req_free(struct fou_add_req *req);
+
+static inline void
+fou_add_req_set_port(struct fou_add_req *req, __u16 port /* big-endian */)
+{
+ req->_present.port = 1;
+ req->port = port;
+}
+static inline void
+fou_add_req_set_ipproto(struct fou_add_req *req, __u8 ipproto)
+{
+ req->_present.ipproto = 1;
+ req->ipproto = ipproto;
+}
+static inline void fou_add_req_set_type(struct fou_add_req *req, __u8 type)
+{
+ req->_present.type = 1;
+ req->type = type;
+}
+static inline void fou_add_req_set_remcsum_nopartial(struct fou_add_req *req)
+{
+ req->_present.remcsum_nopartial = 1;
+}
+static inline void
+fou_add_req_set_local_v4(struct fou_add_req *req, __u32 local_v4)
+{
+ req->_present.local_v4 = 1;
+ req->local_v4 = local_v4;
+}
+static inline void
+fou_add_req_set_peer_v4(struct fou_add_req *req, __u32 peer_v4)
+{
+ req->_present.peer_v4 = 1;
+ req->peer_v4 = peer_v4;
+}
+static inline void
+fou_add_req_set_local_v6(struct fou_add_req *req, const void *local_v6,
+ size_t len)
+{
+ free(req->local_v6);
+ req->_present.local_v6_len = len;
+ req->local_v6 = malloc(req->_present.local_v6_len);
+ memcpy(req->local_v6, local_v6, req->_present.local_v6_len);
+}
+static inline void
+fou_add_req_set_peer_v6(struct fou_add_req *req, const void *peer_v6,
+ size_t len)
+{
+ free(req->peer_v6);
+ req->_present.peer_v6_len = len;
+ req->peer_v6 = malloc(req->_present.peer_v6_len);
+ memcpy(req->peer_v6, peer_v6, req->_present.peer_v6_len);
+}
+static inline void
+fou_add_req_set_peer_port(struct fou_add_req *req,
+ __u16 peer_port /* big-endian */)
+{
+ req->_present.peer_port = 1;
+ req->peer_port = peer_port;
+}
+static inline void
+fou_add_req_set_ifindex(struct fou_add_req *req, __s32 ifindex)
+{
+ req->_present.ifindex = 1;
+ req->ifindex = ifindex;
+}
+
+/*
+ * Add port.
+ */
+int fou_add(struct ynl_sock *ys, struct fou_add_req *req);
+
+/* ============== FOU_CMD_DEL ============== */
+/* FOU_CMD_DEL - do */
+struct fou_del_req {
+ struct {
+ __u32 af:1;
+ __u32 ifindex:1;
+ __u32 port:1;
+ __u32 peer_port:1;
+ __u32 local_v4:1;
+ __u32 peer_v4:1;
+ __u32 local_v6_len;
+ __u32 peer_v6_len;
+ } _present;
+
+ __u8 af;
+ __s32 ifindex;
+ __u16 port /* big-endian */;
+ __u16 peer_port /* big-endian */;
+ __u32 local_v4;
+ __u32 peer_v4;
+ void *local_v6;
+ void *peer_v6;
+};
+
+static inline struct fou_del_req *fou_del_req_alloc(void)
+{
+ return calloc(1, sizeof(struct fou_del_req));
+}
+void fou_del_req_free(struct fou_del_req *req);
+
+static inline void fou_del_req_set_af(struct fou_del_req *req, __u8 af)
+{
+ req->_present.af = 1;
+ req->af = af;
+}
+static inline void
+fou_del_req_set_ifindex(struct fou_del_req *req, __s32 ifindex)
+{
+ req->_present.ifindex = 1;
+ req->ifindex = ifindex;
+}
+static inline void
+fou_del_req_set_port(struct fou_del_req *req, __u16 port /* big-endian */)
+{
+ req->_present.port = 1;
+ req->port = port;
+}
+static inline void
+fou_del_req_set_peer_port(struct fou_del_req *req,
+ __u16 peer_port /* big-endian */)
+{
+ req->_present.peer_port = 1;
+ req->peer_port = peer_port;
+}
+static inline void
+fou_del_req_set_local_v4(struct fou_del_req *req, __u32 local_v4)
+{
+ req->_present.local_v4 = 1;
+ req->local_v4 = local_v4;
+}
+static inline void
+fou_del_req_set_peer_v4(struct fou_del_req *req, __u32 peer_v4)
+{
+ req->_present.peer_v4 = 1;
+ req->peer_v4 = peer_v4;
+}
+static inline void
+fou_del_req_set_local_v6(struct fou_del_req *req, const void *local_v6,
+ size_t len)
+{
+ free(req->local_v6);
+ req->_present.local_v6_len = len;
+ req->local_v6 = malloc(req->_present.local_v6_len);
+ memcpy(req->local_v6, local_v6, req->_present.local_v6_len);
+}
+static inline void
+fou_del_req_set_peer_v6(struct fou_del_req *req, const void *peer_v6,
+ size_t len)
+{
+ free(req->peer_v6);
+ req->_present.peer_v6_len = len;
+ req->peer_v6 = malloc(req->_present.peer_v6_len);
+ memcpy(req->peer_v6, peer_v6, req->_present.peer_v6_len);
+}
+
+/*
+ * Delete port.
+ */
+int fou_del(struct ynl_sock *ys, struct fou_del_req *req);
+
+/* ============== FOU_CMD_GET ============== */
+/* FOU_CMD_GET - do */
+struct fou_get_req {
+ struct {
+ __u32 af:1;
+ __u32 ifindex:1;
+ __u32 port:1;
+ __u32 peer_port:1;
+ __u32 local_v4:1;
+ __u32 peer_v4:1;
+ __u32 local_v6_len;
+ __u32 peer_v6_len;
+ } _present;
+
+ __u8 af;
+ __s32 ifindex;
+ __u16 port /* big-endian */;
+ __u16 peer_port /* big-endian */;
+ __u32 local_v4;
+ __u32 peer_v4;
+ void *local_v6;
+ void *peer_v6;
+};
+
+static inline struct fou_get_req *fou_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct fou_get_req));
+}
+void fou_get_req_free(struct fou_get_req *req);
+
+static inline void fou_get_req_set_af(struct fou_get_req *req, __u8 af)
+{
+ req->_present.af = 1;
+ req->af = af;
+}
+static inline void
+fou_get_req_set_ifindex(struct fou_get_req *req, __s32 ifindex)
+{
+ req->_present.ifindex = 1;
+ req->ifindex = ifindex;
+}
+static inline void
+fou_get_req_set_port(struct fou_get_req *req, __u16 port /* big-endian */)
+{
+ req->_present.port = 1;
+ req->port = port;
+}
+static inline void
+fou_get_req_set_peer_port(struct fou_get_req *req,
+ __u16 peer_port /* big-endian */)
+{
+ req->_present.peer_port = 1;
+ req->peer_port = peer_port;
+}
+static inline void
+fou_get_req_set_local_v4(struct fou_get_req *req, __u32 local_v4)
+{
+ req->_present.local_v4 = 1;
+ req->local_v4 = local_v4;
+}
+static inline void
+fou_get_req_set_peer_v4(struct fou_get_req *req, __u32 peer_v4)
+{
+ req->_present.peer_v4 = 1;
+ req->peer_v4 = peer_v4;
+}
+static inline void
+fou_get_req_set_local_v6(struct fou_get_req *req, const void *local_v6,
+ size_t len)
+{
+ free(req->local_v6);
+ req->_present.local_v6_len = len;
+ req->local_v6 = malloc(req->_present.local_v6_len);
+ memcpy(req->local_v6, local_v6, req->_present.local_v6_len);
+}
+static inline void
+fou_get_req_set_peer_v6(struct fou_get_req *req, const void *peer_v6,
+ size_t len)
+{
+ free(req->peer_v6);
+ req->_present.peer_v6_len = len;
+ req->peer_v6 = malloc(req->_present.peer_v6_len);
+ memcpy(req->peer_v6, peer_v6, req->_present.peer_v6_len);
+}
+
+struct fou_get_rsp {
+ struct {
+ __u32 port:1;
+ __u32 ipproto:1;
+ __u32 type:1;
+ __u32 remcsum_nopartial:1;
+ __u32 local_v4:1;
+ __u32 peer_v4:1;
+ __u32 local_v6_len;
+ __u32 peer_v6_len;
+ __u32 peer_port:1;
+ __u32 ifindex:1;
+ } _present;
+
+ __u16 port /* big-endian */;
+ __u8 ipproto;
+ __u8 type;
+ __u32 local_v4;
+ __u32 peer_v4;
+ void *local_v6;
+ void *peer_v6;
+ __u16 peer_port /* big-endian */;
+ __s32 ifindex;
+};
+
+void fou_get_rsp_free(struct fou_get_rsp *rsp);
+
+/*
+ * Get tunnel info.
+ */
+struct fou_get_rsp *fou_get(struct ynl_sock *ys, struct fou_get_req *req);
+
+/* FOU_CMD_GET - dump */
+struct fou_get_list {
+ struct fou_get_list *next;
+ struct fou_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void fou_get_list_free(struct fou_get_list *rsp);
+
+struct fou_get_list *fou_get_dump(struct ynl_sock *ys);
+
+#endif /* _LINUX_FOU_GEN_H */
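
The FOU dump API above returns a singly linked list terminated by YNL_LIST_END, exactly as fou_get_list_free() walks it in fou-user.c. A hedged usage sketch follows; obtaining the ynl_sock is assumed to happen elsewhere via the ynl library and is not shown in this hunk.

#include <stdio.h>
#include <arpa/inet.h>
#include "fou-user.h"
#include "ynl.h"

/* Hedged sketch: print the port of every configured FOU tunnel, using the
 * same YNL_LIST_END termination convention as fou_get_list_free().
 */
static void fou_dump_example(struct ynl_sock *ys)
{
	struct fou_get_list *list, *pos;

	list = fou_get_dump(ys);
	if (!list)
		return;

	for (pos = list; (void *)pos != YNL_LIST_END; pos = pos->next)
		if (pos->obj._present.port)
			printf("port: %d\n", ntohs(pos->obj.port));	/* stored big-endian */

	fou_get_list_free(list);
}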
diff --git a/tools/net/ynl/generated/handshake-user.c b/tools/net/ynl/generated/handshake-user.c
new file mode 100644
index 000000000000..7c67765daf90
--- /dev/null
+++ b/tools/net/ynl/generated/handshake-user.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/handshake.yaml */
+/* YNL-GEN user source */
+
+#include <stdlib.h>
+#include <string.h>
+#include "handshake-user.h"
+#include "ynl.h"
+#include <linux/handshake.h>
+
+#include <libmnl/libmnl.h>
+#include <linux/genetlink.h>
+
+/* Enums */
+static const char * const handshake_op_strmap[] = {
+ [HANDSHAKE_CMD_READY] = "ready",
+ [HANDSHAKE_CMD_ACCEPT] = "accept",
+ [HANDSHAKE_CMD_DONE] = "done",
+};
+
+const char *handshake_op_str(int op)
+{
+ if (op < 0 || op >= (int)MNL_ARRAY_SIZE(handshake_op_strmap))
+ return NULL;
+ return handshake_op_strmap[op];
+}
+
+static const char * const handshake_handler_class_strmap[] = {
+ [0] = "none",
+ [1] = "tlshd",
+ [2] = "max",
+};
+
+const char *handshake_handler_class_str(enum handshake_handler_class value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(handshake_handler_class_strmap))
+ return NULL;
+ return handshake_handler_class_strmap[value];
+}
+
+static const char * const handshake_msg_type_strmap[] = {
+ [0] = "unspec",
+ [1] = "clienthello",
+ [2] = "serverhello",
+};
+
+const char *handshake_msg_type_str(enum handshake_msg_type value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(handshake_msg_type_strmap))
+ return NULL;
+ return handshake_msg_type_strmap[value];
+}
+
+static const char * const handshake_auth_strmap[] = {
+ [0] = "unspec",
+ [1] = "unauth",
+ [2] = "psk",
+ [3] = "x509",
+};
+
+const char *handshake_auth_str(enum handshake_auth value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(handshake_auth_strmap))
+ return NULL;
+ return handshake_auth_strmap[value];
+}
+
+/* Policies */
+struct ynl_policy_attr handshake_x509_policy[HANDSHAKE_A_X509_MAX + 1] = {
+ [HANDSHAKE_A_X509_CERT] = { .name = "cert", .type = YNL_PT_U32, },
+ [HANDSHAKE_A_X509_PRIVKEY] = { .name = "privkey", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest handshake_x509_nest = {
+ .max_attr = HANDSHAKE_A_X509_MAX,
+ .table = handshake_x509_policy,
+};
+
+struct ynl_policy_attr handshake_accept_policy[HANDSHAKE_A_ACCEPT_MAX + 1] = {
+ [HANDSHAKE_A_ACCEPT_SOCKFD] = { .name = "sockfd", .type = YNL_PT_U32, },
+ [HANDSHAKE_A_ACCEPT_HANDLER_CLASS] = { .name = "handler-class", .type = YNL_PT_U32, },
+ [HANDSHAKE_A_ACCEPT_MESSAGE_TYPE] = { .name = "message-type", .type = YNL_PT_U32, },
+ [HANDSHAKE_A_ACCEPT_TIMEOUT] = { .name = "timeout", .type = YNL_PT_U32, },
+ [HANDSHAKE_A_ACCEPT_AUTH_MODE] = { .name = "auth-mode", .type = YNL_PT_U32, },
+ [HANDSHAKE_A_ACCEPT_PEER_IDENTITY] = { .name = "peer-identity", .type = YNL_PT_U32, },
+ [HANDSHAKE_A_ACCEPT_CERTIFICATE] = { .name = "certificate", .type = YNL_PT_NEST, .nest = &handshake_x509_nest, },
+ [HANDSHAKE_A_ACCEPT_PEERNAME] = { .name = "peername", .type = YNL_PT_NUL_STR, },
+};
+
+struct ynl_policy_nest handshake_accept_nest = {
+ .max_attr = HANDSHAKE_A_ACCEPT_MAX,
+ .table = handshake_accept_policy,
+};
+
+struct ynl_policy_attr handshake_done_policy[HANDSHAKE_A_DONE_MAX + 1] = {
+ [HANDSHAKE_A_DONE_STATUS] = { .name = "status", .type = YNL_PT_U32, },
+ [HANDSHAKE_A_DONE_SOCKFD] = { .name = "sockfd", .type = YNL_PT_U32, },
+ [HANDSHAKE_A_DONE_REMOTE_AUTH] = { .name = "remote-auth", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest handshake_done_nest = {
+ .max_attr = HANDSHAKE_A_DONE_MAX,
+ .table = handshake_done_policy,
+};
+
+/* Common nested types */
+void handshake_x509_free(struct handshake_x509 *obj)
+{
+}
+
+int handshake_x509_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct handshake_x509 *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == HANDSHAKE_A_X509_CERT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.cert = 1;
+ dst->cert = mnl_attr_get_u32(attr);
+ } else if (type == HANDSHAKE_A_X509_PRIVKEY) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.privkey = 1;
+ dst->privkey = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return 0;
+}
+
+/* ============== HANDSHAKE_CMD_ACCEPT ============== */
+/* HANDSHAKE_CMD_ACCEPT - do */
+void handshake_accept_req_free(struct handshake_accept_req *req)
+{
+ free(req);
+}
+
+void handshake_accept_rsp_free(struct handshake_accept_rsp *rsp)
+{
+ unsigned int i;
+
+ free(rsp->peer_identity);
+ for (i = 0; i < rsp->n_certificate; i++)
+ handshake_x509_free(&rsp->certificate[i]);
+ free(rsp->certificate);
+ free(rsp->peername);
+ free(rsp);
+}
+
+int handshake_accept_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct handshake_accept_rsp *dst;
+ unsigned int n_peer_identity = 0;
+ unsigned int n_certificate = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ if (dst->certificate)
+ return ynl_error_parse(yarg, "attribute already present (accept.certificate)");
+ if (dst->peer_identity)
+ return ynl_error_parse(yarg, "attribute already present (accept.peer-identity)");
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == HANDSHAKE_A_ACCEPT_SOCKFD) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sockfd = 1;
+ dst->sockfd = mnl_attr_get_u32(attr);
+ } else if (type == HANDSHAKE_A_ACCEPT_MESSAGE_TYPE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.message_type = 1;
+ dst->message_type = mnl_attr_get_u32(attr);
+ } else if (type == HANDSHAKE_A_ACCEPT_TIMEOUT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.timeout = 1;
+ dst->timeout = mnl_attr_get_u32(attr);
+ } else if (type == HANDSHAKE_A_ACCEPT_AUTH_MODE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.auth_mode = 1;
+ dst->auth_mode = mnl_attr_get_u32(attr);
+ } else if (type == HANDSHAKE_A_ACCEPT_PEER_IDENTITY) {
+ n_peer_identity++;
+ } else if (type == HANDSHAKE_A_ACCEPT_CERTIFICATE) {
+ n_certificate++;
+ } else if (type == HANDSHAKE_A_ACCEPT_PEERNAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.peername_len = len;
+ dst->peername = malloc(len + 1);
+ memcpy(dst->peername, mnl_attr_get_str(attr), len);
+ dst->peername[len] = 0;
+ }
+ }
+
+ if (n_certificate) {
+ dst->certificate = calloc(n_certificate, sizeof(*dst->certificate));
+ dst->n_certificate = n_certificate;
+ i = 0;
+ parg.rsp_policy = &handshake_x509_nest;
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ if (mnl_attr_get_type(attr) == HANDSHAKE_A_ACCEPT_CERTIFICATE) {
+ parg.data = &dst->certificate[i];
+ if (handshake_x509_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+ if (n_peer_identity) {
+ dst->peer_identity = calloc(n_peer_identity, sizeof(*dst->peer_identity));
+ dst->n_peer_identity = n_peer_identity;
+ i = 0;
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ if (mnl_attr_get_type(attr) == HANDSHAKE_A_ACCEPT_PEER_IDENTITY) {
+ dst->peer_identity[i] = mnl_attr_get_u32(attr);
+ i++;
+ }
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct handshake_accept_rsp *
+handshake_accept(struct ynl_sock *ys, struct handshake_accept_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct handshake_accept_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, HANDSHAKE_CMD_ACCEPT, 1);
+ ys->req_policy = &handshake_accept_nest;
+ yrs.yarg.rsp_policy = &handshake_accept_nest;
+
+ if (req->_present.handler_class)
+ mnl_attr_put_u32(nlh, HANDSHAKE_A_ACCEPT_HANDLER_CLASS, req->handler_class);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = handshake_accept_rsp_parse;
+ yrs.rsp_cmd = HANDSHAKE_CMD_ACCEPT;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ handshake_accept_rsp_free(rsp);
+ return NULL;
+}
+
+/* HANDSHAKE_CMD_ACCEPT - notify */
+void handshake_accept_ntf_free(struct handshake_accept_ntf *rsp)
+{
+ unsigned int i;
+
+ free(rsp->obj.peer_identity);
+ for (i = 0; i < rsp->obj.n_certificate; i++)
+ handshake_x509_free(&rsp->obj.certificate[i]);
+ free(rsp->obj.certificate);
+ free(rsp->obj.peername);
+ free(rsp);
+}
+
+/* ============== HANDSHAKE_CMD_DONE ============== */
+/* HANDSHAKE_CMD_DONE - do */
+void handshake_done_req_free(struct handshake_done_req *req)
+{
+ free(req->remote_auth);
+ free(req);
+}
+
+int handshake_done(struct ynl_sock *ys, struct handshake_done_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, HANDSHAKE_CMD_DONE, 1);
+ ys->req_policy = &handshake_done_nest;
+
+ if (req->_present.status)
+ mnl_attr_put_u32(nlh, HANDSHAKE_A_DONE_STATUS, req->status);
+ if (req->_present.sockfd)
+ mnl_attr_put_u32(nlh, HANDSHAKE_A_DONE_SOCKFD, req->sockfd);
+ for (unsigned int i = 0; i < req->n_remote_auth; i++)
+ mnl_attr_put_u32(nlh, HANDSHAKE_A_DONE_REMOTE_AUTH, req->remote_auth[i]);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+static const struct ynl_ntf_info handshake_ntf_info[] = {
+ [HANDSHAKE_CMD_READY] = {
+ .alloc_sz = sizeof(struct handshake_accept_ntf),
+ .cb = handshake_accept_rsp_parse,
+ .policy = &handshake_accept_nest,
+ .free = (void *)handshake_accept_ntf_free,
+ },
+};
+
+const struct ynl_family ynl_handshake_family = {
+ .name = "handshake",
+ .ntf_info = handshake_ntf_info,
+ .ntf_info_size = MNL_ARRAY_SIZE(handshake_ntf_info),
+};
diff --git a/tools/net/ynl/generated/handshake-user.h b/tools/net/ynl/generated/handshake-user.h
new file mode 100644
index 000000000000..47646bb91cea
--- /dev/null
+++ b/tools/net/ynl/generated/handshake-user.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/handshake.yaml */
+/* YNL-GEN user header */
+
+#ifndef _LINUX_HANDSHAKE_GEN_H
+#define _LINUX_HANDSHAKE_GEN_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <linux/types.h>
+#include <linux/handshake.h>
+
+struct ynl_sock;
+
+extern const struct ynl_family ynl_handshake_family;
+
+/* Enums */
+const char *handshake_op_str(int op);
+const char *handshake_handler_class_str(enum handshake_handler_class value);
+const char *handshake_msg_type_str(enum handshake_msg_type value);
+const char *handshake_auth_str(enum handshake_auth value);
+
+/* Common nested types */
+struct handshake_x509 {
+ struct {
+ __u32 cert:1;
+ __u32 privkey:1;
+ } _present;
+
+ __u32 cert;
+ __u32 privkey;
+};
+
+/* ============== HANDSHAKE_CMD_ACCEPT ============== */
+/* HANDSHAKE_CMD_ACCEPT - do */
+struct handshake_accept_req {
+ struct {
+ __u32 handler_class:1;
+ } _present;
+
+ enum handshake_handler_class handler_class;
+};
+
+static inline struct handshake_accept_req *handshake_accept_req_alloc(void)
+{
+ return calloc(1, sizeof(struct handshake_accept_req));
+}
+void handshake_accept_req_free(struct handshake_accept_req *req);
+
+static inline void
+handshake_accept_req_set_handler_class(struct handshake_accept_req *req,
+ enum handshake_handler_class handler_class)
+{
+ req->_present.handler_class = 1;
+ req->handler_class = handler_class;
+}
+
+struct handshake_accept_rsp {
+ struct {
+ __u32 sockfd:1;
+ __u32 message_type:1;
+ __u32 timeout:1;
+ __u32 auth_mode:1;
+ __u32 peername_len;
+ } _present;
+
+ __u32 sockfd;
+ enum handshake_msg_type message_type;
+ __u32 timeout;
+ enum handshake_auth auth_mode;
+ unsigned int n_peer_identity;
+ __u32 *peer_identity;
+ unsigned int n_certificate;
+ struct handshake_x509 *certificate;
+ char *peername;
+};
+
+void handshake_accept_rsp_free(struct handshake_accept_rsp *rsp);
+
+/*
+ * Handler retrieves next queued handshake request
+ */
+struct handshake_accept_rsp *
+handshake_accept(struct ynl_sock *ys, struct handshake_accept_req *req);
+
+/* HANDSHAKE_CMD_ACCEPT - notify */
+struct handshake_accept_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct handshake_accept_ntf *ntf);
+ struct handshake_accept_rsp obj __attribute__ ((aligned (8)));
+};
+
+void handshake_accept_ntf_free(struct handshake_accept_ntf *rsp);
+
+/* ============== HANDSHAKE_CMD_DONE ============== */
+/* HANDSHAKE_CMD_DONE - do */
+struct handshake_done_req {
+ struct {
+ __u32 status:1;
+ __u32 sockfd:1;
+ } _present;
+
+ __u32 status;
+ __u32 sockfd;
+ unsigned int n_remote_auth;
+ __u32 *remote_auth;
+};
+
+static inline struct handshake_done_req *handshake_done_req_alloc(void)
+{
+ return calloc(1, sizeof(struct handshake_done_req));
+}
+void handshake_done_req_free(struct handshake_done_req *req);
+
+static inline void
+handshake_done_req_set_status(struct handshake_done_req *req, __u32 status)
+{
+ req->_present.status = 1;
+ req->status = status;
+}
+static inline void
+handshake_done_req_set_sockfd(struct handshake_done_req *req, __u32 sockfd)
+{
+ req->_present.sockfd = 1;
+ req->sockfd = sockfd;
+}
+static inline void
+__handshake_done_req_set_remote_auth(struct handshake_done_req *req,
+ __u32 *remote_auth,
+ unsigned int n_remote_auth)
+{
+ free(req->remote_auth);
+ req->remote_auth = remote_auth;
+ req->n_remote_auth = n_remote_auth;
+}
+
+/*
+ * Handler reports handshake completion
+ */
+int handshake_done(struct ynl_sock *ys, struct handshake_done_req *req);
+
+#endif /* _LINUX_HANDSHAKE_GEN_H */
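No sample program for the handshake family is included in this series, so as orientation here is a minimal, hypothetical usage sketch for the generated API above: accept one queued handshake request and report it as completed. It only uses functions declared in this header plus the socket helpers from ynl.h; the HANDSHAKE_HANDLER_CLASS_TLSHD constant is assumed to come from the uapi <linux/handshake.h>, and the zero "status" value is an assumed success code, not something specified by this patch.

/* Sketch only, not part of the patch: drive one accept/done cycle. */
#include <stdio.h>

#include "handshake-user.h"
#include "ynl.h"

static int handle_one_request(struct ynl_sock *ys)
{
	struct handshake_accept_req *areq;
	struct handshake_accept_rsp *arsp;
	struct handshake_done_req *dreq;
	int ret;

	areq = handshake_accept_req_alloc();
	handshake_accept_req_set_handler_class(areq,
					       HANDSHAKE_HANDLER_CLASS_TLSHD);

	arsp = handshake_accept(ys, areq);
	handshake_accept_req_free(areq);
	if (!arsp) {
		fprintf(stderr, "YNL: %s\n", ys->err.msg);
		return -1;
	}

	/* ... perform the actual TLS handshake on arsp->sockfd here ... */

	dreq = handshake_done_req_alloc();
	handshake_done_req_set_status(dreq, 0);	/* 0 assumed to mean success */
	handshake_done_req_set_sockfd(dreq, arsp->sockfd);
	ret = handshake_done(ys, dreq);

	handshake_done_req_free(dreq);
	handshake_accept_rsp_free(arsp);
	return ret;
}

The socket itself would be created with ynl_sock_create(&ynl_handshake_family, &err) and torn down with ynl_sock_destroy(), exactly as the devlink/ethtool/netdev samples later in this patch do for their families.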
diff --git a/tools/net/ynl/generated/netdev-user.c b/tools/net/ynl/generated/netdev-user.c
new file mode 100644
index 000000000000..4eb8aefef0cd
--- /dev/null
+++ b/tools/net/ynl/generated/netdev-user.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN user source */
+
+#include <stdlib.h>
+#include <string.h>
+#include "netdev-user.h"
+#include "ynl.h"
+#include <linux/netdev.h>
+
+#include <libmnl/libmnl.h>
+#include <linux/genetlink.h>
+
+/* Enums */
+static const char * const netdev_op_strmap[] = {
+ [NETDEV_CMD_DEV_GET] = "dev-get",
+ [NETDEV_CMD_DEV_ADD_NTF] = "dev-add-ntf",
+ [NETDEV_CMD_DEV_DEL_NTF] = "dev-del-ntf",
+ [NETDEV_CMD_DEV_CHANGE_NTF] = "dev-change-ntf",
+};
+
+const char *netdev_op_str(int op)
+{
+ if (op < 0 || op >= (int)MNL_ARRAY_SIZE(netdev_op_strmap))
+ return NULL;
+ return netdev_op_strmap[op];
+}
+
+static const char * const netdev_xdp_act_strmap[] = {
+ [0] = "basic",
+ [1] = "redirect",
+ [2] = "ndo-xmit",
+ [3] = "xsk-zerocopy",
+ [4] = "hw-offload",
+ [5] = "rx-sg",
+ [6] = "ndo-xmit-sg",
+};
+
+const char *netdev_xdp_act_str(enum netdev_xdp_act value)
+{
+ value = ffs(value) - 1;
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(netdev_xdp_act_strmap))
+ return NULL;
+ return netdev_xdp_act_strmap[value];
+}
+
+/* Policies */
+struct ynl_policy_attr netdev_dev_policy[NETDEV_A_DEV_MAX + 1] = {
+ [NETDEV_A_DEV_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
+ [NETDEV_A_DEV_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
+ [NETDEV_A_DEV_XDP_FEATURES] = { .name = "xdp-features", .type = YNL_PT_U64, },
+};
+
+struct ynl_policy_nest netdev_dev_nest = {
+ .max_attr = NETDEV_A_DEV_MAX,
+ .table = netdev_dev_policy,
+};
+
+/* Common nested types */
+/* ============== NETDEV_CMD_DEV_GET ============== */
+/* NETDEV_CMD_DEV_GET - do */
+void netdev_dev_get_req_free(struct netdev_dev_get_req *req)
+{
+ free(req);
+}
+
+void netdev_dev_get_rsp_free(struct netdev_dev_get_rsp *rsp)
+{
+ free(rsp);
+}
+
+int netdev_dev_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct netdev_dev_get_rsp *dst;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == NETDEV_A_DEV_IFINDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.ifindex = 1;
+ dst->ifindex = mnl_attr_get_u32(attr);
+ } else if (type == NETDEV_A_DEV_XDP_FEATURES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.xdp_features = 1;
+ dst->xdp_features = mnl_attr_get_u64(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct netdev_dev_get_rsp *
+netdev_dev_get(struct ynl_sock *ys, struct netdev_dev_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct netdev_dev_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, NETDEV_CMD_DEV_GET, 1);
+ ys->req_policy = &netdev_dev_nest;
+ yrs.yarg.rsp_policy = &netdev_dev_nest;
+
+ if (req->_present.ifindex)
+ mnl_attr_put_u32(nlh, NETDEV_A_DEV_IFINDEX, req->ifindex);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = netdev_dev_get_rsp_parse;
+ yrs.rsp_cmd = NETDEV_CMD_DEV_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ netdev_dev_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* NETDEV_CMD_DEV_GET - dump */
+void netdev_dev_get_list_free(struct netdev_dev_get_list *rsp)
+{
+ struct netdev_dev_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp);
+ }
+}
+
+struct netdev_dev_get_list *netdev_dev_get_dump(struct ynl_sock *ys)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct netdev_dev_get_list);
+ yds.cb = netdev_dev_get_rsp_parse;
+ yds.rsp_cmd = NETDEV_CMD_DEV_GET;
+ yds.rsp_policy = &netdev_dev_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, NETDEV_CMD_DEV_GET, 1);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ netdev_dev_get_list_free(yds.first);
+ return NULL;
+}
+
+/* NETDEV_CMD_DEV_GET - notify */
+void netdev_dev_get_ntf_free(struct netdev_dev_get_ntf *rsp)
+{
+ free(rsp);
+}
+
+static const struct ynl_ntf_info netdev_ntf_info[] = {
+ [NETDEV_CMD_DEV_ADD_NTF] = {
+ .alloc_sz = sizeof(struct netdev_dev_get_ntf),
+ .cb = netdev_dev_get_rsp_parse,
+ .policy = &netdev_dev_nest,
+ .free = (void *)netdev_dev_get_ntf_free,
+ },
+ [NETDEV_CMD_DEV_DEL_NTF] = {
+ .alloc_sz = sizeof(struct netdev_dev_get_ntf),
+ .cb = netdev_dev_get_rsp_parse,
+ .policy = &netdev_dev_nest,
+ .free = (void *)netdev_dev_get_ntf_free,
+ },
+ [NETDEV_CMD_DEV_CHANGE_NTF] = {
+ .alloc_sz = sizeof(struct netdev_dev_get_ntf),
+ .cb = netdev_dev_get_rsp_parse,
+ .policy = &netdev_dev_nest,
+ .free = (void *)netdev_dev_get_ntf_free,
+ },
+};
+
+const struct ynl_family ynl_netdev_family = {
+ .name = "netdev",
+ .ntf_info = netdev_ntf_info,
+ .ntf_info_size = MNL_ARRAY_SIZE(netdev_ntf_info),
+};
diff --git a/tools/net/ynl/generated/netdev-user.h b/tools/net/ynl/generated/netdev-user.h
new file mode 100644
index 000000000000..5554dc69bb9c
--- /dev/null
+++ b/tools/net/ynl/generated/netdev-user.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN user header */
+
+#ifndef _LINUX_NETDEV_GEN_H
+#define _LINUX_NETDEV_GEN_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <linux/types.h>
+#include <linux/netdev.h>
+
+struct ynl_sock;
+
+extern const struct ynl_family ynl_netdev_family;
+
+/* Enums */
+const char *netdev_op_str(int op);
+const char *netdev_xdp_act_str(enum netdev_xdp_act value);
+
+/* Common nested types */
+/* ============== NETDEV_CMD_DEV_GET ============== */
+/* NETDEV_CMD_DEV_GET - do */
+struct netdev_dev_get_req {
+ struct {
+ __u32 ifindex:1;
+ } _present;
+
+ __u32 ifindex;
+};
+
+static inline struct netdev_dev_get_req *netdev_dev_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct netdev_dev_get_req));
+}
+void netdev_dev_get_req_free(struct netdev_dev_get_req *req);
+
+static inline void
+netdev_dev_get_req_set_ifindex(struct netdev_dev_get_req *req, __u32 ifindex)
+{
+ req->_present.ifindex = 1;
+ req->ifindex = ifindex;
+}
+
+struct netdev_dev_get_rsp {
+ struct {
+ __u32 ifindex:1;
+ __u32 xdp_features:1;
+ } _present;
+
+ __u32 ifindex;
+ __u64 xdp_features;
+};
+
+void netdev_dev_get_rsp_free(struct netdev_dev_get_rsp *rsp);
+
+/*
+ * Get / dump information about a netdev.
+ */
+struct netdev_dev_get_rsp *
+netdev_dev_get(struct ynl_sock *ys, struct netdev_dev_get_req *req);
+
+/* NETDEV_CMD_DEV_GET - dump */
+struct netdev_dev_get_list {
+ struct netdev_dev_get_list *next;
+ struct netdev_dev_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void netdev_dev_get_list_free(struct netdev_dev_get_list *rsp);
+
+struct netdev_dev_get_list *netdev_dev_get_dump(struct ynl_sock *ys);
+
+/* NETDEV_CMD_DEV_GET - notify */
+struct netdev_dev_get_ntf {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct netdev_dev_get_ntf *ntf);
+ struct netdev_dev_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void netdev_dev_get_ntf_free(struct netdev_dev_get_ntf *rsp);
+
+#endif /* _LINUX_NETDEV_GEN_H */
diff --git a/tools/net/ynl/lib/Makefile b/tools/net/ynl/lib/Makefile
new file mode 100644
index 000000000000..d2e50fd0a52d
--- /dev/null
+++ b/tools/net/ynl/lib/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CC=gcc
+CFLAGS=-std=gnu11 -O2 -W -Wall -Wextra -Wno-unused-parameter -Wshadow
+ifeq ("$(DEBUG)","1")
+ CFLAGS += -g -fsanitize=address -fsanitize=leak -static-libasan
+endif
+
+SRCS=$(wildcard *.c)
+OBJS=$(patsubst %.c,%.o,${SRCS})
+
+include $(wildcard *.d)
+
+all: ynl.a
+
+ynl.a: $(OBJS)
+ ar rcs $@ $(OBJS)
+clean:
+ rm -f *.o *.d *~
+
+hardclean: clean
+ rm -f *.a
+
+%.o: %.c
+ $(COMPILE.c) -MMD -c -o $@ $<
+
+.PHONY: all clean
+.DEFAULT_GOAL=all
diff --git a/tools/net/ynl/lib/nlspec.py b/tools/net/ynl/lib/nlspec.py
index a0241add3839..0ff0d18666b2 100644
--- a/tools/net/ynl/lib/nlspec.py
+++ b/tools/net/ynl/lib/nlspec.py
@@ -154,6 +154,9 @@ class SpecAttr(SpecElement):
is_multi bool, attr may repeat multiple times
struct_name string, name of struct definition
sub_type string, name of sub type
+ len integer, optional byte length of binary types
+ display_hint string, hint to help choose format specifier
+ when displaying the value
"""
def __init__(self, family, attr_set, yaml, value):
super().__init__(family, yaml)
@@ -164,6 +167,8 @@ class SpecAttr(SpecElement):
self.struct_name = yaml.get('struct')
self.sub_type = yaml.get('sub-type')
self.byte_order = yaml.get('byte-order')
+ self.len = yaml.get('len')
+ self.display_hint = yaml.get('display-hint')
class SpecAttrSet(SpecElement):
@@ -226,11 +231,20 @@ class SpecStructMember(SpecElement):
Represents a single struct member attribute.
Attributes:
- type string, type of the member attribute
+ type string, type of the member attribute
+ byte_order string or None for native byte order
+ enum string, name of the enum definition
+ len integer, optional byte length of binary types
+ display_hint string, hint to help choose format specifier
+ when displaying the value
"""
def __init__(self, family, yaml):
super().__init__(family, yaml)
self.type = yaml['type']
+ self.byte_order = yaml.get('byte-order')
+ self.enum = yaml.get('enum')
+ self.len = yaml.get('len')
+ self.display_hint = yaml.get('display-hint')
class SpecStruct(SpecElement):
@@ -320,16 +334,17 @@ class SpecFamily(SpecElement):
Attributes:
proto protocol type (e.g. genetlink)
+ msg_id_model enum-model for operations (unified, directional etc.)
license spec license (loaded from an SPDX tag on the spec)
attr_sets dict of attribute sets
msgs dict of all messages (index by name)
- msgs_by_value dict of all messages (indexed by name)
ops dict of all valid requests / responses
+ ntfs dict of all async events
consts dict of all constants/enums
fixed_header string, optional name of family default fixed header struct
"""
- def __init__(self, spec_path, schema_path=None):
+ def __init__(self, spec_path, schema_path=None, exclude_ops=None):
with open(spec_path, "r") as stream:
prefix = '# SPDX-License-Identifier: '
first = stream.readline().strip()
@@ -344,7 +359,10 @@ class SpecFamily(SpecElement):
super().__init__(self, spec)
+ self._exclude_ops = exclude_ops if exclude_ops else []
+
self.proto = self.yaml.get('protocol', 'genetlink')
+ self.msg_id_model = self.yaml['operations'].get('enum-model', 'unified')
if schema_path is None:
schema_path = os.path.dirname(os.path.dirname(spec_path)) + f'/{self.proto}.yaml'
@@ -364,6 +382,7 @@ class SpecFamily(SpecElement):
self.req_by_value = collections.OrderedDict()
self.rsp_by_value = collections.OrderedDict()
self.ops = collections.OrderedDict()
+ self.ntfs = collections.OrderedDict()
self.consts = collections.OrderedDict()
last_exception = None
@@ -416,7 +435,7 @@ class SpecFamily(SpecElement):
self.fixed_header = self.yaml['operations'].get('fixed-header')
req_val = rsp_val = 1
for elem in self.yaml['operations']['list']:
- if 'notify' in elem:
+ if 'notify' in elem or 'event' in elem:
if 'value' in elem:
rsp_val = elem['value']
req_val_next = req_val
@@ -438,7 +457,17 @@ class SpecFamily(SpecElement):
else:
raise Exception("Can't parse directional ops")
- op = self.new_operation(elem, req_val, rsp_val)
+ if req_val == req_val_next:
+ req_val = None
+ if rsp_val == rsp_val_next:
+ rsp_val = None
+
+ skip = False
+ for exclude in self._exclude_ops:
+ skip |= bool(exclude.match(elem['name']))
+ if not skip:
+ op = self.new_operation(elem, req_val, rsp_val)
+
req_val = req_val_next
rsp_val = rsp_val_next
@@ -469,10 +498,9 @@ class SpecFamily(SpecElement):
attr_set = self.new_attr_set(elem)
self.attr_sets[elem['name']] = attr_set
- msg_id_model = self.yaml['operations'].get('enum-model', 'unified')
- if msg_id_model == 'unified':
+ if self.msg_id_model == 'unified':
self._dictify_ops_unified()
- elif msg_id_model == 'directional':
+ elif self.msg_id_model == 'directional':
self._dictify_ops_directional()
for op in self.msgs.values():
@@ -482,3 +510,5 @@ class SpecFamily(SpecElement):
self.rsp_by_value[op.rsp_value] = op
if not op.is_async and 'attribute-set' in op:
self.ops[op.name] = op
+ elif op.is_async:
+ self.ntfs[op.name] = op
diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
new file mode 100644
index 000000000000..514e0d69e731
--- /dev/null
+++ b/tools/net/ynl/lib/ynl.c
@@ -0,0 +1,901 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+#include <errno.h>
+#include <poll.h>
+#include <string.h>
+#include <stdlib.h>
+#include <linux/types.h>
+
+#include <libmnl/libmnl.h>
+#include <linux/genetlink.h>
+
+#include "ynl.h"
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(*arr))
+
+#define __yerr_msg(yse, _msg...) \
+ ({ \
+ struct ynl_error *_yse = (yse); \
+ \
+ if (_yse) { \
+ snprintf(_yse->msg, sizeof(_yse->msg) - 1, _msg); \
+ _yse->msg[sizeof(_yse->msg) - 1] = 0; \
+ } \
+ })
+
+#define __yerr_code(yse, _code...) \
+ ({ \
+ struct ynl_error *_yse = (yse); \
+ \
+ if (_yse) { \
+ _yse->code = _code; \
+ } \
+ })
+
+#define __yerr(yse, _code, _msg...) \
+ ({ \
+ __yerr_msg(yse, _msg); \
+ __yerr_code(yse, _code); \
+ })
+
+#define __perr(yse, _msg) __yerr(yse, errno, _msg)
+
+#define yerr_msg(_ys, _msg...) __yerr_msg(&(_ys)->err, _msg)
+#define yerr(_ys, _code, _msg...) __yerr(&(_ys)->err, _code, _msg)
+#define perr(_ys, _msg) __yerr(&(_ys)->err, errno, _msg)
+
+/* -- Netlink boiler plate */
+static int
+ynl_err_walk_report_one(struct ynl_policy_nest *policy, unsigned int type,
+ char *str, int str_sz, int *n)
+{
+ if (!policy) {
+ if (*n < str_sz)
+ *n += snprintf(str, str_sz, "!policy");
+ return 1;
+ }
+
+ if (type > policy->max_attr) {
+ if (*n < str_sz)
+ *n += snprintf(str, str_sz, "!oob");
+ return 1;
+ }
+
+ if (!policy->table[type].name) {
+ if (*n < str_sz)
+ *n += snprintf(str, str_sz, "!name");
+ return 1;
+ }
+
+ if (*n < str_sz)
+ *n += snprintf(str, str_sz - *n,
+ ".%s", policy->table[type].name);
+ return 0;
+}
+
+static int
+ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
+ struct ynl_policy_nest *policy, char *str, int str_sz,
+ struct ynl_policy_nest **nest_pol)
+{
+ unsigned int astart_off, aend_off;
+ const struct nlattr *attr;
+ unsigned int data_len;
+ unsigned int type;
+ bool found = false;
+ int n = 0;
+
+ if (!policy) {
+ if (n < str_sz)
+ n += snprintf(str, str_sz, "!policy");
+ return n;
+ }
+
+ data_len = end - start;
+
+ mnl_attr_for_each_payload(start, data_len) {
+ astart_off = (char *)attr - (char *)start;
+ aend_off = astart_off + mnl_attr_get_payload_len(attr);
+ if (aend_off <= off)
+ continue;
+
+ found = true;
+ break;
+ }
+ if (!found)
+ return 0;
+
+ off -= astart_off;
+
+ type = mnl_attr_get_type(attr);
+
+ if (ynl_err_walk_report_one(policy, type, str, str_sz, &n))
+ return n;
+
+ if (!off) {
+ if (nest_pol)
+ *nest_pol = policy->table[type].nest;
+ return n;
+ }
+
+ if (!policy->table[type].nest) {
+ if (n < str_sz)
+ n += snprintf(str, str_sz, "!nest");
+ return n;
+ }
+
+ off -= sizeof(struct nlattr);
+ start = mnl_attr_get_payload(attr);
+ end = start + mnl_attr_get_payload_len(attr);
+
+ return n + ynl_err_walk(ys, start, end, off, policy->table[type].nest,
+ &str[n], str_sz - n, nest_pol);
+}
+
+#define NLMSGERR_ATTR_MISS_TYPE (NLMSGERR_ATTR_POLICY + 1)
+#define NLMSGERR_ATTR_MISS_NEST (NLMSGERR_ATTR_POLICY + 2)
+#define NLMSGERR_ATTR_MAX (NLMSGERR_ATTR_MAX + 2)
+
+static int
+ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
+ unsigned int hlen)
+{
+ const struct nlattr *tb[NLMSGERR_ATTR_MAX + 1] = {};
+ char miss_attr[sizeof(ys->err.msg)];
+ char bad_attr[sizeof(ys->err.msg)];
+ const struct nlattr *attr;
+ const char *str = NULL;
+
+ if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS))
+ return MNL_CB_OK;
+
+ mnl_attr_for_each(attr, nlh, hlen) {
+ unsigned int len, type;
+
+ len = mnl_attr_get_payload_len(attr);
+ type = mnl_attr_get_type(attr);
+
+ if (type > NLMSGERR_ATTR_MAX)
+ continue;
+
+ tb[type] = attr;
+
+ switch (type) {
+ case NLMSGERR_ATTR_OFFS:
+ case NLMSGERR_ATTR_MISS_TYPE:
+ case NLMSGERR_ATTR_MISS_NEST:
+ if (len != sizeof(__u32))
+ return MNL_CB_ERROR;
+ break;
+ case NLMSGERR_ATTR_MSG:
+ str = mnl_attr_get_payload(attr);
+ if (str[len - 1])
+ return MNL_CB_ERROR;
+ break;
+ default:
+ break;
+ }
+ }
+
+ bad_attr[0] = '\0';
+ miss_attr[0] = '\0';
+
+ if (tb[NLMSGERR_ATTR_OFFS]) {
+ unsigned int n, off;
+ void *start, *end;
+
+ ys->err.attr_offs = mnl_attr_get_u32(tb[NLMSGERR_ATTR_OFFS]);
+
+ n = snprintf(bad_attr, sizeof(bad_attr), "%sbad attribute: ",
+ str ? " (" : "");
+
+ start = mnl_nlmsg_get_payload_offset(ys->nlh,
+ sizeof(struct genlmsghdr));
+ end = mnl_nlmsg_get_payload_tail(ys->nlh);
+
+ off = ys->err.attr_offs;
+ off -= sizeof(struct nlmsghdr);
+ off -= sizeof(struct genlmsghdr);
+
+ n += ynl_err_walk(ys, start, end, off, ys->req_policy,
+ &bad_attr[n], sizeof(bad_attr) - n, NULL);
+
+ if (n >= sizeof(bad_attr))
+ n = sizeof(bad_attr) - 1;
+ bad_attr[n] = '\0';
+ }
+ if (tb[NLMSGERR_ATTR_MISS_TYPE]) {
+ struct ynl_policy_nest *nest_pol = NULL;
+ unsigned int n, off, type;
+ void *start, *end;
+ int n2;
+
+ type = mnl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_TYPE]);
+
+ n = snprintf(miss_attr, sizeof(miss_attr), "%smissing attribute: ",
+ bad_attr[0] ? ", " : (str ? " (" : ""));
+
+ start = mnl_nlmsg_get_payload_offset(ys->nlh,
+ sizeof(struct genlmsghdr));
+ end = mnl_nlmsg_get_payload_tail(ys->nlh);
+
+ nest_pol = ys->req_policy;
+ if (tb[NLMSGERR_ATTR_MISS_NEST]) {
+ off = mnl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_NEST]);
+ off -= sizeof(struct nlmsghdr);
+ off -= sizeof(struct genlmsghdr);
+
+ n += ynl_err_walk(ys, start, end, off, ys->req_policy,
+ &miss_attr[n], sizeof(miss_attr) - n,
+ &nest_pol);
+ }
+
+ n2 = 0;
+ ynl_err_walk_report_one(nest_pol, type, &miss_attr[n],
+ sizeof(miss_attr) - n, &n2);
+ n += n2;
+
+ if (n >= sizeof(miss_attr))
+ n = sizeof(miss_attr) - 1;
+ miss_attr[n] = '\0';
+ }
+
+ /* Implicitly depends on ys->err.code having been set already */
+ if (str)
+ yerr_msg(ys, "Kernel %s: '%s'%s%s%s",
+ ys->err.code ? "error" : "warning",
+ str, bad_attr, miss_attr,
+ bad_attr[0] || miss_attr[0] ? ")" : "");
+ else if (bad_attr[0] || miss_attr[0])
+ yerr_msg(ys, "Kernel %s: %s%s",
+ ys->err.code ? "error" : "warning",
+ bad_attr, miss_attr);
+
+ return MNL_CB_OK;
+}
+
+static int ynl_cb_error(const struct nlmsghdr *nlh, void *data)
+{
+ const struct nlmsgerr *err = mnl_nlmsg_get_payload(nlh);
+ struct ynl_parse_arg *yarg = data;
+ unsigned int hlen;
+ int code;
+
+ code = err->error >= 0 ? err->error : -err->error;
+ yarg->ys->err.code = code;
+ errno = code;
+
+ hlen = sizeof(*err);
+ if (!(nlh->nlmsg_flags & NLM_F_CAPPED))
+ hlen += mnl_nlmsg_get_payload_len(&err->msg);
+
+ ynl_ext_ack_check(yarg->ys, nlh, hlen);
+
+ return code ? MNL_CB_ERROR : MNL_CB_STOP;
+}
+
+static int ynl_cb_done(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ int err;
+
+ err = *(int *)NLMSG_DATA(nlh);
+ if (err < 0) {
+ yarg->ys->err.code = -err;
+ errno = -err;
+
+ ynl_ext_ack_check(yarg->ys, nlh, sizeof(int));
+
+ return MNL_CB_ERROR;
+ }
+ return MNL_CB_STOP;
+}
+
+static int ynl_cb_noop(const struct nlmsghdr *nlh, void *data)
+{
+ return MNL_CB_OK;
+}
+
+mnl_cb_t ynl_cb_array[NLMSG_MIN_TYPE] = {
+ [NLMSG_NOOP] = ynl_cb_noop,
+ [NLMSG_ERROR] = ynl_cb_error,
+ [NLMSG_DONE] = ynl_cb_done,
+ [NLMSG_OVERRUN] = ynl_cb_noop,
+};
+
+/* Attribute validation */
+
+int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
+{
+ struct ynl_policy_attr *policy;
+ unsigned int type, len;
+ unsigned char *data;
+
+ data = mnl_attr_get_payload(attr);
+ len = mnl_attr_get_payload_len(attr);
+ type = mnl_attr_get_type(attr);
+ if (type > yarg->rsp_policy->max_attr) {
+ yerr(yarg->ys, YNL_ERROR_INTERNAL,
+ "Internal error, validating unknown attribute");
+ return -1;
+ }
+
+ policy = &yarg->rsp_policy->table[type];
+
+ switch (policy->type) {
+ case YNL_PT_REJECT:
+ yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
+ "Rejected attribute (%s)", policy->name);
+ return -1;
+ case YNL_PT_IGNORE:
+ break;
+ case YNL_PT_U8:
+ if (len == sizeof(__u8))
+ break;
+ yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
+ "Invalid attribute (u8 %s)", policy->name);
+ return -1;
+ case YNL_PT_U16:
+ if (len == sizeof(__u16))
+ break;
+ yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
+ "Invalid attribute (u16 %s)", policy->name);
+ return -1;
+ case YNL_PT_U32:
+ if (len == sizeof(__u32))
+ break;
+ yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
+ "Invalid attribute (u32 %s)", policy->name);
+ return -1;
+ case YNL_PT_U64:
+ if (len == sizeof(__u64))
+ break;
+ yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
+ "Invalid attribute (u64 %s)", policy->name);
+ return -1;
+ case YNL_PT_FLAG:
+ /* Let flags grow into real attrs, why not.. */
+ break;
+ case YNL_PT_NEST:
+ if (!len || len >= sizeof(*attr))
+ break;
+ yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
+ "Invalid attribute (nest %s)", policy->name);
+ return -1;
+ case YNL_PT_BINARY:
+ if (!policy->len || len == policy->len)
+ break;
+ yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
+ "Invalid attribute (binary %s)", policy->name);
+ return -1;
+ case YNL_PT_NUL_STR:
+ if ((!policy->len || len <= policy->len) && !data[len - 1])
+ break;
+ yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
+ "Invalid attribute (string %s)", policy->name);
+ return -1;
+ default:
+ yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
+ "Invalid attribute (unknown %s)", policy->name);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Generic code */
+
+static void ynl_err_reset(struct ynl_sock *ys)
+{
+ ys->err.code = 0;
+ ys->err.attr_offs = 0;
+ ys->err.msg[0] = 0;
+}
+
+struct nlmsghdr *ynl_msg_start(struct ynl_sock *ys, __u32 id, __u16 flags)
+{
+ struct nlmsghdr *nlh;
+
+ ynl_err_reset(ys);
+
+ nlh = ys->nlh = mnl_nlmsg_put_header(ys->tx_buf);
+ nlh->nlmsg_type = id;
+ nlh->nlmsg_flags = flags;
+ nlh->nlmsg_seq = ++ys->seq;
+
+ return nlh;
+}
+
+struct nlmsghdr *
+ynl_gemsg_start(struct ynl_sock *ys, __u32 id, __u16 flags,
+ __u8 cmd, __u8 version)
+{
+ struct genlmsghdr gehdr;
+ struct nlmsghdr *nlh;
+ void *data;
+
+ nlh = ynl_msg_start(ys, id, flags);
+
+ memset(&gehdr, 0, sizeof(gehdr));
+ gehdr.cmd = cmd;
+ gehdr.version = version;
+
+ data = mnl_nlmsg_put_extra_header(nlh, sizeof(gehdr));
+ memcpy(data, &gehdr, sizeof(gehdr));
+
+ return nlh;
+}
+
+void ynl_msg_start_req(struct ynl_sock *ys, __u32 id)
+{
+ ynl_msg_start(ys, id, NLM_F_REQUEST | NLM_F_ACK);
+}
+
+void ynl_msg_start_dump(struct ynl_sock *ys, __u32 id)
+{
+ ynl_msg_start(ys, id, NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP);
+}
+
+struct nlmsghdr *
+ynl_gemsg_start_req(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version)
+{
+ return ynl_gemsg_start(ys, id, NLM_F_REQUEST | NLM_F_ACK, cmd, version);
+}
+
+struct nlmsghdr *
+ynl_gemsg_start_dump(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version)
+{
+ return ynl_gemsg_start(ys, id, NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP,
+ cmd, version);
+}
+
+int ynl_recv_ack(struct ynl_sock *ys, int ret)
+{
+ if (!ret) {
+ yerr(ys, YNL_ERROR_EXPECT_ACK,
+ "Expecting an ACK but nothing received");
+ return -1;
+ }
+
+ ret = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);
+ if (ret < 0) {
+ perr(ys, "Socket receive failed");
+ return ret;
+ }
+ return mnl_cb_run(ys->rx_buf, ret, ys->seq, ys->portid,
+ ynl_cb_null, ys);
+}
+
+int ynl_cb_null(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+
+ yerr(yarg->ys, YNL_ERROR_UNEXPECT_MSG,
+ "Received a message when none were expected");
+
+ return MNL_CB_ERROR;
+}
+
+/* Init/fini and genetlink boiler plate */
+static int
+ynl_get_family_info_mcast(struct ynl_sock *ys, const struct nlattr *mcasts)
+{
+ const struct nlattr *entry, *attr;
+ unsigned int i;
+
+ mnl_attr_for_each_nested(attr, mcasts)
+ ys->n_mcast_groups++;
+
+ if (!ys->n_mcast_groups)
+ return 0;
+
+ ys->mcast_groups = calloc(ys->n_mcast_groups,
+ sizeof(*ys->mcast_groups));
+ if (!ys->mcast_groups)
+ return MNL_CB_ERROR;
+
+ i = 0;
+ mnl_attr_for_each_nested(entry, mcasts) {
+ mnl_attr_for_each_nested(attr, entry) {
+ if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GRP_ID)
+ ys->mcast_groups[i].id = mnl_attr_get_u32(attr);
+ if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GRP_NAME) {
+ strncpy(ys->mcast_groups[i].name,
+ mnl_attr_get_str(attr),
+ GENL_NAMSIZ - 1);
+ ys->mcast_groups[i].name[GENL_NAMSIZ - 1] = 0;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ynl_get_family_info_cb(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct ynl_sock *ys = yarg->ys;
+ const struct nlattr *attr;
+ bool found_id = false;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GROUPS)
+ if (ynl_get_family_info_mcast(ys, attr))
+ return MNL_CB_ERROR;
+
+ if (mnl_attr_get_type(attr) != CTRL_ATTR_FAMILY_ID)
+ continue;
+
+ if (mnl_attr_get_payload_len(attr) != sizeof(__u16)) {
+ yerr(ys, YNL_ERROR_ATTR_INVALID, "Invalid family ID");
+ return MNL_CB_ERROR;
+ }
+
+ ys->family_id = mnl_attr_get_u16(attr);
+ found_id = true;
+ }
+
+ if (!found_id) {
+ yerr(ys, YNL_ERROR_ATTR_MISSING, "Family ID missing");
+ return MNL_CB_ERROR;
+ }
+ return MNL_CB_OK;
+}
+
+static int ynl_sock_read_family(struct ynl_sock *ys, const char *family_name)
+{
+ struct ynl_parse_arg yarg = { .ys = ys, };
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, GENL_ID_CTRL, CTRL_CMD_GETFAMILY, 1);
+ mnl_attr_put_strz(nlh, CTRL_ATTR_FAMILY_NAME, family_name);
+
+ err = mnl_socket_sendto(ys->sock, nlh, nlh->nlmsg_len);
+ if (err < 0) {
+ perr(ys, "failed to request socket family info");
+ return err;
+ }
+
+ err = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);
+ if (err <= 0) {
+ perr(ys, "failed to receive the socket family info");
+ return err;
+ }
+ err = mnl_cb_run2(ys->rx_buf, err, ys->seq, ys->portid,
+ ynl_get_family_info_cb, &yarg,
+ ynl_cb_array, ARRAY_SIZE(ynl_cb_array));
+ if (err < 0) {
+ free(ys->mcast_groups);
+ perr(ys, "failed to receive the socket family info - no such family?");
+ return err;
+ }
+
+ return ynl_recv_ack(ys, err);
+}
+
+struct ynl_sock *
+ynl_sock_create(const struct ynl_family *yf, struct ynl_error *yse)
+{
+ struct ynl_sock *ys;
+ int one = 1;
+
+ ys = malloc(sizeof(*ys) + 2 * MNL_SOCKET_BUFFER_SIZE);
+ if (!ys)
+ return NULL;
+ memset(ys, 0, sizeof(*ys));
+
+ ys->family = yf;
+ ys->tx_buf = &ys->raw_buf[0];
+ ys->rx_buf = &ys->raw_buf[MNL_SOCKET_BUFFER_SIZE];
+ ys->ntf_last_next = &ys->ntf_first;
+
+ ys->sock = mnl_socket_open(NETLINK_GENERIC);
+ if (!ys->sock) {
+ __perr(yse, "failed to create a netlink socket");
+ goto err_free_sock;
+ }
+
+ if (mnl_socket_setsockopt(ys->sock, NETLINK_CAP_ACK,
+ &one, sizeof(one))) {
+ __perr(yse, "failed to enable netlink ACK");
+ goto err_close_sock;
+ }
+ if (mnl_socket_setsockopt(ys->sock, NETLINK_EXT_ACK,
+ &one, sizeof(one))) {
+ __perr(yse, "failed to enable netlink ext ACK");
+ goto err_close_sock;
+ }
+
+ ys->seq = random();
+ ys->portid = mnl_socket_get_portid(ys->sock);
+
+ if (ynl_sock_read_family(ys, yf->name)) {
+ if (yse)
+ memcpy(yse, &ys->err, sizeof(*yse));
+ goto err_close_sock;
+ }
+
+ return ys;
+
+err_close_sock:
+ mnl_socket_close(ys->sock);
+err_free_sock:
+ free(ys);
+ return NULL;
+}
+
+void ynl_sock_destroy(struct ynl_sock *ys)
+{
+ struct ynl_ntf_base_type *ntf;
+
+ mnl_socket_close(ys->sock);
+ while ((ntf = ynl_ntf_dequeue(ys)))
+ ynl_ntf_free(ntf);
+ free(ys->mcast_groups);
+ free(ys);
+}
+
+/* YNL multicast handling */
+
+void ynl_ntf_free(struct ynl_ntf_base_type *ntf)
+{
+ ntf->free(ntf);
+}
+
+int ynl_subscribe(struct ynl_sock *ys, const char *grp_name)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ys->n_mcast_groups; i++)
+ if (!strcmp(ys->mcast_groups[i].name, grp_name))
+ break;
+ if (i == ys->n_mcast_groups) {
+ yerr(ys, ENOENT, "Multicast group '%s' not found", grp_name);
+ return -1;
+ }
+
+ err = mnl_socket_setsockopt(ys->sock, NETLINK_ADD_MEMBERSHIP,
+ &ys->mcast_groups[i].id,
+ sizeof(ys->mcast_groups[i].id));
+ if (err < 0) {
+ perr(ys, "Subscribing to multicast group failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+int ynl_socket_get_fd(struct ynl_sock *ys)
+{
+ return mnl_socket_get_fd(ys->sock);
+}
+
+struct ynl_ntf_base_type *ynl_ntf_dequeue(struct ynl_sock *ys)
+{
+ struct ynl_ntf_base_type *ntf;
+
+ if (!ynl_has_ntf(ys))
+ return NULL;
+
+ ntf = ys->ntf_first;
+ ys->ntf_first = ntf->next;
+ if (ys->ntf_last_next == &ntf->next)
+ ys->ntf_last_next = &ys->ntf_first;
+
+ return ntf;
+}
+
+static int ynl_ntf_parse(struct ynl_sock *ys, const struct nlmsghdr *nlh)
+{
+ struct ynl_parse_arg yarg = { .ys = ys, };
+ const struct ynl_ntf_info *info;
+ struct ynl_ntf_base_type *rsp;
+ struct genlmsghdr *gehdr;
+ int ret;
+
+ gehdr = mnl_nlmsg_get_payload(nlh);
+ if (gehdr->cmd >= ys->family->ntf_info_size)
+ return MNL_CB_ERROR;
+ info = &ys->family->ntf_info[gehdr->cmd];
+ if (!info->cb)
+ return MNL_CB_ERROR;
+
+ rsp = calloc(1, info->alloc_sz);
+ rsp->free = info->free;
+ yarg.data = rsp->data;
+ yarg.rsp_policy = info->policy;
+
+ ret = info->cb(nlh, &yarg);
+ if (ret <= MNL_CB_STOP)
+ goto err_free;
+
+ rsp->family = nlh->nlmsg_type;
+ rsp->cmd = gehdr->cmd;
+
+ *ys->ntf_last_next = rsp;
+ ys->ntf_last_next = &rsp->next;
+
+ return MNL_CB_OK;
+
+err_free:
+ info->free(rsp);
+ return MNL_CB_ERROR;
+}
+
+static int ynl_ntf_trampoline(const struct nlmsghdr *nlh, void *data)
+{
+ return ynl_ntf_parse((struct ynl_sock *)data, nlh);
+}
+
+int ynl_ntf_check(struct ynl_sock *ys)
+{
+ ssize_t len;
+ int err;
+
+ do {
+ /* libmnl doesn't let us pass flags to the recv to make
+ * it non-blocking so we need to poll() or peek() :|
+ */
+ struct pollfd pfd = { };
+
+ pfd.fd = mnl_socket_get_fd(ys->sock);
+ pfd.events = POLLIN;
+ err = poll(&pfd, 1, 1);
+ if (err < 1)
+ return err;
+
+ len = mnl_socket_recvfrom(ys->sock, ys->rx_buf,
+ MNL_SOCKET_BUFFER_SIZE);
+ if (len < 0)
+ return len;
+
+ err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
+ ynl_ntf_trampoline, ys,
+ ynl_cb_array, NLMSG_MIN_TYPE);
+ if (err < 0)
+ return err;
+ } while (err > 0);
+
+ return 0;
+}
+
+/* YNL specific helpers used by the auto-generated code */
+
+struct ynl_dump_list_type *YNL_LIST_END = (void *)(0xb4d123);
+
+void ynl_error_unknown_notification(struct ynl_sock *ys, __u8 cmd)
+{
+ yerr(ys, YNL_ERROR_UNKNOWN_NTF,
+ "Unknown notification message type '%d'", cmd);
+}
+
+int ynl_error_parse(struct ynl_parse_arg *yarg, const char *msg)
+{
+ yerr(yarg->ys, YNL_ERROR_INV_RESP, "Error parsing response: %s", msg);
+ return MNL_CB_ERROR;
+}
+
+static int
+ynl_check_alien(struct ynl_sock *ys, const struct nlmsghdr *nlh, __u32 rsp_cmd)
+{
+ struct genlmsghdr *gehdr;
+
+ if (mnl_nlmsg_get_payload_len(nlh) < sizeof(*gehdr)) {
+ yerr(ys, YNL_ERROR_INV_RESP,
+ "Kernel responded with truncated message");
+ return -1;
+ }
+
+ gehdr = mnl_nlmsg_get_payload(nlh);
+ if (gehdr->cmd != rsp_cmd)
+ return ynl_ntf_parse(ys, nlh);
+
+ return 0;
+}
+
+static int ynl_req_trampoline(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_req_state *yrs = data;
+ int ret;
+
+ ret = ynl_check_alien(yrs->yarg.ys, nlh, yrs->rsp_cmd);
+ if (ret)
+ return ret < 0 ? MNL_CB_ERROR : MNL_CB_OK;
+
+ return yrs->cb(nlh, &yrs->yarg);
+}
+
+int ynl_exec(struct ynl_sock *ys, struct nlmsghdr *req_nlh,
+ struct ynl_req_state *yrs)
+{
+ ssize_t len;
+ int err;
+
+ err = mnl_socket_sendto(ys->sock, req_nlh, req_nlh->nlmsg_len);
+ if (err < 0)
+ return err;
+
+ do {
+ len = mnl_socket_recvfrom(ys->sock, ys->rx_buf,
+ MNL_SOCKET_BUFFER_SIZE);
+ if (len < 0)
+ return len;
+
+ err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
+ ynl_req_trampoline, yrs,
+ ynl_cb_array, NLMSG_MIN_TYPE);
+ if (err < 0)
+ return err;
+ } while (err > 0);
+
+ return 0;
+}
+
+static int ynl_dump_trampoline(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_dump_state *ds = data;
+ struct ynl_dump_list_type *obj;
+ struct ynl_parse_arg yarg = {};
+ int ret;
+
+ ret = ynl_check_alien(ds->ys, nlh, ds->rsp_cmd);
+ if (ret)
+ return ret < 0 ? MNL_CB_ERROR : MNL_CB_OK;
+
+ obj = calloc(1, ds->alloc_sz);
+ if (!obj)
+ return MNL_CB_ERROR;
+
+ if (!ds->first)
+ ds->first = obj;
+ if (ds->last)
+ ds->last->next = obj;
+ ds->last = obj;
+
+ yarg.ys = ds->ys;
+ yarg.rsp_policy = ds->rsp_policy;
+ yarg.data = &obj->data;
+
+ return ds->cb(nlh, &yarg);
+}
+
+static void *ynl_dump_end(struct ynl_dump_state *ds)
+{
+ if (!ds->first)
+ return YNL_LIST_END;
+
+ ds->last->next = YNL_LIST_END;
+ return ds->first;
+}
+
+int ynl_exec_dump(struct ynl_sock *ys, struct nlmsghdr *req_nlh,
+ struct ynl_dump_state *yds)
+{
+ ssize_t len;
+ int err;
+
+ err = mnl_socket_sendto(ys->sock, req_nlh, req_nlh->nlmsg_len);
+ if (err < 0)
+ return err;
+
+ do {
+ len = mnl_socket_recvfrom(ys->sock, ys->rx_buf,
+ MNL_SOCKET_BUFFER_SIZE);
+ if (len < 0)
+ goto err_close_list;
+
+ err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
+ ynl_dump_trampoline, yds,
+ ynl_cb_array, NLMSG_MIN_TYPE);
+ if (err < 0)
+ goto err_close_list;
+ } while (err > 0);
+
+ yds->first = ynl_dump_end(yds);
+ return 0;
+
+err_close_list:
+ yds->first = ynl_dump_end(yds);
+ return -1;
+}
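The practical effect of the extack handling above is that a failed request leaves a ready-to-print description in ys->err: err.code holds the errno-style value and err.msg holds a rendered string such as "Kernel error: '<extack message>' (bad attribute: .<attr>.<nested attr>)" or "... missing attribute: ...", built by ynl_ext_ack_check() from the request policy. Callers never decode the NLMSGERR_ATTR_* TLVs themselves. A minimal, purely illustrative reporting helper:

/* Sketch only, not part of the patch: report the error state a failed
 * YNL request left behind (e.g. a generated *_get() that returned NULL).
 */
#include <stdio.h>

#include "ynl.h"

static void report_ynl_error(const struct ynl_sock *ys)
{
	fprintf(stderr, "YNL request failed (%d): %s\n",
		ys->err.code, ys->err.msg);
	/* ys->err.attr_offs additionally records the offset of the
	 * offending attribute within the request, when the kernel
	 * supplied one.
	 */
}

The devlink, ethtool and netdev samples further down print ys->err in the same way inline.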
diff --git a/tools/net/ynl/lib/ynl.h b/tools/net/ynl/lib/ynl.h
new file mode 100644
index 000000000000..9eafa3552c16
--- /dev/null
+++ b/tools/net/ynl/lib/ynl.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+#ifndef __YNL_C_H
+#define __YNL_C_H 1
+
+#include <stddef.h>
+#include <libmnl/libmnl.h>
+#include <linux/genetlink.h>
+#include <linux/types.h>
+
+struct mnl_socket;
+struct nlmsghdr;
+
+/*
+ * User facing code
+ */
+
+struct ynl_ntf_base_type;
+struct ynl_ntf_info;
+struct ynl_sock;
+
+enum ynl_error_code {
+ YNL_ERROR_NONE = 0,
+ __YNL_ERRNO_END = 4096,
+ YNL_ERROR_INTERNAL,
+ YNL_ERROR_EXPECT_ACK,
+ YNL_ERROR_EXPECT_MSG,
+ YNL_ERROR_UNEXPECT_MSG,
+ YNL_ERROR_ATTR_MISSING,
+ YNL_ERROR_ATTR_INVALID,
+ YNL_ERROR_UNKNOWN_NTF,
+ YNL_ERROR_INV_RESP,
+};
+
+/**
+ * struct ynl_error - error encountered by YNL
+ * @code: errno (low values) or YNL error code (enum ynl_error_code)
+ * @attr_offs: offset of bad attribute (for very advanced users)
+ * @msg: error message
+ *
+ * Error information for when YNL operations fail.
+ * Users should interact with the err member of struct ynl_sock directly.
+ * The main exception to that rule is ynl_sock_create().
+ */
+struct ynl_error {
+ enum ynl_error_code code;
+ unsigned int attr_offs;
+ char msg[512];
+};
+
+/**
+ * struct ynl_family - YNL family info
+ * Family description generated by codegen. Pass to ynl_sock_create().
+ */
+struct ynl_family {
+/* private: */
+ const char *name;
+ const struct ynl_ntf_info *ntf_info;
+ unsigned int ntf_info_size;
+};
+
+/**
+ * struct ynl_sock - YNL wrapped netlink socket
+ * @err: YNL error descriptor, cleared on every request.
+ */
+struct ynl_sock {
+ struct ynl_error err;
+
+/* private: */
+ const struct ynl_family *family;
+ struct mnl_socket *sock;
+ __u32 seq;
+ __u32 portid;
+ __u16 family_id;
+
+ unsigned int n_mcast_groups;
+ struct {
+ unsigned int id;
+ char name[GENL_NAMSIZ];
+ } *mcast_groups;
+
+ struct ynl_ntf_base_type *ntf_first;
+ struct ynl_ntf_base_type **ntf_last_next;
+
+ struct nlmsghdr *nlh;
+ struct ynl_policy_nest *req_policy;
+ unsigned char *tx_buf;
+ unsigned char *rx_buf;
+ unsigned char raw_buf[];
+};
+
+struct ynl_sock *
+ynl_sock_create(const struct ynl_family *yf, struct ynl_error *e);
+void ynl_sock_destroy(struct ynl_sock *ys);
+
+#define ynl_dump_foreach(dump, iter) \
+ for (typeof(dump->obj) *iter = &dump->obj; \
+ !ynl_dump_obj_is_last(iter); \
+ iter = ynl_dump_obj_next(iter))
+
+int ynl_subscribe(struct ynl_sock *ys, const char *grp_name);
+int ynl_socket_get_fd(struct ynl_sock *ys);
+int ynl_ntf_check(struct ynl_sock *ys);
+
+/**
+ * ynl_has_ntf() - check if socket has *parsed* notifications
+ * @ys: active YNL socket
+ *
+ * Note that this does not take into account notifications still sitting
+ * in the netlink socket, only the notifications which have already been
+ * read and parsed (e.g. during a ynl_ntf_check() call).
+ */
+static inline bool ynl_has_ntf(struct ynl_sock *ys)
+{
+ return ys->ntf_last_next != &ys->ntf_first;
+}
+struct ynl_ntf_base_type *ynl_ntf_dequeue(struct ynl_sock *ys);
+
+void ynl_ntf_free(struct ynl_ntf_base_type *ntf);
+
+/*
+ * YNL internals / low level stuff
+ */
+
+/* Generic mnl helper code */
+
+enum ynl_policy_type {
+ YNL_PT_REJECT = 1,
+ YNL_PT_IGNORE,
+ YNL_PT_NEST,
+ YNL_PT_FLAG,
+ YNL_PT_BINARY,
+ YNL_PT_U8,
+ YNL_PT_U16,
+ YNL_PT_U32,
+ YNL_PT_U64,
+ YNL_PT_NUL_STR,
+};
+
+struct ynl_policy_attr {
+ enum ynl_policy_type type;
+ unsigned int len;
+ const char *name;
+ struct ynl_policy_nest *nest;
+};
+
+struct ynl_policy_nest {
+ unsigned int max_attr;
+ struct ynl_policy_attr *table;
+};
+
+struct ynl_parse_arg {
+ struct ynl_sock *ys;
+ struct ynl_policy_nest *rsp_policy;
+ void *data;
+};
+
+struct ynl_dump_list_type {
+ struct ynl_dump_list_type *next;
+ unsigned char data[] __attribute__ ((aligned (8)));
+};
+extern struct ynl_dump_list_type *YNL_LIST_END;
+
+static inline bool ynl_dump_obj_is_last(void *obj)
+{
+ unsigned long uptr = (unsigned long)obj;
+
+ uptr -= offsetof(struct ynl_dump_list_type, data);
+ return uptr == (unsigned long)YNL_LIST_END;
+}
+
+static inline void *ynl_dump_obj_next(void *obj)
+{
+ unsigned long uptr = (unsigned long)obj;
+ struct ynl_dump_list_type *list;
+
+ uptr -= offsetof(struct ynl_dump_list_type, data);
+ list = (void *)uptr;
+ uptr = (unsigned long)list->next;
+ uptr += offsetof(struct ynl_dump_list_type, data);
+
+ return (void *)uptr;
+}
+
+struct ynl_ntf_base_type {
+ __u16 family;
+ __u8 cmd;
+ struct ynl_ntf_base_type *next;
+ void (*free)(struct ynl_ntf_base_type *ntf);
+ unsigned char data[] __attribute__ ((aligned (8)));
+};
+
+extern mnl_cb_t ynl_cb_array[NLMSG_MIN_TYPE];
+
+struct nlmsghdr *
+ynl_gemsg_start_req(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version);
+struct nlmsghdr *
+ynl_gemsg_start_dump(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version);
+
+int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr);
+
+int ynl_recv_ack(struct ynl_sock *ys, int ret);
+int ynl_cb_null(const struct nlmsghdr *nlh, void *data);
+
+/* YNL specific helpers used by the auto-generated code */
+
+struct ynl_req_state {
+ struct ynl_parse_arg yarg;
+ mnl_cb_t cb;
+ __u32 rsp_cmd;
+};
+
+struct ynl_dump_state {
+ struct ynl_sock *ys;
+ struct ynl_policy_nest *rsp_policy;
+ void *first;
+ struct ynl_dump_list_type *last;
+ size_t alloc_sz;
+ mnl_cb_t cb;
+ __u32 rsp_cmd;
+};
+
+struct ynl_ntf_info {
+ struct ynl_policy_nest *policy;
+ mnl_cb_t cb;
+ size_t alloc_sz;
+ void (*free)(struct ynl_ntf_base_type *ntf);
+};
+
+int ynl_exec(struct ynl_sock *ys, struct nlmsghdr *req_nlh,
+ struct ynl_req_state *yrs);
+int ynl_exec_dump(struct ynl_sock *ys, struct nlmsghdr *req_nlh,
+ struct ynl_dump_state *yds);
+
+void ynl_error_unknown_notification(struct ynl_sock *ys, __u8 cmd);
+int ynl_error_parse(struct ynl_parse_arg *yarg, const char *msg);
+
+#endif
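The notification half of this API (ynl_subscribe(), ynl_socket_get_fd(), ynl_ntf_check(), ynl_has_ntf(), ynl_ntf_dequeue()) is meant to slot into an ordinary poll()-driven loop: wait on the socket fd, let ynl_ntf_check() read and parse whatever arrived, then drain the parsed queue. A minimal sketch, using the netdev family and its "mgmt" group from elsewhere in this patch as the assumed example; any generated family works the same way:

/* Sketch only, not part of the patch: drain notifications in a poll() loop. */
#include <poll.h>
#include <stdio.h>

#include "netdev-user.h"
#include "ynl.h"

static int ntf_loop(void)
{
	struct ynl_ntf_base_type *ntf;
	struct ynl_sock *ys;
	struct pollfd pfd;

	ys = ynl_sock_create(&ynl_netdev_family, NULL);
	if (!ys)
		return 1;
	if (ynl_subscribe(ys, "mgmt"))
		goto err_close;

	pfd.fd = ynl_socket_get_fd(ys);
	pfd.events = POLLIN;

	while (poll(&pfd, 1, -1) > 0) {
		if (ynl_ntf_check(ys) < 0)	/* read + parse pending messages */
			break;
		while ((ntf = ynl_ntf_dequeue(ys))) {
			printf("notification: cmd %d\n", ntf->cmd);
			ynl_ntf_free(ntf);
		}
	}

	ynl_sock_destroy(ys);
	return 0;

err_close:
	fprintf(stderr, "YNL: %s\n", ys->err.msg);
	ynl_sock_destroy(ys);
	return 1;
}

The samples/netdev.c program later in the patch exercises the same calls interactively rather than in an fd loop; there the dequeued ntf is cast to the generated per-family type, which works because the generated notification structs mirror the layout of struct ynl_ntf_base_type.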
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
index 3144f33196be..1b3a36fbb1c3 100644
--- a/tools/net/ynl/lib/ynl.py
+++ b/tools/net/ynl/lib/ynl.py
@@ -1,11 +1,15 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+from collections import namedtuple
import functools
import os
import random
import socket
import struct
+from struct import Struct
import yaml
+import ipaddress
+import uuid
from .nlspec import SpecFamily
@@ -76,10 +80,17 @@ class NlError(Exception):
class NlAttr:
- type_formats = { 'u8' : ('B', 1), 's8' : ('b', 1),
- 'u16': ('H', 2), 's16': ('h', 2),
- 'u32': ('I', 4), 's32': ('i', 4),
- 'u64': ('Q', 8), 's64': ('q', 8) }
+ ScalarFormat = namedtuple('ScalarFormat', ['native', 'big', 'little'])
+ type_formats = {
+ 'u8' : ScalarFormat(Struct('B'), Struct("B"), Struct("B")),
+ 's8' : ScalarFormat(Struct('b'), Struct("b"), Struct("b")),
+ 'u16': ScalarFormat(Struct('H'), Struct(">H"), Struct("<H")),
+ 's16': ScalarFormat(Struct('h'), Struct(">h"), Struct("<h")),
+ 'u32': ScalarFormat(Struct('I'), Struct(">I"), Struct("<I")),
+ 's32': ScalarFormat(Struct('i'), Struct(">i"), Struct("<i")),
+ 'u64': ScalarFormat(Struct('Q'), Struct(">Q"), Struct("<Q")),
+ 's64': ScalarFormat(Struct('q'), Struct(">q"), Struct("<q"))
+ }
def __init__(self, raw, offset):
self._len, self._type = struct.unpack("HH", raw[offset:offset + 4])
@@ -88,25 +99,31 @@ class NlAttr:
self.full_len = (self.payload_len + 3) & ~3
self.raw = raw[offset + 4:offset + self.payload_len]
- def format_byte_order(byte_order):
+ @classmethod
+ def get_format(cls, attr_type, byte_order=None):
+ format = cls.type_formats[attr_type]
if byte_order:
- return ">" if byte_order == "big-endian" else "<"
- return ""
-
- def as_u8(self):
- return struct.unpack("B", self.raw)[0]
-
- def as_u16(self, byte_order=None):
- endian = NlAttr.format_byte_order(byte_order)
- return struct.unpack(f"{endian}H", self.raw)[0]
-
- def as_u32(self, byte_order=None):
- endian = NlAttr.format_byte_order(byte_order)
- return struct.unpack(f"{endian}I", self.raw)[0]
+ return format.big if byte_order == "big-endian" \
+ else format.little
+ return format.native
+
+ @classmethod
+ def formatted_string(cls, raw, display_hint):
+ if display_hint == 'mac':
+ formatted = ':'.join('%02x' % b for b in raw)
+ elif display_hint == 'hex':
+ formatted = bytes.hex(raw, ' ')
+ elif display_hint in [ 'ipv4', 'ipv6' ]:
+ formatted = format(ipaddress.ip_address(raw))
+ elif display_hint == 'uuid':
+ formatted = str(uuid.UUID(bytes=raw))
+ else:
+ formatted = raw
+ return formatted
- def as_u64(self, byte_order=None):
- endian = NlAttr.format_byte_order(byte_order)
- return struct.unpack(f"{endian}Q", self.raw)[0]
+ def as_scalar(self, attr_type, byte_order=None):
+ format = self.get_format(attr_type, byte_order)
+ return format.unpack(self.raw)[0]
def as_strz(self):
return self.raw.decode('ascii')[:-1]
@@ -115,18 +132,24 @@ class NlAttr:
return self.raw
def as_c_array(self, type):
- format, _ = self.type_formats[type]
- return list({ x[0] for x in struct.iter_unpack(format, self.raw) })
+ format = self.get_format(type)
+ return [ x[0] for x in format.iter_unpack(self.raw) ]
def as_struct(self, members):
value = dict()
offset = 0
for m in members:
# TODO: handle non-scalar members
- format, size = self.type_formats[m.type]
- decoded = struct.unpack_from(format, self.raw, offset)
- offset += size
- value[m.name] = decoded[0]
+ if m.type == 'binary':
+ decoded = self.raw[offset:offset+m['len']]
+ offset += m['len']
+ elif m.type in NlAttr.type_formats:
+ format = self.get_format(m.type, m.byte_order)
+ [ decoded ] = format.unpack_from(self.raw, offset)
+ offset += format.size
+ if m.display_hint:
+ decoded = self.formatted_string(decoded, m.display_hint)
+ value[m.name] = decoded
return value
def __repr__(self):
@@ -184,11 +207,11 @@ class NlMsg:
if extack.type == Netlink.NLMSGERR_ATTR_MSG:
self.extack['msg'] = extack.as_strz()
elif extack.type == Netlink.NLMSGERR_ATTR_MISS_TYPE:
- self.extack['miss-type'] = extack.as_u32()
+ self.extack['miss-type'] = extack.as_scalar('u32')
elif extack.type == Netlink.NLMSGERR_ATTR_MISS_NEST:
- self.extack['miss-nest'] = extack.as_u32()
+ self.extack['miss-nest'] = extack.as_scalar('u32')
elif extack.type == Netlink.NLMSGERR_ATTR_OFFS:
- self.extack['bad-attr-offs'] = extack.as_u32()
+ self.extack['bad-attr-offs'] = extack.as_scalar('u32')
else:
if 'unknown' not in self.extack:
self.extack['unknown'] = []
@@ -272,11 +295,11 @@ def _genl_load_families():
fam = dict()
for attr in gm.raw_attrs:
if attr.type == Netlink.CTRL_ATTR_FAMILY_ID:
- fam['id'] = attr.as_u16()
+ fam['id'] = attr.as_scalar('u16')
elif attr.type == Netlink.CTRL_ATTR_FAMILY_NAME:
fam['name'] = attr.as_strz()
elif attr.type == Netlink.CTRL_ATTR_MAXATTR:
- fam['maxattr'] = attr.as_u32()
+ fam['maxattr'] = attr.as_scalar('u32')
elif attr.type == Netlink.CTRL_ATTR_MCAST_GROUPS:
fam['mcast'] = dict()
for entry in NlAttrs(attr.raw):
@@ -286,7 +309,7 @@ def _genl_load_families():
if entry_attr.type == Netlink.CTRL_ATTR_MCAST_GRP_NAME:
mcast_name = entry_attr.as_strz()
elif entry_attr.type == Netlink.CTRL_ATTR_MCAST_GRP_ID:
- mcast_id = entry_attr.as_u32()
+ mcast_id = entry_attr.as_scalar('u32')
if mcast_name and mcast_id is not None:
fam['mcast'][mcast_name] = mcast_id
if 'name' in fam and 'id' in fam:
@@ -304,9 +327,9 @@ class GenlMsg:
self.fixed_header_attrs = dict()
for m in fixed_header_members:
- format, size = NlAttr.type_formats[m.type]
- decoded = struct.unpack_from(format, nl_msg.raw, offset)
- offset += size
+ format = NlAttr.get_format(m.type, m.byte_order)
+ decoded = format.unpack_from(nl_msg.raw, offset)
+ offset += format.size
self.fixed_header_attrs[m.name] = decoded[0]
self.raw = nl_msg.raw[offset:]
@@ -381,21 +404,13 @@ class YnlFamily(SpecFamily):
attr_payload += self._add_attr(attr['nested-attributes'], subname, subvalue)
elif attr["type"] == 'flag':
attr_payload = b''
- elif attr["type"] == 'u8':
- attr_payload = struct.pack("B", int(value))
- elif attr["type"] == 'u16':
- endian = NlAttr.format_byte_order(attr.byte_order)
- attr_payload = struct.pack(f"{endian}H", int(value))
- elif attr["type"] == 'u32':
- endian = NlAttr.format_byte_order(attr.byte_order)
- attr_payload = struct.pack(f"{endian}I", int(value))
- elif attr["type"] == 'u64':
- endian = NlAttr.format_byte_order(attr.byte_order)
- attr_payload = struct.pack(f"{endian}Q", int(value))
elif attr["type"] == 'string':
attr_payload = str(value).encode('ascii') + b'\x00'
elif attr["type"] == 'binary':
- attr_payload = value
+ attr_payload = bytes.fromhex(value)
+ elif attr['type'] in NlAttr.type_formats:
+ format = NlAttr.get_format(attr['type'], attr.byte_order)
+ attr_payload = format.pack(int(value))
else:
raise Exception(f'Unknown type at {space} {name} {value} {attr["type"]}')
@@ -419,11 +434,17 @@ class YnlFamily(SpecFamily):
def _decode_binary(self, attr, attr_spec):
if attr_spec.struct_name:
- decoded = attr.as_struct(self.consts[attr_spec.struct_name])
+ members = self.consts[attr_spec.struct_name]
+ decoded = attr.as_struct(members)
+ for m in members:
+ if m.enum:
+ self._decode_enum(decoded, m)
elif attr_spec.sub_type:
decoded = attr.as_c_array(attr_spec.sub_type)
else:
decoded = attr.as_bin()
+ if attr_spec.display_hint:
+ decoded = NlAttr.formatted_string(decoded, attr_spec.display_hint)
return decoded
def _decode(self, attrs, space):
@@ -434,22 +455,16 @@ class YnlFamily(SpecFamily):
if attr_spec["type"] == 'nest':
subdict = self._decode(NlAttrs(attr.raw), attr_spec['nested-attributes'])
decoded = subdict
- elif attr_spec['type'] == 'u8':
- decoded = attr.as_u8()
- elif attr_spec['type'] == 'u16':
- decoded = attr.as_u16(attr_spec.byte_order)
- elif attr_spec['type'] == 'u32':
- decoded = attr.as_u32(attr_spec.byte_order)
- elif attr_spec['type'] == 'u64':
- decoded = attr.as_u64(attr_spec.byte_order)
elif attr_spec["type"] == 'string':
decoded = attr.as_strz()
elif attr_spec["type"] == 'binary':
decoded = self._decode_binary(attr, attr_spec)
elif attr_spec["type"] == 'flag':
decoded = True
+ elif attr_spec["type"] in NlAttr.type_formats:
+ decoded = attr.as_scalar(attr_spec['type'], attr_spec.byte_order)
else:
- raise Exception(f'Unknown {attr.type} {attr_spec["name"]} {attr_spec["type"]}')
+ raise Exception(f'Unknown {attr_spec["type"]} with name {attr_spec["name"]}')
if not attr_spec.is_multi:
rsp[attr_spec['name']] = decoded
@@ -554,9 +569,9 @@ class YnlFamily(SpecFamily):
if op.fixed_header:
fixed_header_members = self.consts[op.fixed_header].members
for m in fixed_header_members:
- value = vals.pop(m.name)
- format, _ = NlAttr.type_formats[m.type]
- msg += struct.pack(format, value)
+ value = vals.pop(m.name) if m.name in vals else 0
+ format = NlAttr.get_format(m.type, m.byte_order)
+ msg += format.pack(value)
for name, value in vals.items():
msg += self._add_attr(op.attr_set.name, name, value)
msg = _genl_msg_finalize(msg)
diff --git a/tools/net/ynl/samples/.gitignore b/tools/net/ynl/samples/.gitignore
new file mode 100644
index 000000000000..2aae60c4829f
--- /dev/null
+++ b/tools/net/ynl/samples/.gitignore
@@ -0,0 +1,3 @@
+ethtool
+devlink
+netdev
diff --git a/tools/net/ynl/samples/Makefile b/tools/net/ynl/samples/Makefile
new file mode 100644
index 000000000000..f2db8bb78309
--- /dev/null
+++ b/tools/net/ynl/samples/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+
+include ../Makefile.deps
+
+CC=gcc
+CFLAGS=-std=gnu11 -O2 -W -Wall -Wextra -Wno-unused-parameter -Wshadow \
+ -I../lib/ -I../generated/ -idirafter $(UAPI_PATH)
+ifeq ("$(DEBUG)","1")
+ CFLAGS += -g -fsanitize=address -fsanitize=leak -static-libasan
+endif
+
+LDLIBS=-lmnl ../lib/ynl.a ../generated/protos.a
+
+SRCS=$(wildcard *.c)
+BINS=$(patsubst %.c,%,${SRCS})
+
+include $(wildcard *.d)
+
+all: $(BINS)
+
+$(BINS): ../lib/ynl.a ../generated/protos.a
+
+clean:
+ rm -f *.o *.d *~
+
+hardclean: clean
+ rm -f $(BINS)
+
+.PHONY: all clean
+.DEFAULT_GOAL=all
diff --git a/tools/net/ynl/samples/devlink.c b/tools/net/ynl/samples/devlink.c
new file mode 100644
index 000000000000..d2611d7ebab4
--- /dev/null
+++ b/tools/net/ynl/samples/devlink.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <string.h>
+
+#include <ynl.h>
+
+#include "devlink-user.h"
+
+int main(int argc, char **argv)
+{
+ struct devlink_get_list *devs;
+ struct ynl_sock *ys;
+
+ ys = ynl_sock_create(&ynl_devlink_family, NULL);
+ if (!ys)
+ return 1;
+
+ devs = devlink_get_dump(ys);
+ if (!devs)
+ goto err_close;
+
+ ynl_dump_foreach(devs, d) {
+ struct devlink_info_get_req *info_req;
+ struct devlink_info_get_rsp *info_rsp;
+
+ printf("%s/%s:\n", d->bus_name, d->dev_name);
+
+ info_req = devlink_info_get_req_alloc();
+ devlink_info_get_req_set_bus_name(info_req, d->bus_name);
+ devlink_info_get_req_set_dev_name(info_req, d->dev_name);
+
+ info_rsp = devlink_info_get(ys, info_req);
+ devlink_info_get_req_free(info_req);
+ if (!info_rsp)
+ goto err_free_devs;
+
+ if (info_rsp->_present.info_driver_name_len)
+ printf(" driver: %s\n", info_rsp->info_driver_name);
+ if (info_rsp->n_info_version_running)
+ printf(" running fw:\n");
+ for (unsigned i = 0; i < info_rsp->n_info_version_running; i++)
+ printf(" %s: %s\n",
+ info_rsp->info_version_running[i].info_version_name,
+ info_rsp->info_version_running[i].info_version_value);
+ printf(" ...\n");
+ devlink_info_get_rsp_free(info_rsp);
+ }
+ devlink_get_list_free(devs);
+
+ ynl_sock_destroy(ys);
+
+ return 0;
+
+err_free_devs:
+ devlink_get_list_free(devs);
+err_close:
+ fprintf(stderr, "YNL: %s\n", ys->err.msg);
+ ynl_sock_destroy(ys);
+ return 2;
+}
diff --git a/tools/net/ynl/samples/ethtool.c b/tools/net/ynl/samples/ethtool.c
new file mode 100644
index 000000000000..a7ebbd1b98db
--- /dev/null
+++ b/tools/net/ynl/samples/ethtool.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <string.h>
+
+#include <ynl.h>
+
+#include <net/if.h>
+
+#include "ethtool-user.h"
+
+int main(int argc, char **argv)
+{
+ struct ethtool_channels_get_req_dump creq = {};
+ struct ethtool_rings_get_req_dump rreq = {};
+ struct ethtool_channels_get_list *channels;
+ struct ethtool_rings_get_list *rings;
+ struct ynl_sock *ys;
+
+ ys = ynl_sock_create(&ynl_ethtool_family, NULL);
+ if (!ys)
+ return 1;
+
+ creq._present.header = 1; /* ethtool needs an empty nest, sigh */
+ channels = ethtool_channels_get_dump(ys, &creq);
+ if (!channels)
+ goto err_close;
+
+ printf("Channels:\n");
+ ynl_dump_foreach(channels, dev) {
+ printf(" %8s: ", dev->header.dev_name);
+ if (dev->_present.rx_count)
+ printf("rx %d ", dev->rx_count);
+ if (dev->_present.tx_count)
+ printf("tx %d ", dev->tx_count);
+ if (dev->_present.combined_count)
+ printf("combined %d ", dev->combined_count);
+ printf("\n");
+ }
+ ethtool_channels_get_list_free(channels);
+
+ rreq._present.header = 1; /* ethtool needs an empty nest.. */
+ rings = ethtool_rings_get_dump(ys, &rreq);
+ if (!rings)
+ goto err_close;
+
+ printf("Rings:\n");
+ ynl_dump_foreach(rings, dev) {
+ printf(" %8s: ", dev->header.dev_name);
+ if (dev->_present.rx)
+ printf("rx %d ", dev->rx);
+ if (dev->_present.tx)
+ printf("tx %d ", dev->tx);
+ printf("\n");
+ }
+ ethtool_rings_get_list_free(rings);
+
+ ynl_sock_destroy(ys);
+
+ return 0;
+
+err_close:
+ fprintf(stderr, "YNL (%d): %s\n", ys->err.code, ys->err.msg);
+ ynl_sock_destroy(ys);
+ return 2;
+}
diff --git a/tools/net/ynl/samples/netdev.c b/tools/net/ynl/samples/netdev.c
new file mode 100644
index 000000000000..d31268aa47c5
--- /dev/null
+++ b/tools/net/ynl/samples/netdev.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <string.h>
+
+#include <ynl.h>
+
+#include <net/if.h>
+
+#include "netdev-user.h"
+
+/* netdev genetlink family code sample
+ * This sample shows off the basics of the netdev family, but also
+ * notification handling, hence the somewhat odd UI. We subscribe to
+ * notifications first, then wait for the interface selection, so the socket
+ * may already have accumulated notifications while we wait. This lets us
+ * test that YNL can handle requests and notifications getting interleaved.
+ */
+
+static void netdev_print_device(struct netdev_dev_get_rsp *d, unsigned int op)
+{
+ char ifname[IF_NAMESIZE];
+ const char *name;
+
+ if (!d->_present.ifindex)
+ return;
+
+ name = if_indextoname(d->ifindex, ifname);
+ if (name)
+ printf("%8s", name);
+ printf("[%d]\t", d->ifindex);
+
+ if (!d->_present.xdp_features)
+ return;
+
+ printf("%llx:", d->xdp_features);
+ for (int i = 0; d->xdp_features > 1U << i; i++) {
+ if (d->xdp_features & (1U << i))
+ printf(" %s", netdev_xdp_act_str(1 << i));
+ }
+
+ name = netdev_op_str(op);
+ if (name)
+ printf(" (ntf: %s)", name);
+ printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+ struct netdev_dev_get_list *devs;
+ struct ynl_ntf_base_type *ntf;
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+ int ifindex = 0;
+
+ if (argc > 1)
+ ifindex = strtol(argv[1], NULL, 0);
+
+ ys = ynl_sock_create(&ynl_netdev_family, &yerr);
+ if (!ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return 1;
+ }
+
+ if (ynl_subscribe(ys, "mgmt"))
+ goto err_close;
+
+ printf("Select ifc ($ifindex; or 0 = dump; or -2 ntf check): ");
+ scanf("%d", &ifindex);
+
+ if (ifindex > 0) {
+ struct netdev_dev_get_req *req;
+ struct netdev_dev_get_rsp *d;
+
+ req = netdev_dev_get_req_alloc();
+ netdev_dev_get_req_set_ifindex(req, ifindex);
+
+ d = netdev_dev_get(ys, req);
+ netdev_dev_get_req_free(req);
+ if (!d)
+ goto err_close;
+
+ netdev_print_device(d, 0);
+ netdev_dev_get_rsp_free(d);
+ } else if (!ifindex) {
+ devs = netdev_dev_get_dump(ys);
+ if (!devs)
+ goto err_close;
+
+ ynl_dump_foreach(devs, d)
+ netdev_print_device(d, 0);
+ netdev_dev_get_list_free(devs);
+ } else if (ifindex == -2) {
+ ynl_ntf_check(ys);
+ }
+ while ((ntf = ynl_ntf_dequeue(ys))) {
+ netdev_print_device((struct netdev_dev_get_rsp *)&ntf->data,
+ ntf->cmd);
+ ynl_ntf_free(ntf);
+ }
+
+ ynl_sock_destroy(ys);
+ return 0;
+
+err_close:
+ fprintf(stderr, "YNL: %s\n", ys->err.msg);
+ ynl_sock_destroy(ys);
+ return 2;
+}
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index cc2f8c945340..71c5e79e877f 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -4,6 +4,7 @@
import argparse
import collections
import os
+import re
import yaml
from lib import SpecFamily, SpecAttrSet, SpecAttr, SpecOperation, SpecEnumSet, SpecEnumEntry
@@ -48,6 +49,11 @@ class Type(SpecAttr):
else:
self.nested_render_name = f"{family.name}_{c_lower(self.nested_attrs)}"
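+ # the trailing '_' below avoids a name clash with the struct
+ # generated for a definition of the same name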
+ if self.nested_attrs in self.family.consts:
+ self.nested_struct_type = 'struct ' + self.nested_render_name + '_'
+ else:
+ self.nested_struct_type = 'struct ' + self.nested_render_name
+
self.c_name = c_lower(self.name)
if self.c_name in _C_KW:
self.c_name += '_'
@@ -57,8 +63,11 @@ class Type(SpecAttr):
delattr(self, "enum_name")
def resolve(self):
- self.enum_name = f"{self.attr_set.name_prefix}{self.name}"
- self.enum_name = c_upper(self.enum_name)
+ if 'name-prefix' in self.attr:
+ enum_name = f"{self.attr['name-prefix']}{self.name}"
+ else:
+ enum_name = f"{self.attr_set.name_prefix}{self.name}"
+ self.enum_name = c_upper(enum_name)
def is_multi_val(self):
return None
@@ -94,7 +103,10 @@ class Type(SpecAttr):
def arg_member(self, ri):
member = self._complex_member_type(ri)
if member:
- return [member + ' *' + self.c_name]
+ arg = [member + ' *' + self.c_name]
+ if self.presence_type() == 'count':
+ arg += ['unsigned int n_' + self.c_name]
+ return arg
raise Exception(f"Struct member not implemented for class type {self.type}")
def struct_member(self, ri):
@@ -150,7 +162,7 @@ class Type(SpecAttr):
init_lines = [init_lines]
kw = 'if' if first else 'else if'
- ri.cw.block_start(line=f"{kw} (mnl_attr_get_type(attr) == {self.enum_name})")
+ ri.cw.block_start(line=f"{kw} (type == {self.enum_name})")
if local_vars:
for local in local_vars:
ri.cw.p(local)
@@ -170,6 +182,7 @@ class Type(SpecAttr):
for line in lines:
ri.cw.p(line)
ri.cw.block_end()
+ return True
def _setter_lines(self, ri, member, presence):
raise Exception(f"Setter not implemented for class type {self.type}")
@@ -187,9 +200,12 @@ class Type(SpecAttr):
code.append(presence + ' = 1;')
code += self._setter_lines(ri, member, presence)
- ri.cw.write_func('static inline void',
- f"{op_prefix(ri, direction, deref=deref)}_set_{'_'.join(ref)}",
- body=code,
+ func_name = f"{op_prefix(ri, direction, deref=deref)}_set_{'_'.join(ref)}"
+ free = bool([x for x in code if 'free(' in x])
+ alloc = bool([x for x in code if 'alloc(' in x])
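+ # Setters which free the old value but don't allocate a copy take
+ # ownership of the caller's pointer; mark them with a '__' prefix.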
+ if free and not alloc:
+ func_name = '__' + func_name
+ ri.cw.write_func('static inline void', func_name, body=code,
args=[f'{type_name(ri, direction, deref=deref)} *{var}'] + self.arg_member(ri))
@@ -197,6 +213,12 @@ class TypeUnused(Type):
def presence_type(self):
return ''
+ def arg_member(self, ri):
+ return []
+
+ def _attr_get(self, ri, var):
+ return ['return MNL_CB_ERROR;'], None, None
+
def _attr_typol(self):
return '.type = YNL_PT_REJECT, '
@@ -208,12 +230,24 @@ class TypePad(Type):
def presence_type(self):
return ''
+ def arg_member(self, ri):
+ return []
+
def _attr_typol(self):
- return '.type = YNL_PT_REJECT, '
+ return '.type = YNL_PT_IGNORE, '
+
+ def attr_put(self, ri, var):
+ pass
+
+ def attr_get(self, ri, var, first):
+ pass
def attr_policy(self, cw):
pass
+ def setter(self, ri, space, direction, deref=False, ref=None):
+ pass
+
class TypeScalar(Type):
def __init__(self, family, attr_set, attr, value):
@@ -239,7 +273,8 @@ class TypeScalar(Type):
else:
self.is_bitfield = False
- if 'enum' in self.attr and not self.is_bitfield:
+ maybe_enum = not self.is_bitfield and 'enum' in self.attr
+ if maybe_enum and self.family.consts[self.attr['enum']].enum_name:
self.type_name = f"enum {self.family.name}_{c_lower(self.attr['enum'])}"
else:
self.type_name = '__' + self.type
@@ -265,8 +300,10 @@ class TypeScalar(Type):
return f"NLA_POLICY_MIN({policy}, {self.checks['min']})"
elif 'enum' in self.attr:
enum = self.family.consts[self.attr['enum']]
- cnt = len(enum['entries'])
- return f"NLA_POLICY_MAX({policy}, {cnt - 1})"
+ low, high = enum.value_range()
+ if low == 0:
+ return f"NLA_POLICY_MAX({policy}, {high})"
+ return f"NLA_POLICY_RANGE({policy}, {low}, {high})"
return super()._attr_policy(policy)
def _attr_typol(self):
@@ -395,7 +432,7 @@ class TypeBinary(Type):
class TypeNest(Type):
def _complex_member_type(self, ri):
- return f"struct {self.nested_render_name}"
+ return self.nested_struct_type
def free(self, ri, var, ref):
ri.cw.p(f'{self.nested_render_name}_free(&{var}->{ref}{self.c_name});')
@@ -411,7 +448,8 @@ class TypeNest(Type):
f"{self.enum_name}, &{var}->{self.c_name})")
def _attr_get(self, ri, var):
- get_lines = [f"{self.nested_render_name}_parse(&parg, attr);"]
+ get_lines = [f"if ({self.nested_render_name}_parse(&parg, attr))",
+ "return MNL_CB_ERROR;"]
init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;",
f"parg.data = &{var}->{self.c_name};"]
return get_lines, init_lines, None
@@ -424,15 +462,27 @@ class TypeNest(Type):
class TypeMultiAttr(Type):
+ def __init__(self, family, attr_set, attr, value, base_type):
+ super().__init__(family, attr_set, attr, value)
+
+ self.base_type = base_type
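+ # keep the underlying single-value type around so policy and typol
+ # rendering can be delegated to it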
+
def is_multi_val(self):
return True
def presence_type(self):
return 'count'
+ def _mnl_type(self):
+ t = self.type
+ # mnl does not have a helper for signed types
+ if t[0] == 's':
+ t = 'u' + t[1:]
+ return t
+
def _complex_member_type(self, ri):
if 'type' not in self.attr or self.attr['type'] == 'nest':
- return f"struct {self.nested_render_name}"
+ return self.nested_struct_type
elif self.attr['type'] in scalars:
scalar_pfx = '__' if ri.ku_space == 'user' else ''
return scalar_pfx + self.attr['type']
@@ -443,20 +493,42 @@ class TypeMultiAttr(Type):
return 'type' not in self.attr or self.attr['type'] == 'nest'
def free(self, ri, var, ref):
- if 'type' not in self.attr or self.attr['type'] == 'nest':
+ if self.attr['type'] in scalars:
+ ri.cw.p(f"free({var}->{ref}{self.c_name});")
+ elif 'type' not in self.attr or self.attr['type'] == 'nest':
ri.cw.p(f"for (i = 0; i < {var}->{ref}n_{self.c_name}; i++)")
ri.cw.p(f'{self.nested_render_name}_free(&{var}->{ref}{self.c_name}[i]);')
+ ri.cw.p(f"free({var}->{ref}{self.c_name});")
+ else:
+ raise Exception(f"Free of MultiAttr sub-type {self.attr['type']} not supported yet")
+
+ def _attr_policy(self, policy):
+ return self.base_type._attr_policy(policy)
def _attr_typol(self):
- if 'type' not in self.attr or self.attr['type'] == 'nest':
- return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
- elif self.attr['type'] in scalars:
- return f".type = YNL_PT_U{self.attr['type'][1:]}, "
- else:
- raise Exception(f"Sub-type {self.attr['type']} not supported yet")
+ return self.base_type._attr_typol()
def _attr_get(self, ri, var):
- return f'{var}->n_{self.c_name}++;', None, None
+ return f'n_{self.c_name}++;', None, None
+
+ def attr_put(self, ri, var):
+ if self.attr['type'] in scalars:
+ put_type = self._mnl_type()
+ ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
+ ri.cw.p(f"mnl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name}[i]);")
+ elif 'type' not in self.attr or self.attr['type'] == 'nest':
+ ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
+ self._attr_put_line(ri, var, f"{self.nested_render_name}_put(nlh, " +
+ f"{self.enum_name}, &{var}->{self.c_name}[i])")
+ else:
+ raise Exception(f"Put of MultiAttr sub-type {self.attr['type']} not supported yet")
+
+ def _setter_lines(self, ri, member, presence):
+ # Multi-attr members carry a count rather than a presence bit, so rewrite the presence expression into the n_<attr> count
+ presence = presence[:-(len('_present.') + len(self.c_name))] + "n_" + self.c_name
+ return [f"free({member});",
+ f"{member} = {self.c_name};",
+ f"{presence} = n_{self.c_name};"]
class TypeArrayNest(Type):
@@ -468,7 +540,7 @@ class TypeArrayNest(Type):
def _complex_member_type(self, ri):
if 'sub-type' not in self.attr or self.attr['sub-type'] == 'nest':
- return f"struct {self.nested_render_name}"
+ return self.nested_struct_type
elif self.attr['sub-type'] in scalars:
scalar_pfx = '__' if ri.ku_space == 'user' else ''
return scalar_pfx + self.attr['sub-type']
@@ -488,7 +560,7 @@ class TypeArrayNest(Type):
class TypeNestTypeValue(Type):
def _complex_member_type(self, ri):
- return f"struct {self.nested_render_name}"
+ return self.nested_struct_type
def _attr_typol(self):
return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
@@ -531,6 +603,8 @@ class Struct:
else:
self.render_name = f"{family.name}_{c_lower(space_name)}"
self.struct_name = 'struct ' + self.render_name
+ if self.nested and space_name in family.consts:
+ self.struct_name += '_'
self.ptr_name = self.struct_name + ' *'
self.request = False
@@ -591,7 +665,14 @@ class EnumEntry(SpecEnumEntry):
class EnumSet(SpecEnumSet):
def __init__(self, family, yaml):
self.render_name = c_lower(family.name + '-' + yaml['name'])
- self.enum_name = 'enum ' + self.render_name
+
+ if 'enum-name' in yaml:
+ if yaml['enum-name']:
+ self.enum_name = 'enum ' + c_lower(yaml['enum-name'])
+ else:
+ self.enum_name = None
+ else:
+ self.enum_name = 'enum ' + self.render_name
self.value_pfx = yaml.get('name-prefix', f"{family.name}-{yaml['name']}-")
@@ -600,6 +681,15 @@ class EnumSet(SpecEnumSet):
def new_entry(self, entry, prev_entry, value_start):
return EnumEntry(self, entry, prev_entry, value_start)
+ def value_range(self):
+ low = min([x.value for x in self.entries.values()])
+ high = max([x.value for x in self.entries.values()])
+
+ if high - low + 1 != len(self.entries):
+ raise Exception("Can't get value range for a noncontiguous enum")
+
+ return low, high
+
class AttrSet(SpecAttrSet):
def __init__(self, family, yaml):
@@ -630,42 +720,44 @@ class AttrSet(SpecAttrSet):
self.c_name = ''
def new_attr(self, elem, value):
- if 'multi-attr' in elem and elem['multi-attr']:
- return TypeMultiAttr(self.family, self, elem, value)
- elif elem['type'] in scalars:
- return TypeScalar(self.family, self, elem, value)
+ if elem['type'] in scalars:
+ t = TypeScalar(self.family, self, elem, value)
elif elem['type'] == 'unused':
- return TypeUnused(self.family, self, elem, value)
+ t = TypeUnused(self.family, self, elem, value)
elif elem['type'] == 'pad':
- return TypePad(self.family, self, elem, value)
+ t = TypePad(self.family, self, elem, value)
elif elem['type'] == 'flag':
- return TypeFlag(self.family, self, elem, value)
+ t = TypeFlag(self.family, self, elem, value)
elif elem['type'] == 'string':
- return TypeString(self.family, self, elem, value)
+ t = TypeString(self.family, self, elem, value)
elif elem['type'] == 'binary':
- return TypeBinary(self.family, self, elem, value)
+ t = TypeBinary(self.family, self, elem, value)
elif elem['type'] == 'nest':
- return TypeNest(self.family, self, elem, value)
+ t = TypeNest(self.family, self, elem, value)
elif elem['type'] == 'array-nest':
- return TypeArrayNest(self.family, self, elem, value)
+ t = TypeArrayNest(self.family, self, elem, value)
elif elem['type'] == 'nest-type-value':
- return TypeNestTypeValue(self.family, self, elem, value)
+ t = TypeNestTypeValue(self.family, self, elem, value)
else:
raise Exception(f"No typed class for type {elem['type']}")
+ if 'multi-attr' in elem and elem['multi-attr']:
+ t = TypeMultiAttr(self.family, self, elem, value, t)
+
+ return t
+
class Operation(SpecOperation):
def __init__(self, family, yaml, req_value, rsp_value):
super().__init__(family, yaml, req_value, rsp_value)
- if req_value != rsp_value:
- raise Exception("Directional messages not supported by codegen")
-
self.render_name = family.name + '_' + c_lower(self.name)
self.dual_policy = ('do' in yaml and 'request' in yaml['do']) and \
('dump' in yaml and 'request' in yaml['dump'])
+ self.has_ntf = False
+
# Added by resolve:
self.enum_name = None
delattr(self, "enum_name")
@@ -678,16 +770,12 @@ class Operation(SpecOperation):
else:
self.enum_name = self.family.async_op_prefix + c_upper(self.name)
- def add_notification(self, op):
- if 'notify' not in self.yaml:
- self.yaml['notify'] = dict()
- self.yaml['notify']['reply'] = self.yaml['do']['reply']
- self.yaml['notify']['cmds'] = []
- self.yaml['notify']['cmds'].append(op)
+ def mark_has_ntf(self):
+ self.has_ntf = True
class Family(SpecFamily):
- def __init__(self, file_name):
+ def __init__(self, file_name, exclude_ops):
# Added by resolve:
self.c_name = None
delattr(self, "c_name")
@@ -702,7 +790,7 @@ class Family(SpecFamily):
self.hooks = None
delattr(self, "hooks")
- super().__init__(file_name)
+ super().__init__(file_name, exclude_ops=exclude_ops)
self.fam_key = c_upper(self.yaml.get('c-family-name', self.yaml["name"] + '_FAMILY_NAME'))
self.ver_key = c_upper(self.yaml.get('c-version-name', self.yaml["name"] + '_FAMILY_VERSION'))
@@ -745,14 +833,12 @@ class Family(SpecFamily):
self.root_sets = dict()
# dict space-name -> set('request', 'reply')
self.pure_nested_structs = dict()
- self.all_notify = dict()
+ self._mark_notify()
self._mock_up_events()
- self._dictify()
self._load_root_sets()
self._load_nested_sets()
- self._load_all_notify()
self._load_hooks()
self.kernel_policy = self.yaml.get('kernel-policy', 'split')
@@ -768,6 +854,11 @@ class Family(SpecFamily):
def new_operation(self, elem, req_value, rsp_value):
return Operation(self, elem, req_value, rsp_value)
+ def _mark_notify(self):
+ for op in self.msgs.values():
+ if 'notify' in op:
+ self.ops[op['notify']].mark_has_ntf()
+
# Fake a 'do' equivalent of all events, so that we can render their response parsing
def _mock_up_events(self):
for op in self.yaml['operations']['list']:
@@ -778,16 +869,8 @@ class Family(SpecFamily):
}
}
- def _dictify(self):
- ntf = []
- for msg in self.msgs.values():
- if 'notify' in msg:
- ntf.append(msg)
- for n in ntf:
- self.ops[n['notify']].add_notification(n)
-
def _load_root_sets(self):
- for op_name, op in self.ops.items():
+ for op_name, op in self.msgs.items():
if 'attribute-set' not in op:
continue
@@ -798,6 +881,8 @@ class Family(SpecFamily):
req_attrs.update(set(op[op_mode]['request']['attributes']))
if op_mode in op and 'reply' in op[op_mode]:
rsp_attrs.update(set(op[op_mode]['reply']['attributes']))
+ if 'event' in op:
+ rsp_attrs.update(set(op['event']['attributes']))
if op['attribute-set'] not in self.root_sets:
self.root_sets[op['attribute-set']] = {'request': req_attrs, 'reply': rsp_attrs}
@@ -806,33 +891,73 @@ class Family(SpecFamily):
self.root_sets[op['attribute-set']]['reply'].update(rsp_attrs)
def _load_nested_sets(self):
+ attr_set_queue = list(self.root_sets.keys())
+ attr_set_seen = set(self.root_sets.keys())
+
+ while len(attr_set_queue):
+ a_set = attr_set_queue.pop(0)
+ for attr, spec in self.attr_sets[a_set].items():
+ if 'nested-attributes' not in spec:
+ continue
+
+ nested = spec['nested-attributes']
+ if nested not in attr_set_seen:
+ attr_set_queue.append(nested)
+ attr_set_seen.add(nested)
+
+ inherit = set()
+ if nested not in self.root_sets:
+ if nested not in self.pure_nested_structs:
+ self.pure_nested_structs[nested] = Struct(self, nested, inherited=inherit)
+ else:
+ raise Exception(f'Using attr set as root and nested not supported - {nested}')
+
+ if 'type-value' in spec:
+ if nested in self.root_sets:
+ raise Exception("Inheriting members to a space used as root not supported")
+ inherit.update(set(spec['type-value']))
+ elif spec['type'] == 'array-nest':
+ inherit.add('idx')
+ self.pure_nested_structs[nested].set_inherited(inherit)
+
for root_set, rs_members in self.root_sets.items():
for attr, spec in self.attr_sets[root_set].items():
if 'nested-attributes' in spec:
- inherit = set()
nested = spec['nested-attributes']
- if nested not in self.root_sets:
- self.pure_nested_structs[nested] = Struct(self, nested, inherited=inherit)
if attr in rs_members['request']:
self.pure_nested_structs[nested].request = True
if attr in rs_members['reply']:
self.pure_nested_structs[nested].reply = True
- if 'type-value' in spec:
- if nested in self.root_sets:
- raise Exception("Inheriting members to a space used as root not supported")
- inherit.update(set(spec['type-value']))
- elif spec['type'] == 'array-nest':
- inherit.add('idx')
- self.pure_nested_structs[nested].set_inherited(inherit)
-
- def _load_all_notify(self):
- for op_name, op in self.ops.items():
- if not op:
- continue
-
- if 'notify' in op:
- self.all_notify[op_name] = op['notify']['cmds']
+ # Try to reorder according to dependencies
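+ # A nested struct must be emitted before any struct which uses it,
+ # so entries whose dependencies haven't been seen yet keep getting
+ # pushed to the back until the order settles.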
+ pns_key_list = list(self.pure_nested_structs.keys())
+ pns_key_seen = set()
+ rounds = len(pns_key_list)**2 # it's basically bubble sort
+ for _ in range(rounds):
+ if len(pns_key_list) == 0:
+ break
+ name = pns_key_list.pop(0)
+ finished = True
+ for _, spec in self.attr_sets[name].items():
+ if 'nested-attributes' in spec:
+ if spec['nested-attributes'] not in pns_key_seen:
+ # Dicts preserve insertion order, so popping and re-inserting moves the struct to the end
+ struct = self.pure_nested_structs.pop(name)
+ self.pure_nested_structs[name] = struct
+ finished = False
+ break
+ if finished:
+ pns_key_seen.add(name)
+ else:
+ pns_key_list.append(name)
+ # Propagate the request / reply
+ for attr_set, struct in reversed(self.pure_nested_structs.items()):
+ for _, spec in self.attr_sets[attr_set].items():
+ if 'nested-attributes' in spec:
+ child = self.pure_nested_structs.get(spec['nested-attributes'])
+ if child:
+ child.request |= struct.request
+ child.reply |= struct.reply
def _load_global_policy(self):
global_set = set()
@@ -874,33 +999,38 @@ class Family(SpecFamily):
class RenderInfo:
- def __init__(self, cw, family, ku_space, op, op_name, op_mode, attr_set=None):
+ def __init__(self, cw, family, ku_space, op, op_mode, attr_set=None):
self.family = family
self.nl = cw.nlib
self.ku_space = ku_space
- self.op = op
- self.op_name = op_name
self.op_mode = op_mode
+ self.op = op
# 'do' and 'dump' response parsing is identical
- if op_mode != 'do' and 'dump' in op and 'do' in op and 'reply' in op['do'] and \
- op["do"]["reply"] == op["dump"]["reply"]:
- self.type_consistent = True
- else:
- self.type_consistent = op_mode == 'event'
+ self.type_consistent = True
+ if op_mode != 'do' and 'dump' in op and 'do' in op:
+ if ('reply' in op['do']) != ('reply' in op["dump"]):
+ self.type_consistent = False
+ elif 'reply' in op['do'] and op["do"]["reply"] != op["dump"]["reply"]:
+ self.type_consistent = False
self.attr_set = attr_set
if not self.attr_set:
self.attr_set = op['attribute-set']
+ self.type_name_conflict = False
if op:
- self.type_name = c_lower(op_name)
+ self.type_name = c_lower(op.name)
else:
self.type_name = c_lower(attr_set)
+ if attr_set in family.consts:
+ self.type_name_conflict = True
self.cw = cw
self.struct = dict()
+ if op_mode == 'notify':
+ op_mode = 'do'
for op_dir in ['request', 'reply']:
if op and op_dir in op[op_mode]:
self.struct[op_dir] = Struct(family, self.attr_set,
@@ -914,6 +1044,7 @@ class CodeWriter:
self.nlib = nlib
self._nl = False
+ self._block_end = False
self._silent_block = False
self._ind = 0
self._out = out_file
@@ -923,9 +1054,17 @@ class CodeWriter:
return line.startswith('if') or line.startswith('while') or line.startswith('for')
def p(self, line, add_ind=0):
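+ # a '}' left pending by block_end() is merged with a following
+ # 'else' ("} else ..."), otherwise it is flushed on its own line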
+ if self._block_end:
+ self._block_end = False
+ if line.startswith('else'):
+ line = '} ' + line
+ else:
+ self._out.write('\t' * self._ind + '}\n')
+
if self._nl:
self._out.write('\n')
self._nl = False
+
ind = self._ind
if line[-1] == ':':
ind -= 1
@@ -949,7 +1088,14 @@ class CodeWriter:
if line and line[0] not in {';', ','}:
line = ' ' + line
self._ind -= 1
- self.p('}' + line)
+ self._nl = False
+ if not line:
+ # Delay printing closing bracket in case "else" comes next
+ if self._block_end:
+ self._out.write('\t' * (self._ind + 1) + '}\n')
+ self._block_end = True
+ else:
+ self.p('}' + line)
def write_doc_line(self, doc, indent=True):
words = doc.split()
@@ -1068,7 +1214,40 @@ op_mode_to_wrapper = {
}
_C_KW = {
- 'do'
+ 'auto',
+ 'bool',
+ 'break',
+ 'case',
+ 'char',
+ 'const',
+ 'continue',
+ 'default',
+ 'do',
+ 'double',
+ 'else',
+ 'enum',
+ 'extern',
+ 'float',
+ 'for',
+ 'goto',
+ 'if',
+ 'inline',
+ 'int',
+ 'long',
+ 'register',
+ 'return',
+ 'short',
+ 'signed',
+ 'sizeof',
+ 'static',
+ 'struct',
+ 'switch',
+ 'typedef',
+ 'union',
+ 'unsigned',
+ 'void',
+ 'volatile',
+ 'while'
}
@@ -1131,10 +1310,6 @@ def print_dump_prototype(ri):
print_prototype(ri, "request")
-def put_typol_fwd(cw, struct):
- cw.p(f'extern struct ynl_policy_nest {struct.render_name}_nest;')
-
-
def put_typol(cw, struct):
type_max = struct.attr_set.max_name
cw.block_start(line=f'struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =')
@@ -1152,6 +1327,58 @@ def put_typol(cw, struct):
cw.nl()
+def _put_enum_to_str_helper(cw, render_name, map_name, arg_name, enum=None):
+ args = [f'int {arg_name}']
+ if enum and not ('enum-name' in enum and not enum['enum-name']):
+ args = [f'enum {render_name} {arg_name}']
+ cw.write_func_prot('const char *', f'{render_name}_str', args)
+ cw.block_start()
+ if enum and enum.type == 'flags':
+ cw.p(f'{arg_name} = ffs({arg_name}) - 1;')
+ cw.p(f'if ({arg_name} < 0 || {arg_name} >= (int)MNL_ARRAY_SIZE({map_name}))')
+ cw.p('return NULL;')
+ cw.p(f'return {map_name}[{arg_name}];')
+ cw.block_end()
+ cw.nl()
+
+
+def put_op_name_fwd(family, cw):
+ cw.write_func_prot('const char *', f'{family.name}_op_str', ['int op'], suffix=';')
+
+
+def put_op_name(family, cw):
+ map_name = f'{family.name}_op_strmap'
+ cw.block_start(line=f"static const char * const {map_name}[] =")
+ for op_name, op in family.msgs.items():
+ if op.rsp_value:
+ if op.req_value == op.rsp_value:
+ cw.p(f'[{op.enum_name}] = "{op_name}",')
+ else:
+ cw.p(f'[{op.rsp_value}] = "{op_name}",')
+ cw.block_end(line=';')
+ cw.nl()
+
+ _put_enum_to_str_helper(cw, family.name + '_op', map_name, 'op')
+
+
+def put_enum_to_str_fwd(family, cw, enum):
+ args = [f'enum {enum.render_name} value']
+ if 'enum-name' in enum and not enum['enum-name']:
+ args = ['int value']
+ cw.write_func_prot('const char *', f'{enum.render_name}_str', args, suffix=';')
+
+
+def put_enum_to_str(family, cw, enum):
+ map_name = f'{enum.render_name}_strmap'
+ cw.block_start(line=f"static const char * const {map_name}[] =")
+ for entry in enum.entries.values():
+ cw.p(f'[{entry.value}] = "{entry.name}",')
+ cw.block_end(line=';')
+ cw.nl()
+
+ _put_enum_to_str_helper(cw, enum.render_name, map_name, 'value', enum=enum)
+
+
def put_req_nested(ri, struct):
func_args = ['struct nlmsghdr *nlh',
'unsigned int attr_type',
@@ -1196,6 +1423,11 @@ def _multi_parse(ri, struct, init_lines, local_vars):
local_vars.append('struct ynl_parse_arg parg;')
init_lines.append('parg.ys = yarg->ys;')
+ all_multi = array_nests | multi_attrs
+
+ for anest in sorted(all_multi):
+ local_vars.append(f"unsigned int n_{struct[anest].c_name} = 0;")
+
ri.cw.block_start()
ri.cw.write_func_lvar(local_vars)
@@ -1206,13 +1438,21 @@ def _multi_parse(ri, struct, init_lines, local_vars):
for arg in struct.inherited:
ri.cw.p(f'dst->{arg} = {arg};')
+ for anest in sorted(all_multi):
+ aspec = struct[anest]
+ ri.cw.p(f"if (dst->{aspec.c_name})")
+ ri.cw.p(f'return ynl_error_parse(yarg, "attribute already present ({struct.attr_set.name}.{aspec.name})");')
+
ri.cw.nl()
ri.cw.block_start(line=iter_line)
+ ri.cw.p('unsigned int type = mnl_attr_get_type(attr);')
+ ri.cw.nl()
first = True
for _, arg in struct.member_list():
- arg.attr_get(ri, 'dst', first=first)
- first = False
+ good = arg.attr_get(ri, 'dst', first=first)
+ # First may be 'unused' or 'pad', ignore those
+ first &= not good
ri.cw.block_end()
ri.cw.nl()
@@ -1220,8 +1460,9 @@ def _multi_parse(ri, struct, init_lines, local_vars):
for anest in sorted(array_nests):
aspec = struct[anest]
- ri.cw.block_start(line=f"if (dst->n_{aspec.c_name})")
- ri.cw.p(f"dst->{aspec.c_name} = calloc(dst->n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.block_start(line=f"if (n_{aspec.c_name})")
+ ri.cw.p(f"dst->{aspec.c_name} = calloc({aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.p(f"dst->n_{aspec.c_name} = n_{aspec.c_name};")
ri.cw.p('i = 0;')
ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
ri.cw.block_start(line=f"mnl_attr_for_each_nested(attr, attr_{aspec.c_name})")
@@ -1235,8 +1476,9 @@ def _multi_parse(ri, struct, init_lines, local_vars):
for anest in sorted(multi_attrs):
aspec = struct[anest]
- ri.cw.block_start(line=f"if (dst->n_{aspec.c_name})")
- ri.cw.p(f"dst->{aspec.c_name} = calloc(dst->n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.block_start(line=f"if (n_{aspec.c_name})")
+ ri.cw.p(f"dst->{aspec.c_name} = calloc(n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.p(f"dst->n_{aspec.c_name} = n_{aspec.c_name};")
ri.cw.p('i = 0;')
if 'nested-attributes' in aspec:
ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
@@ -1304,13 +1546,13 @@ def print_req(ri):
ret_err = '-1'
direction = "request"
local_vars = ['struct nlmsghdr *nlh;',
- 'int len, err;']
+ 'int err;']
if 'reply' in ri.op[ri.op_mode]:
ret_ok = 'rsp'
ret_err = 'NULL'
local_vars += [f'{type_name(ri, rdir(direction))} *rsp;',
- 'struct ynl_parse_arg yarg = { .ys = ys, };']
+ 'struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };']
print_prototype(ri, direction, terminate=False)
ri.cw.block_start()
@@ -1320,41 +1562,39 @@ def print_req(ri):
ri.cw.p(f"ys->req_policy = &{ri.struct['request'].render_name}_nest;")
if 'reply' in ri.op[ri.op_mode]:
- ri.cw.p(f"yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ ri.cw.p(f"yrs.yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
ri.cw.nl()
for _, attr in ri.struct["request"].member_list():
attr.attr_put(ri, "req")
ri.cw.nl()
- ri.cw.p('err = mnl_socket_sendto(ys->sock, nlh, nlh->nlmsg_len);')
- ri.cw.p('if (err < 0)')
- ri.cw.p(f"return {ret_err};")
- ri.cw.nl()
- ri.cw.p('len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);')
- ri.cw.p('if (len < 0)')
- ri.cw.p(f"return {ret_err};")
- ri.cw.nl()
-
+ parse_arg = "NULL"
if 'reply' in ri.op[ri.op_mode]:
ri.cw.p('rsp = calloc(1, sizeof(*rsp));')
- ri.cw.p('yarg.data = rsp;')
+ ri.cw.p('yrs.yarg.data = rsp;')
+ ri.cw.p(f"yrs.cb = {op_prefix(ri, 'reply')}_parse;")
+ if ri.op.value is not None:
+ ri.cw.p(f'yrs.rsp_cmd = {ri.op.enum_name};')
+ else:
+ ri.cw.p(f'yrs.rsp_cmd = {ri.op.rsp_value};')
ri.cw.nl()
- ri.cw.p(f"err = {ri.nl.parse_cb_run(op_prefix(ri, 'reply') + '_parse', '&yarg', False)};")
- ri.cw.p('if (err < 0)')
+ parse_arg = '&yrs'
+ ri.cw.p(f"err = ynl_exec(ys, nlh, {parse_arg});")
+ ri.cw.p('if (err < 0)')
+ if 'reply' in ri.op[ri.op_mode]:
ri.cw.p('goto err_free;')
- ri.cw.nl()
-
- ri.cw.p('err = ynl_recv_ack(ys, err);')
- ri.cw.p('if (err)')
- ri.cw.p('goto err_free;')
+ else:
+ ri.cw.p('return -1;')
ri.cw.nl()
+
ri.cw.p(f"return {ret_ok};")
ri.cw.nl()
- ri.cw.p('err_free:')
if 'reply' in ri.op[ri.op_mode]:
+ ri.cw.p('err_free:')
ri.cw.p(f"{call_free(ri, rdir(direction), 'rsp')}")
- ri.cw.p(f"return {ret_err};")
+ ri.cw.p(f"return {ret_err};")
+
ri.cw.block_end()
@@ -1364,7 +1604,7 @@ def print_dump(ri):
ri.cw.block_start()
local_vars = ['struct ynl_dump_state yds = {};',
'struct nlmsghdr *nlh;',
- 'int len, err;']
+ 'int err;']
for var in local_vars:
ri.cw.p(f'{var}')
@@ -1373,6 +1613,10 @@ def print_dump(ri):
ri.cw.p('yds.ys = ys;')
ri.cw.p(f"yds.alloc_sz = sizeof({type_name(ri, rdir(direction))});")
ri.cw.p(f"yds.cb = {op_prefix(ri, 'reply', deref=True)}_parse;")
+ if ri.op.value is not None:
+ ri.cw.p(f'yds.rsp_cmd = {ri.op.enum_name};')
+ else:
+ ri.cw.p(f'yds.rsp_cmd = {ri.op.rsp_value};')
ri.cw.p(f"yds.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
ri.cw.nl()
ri.cw.p(f"nlh = ynl_gemsg_start_dump(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);")
@@ -1384,20 +1628,9 @@ def print_dump(ri):
attr.attr_put(ri, "req")
ri.cw.nl()
- ri.cw.p('err = mnl_socket_sendto(ys->sock, nlh, nlh->nlmsg_len);')
- ri.cw.p('if (err < 0)')
- ri.cw.p('return NULL;')
- ri.cw.nl()
-
- ri.cw.block_start(line='do')
- ri.cw.p('len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);')
- ri.cw.p('if (len < 0)')
- ri.cw.p('goto free_list;')
- ri.cw.nl()
- ri.cw.p(f"err = {ri.nl.parse_cb_run('ynl_dump_trampoline', '&yds', False, indent=2)};")
+ ri.cw.p('err = ynl_exec_dump(ys, nlh, &yds);')
ri.cw.p('if (err < 0)')
ri.cw.p('goto free_list;')
- ri.cw.block_end(line='while (err > 0);')
ri.cw.nl()
ri.cw.p('return yds.first;')
@@ -1418,14 +1651,27 @@ def free_arg_name(direction):
return 'obj'
+def print_alloc_wrapper(ri, direction):
+ name = op_prefix(ri, direction)
+ ri.cw.write_func_prot(f'static inline struct {name} *', f"{name}_alloc", [f"void"])
+ ri.cw.block_start()
+ ri.cw.p(f'return calloc(1, sizeof(struct {name}));')
+ ri.cw.block_end()
+
+
def print_free_prototype(ri, direction, suffix=';'):
name = op_prefix(ri, direction)
+ struct_name = name
+ if ri.type_name_conflict:
+ struct_name += '_'
arg = free_arg_name(direction)
- ri.cw.write_func_prot('void', f"{name}_free", [f"struct {name} *{arg}"], suffix=suffix)
+ ri.cw.write_func_prot('void', f"{name}_free", [f"struct {struct_name} *{arg}"], suffix=suffix)
def _print_type(ri, direction, struct):
suffix = f'_{ri.type_name}{direction_to_suffix[direction]}'
+ if not direction and ri.type_name_conflict:
+ suffix += '_'
if ri.op_mode == 'dump':
suffix += '_dump'
@@ -1465,6 +1711,7 @@ def print_type_full(ri, struct):
def print_type_helpers(ri, direction, deref=False):
print_free_prototype(ri, direction)
+ ri.cw.nl()
if ri.ku_space == 'user' and direction == 'request':
for _, attr in ri.struct[direction].member_list():
@@ -1473,6 +1720,7 @@ def print_type_helpers(ri, direction, deref=False):
def print_req_type_helpers(ri):
+ print_alloc_wrapper(ri, "request")
print_type_helpers(ri, "request")
@@ -1496,6 +1744,12 @@ def print_req_type(ri):
print_type(ri, "request")
+def print_req_free(ri):
+ if 'request' not in ri.op[ri.op_mode]:
+ return
+ _free_type(ri, 'request', ri.struct['request'])
+
+
def print_rsp_type(ri):
if (ri.op_mode == 'do' or ri.op_mode == 'dump') and 'reply' in ri.op[ri.op_mode]:
direction = 'reply'
@@ -1513,6 +1767,7 @@ def print_wrapped_type(ri):
elif ri.op_mode == 'notify' or ri.op_mode == 'event':
ri.cw.p('__u16 family;')
ri.cw.p('__u8 cmd;')
+ ri.cw.p('struct ynl_ntf_base_type *next;')
ri.cw.p(f"void (*free)({type_name(ri, 'reply')} *ntf);")
ri.cw.p(f"{type_name(ri, 'reply', deref=True)} obj __attribute__ ((aligned (8)));")
ri.cw.block_end(line=';')
@@ -1564,7 +1819,7 @@ def print_dump_type_free(ri):
ri.cw.block_start()
ri.cw.p(f"{sub_type} *next = rsp;")
ri.cw.nl()
- ri.cw.block_start(line='while (next)')
+ ri.cw.block_start(line='while ((void *)next != YNL_LIST_END)')
_free_type_members_iter(ri, ri.struct['reply'])
ri.cw.p('rsp = next;')
ri.cw.p('next = rsp->next;')
@@ -1587,70 +1842,6 @@ def print_ntf_type_free(ri):
ri.cw.nl()
-def print_ntf_parse_prototype(family, cw, suffix=';'):
- cw.write_func_prot('struct ynl_ntf_base_type *', f"{family['name']}_ntf_parse",
- ['struct ynl_sock *ys'], suffix=suffix)
-
-
-def print_ntf_type_parse(family, cw, ku_mode):
- print_ntf_parse_prototype(family, cw, suffix='')
- cw.block_start()
- cw.write_func_lvar(['struct genlmsghdr *genlh;',
- 'struct nlmsghdr *nlh;',
- 'struct ynl_parse_arg yarg = { .ys = ys, };',
- 'struct ynl_ntf_base_type *rsp;',
- 'int len, err;',
- 'mnl_cb_t parse;'])
- cw.p('len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);')
- cw.p('if (len < (ssize_t)(sizeof(*nlh) + sizeof(*genlh)))')
- cw.p('return NULL;')
- cw.nl()
- cw.p('nlh = (struct nlmsghdr *)ys->rx_buf;')
- cw.p('genlh = mnl_nlmsg_get_payload(nlh);')
- cw.nl()
- cw.block_start(line='switch (genlh->cmd)')
- for ntf_op in sorted(family.all_notify.keys()):
- op = family.ops[ntf_op]
- ri = RenderInfo(cw, family, ku_mode, op, ntf_op, "notify")
- for ntf in op['notify']['cmds']:
- cw.p(f"case {ntf.enum_name}:")
- cw.p(f"rsp = calloc(1, sizeof({type_name(ri, 'notify')}));")
- cw.p(f"parse = {op_prefix(ri, 'reply', deref=True)}_parse;")
- cw.p(f"yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
- cw.p(f"rsp->free = (void *){op_prefix(ri, 'notify')}_free;")
- cw.p('break;')
- for op_name, op in family.ops.items():
- if 'event' not in op:
- continue
- ri = RenderInfo(cw, family, ku_mode, op, op_name, "event")
- cw.p(f"case {op.enum_name}:")
- cw.p(f"rsp = calloc(1, sizeof({type_name(ri, 'event')}));")
- cw.p(f"parse = {op_prefix(ri, 'reply', deref=True)}_parse;")
- cw.p(f"yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
- cw.p(f"rsp->free = (void *){op_prefix(ri, 'notify')}_free;")
- cw.p('break;')
- cw.p('default:')
- cw.p('ynl_error_unknown_notification(ys, genlh->cmd);')
- cw.p('return NULL;')
- cw.block_end()
- cw.nl()
- cw.p('yarg.data = rsp->data;')
- cw.nl()
- cw.p(f"err = {cw.nlib.parse_cb_run('parse', '&yarg', True)};")
- cw.p('if (err < 0)')
- cw.p('goto err_free;')
- cw.nl()
- cw.p('rsp->family = nlh->nlmsg_type;')
- cw.p('rsp->cmd = genlh->cmd;')
- cw.p('return rsp;')
- cw.nl()
- cw.p('err_free:')
- cw.p('free(rsp);')
- cw.p('return NULL;')
- cw.block_end()
- cw.nl()
-
-
def print_req_policy_fwd(cw, struct, ri=None, terminate=True):
if terminate and ri and kernel_can_gen_family_struct(struct.family):
return
@@ -2035,6 +2226,48 @@ def render_uapi(family, cw):
cw.p(f'#endif /* {hdr_prot} */')
+def _render_user_ntf_entry(ri, op):
+ ri.cw.block_start(line=f"[{op.enum_name}] = ")
+ ri.cw.p(f".alloc_sz\t= sizeof({type_name(ri, 'event')}),")
+ ri.cw.p(f".cb\t\t= {op_prefix(ri, 'reply', deref=True)}_parse,")
+ ri.cw.p(f".policy\t\t= &{ri.struct['reply'].render_name}_nest,")
+ ri.cw.p(f".free\t\t= (void *){op_prefix(ri, 'notify')}_free,")
+ ri.cw.block_end(line=',')
+
+
+def render_user_family(family, cw, prototype):
+ symbol = f'const struct ynl_family ynl_{family.c_name}_family'
+ if prototype:
+ cw.p(f'extern {symbol};')
+ return
+
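+ # table of notification handlers indexed by cmd, used by the ynl
+ # library to alloc, parse and free async messages generically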
+ if family.ntfs:
+ cw.block_start(line=f"static const struct ynl_ntf_info {family['name']}_ntf_info[] = ")
+ for ntf_op_name, ntf_op in family.ntfs.items():
+ if 'notify' in ntf_op:
+ op = family.ops[ntf_op['notify']]
+ ri = RenderInfo(cw, family, "user", op, "notify")
+ elif 'event' in ntf_op:
+ ri = RenderInfo(cw, family, "user", ntf_op, "event")
+ else:
+ raise Exception('Invalid notification ' + ntf_op_name)
+ _render_user_ntf_entry(ri, ntf_op)
+ for op_name, op in family.ops.items():
+ if 'event' not in op:
+ continue
+ ri = RenderInfo(cw, family, "user", op, "event")
+ _render_user_ntf_entry(ri, op)
+ cw.block_end(line=";")
+ cw.nl()
+
+ cw.block_start(f'{symbol} = ')
+ cw.p(f'.name\t\t= "{family.name}",')
+ if family.ntfs:
+ cw.p(f".ntf_info\t= {family['name']}_ntf_info,")
+ cw.p(f".ntf_info_size\t= MNL_ARRAY_SIZE({family['name']}_ntf_info),")
+ cw.block_end(line=';')
+
+
def find_kernel_root(full_path):
sub_path = ''
while True:
@@ -2052,6 +2285,7 @@ def main():
parser.add_argument('--header', dest='header', action='store_true', default=None)
parser.add_argument('--source', dest='header', action='store_false')
parser.add_argument('--user-header', nargs='+', default=[])
+ parser.add_argument('--exclude-op', action='append', default=[])
parser.add_argument('-o', dest='out_file', type=str)
args = parser.parse_args()
@@ -2060,8 +2294,10 @@ def main():
if args.header is None:
parser.error("--header or --source is required")
+ exclude_ops = [re.compile(expr) for expr in args.exclude_op]
+
try:
- parsed = Family(args.spec)
+ parsed = Family(args.spec, exclude_ops)
if parsed.license != '((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)':
print('Spec license:', parsed.license)
print('License must be: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)')
@@ -2071,6 +2307,13 @@ def main():
os.sys.exit(1)
return
+ supported_models = ['unified']
+ if args.mode == 'user':
+ supported_models += ['directional']
+ if parsed.msg_id_model not in supported_models:
+ print(f'Message enum-model {parsed.msg_id_model} not supported for {args.mode} generation')
+ os.sys.exit(1)
+
cw = CodeWriter(BaseNlLib(), out_file)
_, spec_kernel = find_kernel_root(args.spec)
@@ -2081,6 +2324,11 @@ def main():
cw.p("/* Do not edit directly, auto-generated from: */")
cw.p(f"/*\t{spec_kernel} */")
cw.p(f"/* YNL-GEN {args.mode} {'header' if args.header else 'source'} */")
+ if args.exclude_op or args.user_header:
+ line = ''
+ line += ' --user-header '.join([''] + args.user_header)
+ line += ' --exclude-op '.join([''] + args.exclude_op)
+ cw.p(f'/* YNL-ARG{line} */')
cw.nl()
if args.mode == 'uapi':
@@ -2101,7 +2349,16 @@ def main():
if args.out_file:
cw.p(f'#include "{os.path.basename(args.out_file[:-2])}.h"')
cw.nl()
- headers = [parsed.uapi_header]
+ headers = ['uapi/' + parsed.uapi_header]
+ else:
+ cw.p('#include <stdlib.h>')
+ cw.p('#include <string.h>')
+ if args.header:
+ cw.p('#include <linux/types.h>')
+ else:
+ cw.p(f'#include "{parsed.name}-user.h"')
+ cw.p('#include "ynl.h"')
+ headers = [parsed.uapi_header]
for definition in parsed['definitions']:
if 'header' in definition:
headers.append(definition['header'])
@@ -2111,9 +2368,6 @@ def main():
if args.mode == "user":
if not args.header:
- cw.p("#include <stdlib.h>")
- cw.p("#include <stdio.h>")
- cw.p("#include <string.h>")
cw.p("#include <libmnl/libmnl.h>")
cw.p("#include <linux/genetlink.h>")
cw.nl()
@@ -2121,6 +2375,8 @@ def main():
cw.p(f'#include "{one}"')
else:
cw.p('struct ynl_sock;')
+ cw.nl()
+ render_user_family(parsed, cw, True)
cw.nl()
if args.mode == "kernel":
@@ -2144,7 +2400,7 @@ def main():
if parsed.kernel_policy in {'per-op', 'split'}:
for op_name, op in parsed.ops.items():
if 'do' in op and 'event' not in op:
- ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ ri = RenderInfo(cw, parsed, args.mode, op, "do")
print_req_policy_fwd(cw, ri.struct['request'], ri=ri)
cw.nl()
@@ -2173,7 +2429,7 @@ def main():
for op_mode in ['do', 'dump']:
if op_mode in op and 'request' in op[op_mode]:
cw.p(f"/* {op.enum_name} - {op_mode} */")
- ri = RenderInfo(cw, parsed, args.mode, op, op_name, op_mode)
+ ri = RenderInfo(cw, parsed, args.mode, op, op_mode)
print_req_policy(cw, ri.struct['request'], ri=ri)
cw.nl()
@@ -2182,11 +2438,18 @@ def main():
print_kernel_family_struct_src(parsed, cw)
if args.mode == "user":
- has_ntf = False
if args.header:
+ cw.p('/* Enums */')
+ put_op_name_fwd(parsed, cw)
+
+ for name, const in parsed.consts.items():
+ if isinstance(const, EnumSet):
+ put_enum_to_str_fwd(parsed, cw, const)
+ cw.nl()
+
cw.p('/* Common nested types */')
- for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
- ri = RenderInfo(cw, parsed, args.mode, "", "", "", attr_set)
+ for attr_set, struct in parsed.pure_nested_structs.items():
+ ri = RenderInfo(cw, parsed, args.mode, "", "", attr_set)
print_type_full(ri, struct)
for op_name, op in parsed.ops.items():
@@ -2194,7 +2457,7 @@ def main():
if 'do' in op and 'event' not in op:
cw.p(f"/* {op.enum_name} - do */")
- ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ ri = RenderInfo(cw, parsed, args.mode, op, "do")
print_req_type(ri)
print_req_type_helpers(ri)
cw.nl()
@@ -2206,7 +2469,7 @@ def main():
if 'dump' in op:
cw.p(f"/* {op.enum_name} - dump */")
- ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'dump')
+ ri = RenderInfo(cw, parsed, args.mode, op, 'dump')
if 'request' in op['dump']:
print_req_type(ri)
print_req_type_helpers(ri)
@@ -2216,39 +2479,41 @@ def main():
print_dump_prototype(ri)
cw.nl()
- if 'notify' in op:
+ if op.has_ntf:
cw.p(f"/* {op.enum_name} - notify */")
- ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'notify')
- has_ntf = True
+ ri = RenderInfo(cw, parsed, args.mode, op, 'notify')
if not ri.type_consistent:
- raise Exception('Only notifications with consistent types supported')
+ raise Exception(f'Only notifications with consistent types supported ({op.name})')
print_wrapped_type(ri)
+ for op_name, op in parsed.ntfs.items():
if 'event' in op:
- ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'event')
+ ri = RenderInfo(cw, parsed, args.mode, op, 'event')
cw.p(f"/* {op.enum_name} - event */")
print_rsp_type(ri)
cw.nl()
print_wrapped_type(ri)
-
- if has_ntf:
- cw.p('/* --------------- Common notification parsing --------------- */')
- print_ntf_parse_prototype(parsed, cw)
cw.nl()
else:
- cw.p('/* Policies */')
- for name, _ in parsed.attr_sets.items():
- struct = Struct(parsed, name)
- put_typol_fwd(cw, struct)
+ cw.p('/* Enums */')
+ put_op_name(parsed, cw)
+
+ for name, const in parsed.consts.items():
+ if isinstance(const, EnumSet):
+ put_enum_to_str(parsed, cw, const)
cw.nl()
- for name, _ in parsed.attr_sets.items():
+ cw.p('/* Policies */')
+ for name in parsed.pure_nested_structs:
+ struct = Struct(parsed, name)
+ put_typol(cw, struct)
+ for name in parsed.root_sets:
struct = Struct(parsed, name)
put_typol(cw, struct)
cw.p('/* Common nested types */')
- for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
- ri = RenderInfo(cw, parsed, args.mode, "", "", "", attr_set)
+ for attr_set, struct in parsed.pure_nested_structs.items():
+ ri = RenderInfo(cw, parsed, args.mode, "", "", attr_set)
free_rsp_nested(ri, struct)
if struct.request:
@@ -2260,7 +2525,8 @@ def main():
cw.p(f"/* ============== {op.enum_name} ============== */")
if 'do' in op and 'event' not in op:
cw.p(f"/* {op.enum_name} - do */")
- ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ ri = RenderInfo(cw, parsed, args.mode, op, "do")
+ print_req_free(ri)
print_rsp_free(ri)
parse_rsp_msg(ri)
print_req(ri)
@@ -2268,34 +2534,31 @@ def main():
if 'dump' in op:
cw.p(f"/* {op.enum_name} - dump */")
- ri = RenderInfo(cw, parsed, args.mode, op, op_name, "dump")
+ ri = RenderInfo(cw, parsed, args.mode, op, "dump")
if not ri.type_consistent:
parse_rsp_msg(ri, deref=True)
print_dump_type_free(ri)
print_dump(ri)
cw.nl()
- if 'notify' in op:
+ if op.has_ntf:
cw.p(f"/* {op.enum_name} - notify */")
- ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'notify')
- has_ntf = True
+ ri = RenderInfo(cw, parsed, args.mode, op, 'notify')
if not ri.type_consistent:
- raise Exception('Only notifications with consistent types supported')
+ raise Exception(f'Only notifications with consistent types supported ({op.name})')
print_ntf_type_free(ri)
+ for op_name, op in parsed.ntfs.items():
if 'event' in op:
cw.p(f"/* {op.enum_name} - event */")
- has_ntf = True
- ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ ri = RenderInfo(cw, parsed, args.mode, op, "do")
parse_rsp_msg(ri)
- ri = RenderInfo(cw, parsed, args.mode, op, op_name, "event")
+ ri = RenderInfo(cw, parsed, args.mode, op, "event")
print_ntf_type_free(ri)
-
- if has_ntf:
- cw.p('/* --------------- Common notification parsing --------------- */')
- print_ntf_type_parse(parsed, cw, args.mode)
+ cw.nl()
+ render_user_family(parsed, cw, False)
if args.header:
cw.p(f'#endif /* {hdr_prot} */')
diff --git a/tools/net/ynl/ynl-regen.sh b/tools/net/ynl/ynl-regen.sh
index 74f5de1c2399..8d4ca6a50582 100755
--- a/tools/net/ynl/ynl-regen.sh
+++ b/tools/net/ynl/ynl-regen.sh
@@ -14,11 +14,12 @@ done
KDIR=$(dirname $(dirname $(dirname $(dirname $(realpath $0)))))
-files=$(git grep --files-with-matches '^/\* YNL-GEN \(kernel\|uapi\)')
+files=$(git grep --files-with-matches '^/\* YNL-GEN \(kernel\|uapi\|user\)')
for f in $files; do
# params: 0 1 2 3
# $YAML YNL-GEN kernel $mode
params=( $(git grep -B1 -h '/\* YNL-GEN' $f | sed 's@/\*\(.*\)\*/@\1@') )
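+ # optional extra generator flags recorded in the file's /* YNL-ARG ... */ line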
+ args=$(sed -n 's@/\* YNL-ARG \(.*\) \*/@\1@p' $f)
if [ $f -nt ${params[0]} -a -z "$force" ]; then
echo -e "\tSKIP $f"
@@ -26,5 +27,6 @@ for f in $files; do
fi
echo -e "\tGEN ${params[2]}\t$f"
- $TOOL --mode ${params[2]} --${params[3]} --spec $KDIR/${params[0]} -o $f
+ $TOOL --mode ${params[2]} --${params[3]} --spec $KDIR/${params[0]} \
+ $args -o $f
done
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index 13c3a237b9c9..3bef212a24d7 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -318,7 +318,6 @@ struct ucred {
#define MSG_MORE 0x8000 /* Sender will send more */
#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */
-#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c
index ea68db08b8e7..aa9934020232 100644
--- a/tools/perf/trace/beauty/msg_flags.c
+++ b/tools/perf/trace/beauty/msg_flags.c
@@ -8,8 +8,8 @@
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif
-#ifndef MSG_SENDPAGE_NOTLAST
-#define MSG_SENDPAGE_NOTLAST 0x20000
+#ifndef MSG_SPLICE_PAGES
+#define MSG_SPLICE_PAGES 0x8000000
#endif
#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
@@ -50,7 +50,7 @@ static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
P_MSG_FLAG(NOSIGNAL);
P_MSG_FLAG(MORE);
P_MSG_FLAG(WAITFORONE);
- P_MSG_FLAG(SENDPAGE_NOTLAST);
+ P_MSG_FLAG(SPLICE_PAGES);
P_MSG_FLAG(FASTOPEN);
P_MSG_FLAG(CMSG_CLOEXEC);
#undef P_MSG_FLAG
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 0a6837f97c32..08adc805878b 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -1,33 +1,6 @@
-bloom_filter_map # libbpf: prog 'check_bloom': failed to attach: ERROR: strerror_r(-524)=22
-bpf_cookie/lsm
-bpf_cookie/multi_kprobe_attach_api
-bpf_cookie/multi_kprobe_link_api
-bpf_cookie/trampoline
-bpf_loop/check_callback_fn_stop # link unexpected error: -524
-bpf_loop/check_invalid_flags
-bpf_loop/check_nested_calls
-bpf_loop/check_non_constant_callback
-bpf_loop/check_nr_loops
-bpf_loop/check_null_callback_ctx
-bpf_loop/check_stack
-bpf_mod_race # bpf_mod_kfunc_race__attach unexpected error: -524 (errno 524)
-bpf_tcp_ca/dctcp_fallback
-btf_dump/btf_dump: var_data # find type id unexpected find type id: actual -2 < expected 0
-cgroup_hierarchical_stats # attach unexpected error: -524 (errno 524)
-d_path/basic # setup attach failed: -524
-deny_namespace # attach unexpected error: -524 (errno 524)
-fentry_fexit # fentry_attach unexpected error: -1 (errno 524)
-fentry_test # fentry_attach unexpected error: -1 (errno 524)
-fexit_sleep # fexit_attach fexit attach failed: -1
-fexit_stress # fexit attach unexpected fexit attach: actual -524 < expected 0
-fexit_test # fexit_attach unexpected error: -1 (errno 524)
-get_func_args_test # get_func_args_test__attach unexpected error: -524 (errno 524) (trampoline)
-get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (errno 524) (trampoline)
-htab_update/reenter_update
-kfree_skb # attach fentry unexpected error: -524 (trampoline)
-kfunc_call/subprog # extern (var ksym) 'bpf_prog_active': not found in kernel BTF
-kfunc_call/subprog_lskel # skel unexpected error: -2
-kfunc_dynptr_param/dynptr_data_null # libbpf: prog 'dynptr_data_null': failed to attach: ERROR: strerror_r(-524)=22
+bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+fexit_sleep # The test never returns. The remaining tests cannot start.
kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test/attach_api_addrs # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test/attach_api_pattern # bpf_program__attach_kprobe_multi_opts unexpected error: -95
@@ -35,51 +8,5 @@ kprobe_multi_test/attach_api_syms # bpf_program__attach_kprobe_mu
kprobe_multi_test/bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test/link_api_addrs # link_fd unexpected link_fd: actual -95 < expected 0
kprobe_multi_test/link_api_syms # link_fd unexpected link_fd: actual -95 < expected 0
-kprobe_multi_test/skel_api # kprobe_multi__attach unexpected error: -524 (errno 524)
-ksyms_module/libbpf # 'bpf_testmod_ksym_percpu': not found in kernel BTF
-ksyms_module/lskel # test_ksyms_module_lskel__open_and_load unexpected error: -2
-libbpf_get_fd_by_id_opts # test_libbpf_get_fd_by_id_opts__attach unexpected error: -524 (errno 524)
-linked_list
-lookup_key # test_lookup_key__attach unexpected error: -524 (errno 524)
-lru_bug # lru_bug__attach unexpected error: -524 (errno 524)
-modify_return # modify_return__attach failed unexpected error: -524 (errno 524)
-module_attach # skel_attach skeleton attach failed: -524
-module_fentry_shadow # bpf_link_create unexpected bpf_link_create: actual -524 < expected 0
-mptcp/base # run_test mptcp unexpected error: -524 (errno 524)
-netcnt # packets unexpected packets: actual 10001 != expected 10000
-rcu_read_lock # failed to attach: ERROR: strerror_r(-524)=22
-recursion # skel_attach unexpected error: -524 (errno 524)
-ringbuf # skel_attach skeleton attachment failed: -1
-setget_sockopt # attach_cgroup unexpected error: -524
-sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (errno 524)
-skc_to_unix_sock # could not attach BPF object unexpected error: -524 (errno 524)
-socket_cookie # prog_attach unexpected error: -524
-stacktrace_build_id # compare_stack_ips stackmap vs. stack_amap err -1 errno 2
-task_local_storage/exit_creds # skel_attach unexpected error: -524 (errno 524)
-task_local_storage/recursion # skel_attach unexpected error: -524 (errno 524)
-test_bprm_opts # attach attach failed: -524
-test_ima # attach attach failed: -524
-test_local_storage # attach lsm attach failed: -524
-test_lsm # test_lsm_first_attach unexpected error: -524 (errno 524)
-test_overhead # attach_fentry unexpected error: -524
-timer # timer unexpected error: -524 (errno 524)
-timer_crash # timer_crash__attach unexpected error: -524 (errno 524)
-timer_mim # timer_mim unexpected error: -524 (errno 524)
-trace_printk # trace_printk__attach unexpected error: -1 (errno 524)
-trace_vprintk # trace_vprintk__attach unexpected error: -1 (errno 524)
-tracing_struct # tracing_struct__attach unexpected error: -524 (errno 524)
-trampoline_count # attach_prog unexpected error: -524
-unpriv_bpf_disabled # skel_attach unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_post_misaligned # misaligned_skel unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_post_producer_wrong_offset
-user_ringbuf/test_user_ringbuf_post_larger_than_ringbuf_sz
-user_ringbuf/test_user_ringbuf_basic # ringbuf_basic_skel unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_sample_full_ring_buffer
-user_ringbuf/test_user_ringbuf_post_alignment_autoadjust
-user_ringbuf/test_user_ringbuf_overfill
-user_ringbuf/test_user_ringbuf_discards_properly_ignored
-user_ringbuf/test_user_ringbuf_loop
-user_ringbuf/test_user_ringbuf_msg_protocol
-user_ringbuf/test_user_ringbuf_blocking_reserve
-verify_pkcs7_sig # test_verify_pkcs7_sig__attach unexpected error: -524 (errno 524)
-vmlinux # skel_attach skeleton attach failed: -524
+kprobe_multi_test/skel_api # libbpf: failed to load BPF skeleton 'kprobe_multi': -3
+module_attach # prog 'kprobe_multi': failed to auto-attach: -95
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index c7463f3ec3c0..5061d9e24c16 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -26,3 +26,4 @@ user_ringbuf # failed to find kernel BTF type ID of
verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
xdp_metadata # JIT does not support calling kernel function (kfunc)
+test_task_under_cgroup # JIT does not support calling kernel function (kfunc)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 28d2c77262be..538df8fb8c42 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -88,8 +88,7 @@ TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
xdp_features
-TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read $(OUTPUT)/sign-file
-TEST_GEN_FILES += liburandom_read.so
+TEST_GEN_FILES += liburandom_read.so urandom_read sign-file
# Emit succinct information message describing current building step
# $1 - generic step name (e.g., CC, LINK, etc);
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index d9c080ac1796..41fe5a82b88b 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -17,7 +17,7 @@ struct env env = {
.duration_sec = 5,
.affinity = false,
.quiet = false,
- .consumer_cnt = 1,
+ .consumer_cnt = 0,
.producer_cnt = 1,
};
@@ -441,12 +441,14 @@ static void setup_timer()
static void set_thread_affinity(pthread_t thread, int cpu)
{
cpu_set_t cpuset;
+ int err;
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
- if (pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset)) {
+ err = pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset);
+ if (err) {
fprintf(stderr, "setting affinity to CPU #%d failed: %d\n",
- cpu, errno);
+ cpu, -err);
exit(1);
}
}
@@ -467,7 +469,7 @@ static int next_cpu(struct cpu_set *cpu_set)
exit(1);
}
- return cpu_set->next_cpu++;
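+ /* wrap around in case more worker threads than CPUs were requested */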
+ return cpu_set->next_cpu++ % env.nr_cpus;
}
static struct bench_state {
@@ -605,7 +607,7 @@ static void setup_benchmark(void)
bench->consumer_thread, (void *)(long)i);
if (err) {
fprintf(stderr, "failed to create consumer thread #%d: %d\n",
- i, -errno);
+ i, -err);
exit(1);
}
if (env.affinity)
@@ -624,7 +626,7 @@ static void setup_benchmark(void)
bench->producer_thread, (void *)(long)i);
if (err) {
fprintf(stderr, "failed to create producer thread #%d: %d\n",
- i, -errno);
+ i, -err);
exit(1);
}
if (env.affinity)
@@ -657,6 +659,7 @@ static void collect_measurements(long delta_ns) {
int main(int argc, char **argv)
{
+ env.nr_cpus = get_nprocs();
parse_cmdline_args_init(argc, argv);
if (env.list) {
diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h
index 402729c6a3ac..7ff32be3d730 100644
--- a/tools/testing/selftests/bpf/bench.h
+++ b/tools/testing/selftests/bpf/bench.h
@@ -27,6 +27,7 @@ struct env {
bool quiet;
int consumer_cnt;
int producer_cnt;
+ int nr_cpus;
struct cpu_set prod_cpus;
struct cpu_set cons_cpus;
};
diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
index 7c8ccc108313..e289dd1a14ee 100644
--- a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
+++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
@@ -107,9 +107,9 @@ const struct argp bench_bloom_map_argp = {
static void validate(void)
{
- if (env.consumer_cnt != 1) {
+ if (env.consumer_cnt != 0) {
fprintf(stderr,
- "The bloom filter benchmarks do not support multi-consumer use\n");
+ "The bloom filter benchmarks do not support consumer\n");
exit(1);
}
}
@@ -421,18 +421,12 @@ static void measure(struct bench_res *res)
last_false_hits = total_false_hits;
}
-static void *consumer(void *input)
-{
- return NULL;
-}
-
const struct bench bench_bloom_lookup = {
.name = "bloom-lookup",
.argp = &bench_bloom_map_argp,
.validate = validate,
.setup = bloom_lookup_setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -444,7 +438,6 @@ const struct bench bench_bloom_update = {
.validate = validate,
.setup = bloom_update_setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -456,7 +449,6 @@ const struct bench bench_bloom_false_positive = {
.validate = validate,
.setup = false_positive_setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = false_hits_report_progress,
.report_final = false_hits_report_final,
@@ -468,7 +460,6 @@ const struct bench bench_hashmap_without_bloom = {
.validate = validate,
.setup = hashmap_no_bloom_setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -480,7 +471,6 @@ const struct bench bench_hashmap_with_bloom = {
.validate = validate,
.setup = hashmap_with_bloom_setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c
index 75abe8137b6c..ee1dc12c5e5e 100644
--- a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c
+++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c
@@ -14,8 +14,8 @@ static struct ctx {
static void validate(void)
{
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
}
@@ -30,11 +30,6 @@ static void *producer(void *input)
return NULL;
}
-static void *consumer(void *input)
-{
- return NULL;
-}
-
static void measure(struct bench_res *res)
{
}
@@ -88,7 +83,6 @@ const struct bench bench_bpf_hashmap_full_update = {
.validate = validate,
.setup = setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = NULL,
.report_final = hashmap_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c
index 8dbb02f75cff..279ff1b8b5b2 100644
--- a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c
+++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c
@@ -113,8 +113,8 @@ const struct argp bench_hashmap_lookup_argp = {
static void validate(void)
{
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
@@ -134,11 +134,6 @@ static void *producer(void *input)
return NULL;
}
-static void *consumer(void *input)
-{
- return NULL;
-}
-
static void measure(struct bench_res *res)
{
}
@@ -276,7 +271,6 @@ const struct bench bench_bpf_hashmap_lookup = {
.validate = validate,
.setup = setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = NULL,
.report_final = hashmap_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c
index d8a0394e10b1..a705cfb2bccc 100644
--- a/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c
+++ b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c
@@ -47,8 +47,8 @@ const struct argp bench_bpf_loop_argp = {
static void validate(void)
{
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
}
@@ -62,11 +62,6 @@ static void *producer(void *input)
return NULL;
}
-static void *consumer(void *input)
-{
- return NULL;
-}
-
static void measure(struct bench_res *res)
{
res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
@@ -99,7 +94,6 @@ const struct bench bench_bpf_loop = {
.validate = validate,
.setup = setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = ops_report_progress,
.report_final = ops_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_count.c b/tools/testing/selftests/bpf/benchs/bench_count.c
index 078972ce208e..ba89ed3936b7 100644
--- a/tools/testing/selftests/bpf/benchs/bench_count.c
+++ b/tools/testing/selftests/bpf/benchs/bench_count.c
@@ -18,11 +18,6 @@ static void *count_global_producer(void *input)
return NULL;
}
-static void *count_global_consumer(void *input)
-{
- return NULL;
-}
-
static void count_global_measure(struct bench_res *res)
{
struct count_global_ctx *ctx = &count_global_ctx;
@@ -40,7 +35,7 @@ static void count_local_setup(void)
{
struct count_local_ctx *ctx = &count_local_ctx;
- ctx->hits = calloc(env.consumer_cnt, sizeof(*ctx->hits));
+ ctx->hits = calloc(env.producer_cnt, sizeof(*ctx->hits));
if (!ctx->hits)
exit(1);
}
@@ -56,11 +51,6 @@ static void *count_local_producer(void *input)
return NULL;
}
-static void *count_local_consumer(void *input)
-{
- return NULL;
-}
-
static void count_local_measure(struct bench_res *res)
{
struct count_local_ctx *ctx = &count_local_ctx;
@@ -74,7 +64,6 @@ static void count_local_measure(struct bench_res *res)
const struct bench bench_count_global = {
.name = "count-global",
.producer_thread = count_global_producer,
- .consumer_thread = count_global_consumer,
.measure = count_global_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -84,7 +73,6 @@ const struct bench bench_count_local = {
.name = "count-local",
.setup = count_local_setup,
.producer_thread = count_local_producer,
- .consumer_thread = count_local_consumer,
.measure = count_local_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage.c b/tools/testing/selftests/bpf/benchs/bench_local_storage.c
index d4b2817306d4..452499428ceb 100644
--- a/tools/testing/selftests/bpf/benchs/bench_local_storage.c
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage.c
@@ -74,8 +74,8 @@ static void validate(void)
fprintf(stderr, "benchmark doesn't support multi-producer!\n");
exit(1);
}
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
@@ -230,11 +230,6 @@ static inline void trigger_bpf_program(void)
syscall(__NR_getpgid);
}
-static void *consumer(void *input)
-{
- return NULL;
-}
-
static void *producer(void *input)
{
while (true)
@@ -259,7 +254,6 @@ const struct bench bench_local_storage_cache_seq_get = {
.validate = validate,
.setup = local_storage_cache_get_setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = local_storage_report_progress,
.report_final = local_storage_report_final,
@@ -271,7 +265,6 @@ const struct bench bench_local_storage_cache_interleaved_get = {
.validate = validate,
.setup = local_storage_cache_get_interleaved_setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = local_storage_report_progress,
.report_final = local_storage_report_final,
@@ -283,7 +276,6 @@ const struct bench bench_local_storage_cache_hashmap_control = {
.validate = validate,
.setup = hashmap_setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = local_storage_report_progress,
.report_final = local_storage_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
index cff703f90e95..b36de42ee4d9 100644
--- a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
@@ -71,7 +71,7 @@ const struct argp bench_local_storage_create_argp = {
static void validate(void)
{
- if (env.consumer_cnt > 1) {
+ if (env.consumer_cnt != 0) {
fprintf(stderr,
"local-storage-create benchmark does not need consumer\n");
exit(1);
@@ -143,11 +143,6 @@ static void measure(struct bench_res *res)
res->drops = atomic_swap(&skel->bss->kmalloc_cnts, 0);
}
-static void *consumer(void *input)
-{
- return NULL;
-}
-
static void *sk_producer(void *input)
{
struct thread *t = &threads[(long)(input)];
@@ -257,7 +252,6 @@ const struct bench bench_local_storage_create = {
.validate = validate,
.setup = setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = report_progress,
.report_final = report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
index d5eb5587f2aa..edf0b00418c1 100644
--- a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
@@ -72,8 +72,8 @@ static void validate(void)
fprintf(stderr, "benchmark doesn't support multi-producer!\n");
exit(1);
}
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
@@ -197,11 +197,6 @@ static void measure(struct bench_res *res)
ctx.prev_kthread_stime = ticks;
}
-static void *consumer(void *input)
-{
- return NULL;
-}
-
static void *producer(void *input)
{
while (true)
@@ -262,7 +257,6 @@ const struct bench bench_local_storage_tasks_trace = {
.validate = validate,
.setup = local_storage_tasks_trace_setup,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = report_progress,
.report_final = report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_rename.c b/tools/testing/selftests/bpf/benchs/bench_rename.c
index 3c203b6d6a6e..bf66893c7a33 100644
--- a/tools/testing/selftests/bpf/benchs/bench_rename.c
+++ b/tools/testing/selftests/bpf/benchs/bench_rename.c
@@ -17,8 +17,8 @@ static void validate(void)
fprintf(stderr, "benchmark doesn't support multi-producer!\n");
exit(1);
}
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
}
@@ -106,17 +106,11 @@ static void setup_fexit(void)
attach_bpf(ctx.skel->progs.prog5);
}
-static void *consumer(void *input)
-{
- return NULL;
-}
-
const struct bench bench_rename_base = {
.name = "rename-base",
.validate = validate,
.setup = setup_base,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -127,7 +121,6 @@ const struct bench bench_rename_kprobe = {
.validate = validate,
.setup = setup_kprobe,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -138,7 +131,6 @@ const struct bench bench_rename_kretprobe = {
.validate = validate,
.setup = setup_kretprobe,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -149,7 +141,6 @@ const struct bench bench_rename_rawtp = {
.validate = validate,
.setup = setup_rawtp,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -160,7 +151,6 @@ const struct bench bench_rename_fentry = {
.validate = validate,
.setup = setup_fentry,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -171,7 +161,6 @@ const struct bench bench_rename_fexit = {
.validate = validate,
.setup = setup_fexit,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
index fc91fdac4faa..3ca14ad36607 100644
--- a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
+++ b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
@@ -96,7 +96,7 @@ static inline void bufs_trigger_batch(void)
static void bufs_validate(void)
{
if (env.consumer_cnt != 1) {
- fprintf(stderr, "rb-libbpf benchmark doesn't support multi-consumer!\n");
+ fprintf(stderr, "rb-libbpf benchmark needs one consumer!\n");
exit(1);
}
diff --git a/tools/testing/selftests/bpf/benchs/bench_strncmp.c b/tools/testing/selftests/bpf/benchs/bench_strncmp.c
index d3fad2ba6916..a5e1428fd7a0 100644
--- a/tools/testing/selftests/bpf/benchs/bench_strncmp.c
+++ b/tools/testing/selftests/bpf/benchs/bench_strncmp.c
@@ -50,8 +50,8 @@ const struct argp bench_strncmp_argp = {
static void strncmp_validate(void)
{
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "strncmp benchmark doesn't support multi-consumer!\n");
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "strncmp benchmark doesn't support consumer!\n");
exit(1);
}
}
@@ -128,11 +128,6 @@ static void *strncmp_producer(void *ctx)
return NULL;
}
-static void *strncmp_consumer(void *ctx)
-{
- return NULL;
-}
-
static void strncmp_measure(struct bench_res *res)
{
res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
@@ -144,7 +139,6 @@ const struct bench bench_strncmp_no_helper = {
.validate = strncmp_validate,
.setup = strncmp_no_helper_setup,
.producer_thread = strncmp_producer,
- .consumer_thread = strncmp_consumer,
.measure = strncmp_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -156,7 +150,6 @@ const struct bench bench_strncmp_helper = {
.validate = strncmp_validate,
.setup = strncmp_helper_setup,
.producer_thread = strncmp_producer,
- .consumer_thread = strncmp_consumer,
.measure = strncmp_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
index 0c481de2833d..dbd362771d6a 100644
--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
+++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -13,8 +13,8 @@ static struct counter base_hits;
static void trigger_validate(void)
{
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
}
@@ -103,11 +103,6 @@ static void trigger_fmodret_setup(void)
attach_bpf(ctx.skel->progs.bench_trigger_fmodret);
}
-static void *trigger_consumer(void *input)
-{
- return NULL;
-}
-
/* make sure call is not inlined and not avoided by compiler, so __weak and
* inline asm volatile in the body of the function
*
@@ -205,7 +200,6 @@ const struct bench bench_trig_base = {
.name = "trig-base",
.validate = trigger_validate,
.producer_thread = trigger_base_producer,
- .consumer_thread = trigger_consumer,
.measure = trigger_base_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -216,7 +210,6 @@ const struct bench bench_trig_tp = {
.validate = trigger_validate,
.setup = trigger_tp_setup,
.producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -227,7 +220,6 @@ const struct bench bench_trig_rawtp = {
.validate = trigger_validate,
.setup = trigger_rawtp_setup,
.producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -238,7 +230,6 @@ const struct bench bench_trig_kprobe = {
.validate = trigger_validate,
.setup = trigger_kprobe_setup,
.producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -249,7 +240,6 @@ const struct bench bench_trig_fentry = {
.validate = trigger_validate,
.setup = trigger_fentry_setup,
.producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -260,7 +250,6 @@ const struct bench bench_trig_fentry_sleep = {
.validate = trigger_validate,
.setup = trigger_fentry_sleep_setup,
.producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -271,7 +260,6 @@ const struct bench bench_trig_fmodret = {
.validate = trigger_validate,
.setup = trigger_fmodret_setup,
.producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -281,7 +269,6 @@ const struct bench bench_trig_uprobe_base = {
.name = "trig-uprobe-base",
.setup = NULL, /* no uprobe/uretprobe is attached */
.producer_thread = uprobe_base_producer,
- .consumer_thread = trigger_consumer,
.measure = trigger_base_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -291,7 +278,6 @@ const struct bench bench_trig_uprobe_with_nop = {
.name = "trig-uprobe-with-nop",
.setup = uprobe_setup_with_nop,
.producer_thread = uprobe_producer_with_nop,
- .consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -301,7 +287,6 @@ const struct bench bench_trig_uretprobe_with_nop = {
.name = "trig-uretprobe-with-nop",
.setup = uretprobe_setup_with_nop,
.producer_thread = uprobe_producer_with_nop,
- .consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -311,7 +296,6 @@ const struct bench bench_trig_uprobe_without_nop = {
.name = "trig-uprobe-without-nop",
.setup = uprobe_setup_without_nop,
.producer_thread = uprobe_producer_without_nop,
- .consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -321,7 +305,6 @@ const struct bench bench_trig_uretprobe_without_nop = {
.name = "trig-uretprobe-without-nop",
.setup = uretprobe_setup_without_nop,
.producer_thread = uprobe_producer_without_nop,
- .consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
index ada028aa9007..91e3567962ff 100755
--- a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
+++ b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
@@ -4,46 +4,48 @@ source ./benchs/run_common.sh
set -eufo pipefail
+RUN_RB_BENCH="$RUN_BENCH -c1"
+
header "Single-producer, parallel producer"
for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
- summarize $b "$($RUN_BENCH $b)"
+ summarize $b "$($RUN_RB_BENCH $b)"
done
header "Single-producer, parallel producer, sampled notification"
for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
- summarize $b "$($RUN_BENCH --rb-sampled $b)"
+ summarize $b "$($RUN_RB_BENCH --rb-sampled $b)"
done
header "Single-producer, back-to-back mode"
for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
- summarize $b "$($RUN_BENCH --rb-b2b $b)"
- summarize $b-sampled "$($RUN_BENCH --rb-sampled --rb-b2b $b)"
+ summarize $b "$($RUN_RB_BENCH --rb-b2b $b)"
+ summarize $b-sampled "$($RUN_RB_BENCH --rb-sampled --rb-b2b $b)"
done
header "Ringbuf back-to-back, effect of sample rate"
for b in 1 5 10 25 50 100 250 500 1000 2000 3000; do
- summarize "rb-sampled-$b" "$($RUN_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rate $b rb-custom)"
+ summarize "rb-sampled-$b" "$($RUN_RB_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rate $b rb-custom)"
done
header "Perfbuf back-to-back, effect of sample rate"
for b in 1 5 10 25 50 100 250 500 1000 2000 3000; do
- summarize "pb-sampled-$b" "$($RUN_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rate $b pb-custom)"
+ summarize "pb-sampled-$b" "$($RUN_RB_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rate $b pb-custom)"
done
header "Ringbuf back-to-back, reserve+commit vs output"
-summarize "reserve" "$($RUN_BENCH --rb-b2b rb-custom)"
-summarize "output" "$($RUN_BENCH --rb-b2b --rb-use-output rb-custom)"
+summarize "reserve" "$($RUN_RB_BENCH --rb-b2b rb-custom)"
+summarize "output" "$($RUN_RB_BENCH --rb-b2b --rb-use-output rb-custom)"
header "Ringbuf sampled, reserve+commit vs output"
-summarize "reserve-sampled" "$($RUN_BENCH --rb-sampled rb-custom)"
-summarize "output-sampled" "$($RUN_BENCH --rb-sampled --rb-use-output rb-custom)"
+summarize "reserve-sampled" "$($RUN_RB_BENCH --rb-sampled rb-custom)"
+summarize "output-sampled" "$($RUN_RB_BENCH --rb-sampled --rb-use-output rb-custom)"
header "Single-producer, consumer/producer competing on the same CPU, low batch count"
for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
- summarize $b "$($RUN_BENCH --rb-batch-cnt 1 --rb-sample-rate 1 --prod-affinity 0 --cons-affinity 0 $b)"
+ summarize $b "$($RUN_RB_BENCH --rb-batch-cnt 1 --rb-sample-rate 1 --prod-affinity 0 --cons-affinity 0 $b)"
done
header "Ringbuf, multi-producer contention"
for b in 1 2 3 4 8 12 16 20 24 28 32 36 40 44 48 52; do
- summarize "rb-libbpf nr_prod $b" "$($RUN_BENCH -p$b --rb-batch-cnt 50 rb-libbpf)"
+ summarize "rb-libbpf nr_prod $b" "$($RUN_RB_BENCH -p$b --rb-batch-cnt 50 rb-libbpf)"
done
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
index 8c993ec8ceea..642dda0e758a 100644
--- a/tools/testing/selftests/bpf/bpf_kfuncs.h
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -35,4 +35,10 @@ extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset,
void *buffer, __u32 buffer__szk) __ksym;
+extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym;
+extern bool bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym;
+extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym;
+extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym;
+extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym;
+
#endif
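
The declarations added above expose the new dynptr kfuncs to BPF programs. A hedged sketch of how they compose; only the kfunc signatures come from this header, while the program type, the way the dynptr was obtained, and the helper name are assumptions:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_kfuncs.h"

/* Assumes ptr was already initialized elsewhere (e.g. a ringbuf or skb
 * dynptr) and that this helper is called from a loaded BPF program. */
static int trim_header(struct bpf_dynptr *ptr, __u32 hdr_len)
{
	__u32 size;

	if (bpf_dynptr_is_null(ptr))
		return -1;

	size = bpf_dynptr_size(ptr);
	if (size < hdr_len)
		return -1;

	/* shrink the view so it starts right after the header */
	return bpf_dynptr_adjust(ptr, hdr_len, size);
}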
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 52785ba671e6..aaf6ef1201c7 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -9,6 +9,7 @@
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include "bpf_testmod.h"
+#include "bpf_testmod_kfunc.h"
#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"
@@ -190,8 +191,6 @@ noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
return a + b + c;
}
-__diag_pop();
-
int bpf_testmod_fentry_ok;
noinline ssize_t
@@ -272,6 +271,14 @@ bpf_testmod_test_write(struct file *file, struct kobject *kobj,
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);
+noinline int bpf_fentry_shadow_test(int a)
+{
+ return a + 2;
+}
+EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
+
+__diag_pop();
+
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.attr = { .name = "bpf_testmod", .mode = 0666, },
.read = bpf_testmod_test_read,
@@ -289,8 +296,171 @@ static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
.set = &bpf_testmod_common_kfunc_ids,
};
+__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
+{
+ return a + b + c + d;
+}
+
+__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
+{
+ return a + b;
+}
+
+__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
+{
+ return sk;
+}
+
+__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
+{
+ /* Provoke the compiler to assume that the caller has sign-extended a,
+ * b and c on platforms where this is required (e.g. s390x).
+ */
+ return (long)a + (long)b + (long)c + d;
+}
+
+static struct prog_test_ref_kfunc prog_test_struct = {
+ .a = 42,
+ .b = 108,
+ .next = &prog_test_struct,
+ .cnt = REFCOUNT_INIT(1),
+};
+
+__bpf_kfunc struct prog_test_ref_kfunc *
+bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
+{
+ refcount_inc(&prog_test_struct.cnt);
+ return &prog_test_struct;
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
+{
+ WARN_ON_ONCE(1);
+}
+
+__bpf_kfunc struct prog_test_member *
+bpf_kfunc_call_memb_acquire(void)
+{
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
+{
+ if (size > 2 * sizeof(int))
+ return NULL;
+
+ return (int *)p;
+}
+
+__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
+ const int rdwr_buf_size)
+{
+ return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
+}
+
+__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
+ const int rdonly_buf_size)
+{
+ return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
+}
+
+/* The next two kfuncs can't really be used for testing, except to ensure
+ * that the verifier rejects the call.
+ * Acquire functions must return struct pointers, so these ones are
+ * expected to fail.
+ */
+__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
+ const int rdonly_buf_size)
+{
+ return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
+}
+
+__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
+{
+ /* p != NULL, but p->cnt could be 0 */
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
+{
+}
+
+__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
+{
+ return arg;
+}
+
BTF_SET8_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_SET8_END(bpf_testmod_check_kfunc_ids)
static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
@@ -298,12 +468,6 @@ static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
.set = &bpf_testmod_check_kfunc_ids,
};
-noinline int bpf_fentry_shadow_test(int a)
-{
- return a + 2;
-}
-EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
-
extern int bpf_fentry_test1(int a);
static int bpf_testmod_init(void)
@@ -312,6 +476,8 @@ static int bpf_testmod_init(void)
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
if (ret < 0)
return ret;
if (bpf_fentry_test1(0) < 0)
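
With the kfunc set now also registered for BPF_PROG_TYPE_TRACING and BPF_PROG_TYPE_SYSCALL, the test kfuncs moved into bpf_testmod become callable outside of tc programs. A hedged sketch of a syscall program exercising two of the scalar/memory ones; the include path and section wiring are assumptions, the kfunc prototypes come from the new header below:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"

SEC("syscall")
int call_testmod_kfuncs(void *ctx)
{
	long sum;

	sum = bpf_kfunc_call_test4(-1, -2, -3, -4);	/* -10 */
	bpf_kfunc_call_test_mem_len_pass1(&sum, sizeof(sum));
	return (int)sum;
}

char _license[] SEC("license") = "GPL";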
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
new file mode 100644
index 000000000000..f5c5b1375c24
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BPF_TESTMOD_KFUNC_H
+#define _BPF_TESTMOD_KFUNC_H
+
+#ifndef __KERNEL__
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#else
+#define __ksym
+struct prog_test_member1 {
+ int a;
+};
+
+struct prog_test_member {
+ struct prog_test_member1 m;
+ int c;
+};
+
+struct prog_test_ref_kfunc {
+ int a;
+ int b;
+ struct prog_test_member memb;
+ struct prog_test_ref_kfunc *next;
+ refcount_t cnt;
+};
+#endif
+
+struct prog_test_pass1 {
+ int x0;
+ struct {
+ int x1;
+ struct {
+ int x2;
+ struct {
+ int x3;
+ };
+ };
+ };
+};
+
+struct prog_test_pass2 {
+ int len;
+ short arr1[4];
+ struct {
+ char arr2[4];
+ unsigned long arr3[8];
+ } x;
+};
+
+struct prog_test_fail1 {
+ void *p;
+ int x;
+};
+
+struct prog_test_fail2 {
+ int x8;
+ struct prog_test_pass1 x;
+};
+
+struct prog_test_fail3 {
+ int len;
+ char arr1[2];
+ char arr2[];
+};
+
+struct prog_test_ref_kfunc *
+bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym;
+void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
+void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) __ksym;
+
+void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym;
+int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym;
+int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
+int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
+void bpf_kfunc_call_int_mem_release(int *p) __ksym;
+
+/* The bpf_kfunc_call_test_static_unused_arg is defined as static,
+ * but BPF program compilation needs to see it as a global symbol.
+ */
+#ifndef __KERNEL__
+u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym;
+#endif
+
+void bpf_testmod_test_mod_kfunc(int i) __ksym;
+
+__u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
+ __u32 c, __u64 d) __ksym;
+int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;
+struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym;
+long bpf_kfunc_call_test4(signed char a, short b, int c, long d) __ksym;
+
+void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) __ksym;
+void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym;
+void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) __ksym;
+void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym;
+
+void bpf_kfunc_call_test_destructive(void) __ksym;
+
+void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p);
+struct prog_test_member *bpf_kfunc_call_memb_acquire(void);
+void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p);
+void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p);
+void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p);
+void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p);
+void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len);
+#endif /* _BPF_TESTMOD_KFUNC_H */
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 63cd4ab70171..3b350bc31343 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -13,6 +13,9 @@ CONFIG_CGROUP_BPF=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DYNAMIC_FTRACE=y
CONFIG_FPROBE=y
CONFIG_FTRACE_SYSCALLS=y
@@ -60,6 +63,7 @@ CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_SCHED=y
CONFIG_NETDEVSIM=y
CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
CONFIG_NETFILTER_SYNPROXY=y
CONFIG_NETFILTER_XT_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 596caa176582..a105c0cd008a 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -427,3 +427,26 @@ void close_netns(struct nstoken *token)
close(token->orig_netns_fd);
free(token);
}
+
+int get_socket_local_port(int sock_fd)
+{
+ struct sockaddr_storage addr;
+ socklen_t addrlen = sizeof(addr);
+ int err;
+
+ err = getsockname(sock_fd, (struct sockaddr *)&addr, &addrlen);
+ if (err < 0)
+ return err;
+
+ if (addr.ss_family == AF_INET) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)&addr;
+
+ return sin->sin_port;
+ } else if (addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&addr;
+
+ return sin->sin6_port;
+ }
+
+ return -1;
+}
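
Worth noting for callers of the new helper: it returns the raw sin_port/sin6_port field, i.e. the port in network byte order, so a conversion with ntohs() is usually wanted. A hedged usage sketch (server_fd is assumed to be an already-bound socket):

#include <arpa/inet.h>		/* ntohs() */
#include <stdio.h>
#include "network_helpers.h"

static void print_local_port(int server_fd)
{
	int port = get_socket_local_port(server_fd);

	if (port < 0) {
		fprintf(stderr, "getsockname failed\n");
		return;
	}
	/* convert from network byte order for display */
	printf("bound to local port %d\n", (int)ntohs((unsigned short)port));
}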
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index f882c691b790..694185644da6 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -56,6 +56,7 @@ int fastopen_connect(int server_fd, const char *data, unsigned int data_len,
int make_sockaddr(int family, const char *addr_str, __u16 port,
struct sockaddr_storage *addr, socklen_t *len);
char *ping_command(int family);
+int get_socket_local_port(int sock_fd);
struct nstoken;
/**
diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
index b17bfa0e0aac..bb143de68875 100644
--- a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
+++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
@@ -96,12 +96,80 @@ static void test_parse_test_list(void)
goto error;
ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name");
ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("t/subtest1,t/subtest2", &set, true),
+ "parsing");
+ if (!ASSERT_EQ(set.cnt, 1, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 2, "subtest filters count"))
+ goto error;
+ ASSERT_OK(strcmp("t", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("subtest1", set.tests[0].subtests[0]), "subtest name");
+ ASSERT_OK(strcmp("subtest2", set.tests[0].subtests[1]), "subtest name");
error:
free_test_filter_set(&set);
}
+static void test_parse_test_list_file(void)
+{
+ struct test_filter_set set;
+ char tmpfile[80];
+ FILE *fp;
+ int fd;
+
+ snprintf(tmpfile, sizeof(tmpfile), "/tmp/bpf_arg_parsing_test.XXXXXX");
+ fd = mkstemp(tmpfile);
+ if (!ASSERT_GE(fd, 0, "create tmp"))
+ return;
+
+ fp = fdopen(fd, "w");
+ if (!ASSERT_NEQ(fp, NULL, "fdopen tmp")) {
+ close(fd);
+ goto out_remove;
+ }
+
+ fprintf(fp, "# comment\n");
+ fprintf(fp, " test_with_spaces \n");
+ fprintf(fp, "testA/subtest # comment\n");
+ fprintf(fp, "testB#comment with no space\n");
+ fprintf(fp, "testB # duplicate\n");
+ fprintf(fp, "testA/subtest # subtest duplicate\n");
+ fprintf(fp, "testA/subtest2\n");
+ fprintf(fp, "testC_no_eof_newline");
+ fflush(fp);
+
+ if (!ASSERT_OK(ferror(fp), "prepare tmp"))
+ goto out_fclose;
+
+ init_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file");
+
+ ASSERT_EQ(set.cnt, 4, "test count");
+ ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name");
+ ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count");
+ ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name");
+ ASSERT_EQ(set.tests[1].subtest_cnt, 2, "test 1 subtest count");
+ ASSERT_OK(strcmp("subtest", set.tests[1].subtests[0]), "test 1 subtest 0");
+ ASSERT_OK(strcmp("subtest2", set.tests[1].subtests[1]), "test 1 subtest 1");
+ ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name");
+ ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name");
+
+ free_test_filter_set(&set);
+
+out_fclose:
+ fclose(fp);
+out_remove:
+ remove(tmpfile);
+}
+
void test_arg_parsing(void)
{
if (test__start_subtest("test_parse_test_list"))
test_parse_test_list();
+ if (test__start_subtest("test_parse_test_list_file"))
+ test_parse_test_list_file();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
index a4d0cc9d3367..fe2c502e5089 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
@@ -11,6 +11,7 @@
#include "ksym_race.skel.h"
#include "bpf_mod_race.skel.h"
#include "kfunc_call_race.skel.h"
+#include "testing_helpers.h"
/* This test crafts a race between btf_try_get_module and do_init_module, and
* checks whether btf_try_get_module handles the invocation for a well-formed
@@ -44,35 +45,10 @@ enum bpf_test_state {
static _Atomic enum bpf_test_state state = _TS_INVALID;
-static int sys_finit_module(int fd, const char *param_values, int flags)
-{
- return syscall(__NR_finit_module, fd, param_values, flags);
-}
-
-static int sys_delete_module(const char *name, unsigned int flags)
-{
- return syscall(__NR_delete_module, name, flags);
-}
-
-static int load_module(const char *mod)
-{
- int ret, fd;
-
- fd = open("bpf_testmod.ko", O_RDONLY);
- if (fd < 0)
- return fd;
-
- ret = sys_finit_module(fd, "", 0);
- close(fd);
- if (ret < 0)
- return ret;
- return 0;
-}
-
static void *load_module_thread(void *p)
{
- if (!ASSERT_NEQ(load_module("bpf_testmod.ko"), 0, "load_module_thread must fail"))
+ if (!ASSERT_NEQ(load_bpf_testmod(false), 0, "load_module_thread must fail"))
atomic_store(&state, TS_MODULE_LOAD);
else
atomic_store(&state, TS_MODULE_LOAD_FAIL);
@@ -124,7 +100,7 @@ static void test_bpf_mod_race_config(const struct test_config *config)
if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration"))
return;
- if (!ASSERT_OK(sys_delete_module("bpf_testmod", 0), "unload bpf_testmod"))
+ if (!ASSERT_OK(unload_bpf_testmod(false), "unload bpf_testmod"))
goto end_mmap;
skel = bpf_mod_race__open();
@@ -202,8 +178,8 @@ end_destroy:
bpf_mod_race__destroy(skel);
ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
end_module:
- sys_delete_module("bpf_testmod", 0);
- ASSERT_OK(load_module("bpf_testmod.ko"), "restore bpf_testmod");
+ unload_bpf_testmod(false);
+ ASSERT_OK(load_bpf_testmod(false), "restore bpf_testmod");
end_mmap:
munmap(fault_addr, 4096);
atomic_store(&state, _TS_INVALID);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c
new file mode 100644
index 000000000000..31f1e815f671
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <linux/unistd.h>
+#include <linux/mount.h>
+#include <sys/syscall.h>
+
+static inline int sys_fsopen(const char *fsname, unsigned flags)
+{
+ return syscall(__NR_fsopen, fsname, flags);
+}
+
+static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux)
+{
+ return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux);
+}
+
+static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags)
+{
+ return syscall(__NR_fsmount, fs_fd, flags, ms_flags);
+}
+
+__attribute__((unused))
+static inline int sys_move_mount(int from_dfd, const char *from_path,
+ int to_dfd, const char *to_path,
+ unsigned int ms_flags)
+{
+ return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, ms_flags);
+}
+
+static void bpf_obj_pinning_detached(void)
+{
+ LIBBPF_OPTS(bpf_obj_pin_opts, pin_opts);
+ LIBBPF_OPTS(bpf_obj_get_opts, get_opts);
+ int fs_fd = -1, mnt_fd = -1;
+ int map_fd = -1, map_fd2 = -1;
+ int zero = 0, src_value, dst_value, err;
+ const char *map_name = "fsmount_map";
+
+ /* A bunch of below UAPI calls are constructed based on reading:
+ * https://brauner.io/2023/02/28/mounting-into-mount-namespaces.html
+ */
+
+ /* create VFS context */
+ fs_fd = sys_fsopen("bpf", 0);
+ if (!ASSERT_GE(fs_fd, 0, "fs_fd"))
+ goto cleanup;
+
+ /* instantiate FS object */
+ err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
+ if (!ASSERT_OK(err, "fs_create"))
+ goto cleanup;
+
+ /* create O_PATH fd for detached mount */
+ mnt_fd = sys_fsmount(fs_fd, 0, 0);
+ if (!ASSERT_GE(mnt_fd, 0, "mnt_fd"))
+ goto cleanup;
+
+ /* If we wanted to expose detached mount in the file system, we'd do
+ * something like below. But the whole point is that we actually don't
+ * even have to expose BPF FS in the file system to be able to work
+ * (pin/get objects) with it.
+ *
+ * err = sys_move_mount(mnt_fd, "", -EBADF, mnt_path, MOVE_MOUNT_F_EMPTY_PATH);
+ * if (!ASSERT_OK(err, "move_mount"))
+ * goto cleanup;
+ */
+
+ /* create BPF map to pin */
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, map_name, 4, 4, 1, NULL);
+ if (!ASSERT_GE(map_fd, 0, "map_fd"))
+ goto cleanup;
+
+ /* pin BPF map into detached BPF FS through mnt_fd */
+ pin_opts.file_flags = BPF_F_PATH_FD;
+ pin_opts.path_fd = mnt_fd;
+ err = bpf_obj_pin_opts(map_fd, map_name, &pin_opts);
+ if (!ASSERT_OK(err, "map_pin"))
+ goto cleanup;
+
+ /* get BPF map from detached BPF FS through mnt_fd */
+ get_opts.file_flags = BPF_F_PATH_FD;
+ get_opts.path_fd = mnt_fd;
+ map_fd2 = bpf_obj_get_opts(map_name, &get_opts);
+ if (!ASSERT_GE(map_fd2, 0, "map_get"))
+ goto cleanup;
+
+ /* update map through one FD */
+ src_value = 0xcafebeef;
+ err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);
+ ASSERT_OK(err, "map_update");
+
+ /* check values written/read through different FDs do match */
+ dst_value = 0;
+ err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value);
+ ASSERT_OK(err, "map_lookup");
+ ASSERT_EQ(dst_value, src_value, "map_value_eq1");
+ ASSERT_EQ(dst_value, 0xcafebeef, "map_value_eq2");
+
+cleanup:
+ if (map_fd >= 0)
+ ASSERT_OK(close(map_fd), "close_map_fd");
+ if (map_fd2 >= 0)
+ ASSERT_OK(close(map_fd2), "close_map_fd2");
+ if (fs_fd >= 0)
+ ASSERT_OK(close(fs_fd), "close_fs_fd");
+ if (mnt_fd >= 0)
+ ASSERT_OK(close(mnt_fd), "close_mnt_fd");
+}
+
+enum path_kind
+{
+ PATH_STR_ABS,
+ PATH_STR_REL,
+ PATH_FD_REL,
+};
+
+static void validate_pin(int map_fd, const char *map_name, int src_value,
+ enum path_kind path_kind)
+{
+ LIBBPF_OPTS(bpf_obj_pin_opts, pin_opts);
+ char abs_path[PATH_MAX], old_cwd[PATH_MAX];
+ const char *pin_path = NULL;
+ int zero = 0, dst_value, map_fd2, err;
+
+ snprintf(abs_path, sizeof(abs_path), "/sys/fs/bpf/%s", map_name);
+ old_cwd[0] = '\0';
+
+ switch (path_kind) {
+ case PATH_STR_ABS:
+ /* absolute path */
+ pin_path = abs_path;
+ break;
+ case PATH_STR_REL:
+ /* cwd + relative path */
+ ASSERT_OK_PTR(getcwd(old_cwd, sizeof(old_cwd)), "getcwd");
+ ASSERT_OK(chdir("/sys/fs/bpf"), "chdir");
+ pin_path = map_name;
+ break;
+ case PATH_FD_REL:
+ /* dir fd + relative path */
+ pin_opts.file_flags = BPF_F_PATH_FD;
+ pin_opts.path_fd = open("/sys/fs/bpf", O_PATH);
+ ASSERT_GE(pin_opts.path_fd, 0, "path_fd");
+ pin_path = map_name;
+ break;
+ }
+
+ /* pin BPF map using specified path definition */
+ err = bpf_obj_pin_opts(map_fd, pin_path, &pin_opts);
+ ASSERT_OK(err, "obj_pin");
+
+ /* cleanup */
+ if (pin_opts.path_fd >= 0)
+ close(pin_opts.path_fd);
+ if (old_cwd[0])
+ ASSERT_OK(chdir(old_cwd), "restore_cwd");
+
+ map_fd2 = bpf_obj_get(abs_path);
+ if (!ASSERT_GE(map_fd2, 0, "map_get"))
+ goto cleanup;
+
+ /* update map through one FD */
+ err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);
+ ASSERT_OK(err, "map_update");
+
+ /* check values written/read through different FDs do match */
+ dst_value = 0;
+ err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value);
+ ASSERT_OK(err, "map_lookup");
+ ASSERT_EQ(dst_value, src_value, "map_value_eq");
+cleanup:
+ if (map_fd2 >= 0)
+ ASSERT_OK(close(map_fd2), "close_map_fd2");
+ unlink(abs_path);
+}
+
+static void validate_get(int map_fd, const char *map_name, int src_value,
+ enum path_kind path_kind)
+{
+ LIBBPF_OPTS(bpf_obj_get_opts, get_opts);
+ char abs_path[PATH_MAX], old_cwd[PATH_MAX];
+ const char *pin_path = NULL;
+ int zero = 0, dst_value, map_fd2, err;
+
+ snprintf(abs_path, sizeof(abs_path), "/sys/fs/bpf/%s", map_name);
+ /* pin BPF map using specified path definition */
+ err = bpf_obj_pin(map_fd, abs_path);
+ if (!ASSERT_OK(err, "pin_map"))
+ return;
+
+ old_cwd[0] = '\0';
+
+ switch (path_kind) {
+ case PATH_STR_ABS:
+ /* absolute path */
+ pin_path = abs_path;
+ break;
+ case PATH_STR_REL:
+ /* cwd + relative path */
+ ASSERT_OK_PTR(getcwd(old_cwd, sizeof(old_cwd)), "getcwd");
+ ASSERT_OK(chdir("/sys/fs/bpf"), "chdir");
+ pin_path = map_name;
+ break;
+ case PATH_FD_REL:
+ /* dir fd + relative path */
+ get_opts.file_flags = BPF_F_PATH_FD;
+ get_opts.path_fd = open("/sys/fs/bpf", O_PATH);
+ ASSERT_GE(get_opts.path_fd, 0, "path_fd");
+ pin_path = map_name;
+ break;
+ }
+
+ map_fd2 = bpf_obj_get_opts(pin_path, &get_opts);
+ if (!ASSERT_GE(map_fd2, 0, "map_get"))
+ goto cleanup;
+
+ /* cleanup */
+ if (get_opts.path_fd >= 0)
+ close(get_opts.path_fd);
+ if (old_cwd[0])
+ ASSERT_OK(chdir(old_cwd), "restore_cwd");
+
+ /* update map through one FD */
+ err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);
+ ASSERT_OK(err, "map_update");
+
+ /* check values written/read through different FDs do match */
+ dst_value = 0;
+ err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value);
+ ASSERT_OK(err, "map_lookup");
+ ASSERT_EQ(dst_value, src_value, "map_value_eq");
+cleanup:
+ if (map_fd2 >= 0)
+ ASSERT_OK(close(map_fd2), "close_map_fd2");
+ unlink(abs_path);
+}
+
+static void bpf_obj_pinning_mounted(enum path_kind path_kind)
+{
+ const char *map_name = "mounted_map";
+ int map_fd;
+
+ /* create BPF map to pin */
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, map_name, 4, 4, 1, NULL);
+ if (!ASSERT_GE(map_fd, 0, "map_fd"))
+ return;
+
+ validate_pin(map_fd, map_name, 100 + (int)path_kind, path_kind);
+ validate_get(map_fd, map_name, 200 + (int)path_kind, path_kind);
+ ASSERT_OK(close(map_fd), "close_map_fd");
+}
+
+void test_bpf_obj_pinning()
+{
+ if (test__start_subtest("detached"))
+ bpf_obj_pinning_detached();
+ if (test__start_subtest("mounted-str-abs"))
+ bpf_obj_pinning_mounted(PATH_STR_ABS);
+ if (test__start_subtest("mounted-str-rel"))
+ bpf_obj_pinning_mounted(PATH_STR_REL);
+ if (test__start_subtest("mounted-fd-rel"))
+ bpf_obj_pinning_mounted(PATH_FD_REL);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 210d643fda6c..4e0cdb593318 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -3991,6 +3991,46 @@ static struct btf_raw_test raw_tests[] = {
.err_str = "Invalid arg#1",
},
{
+ .descr = "decl_tag test #18, decl_tag as the map key type",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_STRUCT_ENC(0, 2, 8), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_MEMBER_ENC(NAME_TBD, 1, 32),
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0m1\0m2\0tag"),
+ .map_type = BPF_MAP_TYPE_HASH,
+ .map_name = "tag_type_check_btf",
+ .key_size = 8,
+ .value_size = 4,
+ .key_type_id = 3,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .map_create_err = true,
+},
+{
+ .descr = "decl_tag test #19, decl_tag as the map value type",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_STRUCT_ENC(0, 2, 8), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_MEMBER_ENC(NAME_TBD, 1, 32),
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0m1\0m2\0tag"),
+ .map_type = BPF_MAP_TYPE_HASH,
+ .map_name = "tag_type_check_btf",
+ .key_size = 4,
+ .value_size = 8,
+ .key_type_id = 1,
+ .value_type_id = 3,
+ .max_entries = 1,
+ .map_create_err = true,
+},
+{
.descr = "type_tag test #1",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c
index 4d2fa99273d8..2bb5773d6f99 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c
@@ -25,6 +25,8 @@ static void test_setsockopt_set(int cgroup_fd, int sock_fd)
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
/* Attach setsockopt that sets EUNATCH, assert that
* we actually get that error when we run setsockopt()
*/
@@ -59,6 +61,8 @@ static void test_setsockopt_set_and_get(int cgroup_fd, int sock_fd)
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
/* Attach setsockopt that sets EUNATCH, and one that gets the
* previously set errno. Assert that we get the same errno back.
*/
@@ -100,6 +104,8 @@ static void test_setsockopt_default_zero(int cgroup_fd, int sock_fd)
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
/* Attach setsockopt that gets the previously set errno.
* Assert that, without anything setting one, we get 0.
*/
@@ -134,6 +140,8 @@ static void test_setsockopt_default_zero_and_set(int cgroup_fd, int sock_fd)
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
/* Attach setsockopt that gets the previously set errno, and then
* one that sets the errno to EUNATCH. Assert that the get does not
* see EUNATCH set later, and does not prevent EUNATCH from being set.
@@ -177,6 +185,8 @@ static void test_setsockopt_override(int cgroup_fd, int sock_fd)
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
/* Attach setsockopt that sets EUNATCH, then one that sets EISCONN,
* and then one that gets the exported errno. Assert both the syscall
* and the helper sees the last set errno.
@@ -224,6 +234,8 @@ static void test_setsockopt_legacy_eperm(int cgroup_fd, int sock_fd)
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
/* Attach setsockopt that return a reject without setting errno
* (legacy reject), and one that gets the errno. Assert that for
* backward compatibility the syscall result in EPERM, and this
@@ -268,6 +280,8 @@ static void test_setsockopt_legacy_no_override(int cgroup_fd, int sock_fd)
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
/* Attach setsockopt that sets EUNATCH, then one that return a reject
* without setting errno, and then one that gets the exported errno.
* Assert both the syscall and the helper's errno are unaffected by
@@ -319,6 +333,8 @@ static void test_getsockopt_get(int cgroup_fd, int sock_fd)
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
/* Attach getsockopt that gets previously set errno. Assert that the
* error from kernel is in both ctx_retval_value and retval_value.
*/
@@ -359,6 +375,8 @@ static void test_getsockopt_override(int cgroup_fd, int sock_fd)
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
/* Attach getsockopt that sets retval to -EISCONN. Assert that this
* overrides the value from kernel.
*/
@@ -396,6 +414,8 @@ static void test_getsockopt_retval_sync(int cgroup_fd, int sock_fd)
if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
/* Attach getsockopt that sets retval to -EISCONN, and one that clears
* ctx retval. Assert that the clearing ctx retval is synced to helper
* and clears any errors both from kernel and BPF..
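
The hunks above all follow one pattern: a global that the BPF programs read is filled in from user space through the skeleton's .bss view, after load and before the programs run. A hedged minimal sketch of that pattern with a hypothetical skeleton name ("example" and its page_size global are assumptions, not the selftest's actual skeleton):

#include <unistd.h>
#include "example.skel.h"

static int run(void)
{
	struct example *skel;

	skel = example__open_and_load();
	if (!skel)
		return -1;

	/* mirror the kernel's page size into the BPF side before attach */
	skel->bss->page_size = sysconf(_SC_PAGESIZE);

	/* ... attach and exercise the programs ... */

	example__destroy(skel);
	return 0;
}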
diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
index 5338d2ea0460..2a9a30650350 100644
--- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c
+++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
@@ -183,7 +183,7 @@ cleanup:
void serial_test_check_mtu(void)
{
- __u32 mtu_lo;
+ int mtu_lo;
if (test__start_subtest("bpf_check_mtu XDP-attach"))
test_check_mtu_xdp_attach();
diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
index cdf4acc18e4c..756ea8b590b6 100644
--- a/tools/testing/selftests/bpf/prog_tests/cpumask.c
+++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
@@ -10,6 +10,7 @@ static const char * const cpumask_success_testcases[] = {
"test_set_clear_cpu",
"test_setall_clear_cpu",
"test_first_firstzero_cpu",
+ "test_firstand_nocpu",
"test_test_and_set_clear",
"test_and_or_xor",
"test_intersects_subset",
@@ -70,5 +71,6 @@ void test_cpumask(void)
verify_success(cpumask_success_testcases[i]);
}
+ RUN_TESTS(cpumask_success);
RUN_TESTS(cpumask_failure);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
index d176c34a7d2e..7cfac53c0d58 100644
--- a/tools/testing/selftests/bpf/prog_tests/dynptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -20,6 +20,14 @@ static struct {
{"test_ringbuf", SETUP_SYSCALL_SLEEP},
{"test_skb_readonly", SETUP_SKB_PROG},
{"test_dynptr_skb_data", SETUP_SKB_PROG},
+ {"test_adjust", SETUP_SYSCALL_SLEEP},
+ {"test_adjust_err", SETUP_SYSCALL_SLEEP},
+ {"test_zero_size_dynptr", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_is_null", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_is_rdonly", SETUP_SKB_PROG},
+ {"test_dynptr_clone", SETUP_SKB_PROG},
+ {"test_dynptr_skb_no_buff", SETUP_SKB_PROG},
+ {"test_dynptr_skb_strcmp", SETUP_SKB_PROG},
};
static void verify_success(const char *prog_name, enum test_setup_type setup_type)
diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
index a1e712105811..2fd05649bad1 100644
--- a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include <linux/rtnetlink.h>
#include <sys/types.h>
#include <net/if.h>
@@ -15,14 +16,23 @@
#define IPV4_IFACE_ADDR "10.0.0.254"
#define IPV4_NUD_FAILED_ADDR "10.0.0.1"
#define IPV4_NUD_STALE_ADDR "10.0.0.2"
+#define IPV4_TBID_ADDR "172.0.0.254"
+#define IPV4_TBID_NET "172.0.0.0"
+#define IPV4_TBID_DST "172.0.0.2"
+#define IPV6_TBID_ADDR "fd00::FFFF"
+#define IPV6_TBID_NET "fd00::"
+#define IPV6_TBID_DST "fd00::2"
#define DMAC "11:11:11:11:11:11"
#define DMAC_INIT { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, }
+#define DMAC2 "01:01:01:01:01:01"
+#define DMAC_INIT2 { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, }
struct fib_lookup_test {
const char *desc;
const char *daddr;
int expected_ret;
int lookup_flags;
+ __u32 tbid;
__u8 dmac[6];
};
@@ -43,6 +53,22 @@ static const struct fib_lookup_test tests[] = {
{ .desc = "IPv4 skip neigh",
.daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
.lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv4 TBID lookup failure",
+ .daddr = IPV4_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_NOT_FWDED,
+ .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID,
+ .tbid = RT_TABLE_MAIN, },
+ { .desc = "IPv4 TBID lookup success",
+ .daddr = IPV4_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, .tbid = 100,
+ .dmac = DMAC_INIT2, },
+ { .desc = "IPv6 TBID lookup failure",
+ .daddr = IPV6_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_NOT_FWDED,
+ .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID,
+ .tbid = RT_TABLE_MAIN, },
+ { .desc = "IPv6 TBID lookup success",
+ .daddr = IPV6_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, .tbid = 100,
+ .dmac = DMAC_INIT2, },
};
static int ifindex;
@@ -53,6 +79,7 @@ static int setup_netns(void)
SYS(fail, "ip link add veth1 type veth peer name veth2");
SYS(fail, "ip link set dev veth1 up");
+ SYS(fail, "ip link set dev veth2 up");
err = write_sysctl("/proc/sys/net/ipv4/neigh/veth1/gc_stale_time", "900");
if (!ASSERT_OK(err, "write_sysctl(net.ipv4.neigh.veth1.gc_stale_time)"))
@@ -70,6 +97,17 @@ static int setup_netns(void)
SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR);
SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC);
+ /* Setup for tbid lookup tests */
+ SYS(fail, "ip addr add %s/24 dev veth2", IPV4_TBID_ADDR);
+ SYS(fail, "ip route del %s/24 dev veth2", IPV4_TBID_NET);
+ SYS(fail, "ip route add table 100 %s/24 dev veth2", IPV4_TBID_NET);
+ SYS(fail, "ip neigh add %s dev veth2 lladdr %s nud stale", IPV4_TBID_DST, DMAC2);
+
+ SYS(fail, "ip addr add %s/64 dev veth2", IPV6_TBID_ADDR);
+ SYS(fail, "ip -6 route del %s/64 dev veth2", IPV6_TBID_NET);
+ SYS(fail, "ip -6 route add table 100 %s/64 dev veth2", IPV6_TBID_NET);
+ SYS(fail, "ip neigh add %s dev veth2 lladdr %s nud stale", IPV6_TBID_DST, DMAC2);
+
err = write_sysctl("/proc/sys/net/ipv4/conf/veth1/forwarding", "1");
if (!ASSERT_OK(err, "write_sysctl(net.ipv4.conf.veth1.forwarding)"))
goto fail;
@@ -83,7 +121,7 @@ fail:
return -1;
}
-static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr)
+static int set_lookup_params(struct bpf_fib_lookup *params, const struct fib_lookup_test *test)
{
int ret;
@@ -91,8 +129,9 @@ static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr)
params->l4_protocol = IPPROTO_TCP;
params->ifindex = ifindex;
+ params->tbid = test->tbid;
- if (inet_pton(AF_INET6, daddr, params->ipv6_dst) == 1) {
+ if (inet_pton(AF_INET6, test->daddr, params->ipv6_dst) == 1) {
params->family = AF_INET6;
ret = inet_pton(AF_INET6, IPV6_IFACE_ADDR, params->ipv6_src);
if (!ASSERT_EQ(ret, 1, "inet_pton(IPV6_IFACE_ADDR)"))
@@ -100,7 +139,7 @@ static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr)
return 0;
}
- ret = inet_pton(AF_INET, daddr, &params->ipv4_dst);
+ ret = inet_pton(AF_INET, test->daddr, &params->ipv4_dst);
if (!ASSERT_EQ(ret, 1, "convert IP[46] address"))
return -1;
params->family = AF_INET;
@@ -154,13 +193,12 @@ void test_fib_lookup(void)
fib_params = &skel->bss->fib_params;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
- printf("Testing %s\n", tests[i].desc);
+ printf("Testing %s ", tests[i].desc);
- if (set_lookup_params(fib_params, tests[i].daddr))
+ if (set_lookup_params(fib_params, &tests[i]))
continue;
skel->bss->fib_lookup_ret = -1;
- skel->bss->lookup_flags = BPF_FIB_LOOKUP_OUTPUT |
- tests[i].lookup_flags;
+ skel->bss->lookup_flags = tests[i].lookup_flags;
err = bpf_prog_test_run_opts(prog_fd, &run_opts);
if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
@@ -175,7 +213,14 @@ void test_fib_lookup(void)
mac_str(expected, tests[i].dmac);
mac_str(actual, fib_params->dmac);
- printf("dmac expected %s actual %s\n", expected, actual);
+ printf("dmac expected %s actual %s ", expected, actual);
+ }
+
+		/* ensure tbid is zeroed out after the fib lookup */
+ if (tests[i].lookup_flags & BPF_FIB_LOOKUP_DIRECT) {
+ if (!ASSERT_EQ(skel->bss->fib_params.tbid, 0,
+ "expected fib_params.tbid to be zero"))
+ goto fail;
}
}
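As a companion to the new TBID test cases, here is a hedged BPF-side sketch of supplying a routing table id to the FIB lookup helper. It assumes the updated UAPI header from this series (the tbid field and BPF_FIB_LOOKUP_TBID flag); the addresses, table number and section name are illustrative.

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch: FIB lookup against a specific routing table (tbid). */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#ifndef AF_INET
#define AF_INET 2
#endif

int fib_lookup_ret = -1;

SEC("tc")
int fib_lookup_tbid_sketch(struct __sk_buff *skb)
{
	struct bpf_fib_lookup params = {};

	params.family = AF_INET;
	params.ifindex = skb->ingress_ifindex;
	params.ipv4_dst = bpf_htonl(0xac000002);	/* 172.0.0.2, as in the test */
	params.tbid = 100;				/* routing table used by the test */

	/* BPF_FIB_LOOKUP_TBID makes the helper honour params.tbid;
	 * BPF_FIB_LOOKUP_DIRECT does a direct table lookup, skipping FIB rules.
	 */
	fib_lookup_ret = bpf_fib_lookup(skb, &params, sizeof(params),
					BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID);

	return 0;	/* TC_ACT_OK */
}

char _license[] SEC("license") = "GPL";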
diff --git a/tools/testing/selftests/bpf/prog_tests/global_map_resize.c b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c
new file mode 100644
index 000000000000..fd41425d2e5c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include <errno.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include "test_global_map_resize.skel.h"
+#include "test_progs.h"
+
+static void run_prog_bss_array_sum(void)
+{
+ (void)syscall(__NR_getpid);
+}
+
+static void run_prog_data_array_sum(void)
+{
+ (void)syscall(__NR_getuid);
+}
+
+static void global_map_resize_bss_subtest(void)
+{
+ int err;
+ struct test_global_map_resize *skel;
+ struct bpf_map *map;
+ const __u32 desired_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2;
+ size_t array_len, actual_sz;
+
+ skel = test_global_map_resize__open();
+ if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
+ goto teardown;
+
+ /* set some initial value before resizing.
+ * it is expected this non-zero value will be preserved
+ * while resizing.
+ */
+ skel->bss->array[0] = 1;
+
+ /* resize map value and verify the new size */
+ map = skel->maps.bss;
+ err = bpf_map__set_value_size(map, desired_sz);
+ if (!ASSERT_OK(err, "bpf_map__set_value_size"))
+ goto teardown;
+ if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
+ goto teardown;
+
+ /* set the expected number of elements based on the resized array */
+ array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->bss->array[0]);
+ if (!ASSERT_GT(array_len, 1, "array_len"))
+ goto teardown;
+
+ skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz);
+ if (!ASSERT_OK_PTR(skel->bss, "bpf_map__initial_value (ptr)"))
+ goto teardown;
+ if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)"))
+ goto teardown;
+
+ /* fill the newly resized array with ones,
+ * skipping the first element which was previously set
+ */
+ for (int i = 1; i < array_len; i++)
+ skel->bss->array[i] = 1;
+
+ /* set global const values before loading */
+ skel->rodata->pid = getpid();
+ skel->rodata->bss_array_len = array_len;
+ skel->rodata->data_array_len = 1;
+
+ err = test_global_map_resize__load(skel);
+ if (!ASSERT_OK(err, "test_global_map_resize__load"))
+ goto teardown;
+ err = test_global_map_resize__attach(skel);
+ if (!ASSERT_OK(err, "test_global_map_resize__attach"))
+ goto teardown;
+
+ /* run the bpf program which will sum the contents of the array.
+	 * since the array was filled with ones, verify the sum equals array_len
+ */
+ run_prog_bss_array_sum();
+ if (!ASSERT_EQ(skel->bss->sum, array_len, "sum"))
+ goto teardown;
+
+teardown:
+ test_global_map_resize__destroy(skel);
+}
+
+static void global_map_resize_data_subtest(void)
+{
+ int err;
+ struct test_global_map_resize *skel;
+ struct bpf_map *map;
+ const __u32 desired_sz = sysconf(_SC_PAGE_SIZE) * 2;
+ size_t array_len, actual_sz;
+
+ skel = test_global_map_resize__open();
+ if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
+ goto teardown;
+
+ /* set some initial value before resizing.
+ * it is expected this non-zero value will be preserved
+ * while resizing.
+ */
+ skel->data_custom->my_array[0] = 1;
+
+ /* resize map value and verify the new size */
+ map = skel->maps.data_custom;
+ err = bpf_map__set_value_size(map, desired_sz);
+ if (!ASSERT_OK(err, "bpf_map__set_value_size"))
+ goto teardown;
+ if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
+ goto teardown;
+
+ /* set the expected number of elements based on the resized array */
+ array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->data_custom->my_array[0]);
+ if (!ASSERT_GT(array_len, 1, "array_len"))
+ goto teardown;
+
+ skel->data_custom = bpf_map__initial_value(skel->maps.data_custom, &actual_sz);
+ if (!ASSERT_OK_PTR(skel->data_custom, "bpf_map__initial_value (ptr)"))
+ goto teardown;
+ if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)"))
+ goto teardown;
+
+ /* fill the newly resized array with ones,
+ * skipping the first element which was previously set
+ */
+ for (int i = 1; i < array_len; i++)
+ skel->data_custom->my_array[i] = 1;
+
+ /* set global const values before loading */
+ skel->rodata->pid = getpid();
+ skel->rodata->bss_array_len = 1;
+ skel->rodata->data_array_len = array_len;
+
+ err = test_global_map_resize__load(skel);
+ if (!ASSERT_OK(err, "test_global_map_resize__load"))
+ goto teardown;
+ err = test_global_map_resize__attach(skel);
+ if (!ASSERT_OK(err, "test_global_map_resize__attach"))
+ goto teardown;
+
+ /* run the bpf program which will sum the contents of the array.
+	 * since the array was filled with ones, verify the sum equals array_len
+ */
+ run_prog_data_array_sum();
+ if (!ASSERT_EQ(skel->bss->sum, array_len, "sum"))
+ goto teardown;
+
+teardown:
+ test_global_map_resize__destroy(skel);
+}
+
+static void global_map_resize_invalid_subtest(void)
+{
+ int err;
+ struct test_global_map_resize *skel;
+ struct bpf_map *map;
+ __u32 element_sz, desired_sz;
+
+ skel = test_global_map_resize__open();
+ if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
+ return;
+
+ /* attempt to resize a global datasec map to size
+ * which does NOT align with array
+ */
+ map = skel->maps.data_custom;
+ if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.custom initial btf"))
+ goto teardown;
+ /* set desired size a fraction of element size beyond an aligned size */
+ element_sz = sizeof(skel->data_custom->my_array[0]);
+ desired_sz = element_sz + element_sz / 2;
+ /* confirm desired size does NOT align with array */
+ if (!ASSERT_NEQ(desired_sz % element_sz, 0, "my_array alignment"))
+ goto teardown;
+ err = bpf_map__set_value_size(map, desired_sz);
+ /* confirm resize is OK but BTF info is cleared */
+ if (!ASSERT_OK(err, ".data.custom bpf_map__set_value_size") ||
+ !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.custom clear btf key") ||
+ !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.custom clear btf val"))
+ goto teardown;
+
+ /* attempt to resize a global datasec map whose only var is NOT an array */
+ map = skel->maps.data_non_array;
+ if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array initial btf"))
+ goto teardown;
+ /* set desired size to arbitrary value */
+ desired_sz = 1024;
+ err = bpf_map__set_value_size(map, desired_sz);
+ /* confirm resize is OK but BTF info is cleared */
+ if (!ASSERT_OK(err, ".data.non_array bpf_map__set_value_size") ||
+ !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.non_array clear btf key") ||
+ !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array clear btf val"))
+ goto teardown;
+
+ /* attempt to resize a global datasec map
+ * whose last var is NOT an array
+ */
+ map = skel->maps.data_array_not_last;
+ if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last initial btf"))
+ goto teardown;
+ /* set desired size to a multiple of element size */
+ element_sz = sizeof(skel->data_array_not_last->my_array_first[0]);
+ desired_sz = element_sz * 8;
+ /* confirm desired size aligns with array */
+ if (!ASSERT_EQ(desired_sz % element_sz, 0, "my_array_first alignment"))
+ goto teardown;
+ err = bpf_map__set_value_size(map, desired_sz);
+ /* confirm resize is OK but BTF info is cleared */
+ if (!ASSERT_OK(err, ".data.array_not_last bpf_map__set_value_size") ||
+ !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.array_not_last clear btf key") ||
+ !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last clear btf val"))
+ goto teardown;
+
+teardown:
+ test_global_map_resize__destroy(skel);
+}
+
+void test_global_map_resize(void)
+{
+ if (test__start_subtest("global_map_resize_bss"))
+ global_map_resize_bss_subtest();
+
+ if (test__start_subtest("global_map_resize_data"))
+ global_map_resize_data_subtest();
+
+ if (test__start_subtest("global_map_resize_invalid"))
+ global_map_resize_invalid_subtest();
+}
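A condensed user-space sketch of the resize flow the new test exercises, assuming the test_global_map_resize skeleton from this patch: grow the datasec-backed map with bpf_map__set_value_size() after open but before load, then re-fetch the mmap'ed region with bpf_map__initial_value(); error handling is trimmed.

/* Hedged sketch, assuming the test_global_map_resize skeleton from this patch. */
#include <unistd.h>
#include <bpf/libbpf.h>
#include "test_global_map_resize.skel.h"

static int resize_bss_array(void)
{
	struct test_global_map_resize *skel;
	size_t actual_sz, new_sz;
	int err = -1;

	skel = test_global_map_resize__open();
	if (!skel)
		return -1;

	/* Same sizing the test uses: keep room for 'sum' plus two pages of array. */
	new_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2;

	/* Resizing must happen after open() but before load(). */
	if (bpf_map__set_value_size(skel->maps.bss, new_sz))
		goto out;

	/* The skeleton's ->bss pointer is stale after a resize; re-fetch the
	 * mmap'ed region and its size from the map itself.
	 */
	skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz);
	if (!skel->bss || actual_sz != new_sz)
		goto out;

	err = test_global_map_resize__load(skel);
out:
	test_global_map_resize__destroy(skel);
	return err;
}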
diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c
index 7fc01ff490db..f53d658ed080 100644
--- a/tools/testing/selftests/bpf/prog_tests/module_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c
@@ -4,6 +4,7 @@
#include <test_progs.h>
#include <stdbool.h>
#include "test_module_attach.skel.h"
+#include "testing_helpers.h"
static int duration;
@@ -32,11 +33,6 @@ static int trigger_module_test_writable(int *val)
return 0;
}
-static int delete_module(const char *name, int flags)
-{
- return syscall(__NR_delete_module, name, flags);
-}
-
void test_module_attach(void)
{
const int READ_SZ = 456;
@@ -93,21 +89,21 @@ void test_module_attach(void)
if (!ASSERT_OK_PTR(link, "attach_fentry"))
goto cleanup;
- ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module");
+ ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
bpf_link__destroy(link);
link = bpf_program__attach(skel->progs.handle_fexit);
if (!ASSERT_OK_PTR(link, "attach_fexit"))
goto cleanup;
- ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module");
+ ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
bpf_link__destroy(link);
link = bpf_program__attach(skel->progs.kprobe_multi);
if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
goto cleanup;
- ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module");
+ ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
bpf_link__destroy(link);
cleanup:
diff --git a/tools/testing/selftests/bpf/prog_tests/netcnt.c b/tools/testing/selftests/bpf/prog_tests/netcnt.c
index d3915c58d0e1..c3333edd029f 100644
--- a/tools/testing/selftests/bpf/prog_tests/netcnt.c
+++ b/tools/testing/selftests/bpf/prog_tests/netcnt.c
@@ -67,12 +67,12 @@ void serial_test_netcnt(void)
}
/* No packets should be lost */
- ASSERT_EQ(packets, 10000, "packets");
+ ASSERT_GE(packets, 10000, "packets");
/* Let's check that bytes counter matches the number of packets
* multiplied by the size of ipv6 ICMP packet.
*/
- ASSERT_EQ(bytes, packets * 104, "bytes");
+ ASSERT_GE(bytes, packets * 104, "bytes");
err:
if (cg_fd != -1)
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c
new file mode 100644
index 000000000000..b0583309a94e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <bpf/bpf_endian.h>
+
+#include "sock_destroy_prog.skel.h"
+#include "sock_destroy_prog_fail.skel.h"
+#include "network_helpers.h"
+
+#define TEST_NS "sock_destroy_netns"
+
+static void start_iter_sockets(struct bpf_program *prog)
+{
+ struct bpf_link *link;
+ char buf[50] = {};
+ int iter_fd, len;
+
+ link = bpf_program__attach_iter(prog, NULL);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
+ return;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
+ goto free_link;
+
+ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+ ;
+ ASSERT_GE(len, 0, "read");
+
+ close(iter_fd);
+
+free_link:
+ bpf_link__destroy(link);
+}
+
+static void test_tcp_client(struct sock_destroy_prog *skel)
+{
+ int serv = -1, clien = -1, accept_serv = -1, n;
+
+ serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ clien = connect_to_fd(serv, 0);
+ if (!ASSERT_GE(clien, 0, "connect_to_fd"))
+ goto cleanup;
+
+ accept_serv = accept(serv, NULL, NULL);
+ if (!ASSERT_GE(accept_serv, 0, "serv accept"))
+ goto cleanup;
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_EQ(n, 1, "client send"))
+ goto cleanup;
+
+ /* Run iterator program that destroys connected client sockets. */
+ start_iter_sockets(skel->progs.iter_tcp6_client);
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
+ goto cleanup;
+ ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket");
+
+cleanup:
+ if (clien != -1)
+ close(clien);
+ if (accept_serv != -1)
+ close(accept_serv);
+ if (serv != -1)
+ close(serv);
+}
+
+static void test_tcp_server(struct sock_destroy_prog *skel)
+{
+ int serv = -1, clien = -1, accept_serv = -1, n, serv_port;
+
+ serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+ serv_port = get_socket_local_port(serv);
+ if (!ASSERT_GE(serv_port, 0, "get_sock_local_port"))
+ goto cleanup;
+ skel->bss->serv_port = (__be16) serv_port;
+
+ clien = connect_to_fd(serv, 0);
+ if (!ASSERT_GE(clien, 0, "connect_to_fd"))
+ goto cleanup;
+
+ accept_serv = accept(serv, NULL, NULL);
+ if (!ASSERT_GE(accept_serv, 0, "serv accept"))
+ goto cleanup;
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_EQ(n, 1, "client send"))
+ goto cleanup;
+
+ /* Run iterator program that destroys server sockets. */
+ start_iter_sockets(skel->progs.iter_tcp6_server);
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
+ goto cleanup;
+ ASSERT_EQ(errno, ECONNRESET, "error code on destroyed socket");
+
+cleanup:
+ if (clien != -1)
+ close(clien);
+ if (accept_serv != -1)
+ close(accept_serv);
+ if (serv != -1)
+ close(serv);
+}
+
+static void test_udp_client(struct sock_destroy_prog *skel)
+{
+ int serv = -1, clien = -1, n = 0;
+
+ serv = start_server(AF_INET6, SOCK_DGRAM, NULL, 0, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ clien = connect_to_fd(serv, 0);
+ if (!ASSERT_GE(clien, 0, "connect_to_fd"))
+ goto cleanup;
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_EQ(n, 1, "client send"))
+ goto cleanup;
+
+ /* Run iterator program that destroys sockets. */
+ start_iter_sockets(skel->progs.iter_udp6_client);
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
+ goto cleanup;
+ /* UDP sockets have an overriding error code after they are disconnected,
+ * so we don't check for ECONNABORTED error code.
+ */
+
+cleanup:
+ if (clien != -1)
+ close(clien);
+ if (serv != -1)
+ close(serv);
+}
+
+static void test_udp_server(struct sock_destroy_prog *skel)
+{
+ int *listen_fds = NULL, n, i, serv_port;
+ unsigned int num_listens = 5;
+ char buf[1];
+
+ /* Start reuseport servers. */
+ listen_fds = start_reuseport_server(AF_INET6, SOCK_DGRAM,
+ "::1", 0, 0, num_listens);
+ if (!ASSERT_OK_PTR(listen_fds, "start_reuseport_server"))
+ goto cleanup;
+ serv_port = get_socket_local_port(listen_fds[0]);
+ if (!ASSERT_GE(serv_port, 0, "get_sock_local_port"))
+ goto cleanup;
+ skel->bss->serv_port = (__be16) serv_port;
+
+ /* Run iterator program that destroys server sockets. */
+ start_iter_sockets(skel->progs.iter_udp6_server);
+
+ for (i = 0; i < num_listens; ++i) {
+ n = read(listen_fds[i], buf, sizeof(buf));
+ if (!ASSERT_EQ(n, -1, "read") ||
+ !ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket"))
+ break;
+ }
+ ASSERT_EQ(i, num_listens, "server socket");
+
+cleanup:
+ free_fds(listen_fds, num_listens);
+}
+
+void test_sock_destroy(void)
+{
+ struct sock_destroy_prog *skel;
+ struct nstoken *nstoken = NULL;
+ int cgroup_fd;
+
+ skel = sock_destroy_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ cgroup_fd = test__join_cgroup("/sock_destroy");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
+ goto cleanup;
+
+ skel->links.sock_connect = bpf_program__attach_cgroup(
+ skel->progs.sock_connect, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.sock_connect, "prog_attach"))
+ goto cleanup;
+
+ SYS(cleanup, "ip netns add %s", TEST_NS);
+ SYS(cleanup, "ip -net %s link set dev lo up", TEST_NS);
+
+ nstoken = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto cleanup;
+
+ if (test__start_subtest("tcp_client"))
+ test_tcp_client(skel);
+ if (test__start_subtest("tcp_server"))
+ test_tcp_server(skel);
+ if (test__start_subtest("udp_client"))
+ test_udp_client(skel);
+ if (test__start_subtest("udp_server"))
+ test_udp_server(skel);
+
+ RUN_TESTS(sock_destroy_prog_fail);
+
+cleanup:
+ if (nstoken)
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null");
+ if (cgroup_fd >= 0)
+ close(cgroup_fd);
+ sock_destroy_prog__destroy(skel);
+}
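For context, a hedged sketch of the BPF side this test drives: a TCP socket iterator that calls the bpf_sock_destroy() kfunc on sockets bound to the port under test. The kfunc prototype and the serv_port global are modeled on the selftest program; the filtering is simplified.

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch: destroy TCP sockets bound to serv_port from a socket iterator. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

__be16 serv_port = 0;

/* Assumed kfunc prototype, modeled on sock_destroy_prog.c. */
int bpf_sock_destroy(struct sock_common *sk) __ksym;

SEC("iter/tcp")
int iter_tcp6_server_sketch(struct bpf_iter__tcp *ctx)
{
	struct sock_common *sk_common = ctx->sk_common;

	if (!sk_common)
		return 0;

	/* Only destroy sockets bound to the server port under test. */
	if (sk_common->skc_num == bpf_ntohs(serv_port))
		bpf_sock_destroy(sk_common);

	return 0;
}

char _license[] SEC("license") = "GPL";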
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c
index aa4debf62fc6..9e6a5e3ed4de 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c
@@ -5,10 +5,15 @@
static char bpf_log_buf[4096];
static bool verbose;
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
enum sockopt_test_error {
OK = 0,
DENY_LOAD,
DENY_ATTACH,
+ EOPNOTSUPP_GETSOCKOPT,
EPERM_GETSOCKOPT,
EFAULT_GETSOCKOPT,
EPERM_SETSOCKOPT,
@@ -273,10 +278,31 @@ static struct sockopt_test {
.error = EFAULT_GETSOCKOPT,
},
{
- .descr = "getsockopt: deny arbitrary ctx->retval",
+ .descr = "getsockopt: ignore >PAGE_SIZE optlen",
.insns = {
- /* ctx->retval = 123 */
- BPF_MOV64_IMM(BPF_REG_0, 123),
+ /* write 0xFF to the first optval byte */
+
+ /* r6 = ctx->optval */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
+ offsetof(struct bpf_sockopt, optval)),
+ /* r2 = ctx->optval */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+ /* r6 = ctx->optval + 1 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+
+ /* r7 = ctx->optval_end */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sockopt, optval_end)),
+
+ /* if (ctx->optval + 1 <= ctx->optval_end) { */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
+		/* ctx->optval[0] = 0xFF */
+ BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xFF),
+ /* } */
+
+ /* retval changes are ignored */
+ /* ctx->retval = 5 */
+ BPF_MOV64_IMM(BPF_REG_0, 5),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, retval)),
@@ -287,9 +313,11 @@ static struct sockopt_test {
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
- .get_optlen = 64,
-
- .error = EFAULT_GETSOCKOPT,
+ .get_level = 1234,
+ .get_optname = 5678,
+ .get_optval = {}, /* the changes are ignored */
+ .get_optlen = PAGE_SIZE + 1,
+ .error = EOPNOTSUPP_GETSOCKOPT,
},
{
.descr = "getsockopt: support smaller ctx->optlen",
@@ -649,6 +677,45 @@ static struct sockopt_test {
.error = EFAULT_SETSOCKOPT,
},
{
+ .descr = "setsockopt: ignore >PAGE_SIZE optlen",
+ .insns = {
+		/* write 0xF0 to the first optval byte */
+
+ /* r6 = ctx->optval */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
+ offsetof(struct bpf_sockopt, optval)),
+ /* r2 = ctx->optval */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+ /* r6 = ctx->optval + 1 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+
+ /* r7 = ctx->optval_end */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sockopt, optval_end)),
+
+ /* if (ctx->optval + 1 <= ctx->optval_end) { */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
+ /* ctx->optval[0] = 0xF0 */
+ BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xF0),
+ /* } */
+
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SETSOCKOPT,
+ .expected_attach_type = BPF_CGROUP_SETSOCKOPT,
+
+ .set_level = SOL_IP,
+ .set_optname = IP_TOS,
+ .set_optval = {},
+ .set_optlen = PAGE_SIZE + 1,
+
+ .get_level = SOL_IP,
+ .get_optname = IP_TOS,
+ .get_optval = {}, /* the changes are ignored */
+ .get_optlen = 4,
+ },
+ {
.descr = "setsockopt: allow changing ctx->optlen within bounds",
.insns = {
/* r6 = ctx->optval */
@@ -906,6 +973,13 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
}
if (test->set_optlen) {
+ if (test->set_optlen >= PAGE_SIZE) {
+ int num_pages = test->set_optlen / PAGE_SIZE;
+ int remainder = test->set_optlen % PAGE_SIZE;
+
+ test->set_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder;
+ }
+
err = setsockopt(sock_fd, test->set_level, test->set_optname,
test->set_optval, test->set_optlen);
if (err) {
@@ -921,7 +995,15 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
}
if (test->get_optlen) {
+ if (test->get_optlen >= PAGE_SIZE) {
+ int num_pages = test->get_optlen / PAGE_SIZE;
+ int remainder = test->get_optlen % PAGE_SIZE;
+
+ test->get_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder;
+ }
+
optval = malloc(test->get_optlen);
+ memset(optval, 0, test->get_optlen);
socklen_t optlen = test->get_optlen;
socklen_t expected_get_optlen = test->get_optlen_ret ?:
test->get_optlen;
@@ -929,6 +1011,8 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
err = getsockopt(sock_fd, test->get_level, test->get_optname,
optval, &optlen);
if (err) {
+ if (errno == EOPNOTSUPP && test->error == EOPNOTSUPP_GETSOCKOPT)
+ goto free_optval;
if (errno == EPERM && test->error == EPERM_GETSOCKOPT)
goto free_optval;
if (errno == EFAULT && test->error == EFAULT_GETSOCKOPT)
@@ -976,7 +1060,9 @@ void test_sockopt(void)
return;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
- test__start_subtest(tests[i].descr);
+ if (!test__start_subtest(tests[i].descr))
+ continue;
+
ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr);
}
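The new ">PAGE_SIZE optlen" cases rely on a BPF-side convention that appears in the programs updated later in this series: when the option is larger than a page, the program shrinks ctx->optlen to 0 so the kernel falls back to the user's original buffer. A minimal sketch, with page_size assumed to be set from user space before load:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__u32 page_size = 0;	/* set from user space before load */

SEC("cgroup/setsockopt")
int ignore_oversized_optval(struct bpf_sockopt *ctx)
{
	/* An optval larger than PAGE_SIZE is only partially mirrored into the
	 * BPF buffer; shrinking optlen to 0 tells the kernel to apply the
	 * user's original option untouched.
	 */
	if (ctx->optlen > page_size)
		ctx->optlen = 0;

	return 1;	/* allow the setsockopt() to proceed */
}

char _license[] SEC("license") = "GPL";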
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
index 60c17a8e2789..917f486db826 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
@@ -2,6 +2,8 @@
#include <test_progs.h>
#include "cgroup_helpers.h"
+#include "sockopt_inherit.skel.h"
+
#define SOL_CUSTOM 0xdeadbeef
#define CUSTOM_INHERIT1 0
#define CUSTOM_INHERIT2 1
@@ -132,58 +134,30 @@ static int start_server(void)
return fd;
}
-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title,
- const char *prog_name)
-{
- enum bpf_attach_type attach_type;
- enum bpf_prog_type prog_type;
- struct bpf_program *prog;
- int err;
-
- err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
- if (err) {
- log_err("Failed to deduct types for %s BPF program", prog_name);
- return -1;
- }
-
- prog = bpf_object__find_program_by_name(obj, prog_name);
- if (!prog) {
- log_err("Failed to find %s BPF program", prog_name);
- return -1;
- }
-
- err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
- attach_type, 0);
- if (err) {
- log_err("Failed to attach %s BPF program", prog_name);
- return -1;
- }
-
- return 0;
-}
-
static void run_test(int cgroup_fd)
{
+ struct bpf_link *link_getsockopt = NULL;
+ struct bpf_link *link_setsockopt = NULL;
int server_fd = -1, client_fd;
- struct bpf_object *obj;
+ struct sockopt_inherit *obj;
void *server_err;
pthread_t tid;
int err;
- obj = bpf_object__open_file("sockopt_inherit.bpf.o", NULL);
- if (!ASSERT_OK_PTR(obj, "obj_open"))
+ obj = sockopt_inherit__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
- err = bpf_object__load(obj);
- if (!ASSERT_OK(err, "obj_load"))
- goto close_bpf_object;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
- err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt", "_getsockopt");
- if (!ASSERT_OK(err, "prog_attach _getsockopt"))
+ link_getsockopt = bpf_program__attach_cgroup(obj->progs._getsockopt,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_getsockopt, "cg-attach-getsockopt"))
goto close_bpf_object;
- err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt", "_setsockopt");
- if (!ASSERT_OK(err, "prog_attach _setsockopt"))
+ link_setsockopt = bpf_program__attach_cgroup(obj->progs._setsockopt,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_setsockopt, "cg-attach-setsockopt"))
goto close_bpf_object;
server_fd = start_server();
@@ -217,7 +191,10 @@ static void run_test(int cgroup_fd)
close_server_fd:
close(server_fd);
close_bpf_object:
- bpf_object__close(obj);
+ bpf_link__destroy(link_getsockopt);
+ bpf_link__destroy(link_setsockopt);
+
+ sockopt_inherit__destroy(obj);
}
void test_sockopt_inherit(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
index 7f5659349011..759bbb6f8c5f 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
@@ -2,61 +2,13 @@
#include <test_progs.h>
#include "cgroup_helpers.h"
-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name)
-{
- enum bpf_attach_type attach_type;
- enum bpf_prog_type prog_type;
- struct bpf_program *prog;
- int err;
-
- err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
- if (err) {
- log_err("Failed to deduct types for %s BPF program", title);
- return -1;
- }
-
- prog = bpf_object__find_program_by_name(obj, name);
- if (!prog) {
- log_err("Failed to find %s BPF program", name);
- return -1;
- }
-
- err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
- attach_type, BPF_F_ALLOW_MULTI);
- if (err) {
- log_err("Failed to attach %s BPF program", name);
- return -1;
- }
-
- return 0;
-}
+#include "sockopt_multi.skel.h"
-static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name)
-{
- enum bpf_attach_type attach_type;
- enum bpf_prog_type prog_type;
- struct bpf_program *prog;
- int err;
-
- err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
- if (err)
- return -1;
-
- prog = bpf_object__find_program_by_name(obj, name);
- if (!prog)
- return -1;
-
- err = bpf_prog_detach2(bpf_program__fd(prog), cgroup_fd,
- attach_type);
- if (err)
- return -1;
-
- return 0;
-}
-
-static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
+static int run_getsockopt_test(struct sockopt_multi *obj, int cg_parent,
int cg_child, int sock_fd)
{
+ struct bpf_link *link_parent = NULL;
+ struct bpf_link *link_child = NULL;
socklen_t optlen;
__u8 buf;
int err;
@@ -89,8 +41,9 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
* - child: 0x80 -> 0x90
*/
- err = prog_attach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
- if (err)
+ link_child = bpf_program__attach_cgroup(obj->progs._getsockopt_child,
+ cg_child);
+ if (!ASSERT_OK_PTR(link_child, "cg-attach-getsockopt_child"))
goto detach;
buf = 0x00;
@@ -113,8 +66,9 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
* - parent: 0x90 -> 0xA0
*/
- err = prog_attach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent");
- if (err)
+ link_parent = bpf_program__attach_cgroup(obj->progs._getsockopt_parent,
+ cg_parent);
+ if (!ASSERT_OK_PTR(link_parent, "cg-attach-getsockopt_parent"))
goto detach;
buf = 0x00;
@@ -157,11 +111,8 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
* - parent: unexpected 0x40, EPERM
*/
- err = prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
- if (err) {
- log_err("Failed to detach child program");
- goto detach;
- }
+ bpf_link__destroy(link_child);
+ link_child = NULL;
buf = 0x00;
optlen = 1;
@@ -198,15 +149,17 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
}
detach:
- prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
- prog_detach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent");
+ bpf_link__destroy(link_child);
+ bpf_link__destroy(link_parent);
return err;
}
-static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
+static int run_setsockopt_test(struct sockopt_multi *obj, int cg_parent,
int cg_child, int sock_fd)
{
+ struct bpf_link *link_parent = NULL;
+ struct bpf_link *link_child = NULL;
socklen_t optlen;
__u8 buf;
int err;
@@ -236,8 +189,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
/* Attach child program and make sure it adds 0x10. */
- err = prog_attach(obj, cg_child, "cgroup/setsockopt", "_setsockopt");
- if (err)
+ link_child = bpf_program__attach_cgroup(obj->progs._setsockopt,
+ cg_child);
+ if (!ASSERT_OK_PTR(link_child, "cg-attach-setsockopt_child"))
goto detach;
buf = 0x80;
@@ -263,8 +217,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
/* Attach parent program and make sure it adds another 0x10. */
- err = prog_attach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt");
- if (err)
+ link_parent = bpf_program__attach_cgroup(obj->progs._setsockopt,
+ cg_parent);
+ if (!ASSERT_OK_PTR(link_parent, "cg-attach-setsockopt_parent"))
goto detach;
buf = 0x80;
@@ -289,8 +244,8 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
}
detach:
- prog_detach(obj, cg_child, "cgroup/setsockopt", "_setsockopt");
- prog_detach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt");
+ bpf_link__destroy(link_child);
+ bpf_link__destroy(link_parent);
return err;
}
@@ -298,9 +253,8 @@ detach:
void test_sockopt_multi(void)
{
int cg_parent = -1, cg_child = -1;
- struct bpf_object *obj = NULL;
+ struct sockopt_multi *obj = NULL;
int sock_fd = -1;
- int err = -1;
cg_parent = test__join_cgroup("/parent");
if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent"))
@@ -310,13 +264,11 @@ void test_sockopt_multi(void)
if (!ASSERT_GE(cg_child, 0, "join_cgroup /parent/child"))
goto out;
- obj = bpf_object__open_file("sockopt_multi.bpf.o", NULL);
- if (!ASSERT_OK_PTR(obj, "obj_load"))
+ obj = sockopt_multi__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
goto out;
- err = bpf_object__load(obj);
- if (!ASSERT_OK(err, "obj_load"))
- goto out;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
sock_fd = socket(AF_INET, SOCK_STREAM, 0);
if (!ASSERT_GE(sock_fd, 0, "socket"))
@@ -327,7 +279,7 @@ void test_sockopt_multi(void)
out:
close(sock_fd);
- bpf_object__close(obj);
+ sockopt_multi__destroy(obj);
close(cg_child);
close(cg_parent);
}
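A short user-space sketch of the link-based attach pattern these tests were converted to, assuming the sockopt_multi skeleton: attach the cgroup program with bpf_program__attach_cgroup() and let bpf_link__destroy() detach it, replacing the removed prog_attach()/prog_detach() helpers.

/* Hedged sketch, assuming the sockopt_multi skeleton from this patch. */
#include <bpf/libbpf.h>
#include "sockopt_multi.skel.h"

static int attach_child_getsockopt(struct sockopt_multi *obj, int cg_fd)
{
	struct bpf_link *link;

	link = bpf_program__attach_cgroup(obj->progs._getsockopt_child, cg_fd);
	if (!link)
		return -1;

	/* ... exercise the socket here ... */

	bpf_link__destroy(link);	/* detaches the program from the cgroup */
	return 0;
}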
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c
index 6b53b3cb8dad..6b2d300e9fd4 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c
@@ -42,6 +42,8 @@ void test_sockopt_qos_to_cc(void)
if (!ASSERT_OK_PTR(skel, "skel"))
goto done;
+ skel->bss->page_size = sysconf(_SC_PAGESIZE);
+
sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
if (!ASSERT_GE(sock_fd, 0, "v6 socket open"))
goto done;
diff --git a/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c
new file mode 100644
index 000000000000..4224727fb364
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Bytedance */
+
+#include <sys/syscall.h>
+#include <test_progs.h>
+#include <cgroup_helpers.h>
+#include "test_task_under_cgroup.skel.h"
+
+#define FOO "/foo"
+
+void test_task_under_cgroup(void)
+{
+ struct test_task_under_cgroup *skel;
+ int ret, foo;
+ pid_t pid;
+
+ foo = test__join_cgroup(FOO);
+ if (!ASSERT_OK(foo < 0, "cgroup_join_foo"))
+ return;
+
+ skel = test_task_under_cgroup__open();
+ if (!ASSERT_OK_PTR(skel, "test_task_under_cgroup__open"))
+ goto cleanup;
+
+ skel->rodata->local_pid = getpid();
+ skel->bss->remote_pid = getpid();
+ skel->rodata->cgid = get_cgroup_id(FOO);
+
+ ret = test_task_under_cgroup__load(skel);
+ if (!ASSERT_OK(ret, "test_task_under_cgroup__load"))
+ goto cleanup;
+
+ ret = test_task_under_cgroup__attach(skel);
+ if (!ASSERT_OK(ret, "test_task_under_cgroup__attach"))
+ goto cleanup;
+
+ pid = fork();
+ if (pid == 0)
+ exit(0);
+
+ ret = (pid == -1);
+ if (ASSERT_OK(ret, "fork process"))
+ wait(NULL);
+
+ test_task_under_cgroup__detach(skel);
+
+ ASSERT_NEQ(skel->bss->remote_pid, skel->rodata->local_pid,
+ "test task_under_cgroup");
+
+cleanup:
+ test_task_under_cgroup__destroy(skel);
+ close(foo);
+}
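A hedged sketch of the kfunc usage the new test covers: from a task_newtask tracepoint, resolve a cgroup by id and check whether the new task sits under it. The kfunc prototypes are assumptions modeled on the selftest program; cgid is expected to be filled in from user space before load, as the test does.

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch: check whether a newly created task is under a known cgroup. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

const volatile __u64 cgid = 0;	/* filled in from user space, as the test does */

/* Assumed kfunc prototypes, modeled on the selftest program. */
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
void bpf_cgroup_release(struct cgroup *p) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(check_new_task, struct task_struct *task, u64 clone_flags)
{
	struct cgroup *cgrp;

	cgrp = bpf_cgroup_from_id(cgid);
	if (!cgrp)
		return 0;

	if (bpf_task_under_cgroup(task, cgrp))
		bpf_printk("task %d is under cgroup %llu", task->pid, cgid);

	bpf_cgroup_release(cgrp);
	return 0;
}

char _license[] SEC("license") = "GPL";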
diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
index 8383a99f610f..0adf8d9475cb 100644
--- a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
+++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
@@ -171,7 +171,11 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s
prog_insns, prog_insn_cnt, &load_opts),
-EPERM, "prog_load_fails");
- for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++)
+ /* some map types require particular correct parameters which could be
+ * sanity-checked before enforcing -EPERM, so only validate that
+ * the simple ARRAY and HASH maps are failing with -EPERM
+ */
+ for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++)
ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL),
-EPERM, "map_create_fails");
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 2497716ee379..070a13833c3f 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -50,11 +50,13 @@
#include "verifier_regalloc.skel.h"
#include "verifier_ringbuf.skel.h"
#include "verifier_runtime_jit.skel.h"
+#include "verifier_scalar_ids.skel.h"
#include "verifier_search_pruning.skel.h"
#include "verifier_sock.skel.h"
#include "verifier_spill_fill.skel.h"
#include "verifier_spin_lock.skel.h"
#include "verifier_stack_ptr.skel.h"
+#include "verifier_subprog_precision.skel.h"
#include "verifier_subreg.skel.h"
#include "verifier_uninit.skel.h"
#include "verifier_unpriv.skel.h"
@@ -149,11 +151,13 @@ void test_verifier_ref_tracking(void) { RUN(verifier_ref_tracking); }
void test_verifier_regalloc(void) { RUN(verifier_regalloc); }
void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); }
void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); }
+void test_verifier_scalar_ids(void) { RUN(verifier_scalar_ids); }
void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); }
void test_verifier_sock(void) { RUN(verifier_sock); }
void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
+void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
void test_verifier_subreg(void) { RUN(verifier_subreg); }
void test_verifier_uninit(void) { RUN(verifier_uninit); }
void test_verifier_unpriv(void) { RUN(verifier_unpriv); }
diff --git a/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c b/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c
new file mode 100644
index 000000000000..2a5e207edad6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * Topology:
+ * ---------
+ * NS0 namespace | NS1 namespace
+ * |
+ * +--------------+ | +--------------+
+ * | veth01 |----------| veth10 |
+ * | 172.16.1.100 | | | 172.16.1.200 |
+ * | bpf | | +--------------+
+ * +--------------+ |
+ * server(UDP/TCP) |
+ * +-------------------+ |
+ * | vrf1 | |
+ * | +--------------+ | | +--------------+
+ * | | veth02 |----------| veth20 |
+ * | | 172.16.2.100 | | | | 172.16.2.200 |
+ * | | bpf | | | +--------------+
+ * | +--------------+ | |
+ * | server(UDP/TCP) | |
+ * +-------------------+ |
+ *
+ * Test flow
+ * -----------
+ * The tests verify that socket lookup via TC is VRF-aware:
+ * 1) Creates two veth pairs between NS0 and NS1:
+ * a) veth01 <-> veth10 outside the VRF
+ * b) veth02 <-> veth20 in the VRF
+ * 2) Attaches to veth01 and veth02 a program that calls:
+ * a) bpf_skc_lookup_tcp() with TCP and tcp_skc is true
+ * b) bpf_sk_lookup_tcp() with TCP and tcp_skc is false
+ * c) bpf_sk_lookup_udp() with UDP
+ * The program stores the lookup result in bss->lookup_status.
+ * 3) Creates a socket TCP/UDP server in/outside the VRF.
+ * 4) The test expects lookup_status to be:
+ * a) 0 from device in VRF to server outside VRF
+ * b) 0 from device outside VRF to server in VRF
+ * c) 1 from device in VRF to server in VRF
+ * d) 1 from device outside VRF to server outside VRF
+ */
+
+#include <net/if.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "vrf_socket_lookup.skel.h"
+
+#define NS0 "vrf_socket_lookup_0"
+#define NS1 "vrf_socket_lookup_1"
+
+#define IP4_ADDR_VETH01 "172.16.1.100"
+#define IP4_ADDR_VETH10 "172.16.1.200"
+#define IP4_ADDR_VETH02 "172.16.2.100"
+#define IP4_ADDR_VETH20 "172.16.2.200"
+
+#define NON_VRF_PORT 5000
+#define IN_VRF_PORT 5001
+
+#define TIMEOUT_MS 3000
+
+static int make_socket(int sotype, const char *ip, int port,
+ struct sockaddr_storage *addr)
+{
+ int err, fd;
+
+ err = make_sockaddr(AF_INET, ip, port, addr, NULL);
+ if (!ASSERT_OK(err, "make_address"))
+ return -1;
+
+ fd = socket(AF_INET, sotype, 0);
+ if (!ASSERT_GE(fd, 0, "socket"))
+ return -1;
+
+ if (!ASSERT_OK(settimeo(fd, TIMEOUT_MS), "settimeo"))
+ goto fail;
+
+ return fd;
+fail:
+ close(fd);
+ return -1;
+}
+
+static int make_server(int sotype, const char *ip, int port, const char *ifname)
+{
+ int err, fd = -1;
+
+ fd = start_server(AF_INET, sotype, ip, port, TIMEOUT_MS);
+ if (!ASSERT_GE(fd, 0, "start_server"))
+ return -1;
+
+ if (ifname) {
+ err = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
+ ifname, strlen(ifname) + 1);
+ if (!ASSERT_OK(err, "setsockopt(SO_BINDTODEVICE)"))
+ goto fail;
+ }
+
+ return fd;
+fail:
+ close(fd);
+ return -1;
+}
+
+static int attach_progs(char *ifname, int tc_prog_fd, int xdp_prog_fd)
+{
+ LIBBPF_OPTS(bpf_tc_hook, hook, .attach_point = BPF_TC_INGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1,
+ .prog_fd = tc_prog_fd);
+ int ret, ifindex;
+
+ ifindex = if_nametoindex(ifname);
+ if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex"))
+ return -1;
+ hook.ifindex = ifindex;
+
+ ret = bpf_tc_hook_create(&hook);
+ if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
+ return ret;
+
+ ret = bpf_tc_attach(&hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(&hook);
+ return ret;
+ }
+ ret = bpf_xdp_attach(ifindex, xdp_prog_fd, 0, NULL);
+ if (!ASSERT_OK(ret, "bpf_xdp_attach")) {
+ bpf_tc_hook_destroy(&hook);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void cleanup(void)
+{
+ SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete "
+ NS0);
+ SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete "
+ NS1);
+}
+
+static int setup(struct vrf_socket_lookup *skel)
+{
+ int tc_prog_fd, xdp_prog_fd, ret = 0;
+ struct nstoken *nstoken = NULL;
+
+ SYS(fail, "ip netns add " NS0);
+ SYS(fail, "ip netns add " NS1);
+
+ /* NS0 <-> NS1 [veth01 <-> veth10] */
+ SYS(fail, "ip link add veth01 netns " NS0 " type veth peer name veth10"
+ " netns " NS1);
+ SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH01 "/24 dev veth01");
+ SYS(fail, "ip -net " NS0 " link set dev veth01 up");
+ SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH10 "/24 dev veth10");
+ SYS(fail, "ip -net " NS1 " link set dev veth10 up");
+
+ /* NS0 <-> NS1 [veth02 <-> veth20] */
+ SYS(fail, "ip link add veth02 netns " NS0 " type veth peer name veth20"
+ " netns " NS1);
+ SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH02 "/24 dev veth02");
+ SYS(fail, "ip -net " NS0 " link set dev veth02 up");
+ SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH20 "/24 dev veth20");
+ SYS(fail, "ip -net " NS1 " link set dev veth20 up");
+
+ /* veth02 -> vrf1 */
+ SYS(fail, "ip -net " NS0 " link add vrf1 type vrf table 11");
+ SYS(fail, "ip -net " NS0 " route add vrf vrf1 unreachable default"
+ " metric 4278198272");
+ SYS(fail, "ip -net " NS0 " link set vrf1 alias vrf");
+ SYS(fail, "ip -net " NS0 " link set vrf1 up");
+ SYS(fail, "ip -net " NS0 " link set veth02 master vrf1");
+
+ /* Attach TC and XDP progs to veth devices in NS0 */
+ nstoken = open_netns(NS0);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS0))
+ goto fail;
+ tc_prog_fd = bpf_program__fd(skel->progs.tc_socket_lookup);
+ if (!ASSERT_GE(tc_prog_fd, 0, "bpf_program__tc_fd"))
+ goto fail;
+ xdp_prog_fd = bpf_program__fd(skel->progs.xdp_socket_lookup);
+ if (!ASSERT_GE(xdp_prog_fd, 0, "bpf_program__xdp_fd"))
+ goto fail;
+
+ if (attach_progs("veth01", tc_prog_fd, xdp_prog_fd))
+ goto fail;
+
+ if (attach_progs("veth02", tc_prog_fd, xdp_prog_fd))
+ goto fail;
+
+ goto close;
+fail:
+ ret = -1;
+close:
+ if (nstoken)
+ close_netns(nstoken);
+ return ret;
+}
+
+static int test_lookup(struct vrf_socket_lookup *skel, int sotype,
+ const char *ip, int port, bool test_xdp, bool tcp_skc,
+ int lookup_status_exp)
+{
+ static const char msg[] = "Hello Server";
+ struct sockaddr_storage addr = {};
+ int fd, ret = 0;
+
+ fd = make_socket(sotype, ip, port, &addr);
+ if (fd < 0)
+ return -1;
+
+ skel->bss->test_xdp = test_xdp;
+ skel->bss->tcp_skc = tcp_skc;
+ skel->bss->lookup_status = -1;
+
+ if (sotype == SOCK_STREAM)
+ connect(fd, (void *)&addr, sizeof(struct sockaddr_in));
+ else
+ sendto(fd, msg, sizeof(msg), 0, (void *)&addr,
+ sizeof(struct sockaddr_in));
+
+ if (!ASSERT_EQ(skel->bss->lookup_status, lookup_status_exp,
+ "lookup_status"))
+ goto fail;
+
+ goto close;
+
+fail:
+ ret = -1;
+close:
+ close(fd);
+ return ret;
+}
+
+static void _test_vrf_socket_lookup(struct vrf_socket_lookup *skel, int sotype,
+ bool test_xdp, bool tcp_skc)
+{
+ int in_vrf_server = -1, non_vrf_server = -1;
+ struct nstoken *nstoken = NULL;
+
+ nstoken = open_netns(NS0);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS0))
+ goto done;
+
+ /* Open sockets in and outside VRF */
+ non_vrf_server = make_server(sotype, "0.0.0.0", NON_VRF_PORT, NULL);
+ if (!ASSERT_GE(non_vrf_server, 0, "make_server__outside_vrf_fd"))
+ goto done;
+
+ in_vrf_server = make_server(sotype, "0.0.0.0", IN_VRF_PORT, "veth02");
+ if (!ASSERT_GE(in_vrf_server, 0, "make_server__in_vrf_fd"))
+ goto done;
+
+ /* Perform test from NS1 */
+ close_netns(nstoken);
+ nstoken = open_netns(NS1);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS1))
+ goto done;
+
+ if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH02, NON_VRF_PORT,
+ test_xdp, tcp_skc, 0), "in_to_out"))
+ goto done;
+ if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH02, IN_VRF_PORT,
+ test_xdp, tcp_skc, 1), "in_to_in"))
+ goto done;
+ if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH01, NON_VRF_PORT,
+ test_xdp, tcp_skc, 1), "out_to_out"))
+ goto done;
+ if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH01, IN_VRF_PORT,
+ test_xdp, tcp_skc, 0), "out_to_in"))
+ goto done;
+
+done:
+ if (non_vrf_server >= 0)
+ close(non_vrf_server);
+ if (in_vrf_server >= 0)
+ close(in_vrf_server);
+ if (nstoken)
+ close_netns(nstoken);
+}
+
+void test_vrf_socket_lookup(void)
+{
+ struct vrf_socket_lookup *skel;
+
+ cleanup();
+
+ skel = vrf_socket_lookup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "vrf_socket_lookup__open_and_load"))
+ return;
+
+ if (!ASSERT_OK(setup(skel), "setup"))
+ goto done;
+
+	if (test__start_subtest("tc_socket_lookup_tcp"))
+		_test_vrf_socket_lookup(skel, SOCK_STREAM, false, false);
+	if (test__start_subtest("tc_socket_lookup_tcp_skc"))
+		_test_vrf_socket_lookup(skel, SOCK_STREAM, false, true);
+	if (test__start_subtest("tc_socket_lookup_udp"))
+		_test_vrf_socket_lookup(skel, SOCK_DGRAM, false, false);
+	if (test__start_subtest("xdp_socket_lookup_tcp"))
+		_test_vrf_socket_lookup(skel, SOCK_STREAM, true, false);
+	if (test__start_subtest("xdp_socket_lookup_tcp_skc"))
+		_test_vrf_socket_lookup(skel, SOCK_STREAM, true, true);
+	if (test__start_subtest("xdp_socket_lookup_udp"))
+		_test_vrf_socket_lookup(skel, SOCK_DGRAM, true, false);
+
+done:
+ vrf_socket_lookup__destroy(skel);
+ cleanup();
+}
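For reference, a hedged sketch of the kind of lookup the attached TC program performs: resolve a TCP socket for a 4-tuple in the current netns. With the VRF-aware fix this series tests, the lookup is scoped to the L3 domain of the ingress device. Packet parsing is omitted and the tuple values are illustrative.

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch: TC-ingress TCP socket lookup for a fixed, illustrative tuple. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

int lookup_status = -1;

SEC("tc")
int tc_socket_lookup_sketch(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	tuple.ipv4.daddr = bpf_htonl(0xac100164);	/* 172.16.1.100, illustrative */
	tuple.ipv4.dport = bpf_htons(5000);

	/* With the VRF-aware fix, the lookup is scoped to the L3 domain of the
	 * ingress device instead of always the default VRF.
	 */
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	lookup_status = sk ? 1 : 0;
	if (sk)
		bpf_sk_release(sk);

	return 0;	/* TC_ACT_OK */
}

char _license[] SEC("license") = "GPL";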
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
index d19f79048ff6..c3b45745cbcc 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
@@ -18,6 +18,7 @@
#include <linux/if_bonding.h>
#include <linux/limits.h>
#include <linux/udp.h>
+#include <uapi/linux/netdev.h>
#include "xdp_dummy.skel.h"
#include "xdp_redirect_multi_kern.skel.h"
@@ -492,6 +493,123 @@ out:
system("ip link del bond_nest2");
}
+static void test_xdp_bonding_features(struct skeletons *skeletons)
+{
+ LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
+ int bond_idx, veth1_idx, err;
+ struct bpf_link *link = NULL;
+
+ if (!ASSERT_OK(system("ip link add bond type bond"), "add bond"))
+ goto out;
+
+ bond_idx = if_nametoindex("bond");
+ if (!ASSERT_GE(bond_idx, 0, "if_nametoindex bond"))
+ goto out;
+
+ /* query default xdp-feature for bond device */
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
+ "bond query_opts.feature_flags"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link add veth0 type veth peer name veth1"),
+ "add veth{0,1} pair"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link add veth2 type veth peer name veth3"),
+ "add veth{2,3} pair"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link set veth0 master bond"),
+ "add veth0 to master bond"))
+ goto out;
+
+ /* xdp-feature for bond device should be obtained from the single slave
+ * device (veth0)
+ */
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG,
+ "bond query_opts.feature_flags"))
+ goto out;
+
+ veth1_idx = if_nametoindex("veth1");
+ if (!ASSERT_GE(veth1_idx, 0, "if_nametoindex veth1"))
+ goto out;
+
+ link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog,
+ veth1_idx);
+ if (!ASSERT_OK_PTR(link, "attach program to veth1"))
+ goto out;
+
+	/* xdp-features for veth0 have changed */
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG,
+ "bond query_opts.feature_flags"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link set veth2 master bond"),
+ "add veth2 to master bond"))
+ goto out;
+
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+	/* xdp-feature for bond device should be set to the most restrictive
+ * value obtained from attached slave devices (veth0 and veth2)
+ */
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG,
+ "bond query_opts.feature_flags"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link set veth2 nomaster"),
+ "del veth2 to master bond"))
+ goto out;
+
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG,
+ "bond query_opts.feature_flags"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link set veth0 nomaster"),
+ "del veth0 to master bond"))
+ goto out;
+
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+ ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
+ "bond query_opts.feature_flags");
+out:
+ bpf_link__destroy(link);
+ system("ip link del veth0");
+ system("ip link del veth2");
+ system("ip link del bond");
+}
+
static int libbpf_debug_print(enum libbpf_print_level level,
const char *format, va_list args)
{
@@ -546,6 +664,9 @@ void serial_test_xdp_bonding(void)
if (test__start_subtest("xdp_bonding_nested"))
test_xdp_bonding_nested(&skeletons);
+ if (test__start_subtest("xdp_bonding_features"))
+ test_xdp_bonding_features(&skeletons);
+
for (i = 0; i < ARRAY_SIZE(bond_test_cases); i++) {
struct bond_test_case *test_case = &bond_test_cases[i];
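A small user-space sketch of the feature query used throughout the new subtest, assuming a libbpf and kernel recent enough to report feature_flags via bpf_xdp_query(); the device name is an assumption.

/* Hedged user-space sketch of an XDP feature query. */
#include <stdio.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <linux/netdev.h>
#include <bpf/libbpf.h>

int main(void)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int ifindex = if_nametoindex("bond");	/* device name is illustrative */

	if (!ifindex || bpf_xdp_query(ifindex, XDP_FLAGS_DRV_MODE, &opts))
		return 1;

	printf("xdp features: 0x%llx\n", (unsigned long long)opts.feature_flags);
	if (opts.feature_flags & NETDEV_XDP_ACT_NDO_XMIT)
		printf("device can be an XDP_REDIRECT target\n");

	return 0;
}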
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index d3c1217ba79a..38a57a2e70db 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -86,6 +86,10 @@
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
+#ifndef __used
+#define __used __attribute__((used))
+#endif
+
#if defined(__TARGET_ARCH_x86)
#define SYSCALL_WRAPPER 1
#define SYS_PREFIX "__x64_"
diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c
index 50f95ec61165..76d661b20e87 100644
--- a/tools/testing/selftests/bpf/progs/cb_refs.c
+++ b/tools/testing/selftests/bpf/progs/cb_refs.c
@@ -2,6 +2,7 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
struct map_value {
struct prog_test_ref_kfunc __kptr *ptr;
@@ -14,9 +15,6 @@ struct {
__uint(max_entries, 16);
} array_map SEC(".maps");
-extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
-extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
-
static __noinline int cb1(void *map, void *key, void *value, void *ctx)
{
void *p = *(void **)ctx;
diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c
index b2a409e6382a..932b8ecd4ae3 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c
@@ -12,6 +12,7 @@ __u32 invocations = 0;
__u32 assertion_error = 0;
__u32 retval_value = 0;
__u32 ctx_retval_value = 0;
+__u32 page_size = 0;
SEC("cgroup/getsockopt")
int get_retval(struct bpf_sockopt *ctx)
@@ -20,6 +21,10 @@ int get_retval(struct bpf_sockopt *ctx)
ctx_retval_value = ctx->retval;
__sync_fetch_and_add(&invocations, 1);
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
return 1;
}
@@ -31,6 +36,10 @@ int set_eisconn(struct bpf_sockopt *ctx)
if (bpf_set_retval(-EISCONN))
assertion_error = 1;
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
return 1;
}
@@ -41,5 +50,9 @@ int clear_retval(struct bpf_sockopt *ctx)
ctx->retval = 0;
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c
index d6e5903e06ba..b7fa8804e19d 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c
@@ -11,6 +11,7 @@
__u32 invocations = 0;
__u32 assertion_error = 0;
__u32 retval_value = 0;
+__u32 page_size = 0;
SEC("cgroup/setsockopt")
int get_retval(struct bpf_sockopt *ctx)
@@ -18,6 +19,10 @@ int get_retval(struct bpf_sockopt *ctx)
retval_value = bpf_get_retval();
__sync_fetch_and_add(&invocations, 1);
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
return 1;
}
@@ -29,6 +34,10 @@ int set_eunatch(struct bpf_sockopt *ctx)
if (bpf_set_retval(-EUNATCH))
assertion_error = 1;
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
return 0;
}
@@ -40,6 +49,10 @@ int set_eisconn(struct bpf_sockopt *ctx)
if (bpf_set_retval(-EISCONN))
assertion_error = 1;
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
return 0;
}
@@ -48,5 +61,9 @@ int legacy_eperm(struct bpf_sockopt *ctx)
{
__sync_fetch_and_add(&invocations, 1);
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h
index 0c5b785a93e4..b15c588ace15 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_common.h
+++ b/tools/testing/selftests/bpf/progs/cpumask_common.h
@@ -28,6 +28,8 @@ void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
+u32 bpf_cpumask_first_and(const struct cpumask *src1,
+ const struct cpumask *src2) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
@@ -50,8 +52,8 @@ bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
-u32 bpf_cpumask_any(const struct cpumask *src) __ksym;
-u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) __ksym;
+u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym;
+u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym;
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
index 2fcdd7f68ac7..674a63424dee 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_success.c
+++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
@@ -5,6 +5,7 @@
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
#include "cpumask_common.h"
char _license[] SEC("license") = "GPL";
@@ -175,6 +176,38 @@ release_exit:
}
SEC("tp_btf/task_newtask")
+int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2;
+ u32 first;
+
+ if (!is_test_task())
+ return 0;
+
+ mask1 = create_cpumask();
+ if (!mask1)
+ return 0;
+
+ mask2 = create_cpumask();
+ if (!mask2)
+ goto release_exit;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+
+ first = bpf_cpumask_first_and(cast(mask1), cast(mask2));
+ if (first <= 1)
+ err = 3;
+
+release_exit:
+ if (mask1)
+ bpf_cpumask_release(mask1);
+ if (mask2)
+ bpf_cpumask_release(mask2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
@@ -311,13 +344,13 @@ int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
bpf_cpumask_set_cpu(1, mask2);
bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
- cpu = bpf_cpumask_any(cast(mask1));
+ cpu = bpf_cpumask_any_distribute(cast(mask1));
if (cpu != 0) {
err = 6;
goto release_exit;
}
- cpu = bpf_cpumask_any(cast(dst2));
+ cpu = bpf_cpumask_any_distribute(cast(dst2));
if (cpu < nr_cpus) {
err = 7;
goto release_exit;
@@ -329,13 +362,13 @@ int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
goto release_exit;
}
- cpu = bpf_cpumask_any(cast(dst2));
+ cpu = bpf_cpumask_any_distribute(cast(dst2));
if (cpu > 1) {
err = 9;
goto release_exit;
}
- cpu = bpf_cpumask_any_and(cast(mask1), cast(mask2));
+ cpu = bpf_cpumask_any_and_distribute(cast(mask1), cast(mask2));
if (cpu < nr_cpus) {
err = 10;
goto release_exit;
@@ -426,3 +459,26 @@ int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
return 0;
}
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2;
+
+ mask1 = bpf_cpumask_create();
+ mask2 = bpf_cpumask_create();
+
+ if (!mask1 || !mask2)
+ goto free_masks_return;
+
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)mask1);
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)mask2);
+
+free_masks_return:
+ if (mask1)
+ bpf_cpumask_release(mask1);
+ if (mask2)
+ bpf_cpumask_release(mask2);
+ return 0;
+}
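For reference, a rough user-space model of the semantics test_firstand_nocpu relies on (an assumption about the kfunc's contract, not its implementation): bpf_cpumask_first_and() returns the first CPU set in the intersection of the two masks, and a value >= nr_cpus when the intersection is empty, which is why first <= 1 above is treated as an error once cpu 0 and cpu 1 are set in disjoint masks.

#include <stdint.h>

/* Model limited to 64 CPUs; the real kfunc operates on struct cpumask. */
static unsigned int first_and_model(uint64_t src1, uint64_t src2,
				    unsigned int nr_cpus)
{
	uint64_t both = src1 & src2;
	unsigned int cpu;

	for (cpu = 0; cpu < nr_cpus && cpu < 64; cpu++)
		if (both & (1ULL << cpu))
			return cpu;
	return nr_cpus;	/* empty intersection: nothing to report */
}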
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index 759eb5c245cd..7ce7e827d5f0 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -3,6 +3,7 @@
#include <errno.h>
#include <string.h>
+#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <linux/if_ether.h>
@@ -1378,3 +1379,310 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
return 0;
}
+
+/* bpf_dynptr_adjust can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_adjust_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_adjust(&ptr, 1, 2);
+
+ return 0;
+}
+
+/* bpf_dynptr_is_null can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_is_null_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_is_null(&ptr);
+
+ return 0;
+}
+
+/* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_is_rdonly_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_is_rdonly(&ptr);
+
+ return 0;
+}
+
+/* bpf_dynptr_size can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_size_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_size(&ptr);
+
+ return 0;
+}
+
+/* Only initialized dynptrs can be cloned */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int clone_invalid1(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+
+ /* this should fail */
+ bpf_dynptr_clone(&ptr1, &ptr2);
+
+ return 0;
+}
+
+/* Can't overwrite an existing dynptr when cloning */
+SEC("?xdp")
+__failure __msg("cannot overwrite referenced dynptr")
+int clone_invalid2(struct xdp_md *xdp)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr clone;
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr1);
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone);
+
+ /* this should fail */
+ bpf_dynptr_clone(&ptr1, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its clones */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #3")
+int clone_invalidate1(void *ctx)
+{
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its parent */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #3")
+int clone_invalidate2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its siblings */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #3")
+int clone_invalidate3(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone1;
+ struct bpf_dynptr clone2;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone1);
+
+ bpf_dynptr_clone(&ptr, &clone2);
+
+ bpf_ringbuf_submit_dynptr(&clone2, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its clones
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate4(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_data(&clone, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its parent
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate5(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+ data = bpf_dynptr_data(&ptr, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its sibling
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate6(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone1;
+ struct bpf_dynptr clone2;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone1);
+
+ bpf_dynptr_clone(&ptr, &clone2);
+
+ data = bpf_dynptr_data(&clone1, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_ringbuf_submit_dynptr(&clone2, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* A skb clone's data slices should be invalid anytime packet data changes */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int clone_skb_packet_data(struct __sk_buff *skb)
+{
+ char buffer[sizeof(__u32)] = {};
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ __u32 *data;
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
+ if (!data)
+ return XDP_DROP;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* A xdp clone's data slices should be invalid anytime packet data changes */
+SEC("?xdp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_xdp_packet_data(struct xdp_md *xdp)
+{
+ char buffer[sizeof(__u32)] = {};
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ __u32 *data;
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
+ if (!data)
+ return XDP_DROP;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
+ return XDP_DROP;
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Buffers that are provided must be sufficiently long */
+SEC("?cgroup_skb/egress")
+__failure __msg("memory, len pair leads to invalid memory access")
+int test_dynptr_skb_small_buff(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ char buffer[8] = {};
+ __u64 *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* This may return NULL. SKB may require a buffer */
+ data = bpf_dynptr_slice(&ptr, 0, buffer, 9);
+
+ return !!data;
+}
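These new __failure/__msg cases are exercised the same way as the existing ones in this file: each program is loaded individually and the verifier log is matched against its annotations. A sketch of the usual harness call, assuming the prog_tests side drives them through the generic test_loader runner (the function name here is illustrative):

#include <test_progs.h>
#include "dynptr_fail.skel.h"

/* Loads every program in dynptr_fail.bpf.o and checks that the verifier
 * rejects it with a log matching the __msg() pattern above it.
 */
void test_dynptr_fail_annotations(void)
{
	RUN_TESTS(dynptr_fail);
}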
diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c
index b2fa6c47ecc0..5985920d162e 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_success.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_success.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2022 Facebook */
#include <string.h>
+#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
@@ -207,3 +208,339 @@ int test_dynptr_skb_data(struct __sk_buff *skb)
return 1;
}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_adjust(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u32 bytes = 64;
+ __u32 off = 10;
+ __u32 trim = 15;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr);
+ if (err) {
+ err = 1;
+ goto done;
+ }
+
+ if (bpf_dynptr_size(&ptr) != bytes) {
+ err = 2;
+ goto done;
+ }
+
+ /* Advance the dynptr by off */
+ err = bpf_dynptr_adjust(&ptr, off, bpf_dynptr_size(&ptr));
+ if (err) {
+ err = 3;
+ goto done;
+ }
+
+ if (bpf_dynptr_size(&ptr) != bytes - off) {
+ err = 4;
+ goto done;
+ }
+
+ /* Trim the dynptr */
+ err = bpf_dynptr_adjust(&ptr, off, 15);
+ if (err) {
+ err = 5;
+ goto done;
+ }
+
+ /* Check that the size was adjusted correctly */
+ if (bpf_dynptr_size(&ptr) != trim - off) {
+ err = 6;
+ goto done;
+ }
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_adjust_err(void *ctx)
+{
+ char write_data[45] = "hello there, world!!";
+ struct bpf_dynptr ptr;
+ __u32 size = 64;
+ __u32 off = 20;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
+ err = 1;
+ goto done;
+ }
+
+ /* Check that start can't be greater than end */
+ if (bpf_dynptr_adjust(&ptr, 5, 1) != -EINVAL) {
+ err = 2;
+ goto done;
+ }
+
+ /* Check that start can't be greater than size */
+ if (bpf_dynptr_adjust(&ptr, size + 1, size + 1) != -ERANGE) {
+ err = 3;
+ goto done;
+ }
+
+ /* Check that end can't be greater than size */
+ if (bpf_dynptr_adjust(&ptr, 0, size + 1) != -ERANGE) {
+ err = 4;
+ goto done;
+ }
+
+ if (bpf_dynptr_adjust(&ptr, off, size)) {
+ err = 5;
+ goto done;
+ }
+
+ /* Check that you can't write more bytes than available into the dynptr
+ * after you've adjusted it
+ */
+ if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
+ err = 6;
+ goto done;
+ }
+
+ /* Check that even after adjusting, submitting/discarding
+ * a ringbuf dynptr works
+ */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+ return 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_zero_size_dynptr(void *ctx)
+{
+ char write_data = 'x', read_data;
+ struct bpf_dynptr ptr;
+ __u32 size = 64;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
+ err = 1;
+ goto done;
+ }
+
+ /* After this, the dynptr has a size of 0 */
+ if (bpf_dynptr_adjust(&ptr, size, size)) {
+ err = 2;
+ goto done;
+ }
+
+ /* Test that reading + writing non-zero bytes is not ok */
+ if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) {
+ err = 3;
+ goto done;
+ }
+
+ if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
+ err = 4;
+ goto done;
+ }
+
+ /* Test that reading + writing 0 bytes from a 0-size dynptr is ok */
+ if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) {
+ err = 5;
+ goto done;
+ }
+
+ if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) {
+ err = 6;
+ goto done;
+ }
+
+ err = 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_is_null(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u64 size = 4;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ /* Pass in invalid flags, get back an invalid dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) {
+ err = 1;
+ goto exit_early;
+ }
+
+ /* Test that the invalid dynptr is null */
+ if (!bpf_dynptr_is_null(&ptr1)) {
+ err = 2;
+ goto exit_early;
+ }
+
+ /* Get a valid dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) {
+ err = 3;
+ goto exit;
+ }
+
+ /* Test that the valid dynptr is not null */
+ if (bpf_dynptr_is_null(&ptr2)) {
+ err = 4;
+ goto exit;
+ }
+
+exit:
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+exit_early:
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ return 0;
+}
+
+SEC("cgroup_skb/egress")
+int test_dynptr_is_rdonly(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ struct bpf_dynptr ptr3;
+
+ /* Pass in invalid flags, get back an invalid dynptr */
+ if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) {
+ err = 1;
+ return 0;
+ }
+
+ /* Test that an invalid dynptr is_rdonly returns false */
+ if (bpf_dynptr_is_rdonly(&ptr1)) {
+ err = 2;
+ return 0;
+ }
+
+ /* Get a read-only dynptr */
+ if (bpf_dynptr_from_skb(skb, 0, &ptr2)) {
+ err = 3;
+ return 0;
+ }
+
+ /* Test that the dynptr is read-only */
+ if (!bpf_dynptr_is_rdonly(&ptr2)) {
+ err = 4;
+ return 0;
+ }
+
+ /* Get a read-writeable dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) {
+ err = 5;
+ goto done;
+ }
+
+ /* Test that the dynptr is not read-only */
+ if (bpf_dynptr_is_rdonly(&ptr3)) {
+ err = 6;
+ goto done;
+ }
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr3, 0);
+ return 0;
+}
+
+SEC("cgroup_skb/egress")
+int test_dynptr_clone(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u32 off = 2, size;
+
+ /* Get a dynptr */
+ if (bpf_dynptr_from_skb(skb, 0, &ptr1)) {
+ err = 1;
+ return 0;
+ }
+
+ if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) {
+ err = 2;
+ return 0;
+ }
+
+ /* Clone the dynptr */
+ if (bpf_dynptr_clone(&ptr1, &ptr2)) {
+ err = 3;
+ return 0;
+ }
+
+ size = bpf_dynptr_size(&ptr1);
+
+ /* Check that the clone has the same size and read-only status */
+ if (bpf_dynptr_size(&ptr2) != size) {
+ err = 4;
+ return 0;
+ }
+
+ if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) {
+ err = 5;
+ return 0;
+ }
+
+ /* Advance and trim the original dynptr */
+ bpf_dynptr_adjust(&ptr1, 5, 5);
+
+ /* Check that only the original dynptr was affected, and the clone wasn't */
+ if (bpf_dynptr_size(&ptr2) != size) {
+ err = 6;
+ return 0;
+ }
+
+ return 0;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_skb_no_buff(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ __u64 *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* This may return NULL. SKB may require a buffer */
+ data = bpf_dynptr_slice(&ptr, 0, NULL, 1);
+
+ return !!data;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_skb_strcmp(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ char *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* This may return NULL. SKB may require a buffer */
+ data = bpf_dynptr_slice(&ptr, 0, NULL, 10);
+ if (data) {
+ bpf_strncmp(data, 10, "foo");
+ return 1;
+ }
+
+ return 1;
+}
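The tp/syscalls programs above gate on the pid global and report through err, so a user-space driver is expected to look roughly like the sketch below (skeleton names follow the usual generation; the real prog_tests code may load and attach programs individually, and the pid/err globals are assumed to live in .bss):

#include <unistd.h>
#include <bpf/libbpf.h>
#include "dynptr_success.skel.h"

static int run_test_adjust(void)
{
	struct dynptr_success *skel;
	struct bpf_link *link = NULL;
	int err = -1;

	skel = dynptr_success__open_and_load();
	if (!skel)
		return -1;

	skel->bss->pid = getpid();

	link = bpf_program__attach(skel->progs.test_adjust);
	if (!link)
		goto out;

	usleep(1);		/* nanosleep syscall fires the tracepoint */
	err = skel->bss->err;	/* 0 means every check in test_adjust passed */
out:
	bpf_link__destroy(link);
	dynptr_success__destroy(skel);
	return err;
}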
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index be16143ae292..6b9b3c56f009 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -651,29 +651,25 @@ int iter_stack_array_loop(const void *ctx)
return sum;
}
-#define ARR_SZ 16
-
-static __noinline void fill(struct bpf_iter_num *it, int *arr, int mul)
+static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
{
- int *t;
- __u64 i;
+ int *t, i;
while ((t = bpf_iter_num_next(it))) {
i = *t;
- if (i >= ARR_SZ)
+ if (i >= n)
break;
arr[i] = i * mul;
}
}
-static __noinline int sum(struct bpf_iter_num *it, int *arr)
+static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
{
- int *t, sum = 0;;
- __u64 i;
+ int *t, i, sum = 0;
while ((t = bpf_iter_num_next(it))) {
i = *t;
- if (i >= ARR_SZ)
+ if (i >= n)
break;
sum += arr[i];
}
@@ -685,7 +681,7 @@ SEC("raw_tp")
__success
int iter_pass_iter_ptr_to_subprog(const void *ctx)
{
- int arr1[ARR_SZ], arr2[ARR_SZ];
+ int arr1[16], arr2[32];
struct bpf_iter_num it;
int n, sum1, sum2;
@@ -694,25 +690,25 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx)
/* fill arr1 */
n = ARRAY_SIZE(arr1);
bpf_iter_num_new(&it, 0, n);
- fill(&it, arr1, 2);
+ fill(&it, arr1, n, 2);
bpf_iter_num_destroy(&it);
/* fill arr2 */
n = ARRAY_SIZE(arr2);
bpf_iter_num_new(&it, 0, n);
- fill(&it, arr2, 10);
+ fill(&it, arr2, n, 10);
bpf_iter_num_destroy(&it);
/* sum arr1 */
n = ARRAY_SIZE(arr1);
bpf_iter_num_new(&it, 0, n);
- sum1 = sum(&it, arr1);
+ sum1 = sum(&it, arr1, n);
bpf_iter_num_destroy(&it);
/* sum arr2 */
n = ARRAY_SIZE(arr2);
bpf_iter_num_new(&it, 0, n);
- sum2 = sum(&it, arr2);
+ sum2 = sum(&it, arr2, n);
bpf_iter_num_destroy(&it);
bpf_printk("sum1=%d, sum2=%d", sum1, sum2);
diff --git a/tools/testing/selftests/bpf/progs/jit_probe_mem.c b/tools/testing/selftests/bpf/progs/jit_probe_mem.c
index 13f00ca2ed0a..f9789e668297 100644
--- a/tools/testing/selftests/bpf/progs/jit_probe_mem.c
+++ b/tools/testing/selftests/bpf/progs/jit_probe_mem.c
@@ -3,13 +3,11 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
static struct prog_test_ref_kfunc __kptr *v;
long total_sum = -1;
-extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
-extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
-
SEC("tc")
int test_jit_probe_mem(struct __sk_buff *ctx)
{
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
index 767472bc5a97..7632d9ecb253 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-
-extern void bpf_kfunc_call_test_destructive(void) __ksym;
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_destructive_test(void)
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
index b98313d391c6..4b0b7b79cdfb 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
@@ -2,14 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-
-extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
-extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
-extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym;
-extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym;
-extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
-extern int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
-extern void bpf_kfunc_call_int_mem_release(int *p) __ksym;
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
struct syscall_test_args {
__u8 data[16];
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_race.c b/tools/testing/selftests/bpf/progs/kfunc_call_race.c
index 4e8fed75a4e0..d532af07decf 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_race.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_race.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-
-extern void bpf_testmod_test_mod_kfunc(int i) __ksym;
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_call_fail(struct __sk_buff *ctx)
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
index 7daa8f5720b9..cf68d1e48a0f 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -2,22 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-
-extern long bpf_kfunc_call_test4(signed char a, short b, int c, long d) __ksym;
-extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;
-extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
- __u32 c, __u64 d) __ksym;
-
-extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
-extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
-extern void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) __ksym;
-extern void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym;
-extern void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) __ksym;
-extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym;
-extern void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym;
-extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym;
-extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
-extern u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym;
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_call_test4(struct __sk_buff *skb)
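The externs dropped here and in the neighbouring kfunc_call_* files are assumed to now come from the shared bpf_testmod/bpf_testmod_kfunc.h header; it is expected to carry the same prototypes that were previously open-coded, for example (illustrative excerpt, actual header contents may differ):

extern struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
extern u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym;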
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
index c1fdecabeabf..2380c75e74ce 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
@@ -1,13 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-#include "bpf_tcp_helpers.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
extern const int bpf_prog_active __ksym;
-extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
- __u32 c, __u64 d) __ksym;
-extern struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym;
int active_res = -1;
int sk_state_res = -1;
@@ -28,7 +23,7 @@ int __noinline f1(struct __sk_buff *skb)
if (active)
active_res = *active;
- sk_state_res = bpf_kfunc_call_test3((struct sock *)sk)->sk_state;
+ sk_state_res = bpf_kfunc_call_test3((struct sock *)sk)->__sk_common.skc_state;
return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
}
diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
index 0ef286da092b..06838083079c 100644
--- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c
+++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
@@ -5,7 +5,8 @@
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
-#include "bpf_experimental.h"
+#include "../bpf_experimental.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
struct node_data {
long key;
@@ -32,8 +33,6 @@ struct map_value {
*/
struct node_data *just_here_because_btf_bug;
-extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
index d7150041e5d1..da30f0d59364 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -2,6 +2,7 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
struct map_value {
struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
@@ -114,10 +115,6 @@ DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
-extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
-extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
-void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) __ksym;
-
#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
static void test_kptr_unref(struct map_value *v)
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
index da8c724f839b..450bb373b179 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -4,6 +4,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
struct map_value {
char buf[8];
@@ -19,9 +20,6 @@ struct array_map {
__uint(max_entries, 1);
} array_map SEC(".maps");
-extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
-extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
-
SEC("?tc")
__failure __msg("kptr access size must be BPF_DW")
int size_not_bpf_dw(struct __sk_buff *ctx)
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index 1d348a225140..a3da610b1e6b 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -375,6 +375,8 @@ long rbtree_refcounted_node_ref_escapes(void *ctx)
bpf_rbtree_add(&aroot, &n->node, less_a);
m = bpf_refcount_acquire(n);
bpf_spin_unlock(&alock);
+ if (!m)
+ return 2;
m->key = 2;
bpf_obj_drop(m);
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
index efcb308f80ad..0b09e5c915b1 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -29,7 +29,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
}
SEC("?tc")
-__failure __msg("Unreleased reference id=3 alloc_insn=21")
+__failure __msg("Unreleased reference id=4 alloc_insn=21")
long rbtree_refcounted_node_ref_escapes(void *ctx)
{
struct node_acquire *n, *m;
@@ -43,6 +43,8 @@ long rbtree_refcounted_node_ref_escapes(void *ctx)
/* m becomes an owning ref but is never drop'd or added to a tree */
m = bpf_refcount_acquire(n);
bpf_spin_unlock(&glock);
+ if (!m)
+ return 2;
m->key = 2;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/sock_destroy_prog.c b/tools/testing/selftests/bpf/progs/sock_destroy_prog.c
new file mode 100644
index 000000000000..9e0bf7a54cec
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sock_destroy_prog.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include "bpf_tracing_net.h"
+
+__be16 serv_port = 0;
+
+int bpf_sock_destroy(struct sock_common *sk) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} tcp_conn_sockets SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} udp_conn_sockets SEC(".maps");
+
+SEC("cgroup/connect6")
+int sock_connect(struct bpf_sock_addr *ctx)
+{
+ __u64 sock_cookie = 0;
+ int key = 0;
+ __u32 keyc = 0;
+
+ if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
+ return 1;
+
+ sock_cookie = bpf_get_socket_cookie(ctx);
+ if (ctx->protocol == IPPROTO_TCP)
+ bpf_map_update_elem(&tcp_conn_sockets, &key, &sock_cookie, 0);
+ else if (ctx->protocol == IPPROTO_UDP)
+ bpf_map_update_elem(&udp_conn_sockets, &keyc, &sock_cookie, 0);
+ else
+ return 1;
+
+ return 1;
+}
+
+SEC("iter/tcp")
+int iter_tcp6_client(struct bpf_iter__tcp *ctx)
+{
+ struct sock_common *sk_common = ctx->sk_common;
+ __u64 sock_cookie = 0;
+ __u64 *val;
+ int key = 0;
+
+ if (!sk_common)
+ return 0;
+
+ if (sk_common->skc_family != AF_INET6)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk_common);
+ val = bpf_map_lookup_elem(&tcp_conn_sockets, &key);
+ if (!val)
+ return 0;
+ /* Destroy connected client sockets. */
+ if (sock_cookie == *val)
+ bpf_sock_destroy(sk_common);
+
+ return 0;
+}
+
+SEC("iter/tcp")
+int iter_tcp6_server(struct bpf_iter__tcp *ctx)
+{
+ struct sock_common *sk_common = ctx->sk_common;
+ const struct inet_connection_sock *icsk;
+ const struct inet_sock *inet;
+ struct tcp6_sock *tcp_sk;
+ __be16 srcp;
+
+ if (!sk_common)
+ return 0;
+
+ if (sk_common->skc_family != AF_INET6)
+ return 0;
+
+ tcp_sk = bpf_skc_to_tcp6_sock(sk_common);
+ if (!tcp_sk)
+ return 0;
+
+ icsk = &tcp_sk->tcp.inet_conn;
+ inet = &icsk->icsk_inet;
+ srcp = inet->inet_sport;
+
+ /* Destroy server sockets. */
+ if (srcp == serv_port)
+ bpf_sock_destroy(sk_common);
+
+ return 0;
+}
+
+
+SEC("iter/udp")
+int iter_udp6_client(struct bpf_iter__udp *ctx)
+{
+ struct udp_sock *udp_sk = ctx->udp_sk;
+ struct sock *sk = (struct sock *) udp_sk;
+ __u64 sock_cookie = 0, *val;
+ int key = 0;
+
+ if (!sk)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk);
+ val = bpf_map_lookup_elem(&udp_conn_sockets, &key);
+ if (!val)
+ return 0;
+ /* Destroy connected client sockets. */
+ if (sock_cookie == *val)
+ bpf_sock_destroy((struct sock_common *)sk);
+
+ return 0;
+}
+
+SEC("iter/udp")
+int iter_udp6_server(struct bpf_iter__udp *ctx)
+{
+ struct udp_sock *udp_sk = ctx->udp_sk;
+ struct sock *sk = (struct sock *) udp_sk;
+ struct inet_sock *inet;
+ __be16 srcp;
+
+ if (!sk)
+ return 0;
+
+ inet = &udp_sk->inet;
+ srcp = inet->inet_sport;
+ if (srcp == serv_port)
+ bpf_sock_destroy((struct sock_common *)sk);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
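A bpf_iter program only executes when its iterator is read, so the destroy-by-iterator flow above would be driven from user space roughly as sketched here (assumes sock_connect has been attached to a cgroup and a TCP client connection already made, so tcp_conn_sockets holds the target cookie; error handling trimmed):

#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "sock_destroy_prog.skel.h"

static int destroy_tcp_client(struct sock_destroy_prog *skel)
{
	struct bpf_link *link;
	char buf[16];
	int iter_fd, ret = -1;

	link = bpf_program__attach_iter(skel->progs.iter_tcp6_client, NULL);
	if (!link)
		return -1;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0)
		goto out;

	/* Reading the iterator walks the TCP sockets and lets the program
	 * call bpf_sock_destroy() on the matching one.
	 */
	while (read(iter_fd, buf, sizeof(buf)) > 0)
		;
	ret = 0;

	close(iter_fd);
out:
	bpf_link__destroy(link);
	return ret;
}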
diff --git a/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c b/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c
new file mode 100644
index 000000000000..dd6850b58e25
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int bpf_sock_destroy(struct sock_common *sk) __ksym;
+
+SEC("tp_btf/tcp_destroy_sock")
+__failure __msg("calling kernel function bpf_sock_destroy is not allowed")
+int BPF_PROG(trace_tcp_destroy_sock, struct sock *sk)
+{
+ /* should not load */
+ bpf_sock_destroy((struct sock_common *)sk);
+
+ return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/sockopt_inherit.c b/tools/testing/selftests/bpf/progs/sockopt_inherit.c
index 9fb241b97291..c8f59caa4639 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_inherit.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_inherit.c
@@ -9,6 +9,8 @@ char _license[] SEC("license") = "GPL";
#define CUSTOM_INHERIT2 1
#define CUSTOM_LISTENER 2
+__u32 page_size = 0;
+
struct sockopt_inherit {
__u8 val;
};
@@ -55,7 +57,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
__u8 *optval = ctx->optval;
if (ctx->level != SOL_CUSTOM)
- return 1; /* only interested in SOL_CUSTOM */
+ goto out; /* only interested in SOL_CUSTOM */
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -70,6 +72,12 @@ int _getsockopt(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
SEC("cgroup/setsockopt")
@@ -80,7 +88,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
__u8 *optval = ctx->optval;
if (ctx->level != SOL_CUSTOM)
- return 1; /* only interested in SOL_CUSTOM */
+ goto out; /* only interested in SOL_CUSTOM */
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -93,4 +101,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
ctx->optlen = -1;
return 1;
+
+out:
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/sockopt_multi.c b/tools/testing/selftests/bpf/progs/sockopt_multi.c
index 177a59069dae..96f29fce050b 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_multi.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_multi.c
@@ -5,6 +5,8 @@
char _license[] SEC("license") = "GPL";
+__u32 page_size = 0;
+
SEC("cgroup/getsockopt")
int _getsockopt_child(struct bpf_sockopt *ctx)
{
@@ -12,7 +14,7 @@ int _getsockopt_child(struct bpf_sockopt *ctx)
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
- return 1;
+ goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -26,6 +28,12 @@ int _getsockopt_child(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
SEC("cgroup/getsockopt")
@@ -35,7 +43,7 @@ int _getsockopt_parent(struct bpf_sockopt *ctx)
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
- return 1;
+ goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -49,6 +57,12 @@ int _getsockopt_parent(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
SEC("cgroup/setsockopt")
@@ -58,7 +72,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
- return 1;
+ goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -67,4 +81,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c
index 1bce83b6e3a7..dbe235ede7f3 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c
@@ -9,6 +9,8 @@
char _license[] SEC("license") = "GPL";
+__u32 page_size = 0;
+
SEC("cgroup/setsockopt")
int sockopt_qos_to_cc(struct bpf_sockopt *ctx)
{
@@ -19,7 +21,7 @@ int sockopt_qos_to_cc(struct bpf_sockopt *ctx)
char cc_cubic[TCP_CA_NAME_MAX] = "cubic";
if (ctx->level != SOL_IPV6 || ctx->optname != IPV6_TCLASS)
- return 1;
+ goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -36,4 +38,10 @@ int sockopt_qos_to_cc(struct bpf_sockopt *ctx)
return 0;
}
return 1;
+
+out:
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c
index fe1df4cd206e..cb990a7d3d45 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c
@@ -37,7 +37,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
/* Bypass AF_NETLINK. */
sk = ctx->sk;
if (sk && sk->family == AF_NETLINK)
- return 1;
+ goto out;
/* Make sure bpf_get_netns_cookie is callable.
*/
@@ -52,8 +52,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
- ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
- return 1;
+ goto out;
}
if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
@@ -61,7 +60,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
- return 1;
+ goto out;
}
if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION) {
@@ -69,7 +68,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
- return 1;
+ goto out;
}
if (ctx->level == SOL_TCP && ctx->optname == TCP_ZEROCOPY_RECEIVE) {
@@ -85,7 +84,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
if (((struct tcp_zerocopy_receive *)optval)->address != 0)
return 0; /* unexpected data */
- return 1;
+ goto out;
}
if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
@@ -129,6 +128,12 @@ int _getsockopt(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
SEC("cgroup/setsockopt")
@@ -142,7 +147,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
/* Bypass AF_NETLINK. */
sk = ctx->sk;
if (sk && sk->family == AF_NETLINK)
- return 1;
+ goto out;
/* Make sure bpf_get_netns_cookie is callable.
*/
@@ -224,4 +229,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
*/
return 1;
+
+out:
+ /* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c
index b85fc8c423ba..17a9f59bf5f3 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func1.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func1.c
@@ -10,6 +10,8 @@
static __attribute__ ((noinline))
int f0(int var, struct __sk_buff *skb)
{
+ asm volatile ("");
+
return skb->len;
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
new file mode 100644
index 000000000000..2588f2384246
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* rodata section */
+const volatile pid_t pid;
+const volatile size_t bss_array_len;
+const volatile size_t data_array_len;
+
+/* bss section */
+int sum = 0;
+int array[1];
+
+/* custom data section */
+int my_array[1] SEC(".data.custom");
+
+/* custom data section which should NOT be resizable,
+ * since it contains a single var which is not an array
+ */
+int my_int SEC(".data.non_array");
+
+/* custom data section which should NOT be resizable,
+ * since its last var is not an array
+ */
+int my_array_first[1] SEC(".data.array_not_last");
+int my_int_last SEC(".data.array_not_last");
+
+SEC("tp/syscalls/sys_enter_getpid")
+int bss_array_sum(void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ sum = 0;
+
+ for (size_t i = 0; i < bss_array_len; ++i)
+ sum += array[i];
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getuid")
+int data_array_sum(void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ sum = 0;
+
+ for (size_t i = 0; i < data_array_len; ++i)
+ sum += my_array[i];
+
+ return 0;
+}
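The point of keeping array[] and my_array[] as the last members of their sections is that user space can grow the backing global-data maps before load; a sketch of that flow using libbpf's bpf_map__set_value_size() (assumes .bss is laid out as sum followed by array, as declared above):

#include <unistd.h>
#include <bpf/libbpf.h>
#include "test_global_map_resize.skel.h"

static int load_with_resized_bss(size_t array_len)
{
	struct test_global_map_resize *skel;
	/* .bss holds 'sum' followed by 'array[]'; size both. */
	size_t bss_sz = sizeof(int) + array_len * sizeof(int);
	int err = -1;

	skel = test_global_map_resize__open();
	if (!skel)
		return -1;

	skel->rodata->pid = getpid();
	skel->rodata->bss_array_len = array_len;

	/* Grow the map backing .bss before the object is loaded. */
	if (bpf_map__set_value_size(skel->maps.bss, bss_sz))
		goto out;

	err = test_global_map_resize__load(skel);
	/* ... attach, trigger getpid(), read back 'sum' ... */
out:
	test_global_map_resize__destroy(skel);
	return err;
}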
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c
index bbad3c2d9aa5..f75e531bf36f 100644
--- a/tools/testing/selftests/bpf/progs/test_sock_fields.c
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c
@@ -265,7 +265,10 @@ static __noinline bool sk_dst_port__load_word(struct bpf_sock *sk)
static __noinline bool sk_dst_port__load_half(struct bpf_sock *sk)
{
- __u16 *half = (__u16 *)&sk->dst_port;
+ __u16 *half;
+
+ asm volatile ("");
+ half = (__u16 *)&sk->dst_port;
return half[0] == bpf_htons(0xcafe);
}
diff --git a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
new file mode 100644
index 000000000000..56cdc0a553f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Bytedance */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+const volatile int local_pid;
+const volatile __u64 cgid;
+int remote_pid;
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(handle__task_newtask, struct task_struct *task, u64 clone_flags)
+{
+ struct cgroup *cgrp = NULL;
+ struct task_struct *acquired;
+
+ if (local_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ acquired = bpf_task_acquire(task);
+ if (!acquired)
+ return 0;
+
+ if (local_pid == acquired->tgid)
+ goto out;
+
+ cgrp = bpf_cgroup_from_id(cgid);
+ if (!cgrp)
+ goto out;
+
+ if (bpf_task_under_cgroup(acquired, cgrp))
+ remote_pid = acquired->tgid;
+
+out:
+ if (cgrp)
+ bpf_cgroup_release(cgrp);
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
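handle__task_newtask only reports a remote_pid when a task created by this process belongs to the configured cgroup, so the user-space side is expected to set the rodata inputs, attach, and fork a child. A hypothetical driver (assumes the caller has already moved itself into the cgroup identified by cgroup_id, so the child starts out inside it):

#include <unistd.h>
#include <sys/wait.h>
#include "test_task_under_cgroup.skel.h"

static int run_task_under_cgroup(unsigned long long cgroup_id)
{
	struct test_task_under_cgroup *skel;
	pid_t child;
	int ret = -1;

	skel = test_task_under_cgroup__open();
	if (!skel)
		return -1;

	skel->rodata->local_pid = getpid();
	skel->rodata->cgid = cgroup_id;

	if (test_task_under_cgroup__load(skel) ||
	    test_task_under_cgroup__attach(skel))
		goto out;

	child = fork();			/* fires tp_btf/task_newtask */
	if (child == 0)
		_exit(0);
	if (child > 0)
		waitpid(child, NULL, 0);

	ret = skel->bss->remote_pid == child ? 0 : -1;
out:
	test_task_under_cgroup__destroy(skel);
	return ret;
}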
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c
index 25ee4a22e48d..78c368e71797 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2022 Meta */
#include <stddef.h>
#include <string.h>
+#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
new file mode 100644
index 000000000000..13b29a7faa71
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/* Check that precision marks propagate through scalar IDs.
+ * Registers r{0,1,2} have the same scalar ID at the moment when r0 is
+ * marked to be precise, this mark is immediately propagated to r{1,2}.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("frame0: regs=r0,r1,r2 stack= before 4: (bf) r3 = r10")
+__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
+__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
+__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
+__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void precision_same_state(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == r2.id */
+ "r1 = r0;"
+ "r2 = r0;"
+ /* force r0 to be precise, this immediately marks r1 and r2 as
+ * precise as well because of shared IDs
+ */
+ "r3 = r10;"
+ "r3 += r0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Same as precision_same_state, but mark propagates through state /
+ * parent state boundary.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("frame0: last_idx 6 first_idx 5 subseq_idx -1")
+__msg("frame0: regs=r0,r1,r2 stack= before 5: (bf) r3 = r10")
+__msg("frame0: parent state regs=r0,r1,r2 stack=:")
+__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
+__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
+__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
+__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
+__msg("frame0: parent state regs=r0 stack=:")
+__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void precision_cross_state(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == r2.id */
+ "r1 = r0;"
+ "r2 = r0;"
+ /* force checkpoint */
+ "goto +0;"
+ /* force r0 to be precise, this immediately marks r1 and r2 as
+ * precise as well because of shared IDs
+ */
+ "r3 = r10;"
+ "r3 += r0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Same as precision_same_state, but break one of the
+ * links, note that r1 is absent from regs=... in __msg below.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("frame0: regs=r0,r2 stack= before 5: (bf) r3 = r10")
+__msg("frame0: regs=r0,r2 stack= before 4: (b7) r1 = 0")
+__msg("frame0: regs=r0,r2 stack= before 3: (bf) r2 = r0")
+__msg("frame0: regs=r0 stack= before 2: (bf) r1 = r0")
+__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
+__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void precision_same_state_broken_link(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == r2.id */
+ "r1 = r0;"
+ "r2 = r0;"
+ /* break link for r1, this is the only line that differs
+ * compared to the previous test
+ */
+ "r1 = 0;"
+ /* force r0 to be precise, this immediately marks r1 and r2 as
+ * precise as well because of shared IDs
+ */
+ "r3 = r10;"
+ "r3 += r0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Same as precision_same_state_broken_link, but with state /
+ * parent state boundary.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("frame0: regs=r0,r2 stack= before 6: (bf) r3 = r10")
+__msg("frame0: regs=r0,r2 stack= before 5: (b7) r1 = 0")
+__msg("frame0: parent state regs=r0,r2 stack=:")
+__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
+__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
+__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
+__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
+__msg("frame0: parent state regs=r0 stack=:")
+__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void precision_cross_state_broken_link(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == r2.id */
+ "r1 = r0;"
+ "r2 = r0;"
+ /* force checkpoint; although the link between r1 and r{0,2} is
+ * broken by the next statement, the current precision tracking
+ * algorithm can't react to it and propagates the mark for r1 to
+ * the parent state.
+ */
+ "goto +0;"
+ /* break link for r1, this is the only line that differs
+ * compared to precision_cross_state()
+ */
+ "r1 = 0;"
+ /* force r0 to be precise, this immediately marks r1 and r2 as
+ * precise as well because of shared IDs
+ */
+ "r3 = r10;"
+ "r3 += r0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Check that precision marks propagate through scalar IDs.
+ * Use the same scalar ID in multiple stack frames, check that
+ * precision information is propagated up the call stack.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("11: (0f) r2 += r1")
+/* Current state */
+__msg("frame2: last_idx 11 first_idx 10 subseq_idx -1")
+__msg("frame2: regs=r1 stack= before 10: (bf) r2 = r10")
+__msg("frame2: parent state regs=r1 stack=")
+/* frame1.r{6,7} are marked because mark_precise_scalar_ids()
+ * looks for all registers with frame2.r1.id in the current state
+ */
+__msg("frame1: parent state regs=r6,r7 stack=")
+__msg("frame0: parent state regs=r6 stack=")
+/* Parent state */
+__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10")
+__msg("frame2: regs=r1 stack= before 8: (85) call pc+1")
+/* frame1.r1 is marked because of backtracking of call instruction */
+__msg("frame1: parent state regs=r1,r6,r7 stack=")
+__msg("frame0: parent state regs=r6 stack=")
+/* Parent state */
+__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8")
+__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1")
+__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1")
+__msg("frame1: parent state regs=r1 stack=")
+__msg("frame0: parent state regs=r6 stack=")
+/* Parent state */
+__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6")
+__msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
+__msg("frame0: parent state regs=r1,r6 stack=")
+/* Parent state */
+__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
+__msg("frame0: regs=r0,r1,r6 stack= before 3: (bf) r6 = r0")
+__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
+__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void precision_many_frames(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == r6.id */
+ "r1 = r0;"
+ "r6 = r0;"
+ "call precision_many_frames__foo;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+static __naked __noinline __used
+void precision_many_frames__foo(void)
+{
+ asm volatile (
+ /* conflate one of the register numbers (r6) with outer frame,
+ * to verify that those are tracked independently
+ */
+ "r6 = r1;"
+ "r7 = r1;"
+ "call precision_many_frames__bar;"
+ "exit"
+ ::: __clobber_all);
+}
+
+static __naked __noinline __used
+void precision_many_frames__bar(void)
+{
+ asm volatile (
+ /* force r1 to be precise, this immediately marks:
+ * - bar frame r1
+ * - foo frame r{1,6,7}
+ * - main frame r{1,6}
+ */
+ "r2 = r10;"
+ "r2 += r1;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Check that scalars with the same IDs are marked precise on stack as
+ * well as in registers.
+ */
+SEC("socket")
+__success __log_level(2)
+/* foo frame */
+__msg("frame1: regs=r1 stack=-8,-16 before 9: (bf) r2 = r10")
+__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1")
+__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1")
+__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
+/* main frame */
+__msg("frame0: regs=r0,r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
+__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
+__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void precision_stack(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id == fp[-8].id */
+ "r1 = r0;"
+ "*(u64*)(r10 - 8) = r1;"
+ "call precision_stack__foo;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+static __naked __noinline __used
+void precision_stack__foo(void)
+{
+ asm volatile (
+ /* conflate one of the stack slots (fp[-8]) with the outer frame,
+ * to verify that those are tracked independently
+ */
+ "*(u64*)(r10 - 8) = r1;"
+ "*(u64*)(r10 - 16) = r1;"
+ /* force r1 to be precise, this immediately marks:
+ * - foo frame r1,fp{-8,-16}
+ * - main frame r1,fp{-8}
+ */
+ "r2 = r10;"
+ "r2 += r1;"
+ "exit"
+ ::: __clobber_all);
+}
+
+/* Use two separate scalar IDs to check that these are propagated
+ * independently.
+ */
+SEC("socket")
+__success __log_level(2)
+/* r{6,7} */
+__msg("11: (0f) r3 += r7")
+__msg("frame0: regs=r6,r7 stack= before 10: (bf) r3 = r10")
+/* ... skip some insns ... */
+__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0")
+__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0")
+/* r{8,9} */
+__msg("12: (0f) r3 += r9")
+__msg("frame0: regs=r8,r9 stack= before 11: (0f) r3 += r7")
+/* ... skip some insns ... */
+__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0")
+__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void precision_two_ids(void)
+{
+ asm volatile (
+ /* r6 = random number up to 0xff
+ * r6.id == r7.id
+ */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ "r6 = r0;"
+ "r7 = r0;"
+ /* same, but for r{8,9} */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ "r8 = r0;"
+ "r9 = r0;"
+ /* clear r0 id */
+ "r0 = 0;"
+ /* force checkpoint */
+ "goto +0;"
+ "r3 = r10;"
+ /* force r7 to be precise, this also marks r6 */
+ "r3 += r7;"
+ /* force r9 to be precise, this also marks r8 */
+ "r3 += r9;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Verify that check_ids() is used by regsafe() for scalars.
+ *
+ * r9 = ... some pointer with range X ...
+ * r6 = ... unbound scalar ID=a ...
+ * r7 = ... unbound scalar ID=b ...
+ * if (r6 > r7) goto +1
+ * r7 = r6
+ * if (r7 > X) goto exit
+ * r9 += r6
+ * ... access memory using r9 ...
+ *
+ * The memory access is safe only if r7 is bounded,
+ * which is true for one branch and not true for another.
+ */
+SEC("socket")
+__failure __msg("register with unbounded min value")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_ids_in_regsafe(void)
+{
+ asm volatile (
+ /* Bump allocated stack */
+ "r1 = 0;"
+ "*(u64*)(r10 - 8) = r1;"
+ /* r9 = pointer to stack */
+ "r9 = r10;"
+ "r9 += -8;"
+ /* r7 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r7 = r0;"
+ /* r6 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ /* if r6 > r7 is an unpredictable jump */
+ "if r6 > r7 goto l1_%=;"
+ "r7 = r6;"
+"l1_%=:"
+ /* if r7 > 4 ...; transfers range to r6 on one execution path
+ * but does not transfer on another
+ */
+ "if r7 > 4 goto l2_%=;"
+ /* Access memory at r9[r6], r6 is not always bounded */
+ "r9 += r6;"
+ "r0 = *(u8*)(r9 + 0);"
+"l2_%=:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Similar to check_ids_in_regsafe.
+ * The l0 could be reached in two states:
+ *
+ * (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
+ * (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
+ *
+ * Where (2) is not safe, as "r7 > 4" check won't propagate range for it.
+ * This example would be considered safe without changes to
+ * mark_chain_precision() to track scalar values with equal IDs.
+ */
+SEC("socket")
+__failure __msg("register with unbounded min value")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_ids_in_regsafe_2(void)
+{
+ asm volatile (
+ /* Bump allocated stack */
+ "r1 = 0;"
+ "*(u64*)(r10 - 8) = r1;"
+ /* r9 = pointer to stack */
+ "r9 = r10;"
+ "r9 += -8;"
+ /* r8 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r8 = r0;"
+ /* r7 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r7 = r0;"
+ /* r6 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ /* scratch .id from r0 */
+ "r0 = 0;"
+ /* if r6 > r7 is an unpredictable jump */
+ "if r6 > r7 goto l1_%=;"
+ /* tie r6 and r7 .id */
+ "r6 = r7;"
+"l0_%=:"
+ /* if r7 > 4 exit(0) */
+ "if r7 > 4 goto l2_%=;"
+ /* Access memory at r9[r6] */
+ "r9 += r6;"
+ "r0 = *(u8*)(r9 + 0);"
+"l2_%=:"
+ "r0 = 0;"
+ "exit;"
+"l1_%=:"
+ /* tie r6 and r8 .id */
+ "r6 = r8;"
+ "goto l0_%=;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Check that scalar IDs *are not* generated on register to register
+ * assignments if source register is a constant.
+ *
+ * If such IDs *are* generated the 'l1' below would be reached in
+ * two states:
+ *
+ * (1) r1{.id=A}, r2{.id=A}
+ * (2) r1{.id=C}, r2{.id=C}
+ *
+ * Thus forcing 'if r1 == r2' verification twice.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("11: (1d) if r3 == r4 goto pc+0")
+__msg("frame 0: propagating r3,r4")
+__msg("11: safe")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void no_scalar_id_for_const(void)
+{
+ asm volatile (
+ "call %[bpf_ktime_get_ns];"
+ /* unpredictable jump */
+ "if r0 > 7 goto l0_%=;"
+ /* possibly generate same scalar ids for r3 and r4 */
+ "r1 = 0;"
+ "r1 = r1;"
+ "r3 = r1;"
+ "r4 = r1;"
+ "goto l1_%=;"
+"l0_%=:"
+ /* possibly generate different scalar ids for r3 and r4 */
+ "r1 = 0;"
+ "r2 = 0;"
+ "r3 = r1;"
+ "r4 = r2;"
+"l1_%=:"
+ /* predictable jump, marks r3 and r4 precise */
+ "if r3 == r4 goto +0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Same as no_scalar_id_for_const() but for 32-bit values */
+SEC("socket")
+__success __log_level(2)
+__msg("11: (1e) if w3 == w4 goto pc+0")
+__msg("frame 0: propagating r3,r4")
+__msg("11: safe")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void no_scalar_id_for_const32(void)
+{
+ asm volatile (
+ "call %[bpf_ktime_get_ns];"
+ /* unpredictable jump */
+ "if r0 > 7 goto l0_%=;"
+ /* possibly generate same scalar ids for r3 and r4 */
+ "w1 = 0;"
+ "w1 = w1;"
+ "w3 = w1;"
+ "w4 = w1;"
+ "goto l1_%=;"
+"l0_%=:"
+ /* possibly generate different scalar ids for r3 and r4 */
+ "w1 = 0;"
+ "w2 = 0;"
+ "w3 = w1;"
+ "w4 = w2;"
+"l1_%=:"
+ /* predictable jump, marks r3 and r4 precise */
+ "if w3 == w4 goto +0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Check that unique scalar IDs are ignored when new verifier state is
+ * compared to cached verifier state. For this test:
+ * - cached state has no id on r1
+ * - new state has a unique id on r1
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("6: (25) if r6 > 0x7 goto pc+1")
+__msg("7: (57) r1 &= 255")
+__msg("8: (bf) r2 = r10")
+__msg("from 6 to 8: safe")
+__msg("processed 12 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void ignore_unique_scalar_ids_cur(void)
+{
+ asm volatile (
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* r1.id == r0.id */
+ "r1 = r0;"
+ /* make r1.id unique */
+ "r0 = 0;"
+ "if r6 > 7 goto l0_%=;"
+ /* clear r1 id, but keep the range compatible */
+ "r1 &= 0xff;"
+"l0_%=:"
+ /* get here in two states:
+ * - first: r1 has no id (cached state)
+ * - second: r1 has a unique id (should be considered equivalent)
+ */
+ "r2 = r10;"
+ "r2 += r1;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Check that unique scalar IDs are ignored when new verifier state is
+ * compared to cached verifier state. For this test:
+ * - cached state has a unique id on r1
+ * - new state has no id on r1
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("6: (25) if r6 > 0x7 goto pc+1")
+__msg("7: (05) goto pc+1")
+__msg("9: (bf) r2 = r10")
+__msg("9: safe")
+__msg("processed 13 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void ignore_unique_scalar_ids_old(void)
+{
+ asm volatile (
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* r1.id == r0.id */
+ "r1 = r0;"
+ /* make r1.id unique */
+ "r0 = 0;"
+ "if r6 > 7 goto l1_%=;"
+ "goto l0_%=;"
+"l1_%=:"
+ /* clear r1 id, but keep the range compatible */
+ "r1 &= 0xff;"
+"l0_%=:"
+ /* get here in two states:
+ * - first: r1 has a unique id (cached state)
+ * - second: r1 has no id (should be considered equivalent)
+ */
+ "r2 = r10;"
+ "r2 += r1;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* Check that two different scalar IDs in a verified state can't be
+ * mapped to the same scalar ID in current state.
+ */
+SEC("socket")
+__success __log_level(2)
+/* The exit instruction should be reachable from two states,
+ * use two matches and "processed .. insns" to ensure this.
+ */
+__msg("13: (95) exit")
+__msg("13: (95) exit")
+__msg("processed 18 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void two_old_ids_one_cur_id(void)
+{
+ asm volatile (
+ /* Give unique scalar IDs to r{6,7} */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ "r6 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ "r7 = r0;"
+ "r0 = 0;"
+ /* Maybe make r{6,7} IDs identical */
+ "if r6 > r7 goto l0_%=;"
+ "goto l1_%=;"
+"l0_%=:"
+ "r6 = r7;"
+"l1_%=:"
+ /* Mark r{6,7} precise.
+ * Get here in two states:
+ * - first: r6{.id=A}, r7{.id=B} (cached state)
+ * - second: r6{.id=A}, r7{.id=A}
+ * Currently we don't want to consider such states equivalent.
+ * Thus "exit;" would be verified twice.
+ */
+ "r2 = r10;"
+ "r2 += r6;"
+ "r2 += r7;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
new file mode 100644
index 000000000000..db6b3143338b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <errno.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
+
+int vals[] SEC(".data.vals") = {1, 2, 3, 4};
+
+__naked __noinline __used
+static unsigned long identity_subprog()
+{
+ /* the simplest *static* 64-bit identity function */
+ asm volatile (
+ "r0 = r1;"
+ "exit;"
+ );
+}
+
+__noinline __used
+unsigned long global_identity_subprog(__u64 x)
+{
+ /* the simplest *global* 64-bit identity function */
+ return x;
+}
+
+__naked __noinline __used
+static unsigned long callback_subprog()
+{
+ /* the simplest callback function */
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("7: (0f) r1 += r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
+__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
+__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
+__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
+__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int subprog_result_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+ /* pass r6 through r1 into subprog to get it back as r0;
+ * this whole chain will have to be marked as precise later
+ */
+ "r1 = r6;"
+ "call identity_subprog;"
+ /* now use subprog's returned value (which is a
+ * r6 -> r1 -> r0 chain), as index into vals array, forcing
+ * all of that to be known precisely
+ */
+ "r0 *= 4;"
+ "r1 = %[vals];"
+ /* here r0->r1->r6 chain is forced to be precise and has to be
+ * propagated back to the beginning, including through the
+ * subprog call
+ */
+ "r1 += r0;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("9: (0f) r1 += r0")
+__msg("mark_precise: frame0: last_idx 9 first_idx 0")
+__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
+__naked int global_subprog_result_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+ /* pass r6 through r1 into subprog to get it back as r0;
+ * given global_identity_subprog is global, precision won't
+ * propagate all the way back to r6
+ */
+ "r1 = r6;"
+ "call global_identity_subprog;"
+ /* now use subprog's returned value (which is unknown now, so
+ * we need to clamp it), as index into vals array, forcing r0
+ * to be marked precise (with no effect on r6, though)
+ */
+ "if r0 < %[vals_arr_sz] goto 1f;"
+ "r0 = %[vals_arr_sz] - 1;"
+ "1:"
+ "r0 *= 4;"
+ "r1 = %[vals];"
+ /* here r0 is forced to be precise and has to be
+ * propagated back to the global subprog call, but it
+ * shouldn't go all the way to mark r6 as precise
+ */
+ "r1 += r0;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_const(vals_arr_sz, ARRAY_SIZE(vals))
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("14: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 14 first_idx 10")
+__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
+__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
+__msg("mark_precise: frame0: parent state regs=r0 stack=:")
+__msg("mark_precise: frame0: last_idx 18 first_idx 0")
+__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit")
+__naked int callback_result_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and use result; r0 shouldn't propagate back to
+ * callback_subprog
+ */
+ "r1 = r6;" /* nr_loops */
+ "r2 = %[callback_subprog];" /* callback_fn */
+ "r3 = 0;" /* callback_ctx */
+ "r4 = 0;" /* flags */
+ "call %[bpf_loop];"
+
+ "r6 = r0;"
+ "if r6 > 3 goto 1f;"
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the bpf_loop() call, but not beyond
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "1:"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_ptr(callback_subprog),
+ __imm(bpf_loop)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("7: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 7 first_idx 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
+__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
+__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_callee_saved_reg_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call identity_subprog;"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("7: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 7 first_idx 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
+__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_callee_saved_reg_precise_global(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call global_identity_subprog;"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("12: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 12 first_idx 10")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
+__msg("mark_precise: frame0: parent state regs=r6 stack=:")
+__msg("mark_precise: frame0: last_idx 16 first_idx 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
+__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
+__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+__naked int parent_callee_saved_reg_precise_with_callback(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 1;" /* nr_loops */
+ "r2 = %[callback_subprog];" /* callback_fn */
+ "r3 = 0;" /* callback_ctx */
+ "r4 = 0;" /* flags */
+ "call %[bpf_loop];"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) callback call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_ptr(callback_subprog),
+ __imm(bpf_loop)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("9: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 9 first_idx 6")
+__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: parent state regs= stack=-8:")
+__msg("mark_precise: frame0: last_idx 13 first_idx 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
+__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
+__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_stack_slot_precise(void)
+{
+ asm volatile (
+ /* spill reg */
+ "r6 = 3;"
+ "*(u64 *)(r10 - 8) = r6;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call identity_subprog;"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r6 = *(u64 *)(r10 - 8);"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("9: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 9 first_idx 6")
+__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: parent state regs= stack=-8:")
+__msg("mark_precise: frame0: last_idx 5 first_idx 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
+__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_stack_slot_precise_global(void)
+{
+ asm volatile (
+ /* spill reg */
+ "r6 = 3;"
+ "*(u64 *)(r10 - 8) = r6;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call global_identity_subprog;"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r6 = *(u64 *)(r10 - 8);"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("14: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 14 first_idx 11")
+__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: parent state regs= stack=-8:")
+__msg("mark_precise: frame0: last_idx 18 first_idx 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
+__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
+__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+__naked int parent_stack_slot_precise_with_callback(void)
+{
+ asm volatile (
+ /* spill reg */
+ "r6 = 3;"
+ "*(u64 *)(r10 - 8) = r6;"
+
+ /* ensure we have callback frame in jump history */
+ "r1 = r6;" /* nr_loops */
+ "r2 = %[callback_subprog];" /* callback_fn */
+ "r3 = 0;" /* callback_ctx */
+ "r4 = 0;" /* flags */
+ "call %[bpf_loop];"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r6 = *(u64 *)(r10 - 8);"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_ptr(callback_subprog),
+ __imm(bpf_loop)
+ : __clobber_common, "r6"
+ );
+}
+
+__noinline __used
+static __u64 subprog_with_precise_arg(__u64 x)
+{
+ return vals[x]; /* x is forced to be precise */
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("8: (0f) r2 += r1")
+__msg("mark_precise: frame1: last_idx 8 first_idx 0")
+__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
+__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
+__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
+__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
+__naked int subprog_arg_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+ "r1 = r6;"
+ /* subprog_with_precise_arg expects its argument to be
+ * precise, so r1->r6 will be marked precise from inside the
+ * subprog
+ */
+ "call subprog_with_precise_arg;"
+ "r0 += r6;"
+ "exit;"
+ :
+ :
+ : __clobber_common, "r6"
+ );
+}
+
+/* r1 is pointer to stack slot;
+ * r2 is a register to spill into that slot
+ * subprog also spills r2 into its own stack slot
+ */
+__naked __noinline __used
+static __u64 subprog_spill_reg_precise(void)
+{
+ asm volatile (
+ /* spill to parent stack */
+ "*(u64 *)(r1 + 0) = r2;"
+ /* spill to subprog stack (we use -16 offset to avoid
+ * accidental confusion with parent's -8 stack slot in
+ * verifier log output)
+ */
+ "*(u64 *)(r10 - 16) = r2;"
+ /* use both spills as the return result to propagate precision everywhere */
+ "r0 = *(u64 *)(r10 - 16);"
+ "r2 = *(u64 *)(r1 + 0);"
+ "r0 += r2;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+/* precision backtracking can't currently handle stack accesses that don't go
+ * through r10, so we won't be able to mark stack slot fp-8 as precise and
+ * will instead fall back to forcing all scalars precise
+ */
+__msg("mark_precise: frame0: falling back to forcing all scalars precise")
+__naked int subprog_spill_into_parent_stack_slot_precise(void)
+{
+ asm volatile (
+ "r6 = 1;"
+
+ /* pass pointer to stack slot and r6 to subprog;
+ * r6 will be marked precise and spilled into fp-8 slot, which
+ * also should be marked precise
+ */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = r6;"
+ "call subprog_spill_reg_precise;"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r7 = *(u64 *)(r10 - 8);"
+
+ "r7 *= 4;"
+ "r1 = %[vals];"
+ /* here r7 is forced to be precise and has to be propagated
+ * back to the beginning, handling subprog call and logic
+ */
+ "r1 += r7;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6", "r7"
+ );
+}
+
+__naked __noinline __used
+static __u64 subprog_with_checkpoint(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ /* guaranteed checkpoint if BPF_F_TEST_STATE_FREQ is used */
+ "goto +0;"
+ "exit;"
+ );
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c b/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c
new file mode 100644
index 000000000000..bcfb6feb38c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/pkt_cls.h>
+#include <stdbool.h>
+
+int lookup_status;
+bool test_xdp;
+bool tcp_skc;
+
+#define CUR_NS BPF_F_CURRENT_NETNS
+
+static void socket_lookup(void *ctx, void *data_end, void *data)
+{
+ struct ethhdr *eth = data;
+ struct bpf_sock_tuple *tp;
+ struct bpf_sock *sk;
+ struct iphdr *iph;
+ int tplen;
+
+ if (eth + 1 > data_end)
+ return;
+
+ if (eth->h_proto != bpf_htons(ETH_P_IP))
+ return;
+
+ iph = (struct iphdr *)(eth + 1);
+ if (iph + 1 > data_end)
+ return;
+
+ tp = (struct bpf_sock_tuple *)&iph->saddr;
+ tplen = sizeof(tp->ipv4);
+ if ((void *)tp + tplen > data_end)
+ return;
+
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ if (tcp_skc)
+ sk = bpf_skc_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);
+ else
+ sk = bpf_sk_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);
+ break;
+ case IPPROTO_UDP:
+ sk = bpf_sk_lookup_udp(ctx, tp, tplen, CUR_NS, 0);
+ break;
+ default:
+ return;
+ }
+
+ lookup_status = 0;
+
+ if (sk) {
+ bpf_sk_release(sk);
+ lookup_status = 1;
+ }
+}
+
+SEC("tc")
+int tc_socket_lookup(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+
+ if (test_xdp)
+ return TC_ACT_UNSPEC;
+
+ socket_lookup(skb, data_end, data);
+ return TC_ACT_UNSPEC;
+}
+
+SEC("xdp")
+int xdp_socket_lookup(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+
+ if (!test_xdp)
+ return XDP_PASS;
+
+ socket_lookup(xdp, data_end, data);
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
index e1c787815e44..b2dfd7066c6e 100644
--- a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
@@ -77,7 +77,9 @@ int rx(struct xdp_md *ctx)
}
err = bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp);
- if (err)
+ if (!err)
+ meta->xdp_timestamp = bpf_ktime_get_tai_ns();
+ else
meta->rx_timestamp = 0; /* Used by AF_XDP as not avail signal */
err = bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type);
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index ea82921110da..4d582cac2c09 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -11,7 +11,6 @@
#include <signal.h>
#include <string.h>
#include <execinfo.h> /* backtrace */
-#include <linux/membarrier.h>
#include <sys/sysinfo.h> /* get_nprocs */
#include <netinet/in.h>
#include <sys/select.h>
@@ -629,68 +628,6 @@ out:
return err;
}
-static int finit_module(int fd, const char *param_values, int flags)
-{
- return syscall(__NR_finit_module, fd, param_values, flags);
-}
-
-static int delete_module(const char *name, int flags)
-{
- return syscall(__NR_delete_module, name, flags);
-}
-
-/*
- * Trigger synchronize_rcu() in kernel.
- */
-int kern_sync_rcu(void)
-{
- return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
-}
-
-static void unload_bpf_testmod(void)
-{
- if (kern_sync_rcu())
- fprintf(env.stderr, "Failed to trigger kernel-side RCU sync!\n");
- if (delete_module("bpf_testmod", 0)) {
- if (errno == ENOENT) {
- if (verbose())
- fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
- return;
- }
- fprintf(env.stderr, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
- return;
- }
- if (verbose())
- fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
-}
-
-static int load_bpf_testmod(void)
-{
- int fd;
-
- /* ensure previous instance of the module is unloaded */
- unload_bpf_testmod();
-
- if (verbose())
- fprintf(stdout, "Loading bpf_testmod.ko...\n");
-
- fd = open("bpf_testmod.ko", O_RDONLY);
- if (fd < 0) {
- fprintf(env.stderr, "Can't find bpf_testmod.ko kernel module: %d\n", -errno);
- return -ENOENT;
- }
- if (finit_module(fd, "", 0)) {
- fprintf(env.stderr, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno);
- close(fd);
- return -EINVAL;
- }
- close(fd);
-
- if (verbose())
- fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
- return 0;
-}
-
/* extern declarations for test funcs */
#define DEFINE_TEST(name) \
extern void test_##name(void) __weak; \
@@ -714,7 +651,13 @@ static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
-static const char argp_program_doc[] = "BPF selftests test runner";
+static const char argp_program_doc[] =
+"BPF selftests test runner\v"
+"Options accepting the NAMES parameter take either a comma-separated list\n"
+"of test names, or a filename prefixed with @. The file contains one name\n"
+"(or wildcard pattern) per line, and comments beginning with # are ignored.\n"
+"\n"
+"These options can be passed repeatedly to read multiple files.\n";
enum ARG_KEYS {
ARG_TEST_NUM = 'n',
@@ -797,6 +740,7 @@ extern int extra_prog_load_log_flags;
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
struct test_env *env = state->input;
+ int err = 0;
switch (key) {
case ARG_TEST_NUM: {
@@ -821,18 +765,28 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
}
case ARG_TEST_NAME_GLOB_ALLOWLIST:
case ARG_TEST_NAME: {
- if (parse_test_list(arg,
- &env->test_selector.whitelist,
- key == ARG_TEST_NAME_GLOB_ALLOWLIST))
- return -ENOMEM;
+ if (arg[0] == '@')
+ err = parse_test_list_file(arg + 1,
+ &env->test_selector.whitelist,
+ key == ARG_TEST_NAME_GLOB_ALLOWLIST);
+ else
+ err = parse_test_list(arg,
+ &env->test_selector.whitelist,
+ key == ARG_TEST_NAME_GLOB_ALLOWLIST);
+
break;
}
case ARG_TEST_NAME_GLOB_DENYLIST:
case ARG_TEST_NAME_BLACKLIST: {
- if (parse_test_list(arg,
- &env->test_selector.blacklist,
- key == ARG_TEST_NAME_GLOB_DENYLIST))
- return -ENOMEM;
+ if (arg[0] == '@')
+ err = parse_test_list_file(arg + 1,
+ &env->test_selector.blacklist,
+ key == ARG_TEST_NAME_GLOB_DENYLIST);
+ else
+ err = parse_test_list(arg,
+ &env->test_selector.blacklist,
+ key == ARG_TEST_NAME_GLOB_DENYLIST);
+
break;
}
case ARG_VERIFIER_STATS:
@@ -900,7 +854,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
default:
return ARGP_ERR_UNKNOWN;
}
- return 0;
+ return err;
}
/*
@@ -1703,9 +1657,14 @@ int main(int argc, char **argv)
env.stderr = stderr;
env.has_testmod = true;
- if (!env.list_test_names && load_bpf_testmod()) {
- fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
- env.has_testmod = false;
+ if (!env.list_test_names) {
+ /* ensure previous instance of the module is unloaded */
+ unload_bpf_testmod(verbose());
+
+ if (load_bpf_testmod(verbose())) {
+ fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
+ env.has_testmod = false;
+ }
}
/* initializing tests */
@@ -1802,7 +1761,7 @@ int main(int argc, char **argv)
close(env.saved_netns_fd);
out:
if (!env.list_test_names && env.has_testmod)
- unload_bpf_testmod();
+ unload_bpf_testmod(verbose());
free_test_selector(&env.test_selector);
free_test_selector(&env.subtest_selector);
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 0ed3134333d4..77bd492c6024 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -405,7 +405,6 @@ static inline void *u64_to_ptr(__u64 ptr)
int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
int compare_map_keys(int map1_fd, int map2_fd);
int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
-int kern_sync_rcu(void);
int trigger_module_test_read(int read_sz);
int trigger_module_test_write(int write_sz);
int write_sysctl(const char *sysctl, const char *value);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index e4657c5bc3f1..31f1c935cd07 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -40,6 +40,7 @@
#include "bpf_util.h"
#include "test_btf.h"
#include "../../../include/linux/filter.h"
+#include "testing_helpers.h"
#ifndef ENOTSUPP
#define ENOTSUPP 524
@@ -873,8 +874,140 @@ static int create_map_kptr(void)
return fd;
}
+static void set_root(bool set)
+{
+ __u64 caps;
+
+ if (set) {
+ if (cap_enable_effective(1ULL << CAP_SYS_ADMIN, &caps))
+ perror("cap_disable_effective(CAP_SYS_ADMIN)");
+ } else {
+ if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps))
+ perror("cap_disable_effective(CAP_SYS_ADMIN)");
+ }
+}
+
+static __u64 ptr_to_u64(const void *ptr)
+{
+ return (uintptr_t) ptr;
+}
+
+static struct btf *btf__load_testmod_btf(struct btf *vmlinux)
+{
+ struct bpf_btf_info info;
+ __u32 len = sizeof(info);
+ struct btf *btf = NULL;
+ char name[64];
+ __u32 id = 0;
+ int err, fd;
+
+ /* Iterate over all loaded BTF objects to find bpf_testmod;
+ * we need CAP_SYS_ADMIN for that.
+ */
+ set_root(true);
+
+ while (true) {
+ err = bpf_btf_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT)
+ break;
+ perror("bpf_btf_get_next_id failed");
+ break;
+ }
+
+ fd = bpf_btf_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
+ perror("bpf_btf_get_fd_by_id failed");
+ break;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.name_len = sizeof(name);
+ info.name = ptr_to_u64(name);
+ len = sizeof(info);
+
+ err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ close(fd);
+ perror("bpf_obj_get_info_by_fd failed");
+ break;
+ }
+
+ if (strcmp("bpf_testmod", name)) {
+ close(fd);
+ continue;
+ }
+
+ btf = btf__load_from_kernel_by_id_split(id, vmlinux);
+ if (!btf) {
+ close(fd);
+ break;
+ }
+
+ /* We need the fd to stay open so it can be used in fd_array.
+ * The final cleanup call to btf__free() will free the btf object
+ * and close the file descriptor.
+ */
+ btf__set_fd(btf, fd);
+ break;
+ }
+
+ set_root(false);
+ return btf;
+}
+
+static struct btf *testmod_btf;
+static struct btf *vmlinux_btf;
+
+static void kfuncs_cleanup(void)
+{
+ btf__free(testmod_btf);
+ btf__free(vmlinux_btf);
+}
+
+static void fixup_prog_kfuncs(struct bpf_insn *prog, int *fd_array,
+ struct kfunc_btf_id_pair *fixup_kfunc_btf_id)
+{
+ /* Patch in kfunc BTF IDs */
+ while (fixup_kfunc_btf_id->kfunc) {
+ int btf_id = 0;
+
+ /* try to find kfunc in kernel BTF */
+ vmlinux_btf = vmlinux_btf ?: btf__load_vmlinux_btf();
+ if (vmlinux_btf) {
+ btf_id = btf__find_by_name_kind(vmlinux_btf,
+ fixup_kfunc_btf_id->kfunc,
+ BTF_KIND_FUNC);
+ btf_id = btf_id < 0 ? 0 : btf_id;
+ }
+
+ /* kfunc not found in kernel BTF, try bpf_testmod BTF */
+ if (!btf_id) {
+ testmod_btf = testmod_btf ?: btf__load_testmod_btf(vmlinux_btf);
+ if (testmod_btf) {
+ btf_id = btf__find_by_name_kind(testmod_btf,
+ fixup_kfunc_btf_id->kfunc,
+ BTF_KIND_FUNC);
+ btf_id = btf_id < 0 ? 0 : btf_id;
+ if (btf_id) {
+ /* Put the bpf_testmod module fd into fd_array and store
+ * its index (1) in the instruction's 'off' field.
+ */
+ *fd_array = btf__fd(testmod_btf);
+ prog[fixup_kfunc_btf_id->insn_idx].off = 1;
+ }
+ }
+ }
+
+ prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
+ fixup_kfunc_btf_id++;
+ }
+}
+
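A minimal sketch of how the new fd_array argument is consumed at program load
time; it simply mirrors the do_test_single() changes further down in this
patch and adds nothing new:

	int fd_array[2] = { -1, -1 };
	LIBBPF_OPTS(bpf_prog_load_opts, opts);

	do_test_fixup(test, prog_type, prog, map_fds, &fd_array[1]);
	if (fd_array[1] != -1)
		/* slot 1 holds the bpf_testmod BTF fd; patched kfunc insns
		 * reference it via their 'off' field set to 1 above
		 */
		opts.fd_array = &fd_array[0];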
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
- struct bpf_insn *prog, int *map_fds)
+ struct bpf_insn *prog, int *map_fds, int *fd_array)
{
int *fixup_map_hash_8b = test->fixup_map_hash_8b;
int *fixup_map_hash_48b = test->fixup_map_hash_48b;
@@ -899,7 +1032,6 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_ringbuf = test->fixup_map_ringbuf;
int *fixup_map_timer = test->fixup_map_timer;
int *fixup_map_kptr = test->fixup_map_kptr;
- struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id;
if (test->fill_helper) {
test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -1100,25 +1232,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
} while (*fixup_map_kptr);
}
- /* Patch in kfunc BTF IDs */
- if (fixup_kfunc_btf_id->kfunc) {
- struct btf *btf;
- int btf_id;
-
- do {
- btf_id = 0;
- btf = btf__load_vmlinux_btf();
- if (btf) {
- btf_id = btf__find_by_name_kind(btf,
- fixup_kfunc_btf_id->kfunc,
- BTF_KIND_FUNC);
- btf_id = btf_id < 0 ? 0 : btf_id;
- }
- btf__free(btf);
- prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
- fixup_kfunc_btf_id++;
- } while (fixup_kfunc_btf_id->kfunc);
- }
+ fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id);
}
struct libcap {
@@ -1227,45 +1341,46 @@ static bool cmp_str_seq(const char *log, const char *exp)
return true;
}
-static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt)
+static struct bpf_insn *get_xlated_program(int fd_prog, int *cnt)
{
+ __u32 buf_element_size = sizeof(struct bpf_insn);
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
__u32 xlated_prog_len;
- __u32 buf_element_size = sizeof(struct bpf_insn);
+ struct bpf_insn *buf;
if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
perror("bpf_prog_get_info_by_fd failed");
- return -1;
+ return NULL;
}
xlated_prog_len = info.xlated_prog_len;
if (xlated_prog_len % buf_element_size) {
printf("Program length %d is not multiple of %d\n",
xlated_prog_len, buf_element_size);
- return -1;
+ return NULL;
}
*cnt = xlated_prog_len / buf_element_size;
- *buf = calloc(*cnt, buf_element_size);
+ buf = calloc(*cnt, buf_element_size);
if (!buf) {
perror("can't allocate xlated program buffer");
- return -ENOMEM;
+ return NULL;
}
bzero(&info, sizeof(info));
info.xlated_prog_len = xlated_prog_len;
- info.xlated_prog_insns = (__u64)(unsigned long)*buf;
+ info.xlated_prog_insns = (__u64)(unsigned long)buf;
if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
perror("second bpf_prog_get_info_by_fd failed");
goto out_free_buf;
}
- return 0;
+ return buf;
out_free_buf:
- free(*buf);
- return -1;
+ free(buf);
+ return NULL;
}
static bool is_null_insn(struct bpf_insn *insn)
@@ -1398,7 +1513,8 @@ static bool check_xlated_program(struct bpf_test *test, int fd_prog)
if (!check_expected && !check_unexpected)
goto out;
- if (get_xlated_program(fd_prog, &buf, &cnt)) {
+ buf = get_xlated_program(fd_prog, &cnt);
+ if (!buf) {
printf("FAIL: can't get xlated program\n");
result = false;
goto out;
@@ -1445,6 +1561,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
int run_errs, run_successes;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
+ int fd_array[2] = { -1, -1 };
int saved_errno;
int fixup_skips;
__u32 pflags;
@@ -1458,7 +1575,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
if (!prog_type)
prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
fixup_skips = skips;
- do_test_fixup(test, prog_type, prog, map_fds);
+ do_test_fixup(test, prog_type, prog, map_fds, &fd_array[1]);
if (test->fill_insns) {
prog = test->fill_insns;
prog_len = test->prog_len;
@@ -1492,6 +1609,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
else
opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL;
opts.prog_flags = pflags;
+ if (fd_array[1] != -1)
+ opts.fd_array = &fd_array[0];
if ((prog_type == BPF_PROG_TYPE_TRACING ||
prog_type == BPF_PROG_TYPE_LSM) && test->kfunc) {
@@ -1684,6 +1803,12 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
int i, passes = 0, errors = 0;
+ /* ensure previous instance of the module is unloaded */
+ unload_bpf_testmod(verbose);
+
+ if (load_bpf_testmod(verbose))
+ return EXIT_FAILURE;
+
for (i = from; i < to; i++) {
struct bpf_test *test = &tests[i];
@@ -1711,6 +1836,9 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
}
}
+ unload_bpf_testmod(verbose);
+ kfuncs_cleanup();
+
printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
skips, errors);
return errors ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh
index 377fb157a57c..c2ad50f26b63 100755
--- a/tools/testing/selftests/bpf/test_xsk.sh
+++ b/tools/testing/selftests/bpf/test_xsk.sh
@@ -68,9 +68,6 @@
# Run with verbose output:
# sudo ./test_xsk.sh -v
#
-# Run and dump packet contents:
-# sudo ./test_xsk.sh -D
-#
# Set up veth interfaces and leave them up so xskxceiver can be launched in a debugger:
# sudo ./test_xsk.sh -d
#
@@ -81,11 +78,10 @@
ETH=""
-while getopts "vDi:d" flag
+while getopts "vi:d" flag
do
case "${flag}" in
v) verbose=1;;
- D) dump_pkts=1;;
d) debug=1;;
i) ETH=${OPTARG};;
esac
@@ -157,10 +153,6 @@ if [[ $verbose -eq 1 ]]; then
ARGS+="-v "
fi
-if [[ $dump_pkts -eq 1 ]]; then
- ARGS="-D "
-fi
-
retval=$?
test_status $retval "${TEST_NAME}"
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 0b5e0829e5be..8d994884c7b4 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
/* Copyright (C) 2020 Facebook, Inc. */
+#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
@@ -8,6 +9,7 @@
#include <bpf/libbpf.h>
#include "test_progs.h"
#include "testing_helpers.h"
+#include <linux/membarrier.h>
int parse_num_list(const char *s, bool **num_set, int *num_set_len)
{
@@ -70,92 +72,168 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len)
return 0;
}
-int parse_test_list(const char *s,
- struct test_filter_set *set,
- bool is_glob_pattern)
+static int do_insert_test(struct test_filter_set *set,
+ char *test_str,
+ char *subtest_str)
{
- char *input, *state = NULL, *next;
- struct test_filter *tmp, *tests = NULL;
- int i, j, cnt = 0;
+ struct test_filter *tmp, *test;
+ char **ctmp;
+ int i;
- input = strdup(s);
- if (!input)
+ for (i = 0; i < set->cnt; i++) {
+ test = &set->tests[i];
+
+ if (strcmp(test_str, test->name) == 0) {
+ free(test_str);
+ goto subtest;
+ }
+ }
+
+ tmp = realloc(set->tests, sizeof(*test) * (set->cnt + 1));
+ if (!tmp)
return -ENOMEM;
- while ((next = strtok_r(state ? NULL : input, ",", &state))) {
- char *subtest_str = strchr(next, '/');
- char *pattern = NULL;
- int glob_chars = 0;
+ set->tests = tmp;
+ test = &set->tests[set->cnt];
- tmp = realloc(tests, sizeof(*tests) * (cnt + 1));
- if (!tmp)
- goto err;
- tests = tmp;
+ test->name = test_str;
+ test->subtests = NULL;
+ test->subtest_cnt = 0;
- tests[cnt].subtest_cnt = 0;
- tests[cnt].subtests = NULL;
+ set->cnt++;
- if (is_glob_pattern) {
- pattern = "%s";
- } else {
- pattern = "*%s*";
- glob_chars = 2;
- }
+subtest:
+ if (!subtest_str)
+ return 0;
- if (subtest_str) {
- char **tmp_subtests = NULL;
- int subtest_cnt = tests[cnt].subtest_cnt;
-
- *subtest_str = '\0';
- subtest_str += 1;
- tmp_subtests = realloc(tests[cnt].subtests,
- sizeof(*tmp_subtests) *
- (subtest_cnt + 1));
- if (!tmp_subtests)
- goto err;
- tests[cnt].subtests = tmp_subtests;
-
- tests[cnt].subtests[subtest_cnt] =
- malloc(strlen(subtest_str) + glob_chars + 1);
- if (!tests[cnt].subtests[subtest_cnt])
- goto err;
- sprintf(tests[cnt].subtests[subtest_cnt],
- pattern,
- subtest_str);
-
- tests[cnt].subtest_cnt++;
+ for (i = 0; i < test->subtest_cnt; i++) {
+ if (strcmp(subtest_str, test->subtests[i]) == 0) {
+ free(subtest_str);
+ return 0;
}
+ }
- tests[cnt].name = malloc(strlen(next) + glob_chars + 1);
- if (!tests[cnt].name)
- goto err;
- sprintf(tests[cnt].name, pattern, next);
+ ctmp = realloc(test->subtests,
+ sizeof(*test->subtests) * (test->subtest_cnt + 1));
+ if (!ctmp)
+ return -ENOMEM;
- cnt++;
+ test->subtests = ctmp;
+ test->subtests[test->subtest_cnt] = subtest_str;
+
+ test->subtest_cnt++;
+
+ return 0;
+}
+
+static int insert_test(struct test_filter_set *set,
+ char *test_spec,
+ bool is_glob_pattern)
+{
+ char *pattern, *subtest_str, *ext_test_str, *ext_subtest_str = NULL;
+ int glob_chars = 0;
+
+ if (is_glob_pattern) {
+ pattern = "%s";
+ } else {
+ pattern = "*%s*";
+ glob_chars = 2;
}
- tmp = realloc(set->tests, sizeof(*tests) * (cnt + set->cnt));
- if (!tmp)
+ subtest_str = strchr(test_spec, '/');
+ if (subtest_str) {
+ *subtest_str = '\0';
+ subtest_str += 1;
+ }
+
+ ext_test_str = malloc(strlen(test_spec) + glob_chars + 1);
+ if (!ext_test_str)
goto err;
- memcpy(tmp + set->cnt, tests, sizeof(*tests) * cnt);
- set->tests = tmp;
- set->cnt += cnt;
+ sprintf(ext_test_str, pattern, test_spec);
- free(tests);
- free(input);
- return 0;
+ if (subtest_str) {
+ ext_subtest_str = malloc(strlen(subtest_str) + glob_chars + 1);
+ if (!ext_subtest_str)
+ goto err;
+
+ sprintf(ext_subtest_str, pattern, subtest_str);
+ }
+
+ return do_insert_test(set, ext_test_str, ext_subtest_str);
err:
- for (i = 0; i < cnt; i++) {
- for (j = 0; j < tests[i].subtest_cnt; j++)
- free(tests[i].subtests[j]);
+ free(ext_test_str);
+ free(ext_subtest_str);
+
+ return -ENOMEM;
+}
+
+int parse_test_list_file(const char *path,
+ struct test_filter_set *set,
+ bool is_glob_pattern)
+{
+ char *buf = NULL, *capture_start, *capture_end, *scan_end;
+ size_t buflen = 0;
+ int err = 0;
+ FILE *f;
+
+ f = fopen(path, "r");
+ if (!f) {
+ err = -errno;
+ fprintf(stderr, "Failed to open '%s': %d\n", path, err);
+ return err;
+ }
+
+ while (getline(&buf, &buflen, f) != -1) {
+ capture_start = buf;
+
+ while (isspace(*capture_start))
+ ++capture_start;
+
+ capture_end = capture_start;
+ scan_end = capture_start;
+
+ while (*scan_end && *scan_end != '#') {
+ if (!isspace(*scan_end))
+ capture_end = scan_end;
+
+ ++scan_end;
+ }
+
+ if (capture_end == capture_start)
+ continue;
+
+ *(++capture_end) = '\0';
+
+ err = insert_test(set, capture_start, is_glob_pattern);
+ if (err)
+ break;
+ }
+
+ fclose(f);
+ return err;
+}
+
+int parse_test_list(const char *s,
+ struct test_filter_set *set,
+ bool is_glob_pattern)
+{
+ char *input, *state = NULL, *test_spec;
+ int err = 0;
+
+ input = strdup(s);
+ if (!input)
+ return -ENOMEM;
- free(tests[i].name);
+ while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) {
+ err = insert_test(set, test_spec, is_glob_pattern);
+ if (err)
+ break;
}
- free(tests);
+
free(input);
- return -ENOMEM;
+ return err;
}
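A minimal usage sketch for the two entry points (spec strings and file name
are illustrative, error handling trimmed):

	struct test_filter_set allow = {};
	int err;

	/* comma-separated specs, optionally with a '/subtest' part */
	err = parse_test_list("send_signal,spin_lock/res_lock", &allow, false);
	/* same semantics, but one spec per line read from a file */
	if (!err)
		err = parse_test_list_file("allow.txt", &allow, false);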
__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
@@ -249,3 +327,63 @@ __u64 read_perf_max_sample_freq(void)
fclose(f);
return sample_freq;
}
+
+static int finit_module(int fd, const char *param_values, int flags)
+{
+ return syscall(__NR_finit_module, fd, param_values, flags);
+}
+
+static int delete_module(const char *name, int flags)
+{
+ return syscall(__NR_delete_module, name, flags);
+}
+
+int unload_bpf_testmod(bool verbose)
+{
+ if (kern_sync_rcu())
+ fprintf(stdout, "Failed to trigger kernel-side RCU sync!\n");
+ if (delete_module("bpf_testmod", 0)) {
+ if (errno == ENOENT) {
+ if (verbose)
+ fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
+ return -1;
+ }
+ fprintf(stdout, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
+ return -1;
+ }
+ if (verbose)
+ fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
+ return 0;
+}
+
+int load_bpf_testmod(bool verbose)
+{
+ int fd;
+
+ if (verbose)
+ fprintf(stdout, "Loading bpf_testmod.ko...\n");
+
+ fd = open("bpf_testmod.ko", O_RDONLY);
+ if (fd < 0) {
+ fprintf(stdout, "Can't find bpf_testmod.ko kernel module: %d\n", -errno);
+ return -ENOENT;
+ }
+ if (finit_module(fd, "", 0)) {
+ fprintf(stdout, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno);
+ close(fd);
+ return -EINVAL;
+ }
+ close(fd);
+
+ if (verbose)
+ fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
+ return 0;
+}
+
+/*
+ * Trigger synchronize_rcu() in kernel.
+ */
+int kern_sync_rcu(void)
+{
+ return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
+}
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index eb8790f928e4..5312323881b6 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -1,5 +1,9 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (C) 2020 Facebook, Inc. */
+
+#ifndef __TESTING_HELPERS_H
+#define __TESTING_HELPERS_H
+
#include <stdbool.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
@@ -20,5 +24,13 @@ struct test_filter_set;
int parse_test_list(const char *s,
struct test_filter_set *test_set,
bool is_glob_pattern);
+int parse_test_list_file(const char *path,
+ struct test_filter_set *test_set,
+ bool is_glob_pattern);
__u64 read_perf_max_sample_freq(void);
+int load_bpf_testmod(bool verbose);
+int unload_bpf_testmod(bool verbose);
+int kern_sync_rcu(void);
+
+#endif /* __TESTING_HELPERS_H */
diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
index 6c03a7d805f9..99272bb890da 100644
--- a/tools/testing/selftests/bpf/verifier/precise.c
+++ b/tools/testing/selftests/bpf/verifier/precise.c
@@ -38,25 +38,24 @@
.fixup_map_array_48b = { 1 },
.result = VERBOSE_ACCEPT,
.errstr =
- "26: (85) call bpf_probe_read_kernel#113\
- last_idx 26 first_idx 20\
- regs=4 stack=0 before 25\
- regs=4 stack=0 before 24\
- regs=4 stack=0 before 23\
- regs=4 stack=0 before 22\
- regs=4 stack=0 before 20\
- parent didn't have regs=4 stack=0 marks\
- last_idx 19 first_idx 10\
- regs=4 stack=0 before 19\
- regs=200 stack=0 before 18\
- regs=300 stack=0 before 17\
- regs=201 stack=0 before 15\
- regs=201 stack=0 before 14\
- regs=200 stack=0 before 13\
- regs=200 stack=0 before 12\
- regs=200 stack=0 before 11\
- regs=200 stack=0 before 10\
- parent already had regs=0 stack=0 marks",
+ "mark_precise: frame0: last_idx 26 first_idx 20\
+ mark_precise: frame0: regs=r2 stack= before 25\
+ mark_precise: frame0: regs=r2 stack= before 24\
+ mark_precise: frame0: regs=r2 stack= before 23\
+ mark_precise: frame0: regs=r2 stack= before 22\
+ mark_precise: frame0: regs=r2 stack= before 20\
+ mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: last_idx 19 first_idx 10\
+ mark_precise: frame0: regs=r2,r9 stack= before 19\
+ mark_precise: frame0: regs=r9 stack= before 18\
+ mark_precise: frame0: regs=r8,r9 stack= before 17\
+ mark_precise: frame0: regs=r0,r9 stack= before 15\
+ mark_precise: frame0: regs=r0,r9 stack= before 14\
+ mark_precise: frame0: regs=r9 stack= before 13\
+ mark_precise: frame0: regs=r9 stack= before 12\
+ mark_precise: frame0: regs=r9 stack= before 11\
+ mark_precise: frame0: regs=r9 stack= before 10\
+ mark_precise: frame0: parent state regs= stack=:",
},
{
"precise: test 2",
@@ -100,20 +99,20 @@
.flags = BPF_F_TEST_STATE_FREQ,
.errstr =
"26: (85) call bpf_probe_read_kernel#113\
- last_idx 26 first_idx 22\
- regs=4 stack=0 before 25\
- regs=4 stack=0 before 24\
- regs=4 stack=0 before 23\
- regs=4 stack=0 before 22\
- parent didn't have regs=4 stack=0 marks\
- last_idx 20 first_idx 20\
- regs=4 stack=0 before 20\
- parent didn't have regs=4 stack=0 marks\
- last_idx 19 first_idx 17\
- regs=4 stack=0 before 19\
- regs=200 stack=0 before 18\
- regs=300 stack=0 before 17\
- parent already had regs=0 stack=0 marks",
+ mark_precise: frame0: last_idx 26 first_idx 22\
+ mark_precise: frame0: regs=r2 stack= before 25\
+ mark_precise: frame0: regs=r2 stack= before 24\
+ mark_precise: frame0: regs=r2 stack= before 23\
+ mark_precise: frame0: regs=r2 stack= before 22\
+ mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: last_idx 20 first_idx 20\
+ mark_precise: frame0: regs=r2,r9 stack= before 20\
+ mark_precise: frame0: parent state regs=r2,r9 stack=:\
+ mark_precise: frame0: last_idx 19 first_idx 17\
+ mark_precise: frame0: regs=r2,r9 stack= before 19\
+ mark_precise: frame0: regs=r9 stack= before 18\
+ mark_precise: frame0: regs=r8,r9 stack= before 17\
+ mark_precise: frame0: parent state regs= stack=:",
},
{
"precise: cross frame pruning",
@@ -153,15 +152,16 @@
},
.prog_type = BPF_PROG_TYPE_XDP,
.flags = BPF_F_TEST_STATE_FREQ,
- .errstr = "5: (2d) if r4 > r0 goto pc+0\
- last_idx 5 first_idx 5\
- parent didn't have regs=10 stack=0 marks\
- last_idx 4 first_idx 2\
- regs=10 stack=0 before 4\
- regs=10 stack=0 before 3\
- regs=0 stack=1 before 2\
- last_idx 5 first_idx 5\
- parent didn't have regs=1 stack=0 marks",
+ .errstr = "mark_precise: frame0: last_idx 5 first_idx 5\
+ mark_precise: frame0: parent state regs=r4 stack=:\
+ mark_precise: frame0: last_idx 4 first_idx 2\
+ mark_precise: frame0: regs=r4 stack= before 4\
+ mark_precise: frame0: regs=r4 stack= before 3\
+ mark_precise: frame0: regs= stack=-8 before 2\
+ mark_precise: frame0: falling back to forcing all scalars precise\
+ force_precise: frame0: forcing r0 to be precise\
+ mark_precise: frame0: last_idx 5 first_idx 5\
+ mark_precise: frame0: parent state regs= stack=:",
.result = VERBOSE_ACCEPT,
.retval = -1,
},
@@ -179,16 +179,19 @@
},
.prog_type = BPF_PROG_TYPE_XDP,
.flags = BPF_F_TEST_STATE_FREQ,
- .errstr = "last_idx 6 first_idx 6\
- parent didn't have regs=10 stack=0 marks\
- last_idx 5 first_idx 3\
- regs=10 stack=0 before 5\
- regs=10 stack=0 before 4\
- regs=0 stack=1 before 3\
- last_idx 6 first_idx 6\
- parent didn't have regs=1 stack=0 marks\
- last_idx 5 first_idx 3\
- regs=1 stack=0 before 5",
+ .errstr = "mark_precise: frame0: last_idx 6 first_idx 6\
+ mark_precise: frame0: parent state regs=r4 stack=:\
+ mark_precise: frame0: last_idx 5 first_idx 3\
+ mark_precise: frame0: regs=r4 stack= before 5\
+ mark_precise: frame0: regs=r4 stack= before 4\
+ mark_precise: frame0: regs= stack=-8 before 3\
+ mark_precise: frame0: falling back to forcing all scalars precise\
+ force_precise: frame0: forcing r0 to be precise\
+ force_precise: frame0: forcing r0 to be precise\
+ force_precise: frame0: forcing r0 to be precise\
+ force_precise: frame0: forcing r0 to be precise\
+ mark_precise: frame0: last_idx 6 first_idx 6\
+ mark_precise: frame0: parent state regs= stack=:",
.result = VERBOSE_ACCEPT,
.retval = -1,
},
@@ -217,3 +220,39 @@
.errstr = "invalid access to memory, mem_size=1 off=42 size=8",
.result = REJECT,
},
+{
+ "precise: program doesn't prematurely prune branches",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0x400),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_8, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0x80000000),
+ BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 0x401),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 2),
+ BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
+ BPF_LD_MAP_FD(BPF_REG_4, 0),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 10),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 8192),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_3, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 13 },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = REJECT,
+ .errstr = "register with unbounded min value is not allowed",
+},
diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
index 1db7185181da..655095810d4a 100644
--- a/tools/testing/selftests/bpf/veristat.c
+++ b/tools/testing/selftests/bpf/veristat.c
@@ -141,6 +141,7 @@ static struct env {
bool verbose;
bool debug;
bool quiet;
+ bool force_checkpoints;
enum resfmt out_fmt;
bool show_version;
bool comparison_mode;
@@ -209,6 +210,8 @@ static const struct argp_option opts[] = {
{ "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" },
{ "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" },
{ "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" },
+ { "test-states", 't', NULL, 0,
+ "Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" },
{ "quiet", 'q', NULL, 0, "Quiet mode" },
{ "emit", 'e', "SPEC", 0, "Specify stats to be emitted" },
{ "sort", 's', "SPEC", 0, "Specify sort order" },
@@ -284,6 +287,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
argp_usage(state);
}
break;
+ case 't':
+ env.force_checkpoints = true;
+ break;
case 'C':
env.comparison_mode = true;
break;
@@ -989,6 +995,9 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
/* increase chances of successful BPF object loading */
fixup_obj(obj, prog, base_filename);
+ if (env.force_checkpoints)
+ bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_STATE_FREQ);
+
err = bpf_object__load(obj);
env.progs_processed++;
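For example (object file name is illustrative), running

	./veristat -t some_prog.bpf.o

loads every program in the object with BPF_F_TEST_STATE_FREQ set, which
typically shows up in the states-related statistics veristat reports and
makes verifier pruning behaviour easier to compare between kernels.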
diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
index 987cf0db5ebc..613321eb84c1 100644
--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
@@ -27,6 +27,7 @@
#include <sys/mman.h>
#include <net/if.h>
#include <poll.h>
+#include <time.h>
#include "xdp_metadata.h"
@@ -134,18 +135,52 @@ static void refill_rx(struct xsk *xsk, __u64 addr)
}
}
-static void verify_xdp_metadata(void *data)
+#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
+static __u64 gettime(clockid_t clock_id)
+{
+ struct timespec t;
+ int res;
+
+ /* See man clock_gettime(2) for type of clock_id's */
+ res = clock_gettime(clock_id, &t);
+
+ if (res < 0)
+ error(res, errno, "Error with clock_gettime()");
+
+ return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
+}
+
+static void verify_xdp_metadata(void *data, clockid_t clock_id)
{
struct xdp_meta *meta;
meta = data - sizeof(*meta);
- printf("rx_timestamp: %llu\n", meta->rx_timestamp);
if (meta->rx_hash_err < 0)
printf("No rx_hash err=%d\n", meta->rx_hash_err);
else
printf("rx_hash: 0x%X with RSS type:0x%X\n",
meta->rx_hash, meta->rx_hash_type);
+
+ printf("rx_timestamp: %llu (sec:%0.4f)\n", meta->rx_timestamp,
+ (double)meta->rx_timestamp / NANOSEC_PER_SEC);
+ if (meta->rx_timestamp) {
+ __u64 usr_clock = gettime(clock_id);
+ __u64 xdp_clock = meta->xdp_timestamp;
+ __s64 delta_X = xdp_clock - meta->rx_timestamp;
+ __s64 delta_X2U = usr_clock - xdp_clock;
+
+ printf("XDP RX-time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f usec)\n",
+ xdp_clock, (double)xdp_clock / NANOSEC_PER_SEC,
+ (double)delta_X / NANOSEC_PER_SEC,
+ (double)delta_X / 1000);
+
+ printf("AF_XDP time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f usec)\n",
+ usr_clock, (double)usr_clock / NANOSEC_PER_SEC,
+ (double)delta_X2U / NANOSEC_PER_SEC,
+ (double)delta_X2U / 1000);
+ }
+
}
static void verify_skb_metadata(int fd)
@@ -193,7 +228,7 @@ static void verify_skb_metadata(int fd)
printf("skb hwtstamp is not found!\n");
}
-static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd)
+static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t clock_id)
{
const struct xdp_desc *rx_desc;
struct pollfd fds[rxq + 1];
@@ -243,7 +278,8 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd)
addr = xsk_umem__add_offset_to_addr(rx_desc->addr);
printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n",
xsk, idx, rx_desc->addr, addr, comp_addr);
- verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr));
+ verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr),
+ clock_id);
xsk_ring_cons__release(&xsk->rx, 1);
refill_rx(xsk, comp_addr);
}
@@ -370,6 +406,7 @@ static void timestamping_enable(int fd, int val)
int main(int argc, char *argv[])
{
+ clockid_t clock_id = CLOCK_TAI;
int server_fd = -1;
int ret;
int i;
@@ -443,7 +480,7 @@ int main(int argc, char *argv[])
error(1, -ret, "bpf_xdp_attach");
signal(SIGINT, handle_signal);
- ret = verify_metadata(rx_xsk, rxq, server_fd);
+ ret = verify_metadata(rx_xsk, rxq, server_fd, clock_id);
close(server_fd);
cleanup();
if (ret)
diff --git a/tools/testing/selftests/bpf/xdp_metadata.h b/tools/testing/selftests/bpf/xdp_metadata.h
index 0c4624dc6f2f..938a729bd307 100644
--- a/tools/testing/selftests/bpf/xdp_metadata.h
+++ b/tools/testing/selftests/bpf/xdp_metadata.h
@@ -11,6 +11,7 @@
struct xdp_meta {
__u64 rx_timestamp;
+ __u64 xdp_timestamp;
__u32 rx_hash;
union {
__u32 rx_hash_type;
diff --git a/tools/testing/selftests/bpf/xsk.h b/tools/testing/selftests/bpf/xsk.h
index 04ed8b544712..8da8d557768b 100644
--- a/tools/testing/selftests/bpf/xsk.h
+++ b/tools/testing/selftests/bpf/xsk.h
@@ -134,6 +134,11 @@ static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
__atomic_store_n(prod->producer, *prod->producer + nb, __ATOMIC_RELEASE);
}
+static inline void xsk_ring_prod__cancel(struct xsk_ring_prod *prod, __u32 nb)
+{
+ prod->cached_prod -= nb;
+}
+
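xsk_ring_prod__cancel() hands back producer slots that were reserved but never filled, so it pairs with xsk_ring_prod__reserve()/__submit() whenever a caller over-reserves. A sketch of that pattern, mirroring how xsk_populate_fill_ring() in xskxceiver.c uses it below (have_more_buffers() and next_addr() are hypothetical placeholders):

    __u32 idx = 0, filled = 0, want = 64;

    if (xsk_ring_prod__reserve(&umem->fq, want, &idx) != want)
            exit_with_error(ENOSPC);
    while (filled < want && have_more_buffers()) {
            *xsk_ring_prod__fill_addr(&umem->fq, idx++) = next_addr();
            filled++;
    }
    xsk_ring_prod__submit(&umem->fq, filled);        /* publish what was filled */
    xsk_ring_prod__cancel(&umem->fq, want - filled); /* return the unused slots */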
static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
{
__u32 entries = xsk_cons_nb_avail(cons, nb);
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index f144d0604ddf..218d7f694e5c 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -76,16 +76,13 @@
#include <asm/barrier.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
-#include <linux/ip.h>
#include <linux/mman.h>
-#include <linux/udp.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
-#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -94,10 +91,8 @@
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
-#include <sys/queue.h>
#include <time.h>
#include <unistd.h>
-#include <stdatomic.h>
#include "xsk_xdp_progs.skel.h"
#include "xsk.h"
@@ -109,10 +104,6 @@
static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
-static const char *IP1 = "192.168.100.162";
-static const char *IP2 = "192.168.100.161";
-static const u16 UDP_PORT1 = 2020;
-static const u16 UDP_PORT2 = 2121;
static void __exit_with_error(int error, const char *file, const char *func, int line)
{
@@ -147,112 +138,25 @@ static void report_failure(struct test_spec *test)
test->fail = true;
}
-static void memset32_htonl(void *dest, u32 val, u32 size)
-{
- u32 *ptr = (u32 *)dest;
- int i;
-
- val = htonl(val);
-
- for (i = 0; i < (size & (~0x3)); i += 4)
- ptr[i >> 2] = val;
-}
-
-/*
- * Fold a partial checksum
- * This function code has been taken from
- * Linux kernel include/asm-generic/checksum.h
- */
-static __u16 csum_fold(__u32 csum)
-{
- u32 sum = (__force u32)csum;
-
- sum = (sum & 0xffff) + (sum >> 16);
- sum = (sum & 0xffff) + (sum >> 16);
- return (__force __u16)~sum;
-}
-
-/*
- * This function code has been taken from
- * Linux kernel lib/checksum.c
- */
-static u32 from64to32(u64 x)
-{
- /* add up 32-bit and 32-bit for 32+c bit */
- x = (x & 0xffffffff) + (x >> 32);
- /* add up carry.. */
- x = (x & 0xffffffff) + (x >> 32);
- return (u32)x;
-}
-
-/*
- * This function code has been taken from
- * Linux kernel lib/checksum.c
- */
-static __u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
-{
- unsigned long long s = (__force u32)sum;
-
- s += (__force u32)saddr;
- s += (__force u32)daddr;
-#ifdef __BIG_ENDIAN__
- s += proto + len;
-#else
- s += (proto + len) << 8;
-#endif
- return (__force __u32)from64to32(s);
-}
-
-/*
- * This function has been taken from
- * Linux kernel include/asm-generic/checksum.h
+/* The payload is a word consisting of a packet sequence number in the upper
+ * 16 bits and an intra-packet data sequence number in the lower 16 bits. So the 3rd packet's
+ * 5th word of data will contain the number (2 << 16) | 4, as both are numbered from 0.
*/
-static __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
-{
- return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
-}
-
-static u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt)
+static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
{
- u32 csum = 0;
- u32 cnt = 0;
-
- /* udp hdr and data */
- for (; cnt < len; cnt += 2)
- csum += udp_pkt[cnt >> 1];
+ u32 *ptr = (u32 *)dest, i;
- return csum_tcpudp_magic(saddr, daddr, len, proto, csum);
+ start /= sizeof(*ptr);
+ size /= sizeof(*ptr);
+ for (i = 0; i < size; i++)
+ ptr[i] = htonl(pkt_nb << 16 | (i + start));
}
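Worked example of the encoding above: the 3rd packet (pkt_nb == 2) written from data offset 0 stores htonl((2 << 16) | 4) in its 5th word, and any word can be decoded back into its two counters like this (sketch; payload is assumed to point at the packet data):

    u32 word = ntohl(((u32 *)payload)[4]);
    u32 pkt_nb = word >> 16;    /* 2: packet sequence number */
    u32 seqnum = word & 0xffff; /* 4: word index within the packet */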
static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr)
{
memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN);
memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN);
- eth_hdr->h_proto = htons(ETH_P_IP);
-}
-
-static void gen_ip_hdr(struct ifobject *ifobject, struct iphdr *ip_hdr)
-{
- ip_hdr->version = IP_PKT_VER;
- ip_hdr->ihl = 0x5;
- ip_hdr->tos = IP_PKT_TOS;
- ip_hdr->tot_len = htons(IP_PKT_SIZE);
- ip_hdr->id = 0;
- ip_hdr->frag_off = 0;
- ip_hdr->ttl = IPDEFTTL;
- ip_hdr->protocol = IPPROTO_UDP;
- ip_hdr->saddr = ifobject->src_ip;
- ip_hdr->daddr = ifobject->dst_ip;
- ip_hdr->check = 0;
-}
-
-static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject,
- struct udphdr *udp_hdr)
-{
- udp_hdr->source = htons(ifobject->src_port);
- udp_hdr->dest = htons(ifobject->dst_port);
- udp_hdr->len = htons(UDP_PKT_SIZE);
- memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE);
+ eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
}
static bool is_umem_valid(struct ifobject *ifobj)
@@ -260,19 +164,18 @@ static bool is_umem_valid(struct ifobject *ifobj)
return !!ifobj->umem->umem;
}
-static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr)
+static u32 mode_to_xdp_flags(enum test_mode mode)
{
- udp_hdr->check = 0;
- udp_hdr->check =
- udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr);
+ return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
}
-static u32 mode_to_xdp_flags(enum test_mode mode)
+static u64 umem_size(struct xsk_umem_info *umem)
{
- return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
+ return umem->num_frames * umem->frame_size;
}
-static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size)
+static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer,
+ u64 size)
{
struct xsk_umem_config cfg = {
.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
@@ -292,9 +195,31 @@ static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size
return ret;
umem->buffer = buffer;
+ if (ifobj->shared_umem && ifobj->rx_on) {
+ umem->base_addr = umem_size(umem);
+ umem->next_buffer = umem_size(umem);
+ }
+
return 0;
}
+static u64 umem_alloc_buffer(struct xsk_umem_info *umem)
+{
+ u64 addr;
+
+ addr = umem->next_buffer;
+ umem->next_buffer += umem->frame_size;
+ if (umem->next_buffer >= umem->base_addr + umem_size(umem))
+ umem->next_buffer = umem->base_addr;
+
+ return addr;
+}
+
+static void umem_reset_alloc(struct xsk_umem_info *umem)
+{
+ umem->next_buffer = 0;
+}
+
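umem_alloc_buffer() and umem_reset_alloc() form a simple bump allocator over the UMEM: every call hands out the next frame-sized buffer and the cursor wraps back to base_addr once the area is exhausted. A worked sketch with hypothetical numbers (frame_size 4096, two frames, base_addr 0):

    umem_reset_alloc(umem);
    u64 a = umem_alloc_buffer(umem); /* 0 */
    u64 b = umem_alloc_buffer(umem); /* 4096 */
    u64 c = umem_alloc_buffer(umem); /* wraps back to 0 */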
static void enable_busy_poll(struct xsk_socket_info *xsk)
{
int sock_opt;
@@ -354,7 +279,7 @@ static bool ifobj_zc_avail(struct ifobject *ifobject)
exit_with_error(ENOMEM);
}
umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
- ret = xsk_configure_umem(umem, bufs, umem_sz);
+ ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz);
if (ret)
exit_with_error(-ret);
@@ -380,7 +305,6 @@ out:
static struct option long_options[] = {
{"interface", required_argument, 0, 'i'},
{"busy-poll", no_argument, 0, 'b'},
- {"dump-pkts", no_argument, 0, 'D'},
{"verbose", no_argument, 0, 'v'},
{0, 0, 0, 0}
};
@@ -391,7 +315,6 @@ static void usage(const char *prog)
" Usage: %s [OPTIONS]\n"
" Options:\n"
" -i, --interface Use interface\n"
- " -D, --dump-pkts Dump packets L2 - L5\n"
" -v, --verbose Verbose output\n"
" -b, --busy-poll Enable busy poll\n";
@@ -415,7 +338,7 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
opterr = 0;
for (;;) {
- c = getopt_long(argc, argv, "i:Dvb", long_options, &option_index);
+ c = getopt_long(argc, argv, "i:vb", long_options, &option_index);
if (c == -1)
break;
@@ -437,9 +360,6 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
interface_nb++;
break;
- case 'D':
- opt_pkt_dump = true;
- break;
case 'v':
opt_verbose = true;
break;
@@ -482,9 +402,6 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
memset(ifobj->umem, 0, sizeof(*ifobj->umem));
ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
- if (ifobj->shared_umem && ifobj->rx_on)
- ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS *
- XSK_UMEM__DEFAULT_FRAME_SIZE;
for (j = 0; j < MAX_SOCKETS; j++) {
memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
@@ -554,24 +471,24 @@ static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *x
static void pkt_stream_reset(struct pkt_stream *pkt_stream)
{
if (pkt_stream)
- pkt_stream->rx_pkt_nb = 0;
+ pkt_stream->current_pkt_nb = 0;
}
-static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb)
+static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
{
- if (pkt_nb >= pkt_stream->nb_pkts)
+ if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts)
return NULL;
- return &pkt_stream->pkts[pkt_nb];
+ return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
}
static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
{
- while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) {
+ while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
(*pkts_sent)++;
- if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid)
- return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++];
- pkt_stream->rx_pkt_nb++;
+ if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid)
+ return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
+ pkt_stream->current_pkt_nb++;
}
return NULL;
}
@@ -616,9 +533,21 @@ static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
return pkt_stream;
}
-static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len)
+static u32 ceil_u32(u32 a, u32 b)
+{
+ return (a + b - 1) / b;
+}
+
+static u32 pkt_nb_frags(u32 frame_size, struct pkt *pkt)
+{
+ if (!pkt || !pkt->valid)
+ return 1;
+ return ceil_u32(pkt->len, frame_size);
+}
+
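ceil_u32() is plain ceiling division, so pkt_nb_frags() reports how many rx frames a packet will occupy: with a 4096-byte frame, a 9000-byte packet needs ceil(9000 / 4096) = 3 frames, while a NULL or invalid packet counts as one. In code (sketch, values are illustrative):

    u32 frags = pkt_nb_frags(4096, pkt); /* pkt->len == 9000 -> 3 */
    u32 one = pkt_nb_frags(4096, NULL);  /* no packet -> 1 */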
+static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len)
{
- pkt->addr = addr + umem->base_addr;
+ pkt->offset = offset;
pkt->len = len;
if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom)
pkt->valid = false;
@@ -626,6 +555,11 @@ static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 l
pkt->valid = true;
}
+static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
+{
+ return ceil_u32(len, umem->frame_size) * umem->frame_size;
+}
+
static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len)
{
struct pkt_stream *pkt_stream;
@@ -635,10 +569,13 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
if (!pkt_stream)
exit_with_error(ENOMEM);
+ pkt_stream->nb_pkts = nb_pkts;
+ pkt_stream->max_pkt_len = pkt_len;
for (i = 0; i < nb_pkts; i++) {
- pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size,
- pkt_len);
- pkt_stream->pkts[i].payload = i;
+ struct pkt *pkt = &pkt_stream->pkts[i];
+
+ pkt_set(umem, pkt, 0, pkt_len);
+ pkt->pkt_nb = i;
}
return pkt_stream;
@@ -669,8 +606,7 @@ static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream);
for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2)
- pkt_set(umem, &pkt_stream->pkts[i],
- (i % umem->num_frames) * umem->frame_size + offset, pkt_len);
+ pkt_set(umem, &pkt_stream->pkts[i], offset, pkt_len);
ifobj->pkt_stream = pkt_stream;
}
@@ -694,30 +630,31 @@ static void pkt_stream_receive_half(struct test_spec *test)
pkt_stream->pkts[i].valid = false;
}
-static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb)
+static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
{
- struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb);
- struct udphdr *udp_hdr;
- struct ethhdr *eth_hdr;
- struct iphdr *ip_hdr;
- void *data;
+ if (!pkt->valid)
+ return pkt->offset;
+ return pkt->offset + umem_alloc_buffer(umem);
+}
- if (!pkt)
- return NULL;
- if (!pkt->valid || pkt->len < MIN_PKT_SIZE)
- return pkt;
+static void pkt_generate(struct ifobject *ifobject, u64 addr, u32 len, u32 pkt_nb,
+ u32 bytes_written)
+{
+ void *data = xsk_umem__get_data(ifobject->umem->buffer, addr);
- data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr);
- udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr));
- ip_hdr = (struct iphdr *)(data + sizeof(struct ethhdr));
- eth_hdr = (struct ethhdr *)data;
+ if (len < MIN_PKT_SIZE)
+ return;
- gen_udp_hdr(pkt_nb, data, ifobject, udp_hdr);
- gen_ip_hdr(ifobject, ip_hdr);
- gen_udp_csum(udp_hdr, ip_hdr);
- gen_eth_hdr(ifobject, eth_hdr);
+ if (!bytes_written) {
+ gen_eth_hdr(ifobject, data);
- return pkt;
+ len -= PKT_HDR_SIZE;
+ data += PKT_HDR_SIZE;
+ } else {
+ bytes_written -= PKT_HDR_SIZE;
+ }
+
+ write_payload(data, pkt_nb, bytes_written, len);
}
static void __pkt_stream_generate_custom(struct ifobject *ifobj,
@@ -731,10 +668,14 @@ static void __pkt_stream_generate_custom(struct ifobject *ifobj,
exit_with_error(ENOMEM);
for (i = 0; i < nb_pkts; i++) {
- pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr;
- pkt_stream->pkts[i].len = pkts[i].len;
- pkt_stream->pkts[i].payload = i;
- pkt_stream->pkts[i].valid = pkts[i].valid;
+ struct pkt *pkt = &pkt_stream->pkts[i];
+
+ pkt->offset = pkts[i].offset;
+ pkt->len = pkts[i].len;
+ pkt->pkt_nb = i;
+ pkt->valid = pkts[i].valid;
+ if (pkt->len > pkt_stream->max_pkt_len)
+ pkt_stream->max_pkt_len = pkt->len;
}
ifobj->pkt_stream = pkt_stream;
@@ -746,53 +687,62 @@ static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts,
__pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts);
}
-static void pkt_dump(void *pkt, u32 len)
-{
- char s[INET_ADDRSTRLEN];
- struct ethhdr *ethhdr;
- struct udphdr *udphdr;
- struct iphdr *iphdr;
- u32 payload, i;
-
- ethhdr = pkt;
- iphdr = pkt + sizeof(*ethhdr);
- udphdr = pkt + sizeof(*ethhdr) + sizeof(*iphdr);
-
- /*extract L2 frame */
- fprintf(stdout, "DEBUG>> L2: dst mac: ");
- for (i = 0; i < ETH_ALEN; i++)
- fprintf(stdout, "%02X", ethhdr->h_dest[i]);
-
- fprintf(stdout, "\nDEBUG>> L2: src mac: ");
- for (i = 0; i < ETH_ALEN; i++)
- fprintf(stdout, "%02X", ethhdr->h_source[i]);
-
- /*extract L3 frame */
- fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl);
- fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n",
- inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s)));
- fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n",
- inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s)));
- /*extract L4 frame */
- fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source));
- fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest));
- /*extract L5 frame */
- payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE)));
+static void pkt_print_data(u32 *data, u32 cnt)
+{
+ u32 i;
+
+ for (i = 0; i < cnt; i++) {
+ u32 seqnum, pkt_nb;
- fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload);
- fprintf(stdout, "---------------------------------------\n");
+ seqnum = ntohl(*data) & 0xffff;
+ pkt_nb = ntohl(*data) >> 16;
+ fprintf(stdout, "%u:%u ", pkt_nb, seqnum);
+ data++;
+ }
}
-static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr,
- u64 pkt_stream_addr)
+static void pkt_dump(void *pkt, u32 len, bool eth_header)
+{
+ struct ethhdr *ethhdr = pkt;
+ u32 i, *data;
+
+ if (eth_header) {
+ /*extract L2 frame */
+ fprintf(stdout, "DEBUG>> L2: dst mac: ");
+ for (i = 0; i < ETH_ALEN; i++)
+ fprintf(stdout, "%02X", ethhdr->h_dest[i]);
+
+ fprintf(stdout, "\nDEBUG>> L2: src mac: ");
+ for (i = 0; i < ETH_ALEN; i++)
+ fprintf(stdout, "%02X", ethhdr->h_source[i]);
+
+ data = pkt + PKT_HDR_SIZE;
+ } else {
+ data = pkt;
+ }
+
+ /*extract L5 frame */
+ fprintf(stdout, "\nDEBUG>> L5: seqnum: ");
+ pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
+ fprintf(stdout, "....");
+ if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
+ fprintf(stdout, "\n.... ");
+ pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
+ PKT_DUMP_NB_TO_PRINT);
+ }
+ fprintf(stdout, "\n---------------------------------------\n");
+}
+
+static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
{
u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
- u32 offset = addr % umem->frame_size, expected_offset = 0;
+ u32 offset = addr % umem->frame_size, expected_offset;
+ int pkt_offset = pkt->valid ? pkt->offset : 0;
- if (!pkt_stream->use_addr_for_fill)
- pkt_stream_addr = 0;
+ if (!umem->unaligned_mode)
+ pkt_offset = 0;
- expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
+ expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
if (offset == expected_offset)
return true;
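Worked example for the expected_offset computation above (numbers are illustrative and assume the usual 256-byte XDP_PACKET_HEADROOM):

    /* aligned mode:   pkt_offset forced to 0, frame_headroom 128:
     *                 (0 + 128 + 256) % 4096 = 384 */
    /* unaligned mode: headroom is 0, pkt->offset 0x800:
     *                 (0x800 + 0 + 256) % 4096 = 0x900 */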
@@ -806,9 +756,9 @@ static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
void *data = xsk_umem__get_data(buffer, addr);
struct xdp_info *meta = data - sizeof(struct xdp_info);
- if (meta->count != pkt->payload) {
+ if (meta->count != pkt->pkt_nb) {
ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n",
- __func__, pkt->payload, meta->count);
+ __func__, pkt->pkt_nb, meta->count);
return false;
}
@@ -818,11 +768,11 @@ static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
{
void *data = xsk_umem__get_data(buffer, addr);
- struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr));
+ u32 seqnum, pkt_data;
if (!pkt) {
ksft_print_msg("[%s] too many packets received\n", __func__);
- return false;
+ goto error;
}
if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE) {
@@ -833,28 +783,23 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
if (pkt->len != len) {
ksft_print_msg("[%s] expected length [%d], got length [%d]\n",
__func__, pkt->len, len);
- return false;
+ goto error;
}
- if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) {
- u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE)));
-
- if (opt_pkt_dump)
- pkt_dump(data, PKT_SIZE);
+ pkt_data = ntohl(*((u32 *)(data + PKT_HDR_SIZE)));
+ seqnum = pkt_data >> 16;
- if (pkt->payload != seqnum) {
- ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n",
- __func__, pkt->payload, seqnum);
- return false;
- }
- } else {
- ksft_print_msg("Invalid frame received: ");
- ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version,
- iphdr->tos);
- return false;
+ if (pkt->pkt_nb != seqnum) {
+ ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n",
+ __func__, pkt->pkt_nb, seqnum);
+ goto error;
}
return true;
+
+error:
+ pkt_dump(data, len, true);
+ return false;
}
static void kick_tx(struct xsk_socket_info *xsk)
@@ -976,7 +921,7 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
addr = xsk_umem__add_offset_to_addr(addr);
if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len) ||
- !is_offset_correct(umem, pkt_stream, addr, pkt->addr) ||
+ !is_offset_correct(umem, pkt, addr) ||
(ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr)))
return TEST_FAILURE;
@@ -992,8 +937,6 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
pthread_mutex_lock(&pacing_mutex);
pkts_in_flight -= pkts_sent;
- if (pkts_in_flight < umem->num_frames)
- pthread_cond_signal(&pacing_cond);
pthread_mutex_unlock(&pacing_mutex);
pkts_sent = 0;
}
@@ -1001,14 +944,21 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
return TEST_PASS;
}
-static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds,
- bool timeout)
+static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout)
{
struct xsk_socket_info *xsk = ifobject->xsk;
+ struct xsk_umem_info *umem = ifobject->umem;
+ u32 i, idx = 0, valid_pkts = 0, buffer_len;
bool use_poll = ifobject->use_poll;
- u32 i, idx = 0, valid_pkts = 0;
int ret;
+ buffer_len = pkt_get_buffer_len(umem, ifobject->pkt_stream->max_pkt_len);
+ /* pkts_in_flight might be negative if many invalid packets are sent */
+ if (pkts_in_flight >= (int)((umem_size(umem) - BATCH_SIZE * buffer_len) / buffer_len)) {
+ kick_tx(xsk);
+ return TEST_CONTINUE;
+ }
+
while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
if (use_poll) {
ret = poll(fds, 1, POLL_TMOUT);
@@ -1034,25 +984,21 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fd
for (i = 0; i < BATCH_SIZE; i++) {
struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
- struct pkt *pkt = pkt_generate(ifobject, *pkt_nb);
+ struct pkt *pkt = pkt_stream_get_next_tx_pkt(ifobject->pkt_stream);
if (!pkt)
break;
- tx_desc->addr = pkt->addr;
+ tx_desc->addr = pkt_get_addr(pkt, umem);
tx_desc->len = pkt->len;
- (*pkt_nb)++;
- if (pkt->valid)
+ if (pkt->valid) {
valid_pkts++;
+ pkt_generate(ifobject, tx_desc->addr, tx_desc->len, pkt->pkt_nb, 0);
+ }
}
pthread_mutex_lock(&pacing_mutex);
pkts_in_flight += valid_pkts;
- /* pkts_in_flight might be negative if many invalid packets are sent */
- if (pkts_in_flight >= (int)(ifobject->umem->num_frames - BATCH_SIZE)) {
- kick_tx(xsk);
- pthread_cond_wait(&pacing_cond, &pacing_mutex);
- }
pthread_mutex_unlock(&pacing_mutex);
xsk_ring_prod__submit(&xsk->tx, i);
@@ -1088,18 +1034,21 @@ static void wait_for_tx_completion(struct xsk_socket_info *xsk)
static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
+ struct pkt_stream *pkt_stream = ifobject->pkt_stream;
bool timeout = !is_umem_valid(test->ifobj_rx);
struct pollfd fds = { };
- u32 pkt_cnt = 0, ret;
+ u32 ret;
fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
fds.events = POLLOUT;
- while (pkt_cnt < ifobject->pkt_stream->nb_pkts) {
- ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout);
+ while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
+ ret = __send_pkts(ifobject, &fds, timeout);
+ if (ret == TEST_CONTINUE && !test->fail)
+ continue;
if ((ret || test->fail) && !timeout)
return TEST_FAILURE;
- else if (ret == TEST_PASS && timeout)
+ if (ret == TEST_PASS && timeout)
return ret;
}
@@ -1249,11 +1198,14 @@ static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobje
ifobject->xsk = &ifobject->xsk_arr[0];
ifobject->xskmap = test->ifobj_rx->xskmap;
memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
+ ifobject->umem->base_addr = 0;
}
-static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream)
+static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
+ bool fill_up)
{
- u32 idx = 0, i, buffers_to_fill;
+ u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
+ u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
int ret;
if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
@@ -1264,22 +1216,33 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream
ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
if (ret != buffers_to_fill)
exit_with_error(ENOSPC);
- for (i = 0; i < buffers_to_fill; i++) {
- u64 addr;
- if (pkt_stream->use_addr_for_fill) {
- struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i);
+ while (filled < buffers_to_fill) {
+ struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
+ u64 addr;
+ u32 i;
+
+ for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt); i++) {
+ if (!pkt) {
+ if (!fill_up)
+ break;
+ addr = filled * umem->frame_size + umem->base_addr;
+ } else if (pkt->offset >= 0) {
+ addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
+ } else {
+ addr = pkt->offset + umem_alloc_buffer(umem);
+ }
- if (!pkt)
+ *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
+ if (++filled >= buffers_to_fill)
break;
- addr = pkt->addr;
- } else {
- addr = i * umem->frame_size;
}
-
- *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
}
- xsk_ring_prod__submit(&umem->fq, i);
+ xsk_ring_prod__submit(&umem->fq, filled);
+ xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);
+
+ pkt_stream_reset(pkt_stream);
+ umem_reset_alloc(umem);
}
static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
@@ -1300,12 +1263,10 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
if (bufs == MAP_FAILED)
exit_with_error(errno);
- ret = xsk_configure_umem(ifobject->umem, bufs, umem_sz);
+ ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz);
if (ret)
exit_with_error(-ret);
- xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream);
-
xsk_configure_socket(test, ifobject, ifobject->umem, false);
ifobject->xsk = &ifobject->xsk_arr[0];
@@ -1313,6 +1274,8 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
if (!ifobject->rx_on)
return;
+ xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream, ifobject->use_fill_ring);
+
ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
if (ret)
exit_with_error(errno);
@@ -1370,12 +1333,8 @@ static void *worker_testapp_validate_rx(void *arg)
if (!err && ifobject->validation_func)
err = ifobject->validation_func(ifobject);
- if (err) {
+ if (err)
report_failure(test);
- pthread_mutex_lock(&pacing_mutex);
- pthread_cond_signal(&pacing_cond);
- pthread_mutex_unlock(&pacing_mutex);
- }
pthread_exit(NULL);
}
@@ -1402,11 +1361,20 @@ static void handler(int signum)
pthread_exit(NULL);
}
-static bool xdp_prog_changed(struct test_spec *test, struct ifobject *ifobj)
+static bool xdp_prog_changed_rx(struct test_spec *test)
{
+ struct ifobject *ifobj = test->ifobj_rx;
+
return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
}
+static bool xdp_prog_changed_tx(struct test_spec *test)
+{
+ struct ifobject *ifobj = test->ifobj_tx;
+
+ return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode;
+}
+
static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
struct bpf_map *xskmap, enum test_mode mode)
{
@@ -1433,13 +1401,13 @@ static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_pro
static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
struct ifobject *ifobj_tx)
{
- if (xdp_prog_changed(test, ifobj_rx))
+ if (xdp_prog_changed_rx(test))
xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);
if (!ifobj_tx || ifobj_tx->shared_umem)
return;
- if (xdp_prog_changed(test, ifobj_tx))
+ if (xdp_prog_changed_tx(test))
xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);
}
@@ -1448,9 +1416,11 @@ static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *i
{
pthread_t t0, t1;
- if (ifobj2)
+ if (ifobj2) {
if (pthread_barrier_init(&barr, NULL, 2))
exit_with_error(errno);
+ pkt_stream_reset(ifobj2->pkt_stream);
+ }
test->current_step++;
pkt_stream_reset(ifobj1->pkt_stream);
@@ -1493,6 +1463,12 @@ static int testapp_validate_traffic(struct test_spec *test)
struct ifobject *ifobj_rx = test->ifobj_rx;
struct ifobject *ifobj_tx = test->ifobj_tx;
+ if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) ||
+ (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) {
+ ksft_test_result_skip("No huge pages present.\n");
+ return TEST_SKIP;
+ }
+
xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx);
return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx);
}
@@ -1502,16 +1478,18 @@ static int testapp_validate_traffic_single_thread(struct test_spec *test, struct
return __testapp_validate_traffic(test, ifobj, NULL);
}
-static void testapp_teardown(struct test_spec *test)
+static int testapp_teardown(struct test_spec *test)
{
int i;
test_spec_set_name(test, "TEARDOWN");
for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
if (testapp_validate_traffic(test))
- return;
+ return TEST_FAILURE;
test_spec_reset(test);
}
+
+ return TEST_PASS;
}
static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
@@ -1526,20 +1504,23 @@ static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
*ifobj2 = tmp_ifobj;
}
-static void testapp_bidi(struct test_spec *test)
+static int testapp_bidi(struct test_spec *test)
{
+ int res;
+
test_spec_set_name(test, "BIDIRECTIONAL");
test->ifobj_tx->rx_on = true;
test->ifobj_rx->tx_on = true;
test->total_steps = 2;
if (testapp_validate_traffic(test))
- return;
+ return TEST_FAILURE;
print_verbose("Switching Tx/Rx vectors\n");
swap_directions(&test->ifobj_rx, &test->ifobj_tx);
- __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
+ res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
swap_directions(&test->ifobj_rx, &test->ifobj_tx);
+ return res;
}
static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
@@ -1556,160 +1537,139 @@ static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj
exit_with_error(errno);
}
-static void testapp_bpf_res(struct test_spec *test)
+static int testapp_bpf_res(struct test_spec *test)
{
test_spec_set_name(test, "BPF_RES");
test->total_steps = 2;
test->nb_sockets = 2;
if (testapp_validate_traffic(test))
- return;
+ return TEST_FAILURE;
swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);
- testapp_validate_traffic(test);
+ return testapp_validate_traffic(test);
}
-static void testapp_headroom(struct test_spec *test)
+static int testapp_headroom(struct test_spec *test)
{
test_spec_set_name(test, "UMEM_HEADROOM");
test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
- testapp_validate_traffic(test);
+ return testapp_validate_traffic(test);
}
-static void testapp_stats_rx_dropped(struct test_spec *test)
+static int testapp_stats_rx_dropped(struct test_spec *test)
{
test_spec_set_name(test, "STAT_RX_DROPPED");
+ if (test->mode == TEST_MODE_ZC) {
+ ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
+ return TEST_SKIP;
+ }
+
pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
pkt_stream_receive_half(test);
test->ifobj_rx->validation_func = validate_rx_dropped;
- testapp_validate_traffic(test);
+ return testapp_validate_traffic(test);
}
-static void testapp_stats_tx_invalid_descs(struct test_spec *test)
+static int testapp_stats_tx_invalid_descs(struct test_spec *test)
{
test_spec_set_name(test, "STAT_TX_INVALID");
pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
test->ifobj_tx->validation_func = validate_tx_invalid_descs;
- testapp_validate_traffic(test);
+ return testapp_validate_traffic(test);
}
-static void testapp_stats_rx_full(struct test_spec *test)
+static int testapp_stats_rx_full(struct test_spec *test)
{
test_spec_set_name(test, "STAT_RX_FULL");
- pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
+ pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
- DEFAULT_UMEM_BUFFERS, PKT_SIZE);
- if (!test->ifobj_rx->pkt_stream)
- exit_with_error(ENOMEM);
+ DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
test->ifobj_rx->release_rx = false;
test->ifobj_rx->validation_func = validate_rx_full;
- testapp_validate_traffic(test);
+ return testapp_validate_traffic(test);
}
-static void testapp_stats_fill_empty(struct test_spec *test)
+static int testapp_stats_fill_empty(struct test_spec *test)
{
test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
- pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
+ pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
- DEFAULT_UMEM_BUFFERS, PKT_SIZE);
- if (!test->ifobj_rx->pkt_stream)
- exit_with_error(ENOMEM);
+ DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
test->ifobj_rx->use_fill_ring = false;
test->ifobj_rx->validation_func = validate_fill_empty;
- testapp_validate_traffic(test);
-}
-
-/* Simple test */
-static bool hugepages_present(struct ifobject *ifobject)
-{
- size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size;
- void *bufs;
-
- bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
- if (bufs == MAP_FAILED)
- return false;
-
- mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
- munmap(bufs, mmap_sz);
- return true;
+ return testapp_validate_traffic(test);
}
-static bool testapp_unaligned(struct test_spec *test)
+static int testapp_unaligned(struct test_spec *test)
{
- if (!hugepages_present(test->ifobj_tx)) {
- ksft_test_result_skip("No 2M huge pages present.\n");
- return false;
- }
-
test_spec_set_name(test, "UNALIGNED_MODE");
test->ifobj_tx->umem->unaligned_mode = true;
test->ifobj_rx->umem->unaligned_mode = true;
- /* Let half of the packets straddle a buffer boundrary */
- pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2);
- test->ifobj_rx->pkt_stream->use_addr_for_fill = true;
- testapp_validate_traffic(test);
+ /* Let half of the packets straddle a 4K buffer boundary */
+ pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2);
- return true;
+ return testapp_validate_traffic(test);
}
-static void testapp_single_pkt(struct test_spec *test)
+static int testapp_single_pkt(struct test_spec *test)
{
- struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}};
+ struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};
pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
- testapp_validate_traffic(test);
+ return testapp_validate_traffic(test);
}
-static void testapp_invalid_desc(struct test_spec *test)
+static int testapp_invalid_desc(struct test_spec *test)
{
- u64 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
+ struct xsk_umem_info *umem = test->ifobj_tx->umem;
+ u64 umem_size = umem->num_frames * umem->frame_size;
struct pkt pkts[] = {
/* Zero packet address allowed */
- {0, PKT_SIZE, 0, true},
+ {0, MIN_PKT_SIZE, 0, true},
/* Allowed packet */
- {0x1000, PKT_SIZE, 0, true},
+ {0, MIN_PKT_SIZE, 0, true},
/* Straddling the start of umem */
- {-2, PKT_SIZE, 0, false},
+ {-2, MIN_PKT_SIZE, 0, false},
/* Packet too large */
- {0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
+ {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
/* Up to end of umem allowed */
- {umem_size - PKT_SIZE, PKT_SIZE, 0, true},
+ {umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
/* After umem ends */
- {umem_size, PKT_SIZE, 0, false},
+ {umem_size, MIN_PKT_SIZE, 0, false},
/* Straddle the end of umem */
- {umem_size - PKT_SIZE / 2, PKT_SIZE, 0, false},
- /* Straddle a page boundrary */
- {0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false},
- /* Straddle a 2K boundrary */
- {0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true},
+ {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
+ /* Straddle a 4K boundary */
+ {0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
+ /* Straddle a 2K boundary */
+ {0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true},
/* Valid packet for synch so that something is received */
- {0x4000, PKT_SIZE, 0, true}};
+ {0, MIN_PKT_SIZE, 0, true}};
- if (test->ifobj_tx->umem->unaligned_mode) {
- /* Crossing a page boundrary allowed */
+ if (umem->unaligned_mode) {
+ /* Crossing a page boundary allowed */
pkts[7].valid = true;
}
- if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
- /* Crossing a 2K frame size boundrary not allowed */
+ if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
+ /* Crossing a 2K frame size boundary not allowed */
pkts[8].valid = false;
}
if (test->ifobj_tx->shared_umem) {
- pkts[4].addr += umem_size;
- pkts[5].addr += umem_size;
- pkts[6].addr += umem_size;
+ pkts[4].offset += umem_size;
+ pkts[5].offset += umem_size;
+ pkts[6].offset += umem_size;
}
pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
- testapp_validate_traffic(test);
+ return testapp_validate_traffic(test);
}
-static void testapp_xdp_drop(struct test_spec *test)
+static int testapp_xdp_drop(struct test_spec *test)
{
struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
@@ -1719,10 +1679,10 @@ static void testapp_xdp_drop(struct test_spec *test)
skel_rx->maps.xsk, skel_tx->maps.xsk);
pkt_stream_receive_half(test);
- testapp_validate_traffic(test);
+ return testapp_validate_traffic(test);
}
-static void testapp_xdp_metadata_count(struct test_spec *test)
+static int testapp_xdp_metadata_count(struct test_spec *test)
{
struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
@@ -1743,10 +1703,10 @@ static void testapp_xdp_metadata_count(struct test_spec *test)
if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY))
exit_with_error(errno);
- testapp_validate_traffic(test);
+ return testapp_validate_traffic(test);
}
-static void testapp_poll_txq_tmout(struct test_spec *test)
+static int testapp_poll_txq_tmout(struct test_spec *test)
{
test_spec_set_name(test, "POLL_TXQ_FULL");
@@ -1754,14 +1714,14 @@ static void testapp_poll_txq_tmout(struct test_spec *test)
/* create invalid frame by set umem frame_size and pkt length equal to 2048 */
test->ifobj_tx->umem->frame_size = 2048;
pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
- testapp_validate_traffic_single_thread(test, test->ifobj_tx);
+ return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
}
-static void testapp_poll_rxq_tmout(struct test_spec *test)
+static int testapp_poll_rxq_tmout(struct test_spec *test)
{
test_spec_set_name(test, "POLL_RXQ_EMPTY");
test->ifobj_rx->use_poll = true;
- testapp_validate_traffic_single_thread(test, test->ifobj_rx);
+ return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
}
static int xsk_load_xdp_programs(struct ifobject *ifobj)
@@ -1778,25 +1738,30 @@ static void xsk_unload_xdp_programs(struct ifobject *ifobj)
xsk_xdp_progs__destroy(ifobj->xdp_progs);
}
+/* Simple test */
+static bool hugepages_present(void)
+{
+ size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
+ void *bufs;
+
+ bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
+ if (bufs == MAP_FAILED)
+ return false;
+
+ mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
+ munmap(bufs, mmap_sz);
+ return true;
+}
+
static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
- const char *dst_ip, const char *src_ip, const u16 dst_port,
- const u16 src_port, thread_func_t func_ptr)
+ thread_func_t func_ptr)
{
- struct in_addr ip;
int err;
memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
memcpy(ifobj->src_mac, src_mac, ETH_ALEN);
- inet_aton(dst_ip, &ip);
- ifobj->dst_ip = ip.s_addr;
-
- inet_aton(src_ip, &ip);
- ifobj->src_ip = ip.s_addr;
-
- ifobj->dst_port = dst_port;
- ifobj->src_port = src_port;
-
ifobj->func_ptr = func_ptr;
err = xsk_load_xdp_programs(ifobj);
@@ -1804,94 +1769,87 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *
printf("Error loading XDP program\n");
exit_with_error(err);
}
+
+ if (hugepages_present())
+ ifobj->unaligned_supp = true;
}
static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
{
+ int ret = TEST_SKIP;
+
switch (type) {
case TEST_TYPE_STATS_RX_DROPPED:
- if (mode == TEST_MODE_ZC) {
- ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
- return;
- }
- testapp_stats_rx_dropped(test);
+ ret = testapp_stats_rx_dropped(test);
break;
case TEST_TYPE_STATS_TX_INVALID_DESCS:
- testapp_stats_tx_invalid_descs(test);
+ ret = testapp_stats_tx_invalid_descs(test);
break;
case TEST_TYPE_STATS_RX_FULL:
- testapp_stats_rx_full(test);
+ ret = testapp_stats_rx_full(test);
break;
case TEST_TYPE_STATS_FILL_EMPTY:
- testapp_stats_fill_empty(test);
+ ret = testapp_stats_fill_empty(test);
break;
case TEST_TYPE_TEARDOWN:
- testapp_teardown(test);
+ ret = testapp_teardown(test);
break;
case TEST_TYPE_BIDI:
- testapp_bidi(test);
+ ret = testapp_bidi(test);
break;
case TEST_TYPE_BPF_RES:
- testapp_bpf_res(test);
+ ret = testapp_bpf_res(test);
break;
case TEST_TYPE_RUN_TO_COMPLETION:
test_spec_set_name(test, "RUN_TO_COMPLETION");
- testapp_validate_traffic(test);
+ ret = testapp_validate_traffic(test);
break;
case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
- testapp_single_pkt(test);
+ ret = testapp_single_pkt(test);
break;
case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
test->ifobj_tx->umem->frame_size = 2048;
test->ifobj_rx->umem->frame_size = 2048;
- pkt_stream_replace(test, DEFAULT_PKT_CNT, PKT_SIZE);
- testapp_validate_traffic(test);
+ pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ ret = testapp_validate_traffic(test);
break;
case TEST_TYPE_RX_POLL:
test->ifobj_rx->use_poll = true;
test_spec_set_name(test, "POLL_RX");
- testapp_validate_traffic(test);
+ ret = testapp_validate_traffic(test);
break;
case TEST_TYPE_TX_POLL:
test->ifobj_tx->use_poll = true;
test_spec_set_name(test, "POLL_TX");
- testapp_validate_traffic(test);
+ ret = testapp_validate_traffic(test);
break;
case TEST_TYPE_POLL_TXQ_TMOUT:
- testapp_poll_txq_tmout(test);
+ ret = testapp_poll_txq_tmout(test);
break;
case TEST_TYPE_POLL_RXQ_TMOUT:
- testapp_poll_rxq_tmout(test);
+ ret = testapp_poll_rxq_tmout(test);
break;
case TEST_TYPE_ALIGNED_INV_DESC:
test_spec_set_name(test, "ALIGNED_INV_DESC");
- testapp_invalid_desc(test);
+ ret = testapp_invalid_desc(test);
break;
case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
test->ifobj_tx->umem->frame_size = 2048;
test->ifobj_rx->umem->frame_size = 2048;
- testapp_invalid_desc(test);
+ ret = testapp_invalid_desc(test);
break;
case TEST_TYPE_UNALIGNED_INV_DESC:
- if (!hugepages_present(test->ifobj_tx)) {
- ksft_test_result_skip("No 2M huge pages present.\n");
- return;
- }
test_spec_set_name(test, "UNALIGNED_INV_DESC");
test->ifobj_tx->umem->unaligned_mode = true;
test->ifobj_rx->umem->unaligned_mode = true;
- testapp_invalid_desc(test);
+ ret = testapp_invalid_desc(test);
break;
case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: {
u64 page_size, umem_size;
- if (!hugepages_present(test->ifobj_tx)) {
- ksft_test_result_skip("No 2M huge pages present.\n");
- return;
- }
test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE");
/* Odd frame size so the UMEM doesn't end near a page boundary. */
test->ifobj_tx->umem->frame_size = 4001;
@@ -1903,29 +1861,28 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
*/
page_size = sysconf(_SC_PAGESIZE);
umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
- assert(umem_size % page_size > PKT_SIZE);
- assert(umem_size % page_size < page_size - PKT_SIZE);
- testapp_invalid_desc(test);
+ assert(umem_size % page_size > MIN_PKT_SIZE);
+ assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
+ ret = testapp_invalid_desc(test);
break;
}
case TEST_TYPE_UNALIGNED:
- if (!testapp_unaligned(test))
- return;
+ ret = testapp_unaligned(test);
break;
case TEST_TYPE_HEADROOM:
- testapp_headroom(test);
+ ret = testapp_headroom(test);
break;
case TEST_TYPE_XDP_DROP_HALF:
- testapp_xdp_drop(test);
+ ret = testapp_xdp_drop(test);
break;
case TEST_TYPE_XDP_METADATA_COUNT:
- testapp_xdp_metadata_count(test);
+ ret = testapp_xdp_metadata_count(test);
break;
default:
break;
}
- if (!test->fail)
+ if (ret == TEST_PASS)
ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
test->name);
pkt_stream_restore_default(test);
@@ -2030,14 +1987,12 @@ int main(int argc, char **argv)
modes++;
}
- init_iface(ifobj_rx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2,
- worker_testapp_validate_rx);
- init_iface(ifobj_tx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1,
- worker_testapp_validate_tx);
+ init_iface(ifobj_rx, MAC1, MAC2, worker_testapp_validate_rx);
+ init_iface(ifobj_tx, MAC2, MAC1, worker_testapp_validate_tx);
test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
- tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
- rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
+ tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
if (!tx_pkt_stream_default || !rx_pkt_stream_default)
exit_with_error(ENOMEM);
test.tx_pkt_stream_default = tx_pkt_stream_default;
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index c535aeab2ca3..aaf27e067640 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -30,22 +30,14 @@
#define TEST_PASS 0
#define TEST_FAILURE -1
#define TEST_CONTINUE 1
+#define TEST_SKIP 2
#define MAX_INTERFACES 2
#define MAX_INTERFACE_NAME_CHARS 16
#define MAX_SOCKETS 2
#define MAX_TEST_NAME_SIZE 32
#define MAX_TEARDOWN_ITER 10
-#define PKT_HDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
- sizeof(struct udphdr))
-#define MIN_ETH_PKT_SIZE 64
-#define ETH_FCS_SIZE 4
-#define MIN_PKT_SIZE (MIN_ETH_PKT_SIZE - ETH_FCS_SIZE)
-#define PKT_SIZE (MIN_PKT_SIZE)
-#define IP_PKT_SIZE (PKT_SIZE - sizeof(struct ethhdr))
-#define IP_PKT_VER 0x4
-#define IP_PKT_TOS 0x9
-#define UDP_PKT_SIZE (IP_PKT_SIZE - sizeof(struct iphdr))
-#define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - sizeof(struct udphdr))
+#define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2) /* Just to align the data in the packet */
+#define MIN_PKT_SIZE 64
#define USLEEP_MAX 10000
#define SOCK_RECONF_CTR 10
#define BATCH_SIZE 64
@@ -57,6 +49,7 @@
#define UMEM_HEADROOM_TEST_SIZE 128
#define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1)
#define HUGEPAGE_SIZE (2 * 1024 * 1024)
+#define PKT_DUMP_NB_TO_PRINT 16
#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
@@ -93,13 +86,13 @@ enum test_type {
TEST_TYPE_MAX
};
-static bool opt_pkt_dump;
static bool opt_verbose;
struct xsk_umem_info {
struct xsk_ring_prod fq;
struct xsk_ring_cons cq;
struct xsk_umem *umem;
+ u64 next_buffer;
u32 num_frames;
u32 frame_headroom;
void *buffer;
@@ -118,17 +111,17 @@ struct xsk_socket_info {
};
struct pkt {
- u64 addr;
+ int offset;
u32 len;
- u32 payload;
+ u32 pkt_nb;
bool valid;
};
struct pkt_stream {
u32 nb_pkts;
- u32 rx_pkt_nb;
+ u32 current_pkt_nb;
struct pkt *pkts;
- bool use_addr_for_fill;
+ u32 max_pkt_len;
};
struct ifobject;
@@ -148,11 +141,7 @@ struct ifobject {
struct bpf_program *xdp_prog;
enum test_mode mode;
int ifindex;
- u32 dst_ip;
- u32 src_ip;
u32 bind_flags;
- u16 src_port;
- u16 dst_port;
bool tx_on;
bool rx_on;
bool use_poll;
@@ -161,6 +150,7 @@ struct ifobject {
bool release_rx;
bool shared_umem;
bool use_metadata;
+ bool unaligned_supp;
u8 dst_mac[ETH_ALEN];
u8 src_mac[ETH_ALEN];
};
@@ -184,7 +174,6 @@ struct test_spec {
pthread_barrier_t barr;
pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;
-pthread_cond_t pacing_cond = PTHREAD_COND_INITIALIZER;
int pkts_in_flight;
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh b/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh
index 5cdd22048ba7..862e947e17c7 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh
@@ -53,7 +53,6 @@ bond_test_enslave_type_change()
# restore ARPHRD_ETHER type by enslaving such device
ip link set dev "$devbond2" master "$devbond0"
check_err $? "could not enslave $devbond2 to $devbond0"
- ip link set dev "$devbond1" nomaster
bond_check_flags "$devbond0"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/egress_vid_classification.sh b/tools/testing/selftests/drivers/net/mlxsw/egress_vid_classification.sh
index 0cf9e47e3209..a5c2aec52898 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/egress_vid_classification.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/egress_vid_classification.sh
@@ -16,10 +16,9 @@
# +----------------|--+ +--|-----------------+
# | |
# +----------------|-------------------------|-----------------+
-# | SW | | |
+# | SW $swp1 + + $swp2 |
+# | | | |
# | +--------------|-------------------------|---------------+ |
-# | | $swp1 + + $swp2 | |
-# | | | | | |
# | | $swp1.10 + + $swp2.10 | |
# | | | |
# | | br0 | |
diff --git a/tools/testing/selftests/drivers/net/mlxsw/extack.sh b/tools/testing/selftests/drivers/net/mlxsw/extack.sh
index 7a0a99c1d22f..6fd422d38fe8 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/extack.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/extack.sh
@@ -35,7 +35,9 @@ netdev_pre_up_test()
{
RET=0
- ip link add name br1 up type bridge vlan_filtering 0 mcast_snooping 0
+ ip link add name br1 type bridge vlan_filtering 0 mcast_snooping 0
+ ip link set dev br1 addrgenmode none
+ ip link set dev br1 up
ip link add name vx1 up type vxlan id 1000 \
local 192.0.2.17 remote 192.0.2.18 \
dstport 4789 nolearning noudpcsum tos inherit ttl 100
@@ -46,7 +48,9 @@ netdev_pre_up_test()
ip link set dev $swp1 master br1
check_err $?
- ip link add name br2 up type bridge vlan_filtering 0 mcast_snooping 0
+ ip link add name br2 type bridge vlan_filtering 0 mcast_snooping 0
+ ip link set dev br2 addrgenmode none
+ ip link set dev br2 up
ip link add name vx2 up type vxlan id 2000 \
local 192.0.2.17 remote 192.0.2.18 \
dstport 4789 nolearning noudpcsum tos inherit ttl 100
@@ -81,7 +85,9 @@ vxlan_vlan_add_test()
{
RET=0
- ip link add name br1 up type bridge vlan_filtering 1 mcast_snooping 0
+ ip link add name br1 type bridge vlan_filtering 1 mcast_snooping 0
+ ip link set dev br1 addrgenmode none
+ ip link set dev br1 up
# Unsupported configuration: mlxsw demands VXLAN with "noudpcsum".
ip link add name vx1 up type vxlan id 1000 \
@@ -117,7 +123,9 @@ vxlan_bridge_create_test()
dstport 4789 tos inherit ttl 100
# Test with VLAN-aware bridge.
- ip link add name br1 up type bridge vlan_filtering 1 mcast_snooping 0
+ ip link add name br1 type bridge vlan_filtering 1 mcast_snooping 0
+ ip link set dev br1 addrgenmode none
+ ip link set dev br1 up
ip link set dev vx1 master br1
@@ -142,8 +150,12 @@ bridge_create_test()
{
RET=0
- ip link add name br1 up type bridge vlan_filtering 1
- ip link add name br2 up type bridge vlan_filtering 1
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 addrgenmode none
+ ip link set dev br1 up
+ ip link add name br2 type bridge vlan_filtering 1
+ ip link set dev br2 addrgenmode none
+ ip link set dev br2 up
ip link set dev $swp1 master br1
check_err $?
diff --git a/tools/testing/selftests/drivers/net/mlxsw/ingress_rif_conf_1d.sh b/tools/testing/selftests/drivers/net/mlxsw/ingress_rif_conf_1d.sh
index df2b09966886..7d7f862c809c 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/ingress_rif_conf_1d.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/ingress_rif_conf_1d.sh
@@ -15,10 +15,9 @@
# +----------------|--+ +--|-----------------+
# | |
# +----------------|-------------------------|-----------------+
-# | SW | | |
+# | SW $swp1 + + $swp2 |
+# | | | |
# | +--------------|-------------------------|---------------+ |
-# | | $swp1 + + $swp2 | |
-# | | | | | |
# | | $swp1.10 + + $swp2.10 | |
# | | | |
# | | br0 | |
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
index e00435753008..e5589e2fca85 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
@@ -165,6 +165,7 @@ mirror_gre_setup_prepare()
simple_if_init $h3
ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 addrgenmode none
ip link set dev br1 up
ip link set dev $swp1 master br1
diff --git a/tools/testing/selftests/drivers/net/mlxsw/one_armed_router.sh b/tools/testing/selftests/drivers/net/mlxsw/one_armed_router.sh
index f02d83e94576..fca0e1e642c6 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/one_armed_router.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/one_armed_router.sh
@@ -83,7 +83,8 @@ h2_destroy()
switch_create()
{
- ip link add name br0 type bridge mcast_snooping 0
+ ip link add name br0 address $(mac_get $swp1) \
+ type bridge mcast_snooping 0
ip link set dev br0 up
ip link set dev $swp1 master br0
diff --git a/tools/testing/selftests/drivers/net/mlxsw/q_in_q_veto.sh b/tools/testing/selftests/drivers/net/mlxsw/q_in_q_veto.sh
index 7edaed8eb86a..00d55b0e98c1 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/q_in_q_veto.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/q_in_q_veto.sh
@@ -48,6 +48,7 @@ create_vlan_upper_on_top_of_bridge()
ip link add dev br0 type bridge vlan_filtering 1 \
vlan_protocol $bridge_proto vlan_default_pvid 0 mcast_snooping 0
+ ip link set dev br0 addrgenmode none
ip link set dev br0 up
ip link set dev $swp1 master br0
@@ -88,6 +89,7 @@ create_8021ad_vlan_upper_on_top_bridge_port()
ip link add dev br0 type bridge vlan_filtering 1 \
vlan_default_pvid 0 mcast_snooping 0
+ ip link set dev br0 addrgenmode none
ip link set dev $swp1 master br0
ip link set dev br0 up
@@ -155,6 +157,7 @@ create_vlan_upper_on_top_front_panel_enslaved_to_8021ad_bridge()
ip link add dev br0 type bridge vlan_filtering 1 \
vlan_protocol 802.1ad vlan_default_pvid 0 mcast_snooping 0
+ ip link set dev br0 addrgenmode none
ip link set dev br0 up
ip link set dev $swp1 master br0
@@ -177,6 +180,7 @@ create_vlan_upper_on_top_lag_enslaved_to_8021ad_bridge()
ip link add dev br0 type bridge vlan_filtering 1 \
vlan_protocol 802.1ad vlan_default_pvid 0 mcast_snooping 0
+ ip link set dev br0 addrgenmode none
ip link set dev br0 up
ip link add name bond1 type bond mode 802.3ad
@@ -203,6 +207,7 @@ enslave_front_panel_with_vlan_upper_to_8021ad_bridge()
ip link add dev br0 type bridge vlan_filtering 1 \
vlan_protocol 802.1ad vlan_default_pvid 0 mcast_snooping 0
+ ip link set dev br0 addrgenmode none
ip link set dev br0 up
ip link add name $swp1.100 link $swp1 type vlan id 100
@@ -225,6 +230,7 @@ enslave_lag_with_vlan_upper_to_8021ad_bridge()
ip link add dev br0 type bridge vlan_filtering 1 \
vlan_protocol 802.1ad vlan_default_pvid 0 mcast_snooping 0
+ ip link set dev br0 addrgenmode none
ip link set dev br0 up
ip link add name bond1 type bond mode 802.3ad
@@ -252,6 +258,7 @@ add_ip_address_to_8021ad_bridge()
ip link add dev br0 type bridge vlan_filtering 1 \
vlan_protocol 802.1ad vlan_default_pvid 0 mcast_snooping 0
+ ip link set dev br0 addrgenmode none
ip link set dev br0 up
ip link set dev $swp1 master br0
@@ -273,6 +280,7 @@ switch_bridge_protocol_from_8021q_to_8021ad()
ip link add dev br0 type bridge vlan_filtering 1 \
vlan_protocol 802.1ad vlan_default_pvid 0 mcast_snooping 0
+ ip link set dev br0 addrgenmode none
ip link set dev br0 up
ip link set dev $swp1 master br0
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
index 87c41f5727c9..914c63d6318a 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
@@ -65,6 +65,7 @@ h2_destroy()
switch_create()
{
ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 addrgenmode none
ip link set dev br1 up
ip link set dev $swp1 master br1
ip link set dev $swp1 up
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
index 690d8daa71b4..fee74f215cec 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
@@ -138,11 +138,15 @@ switch_create()
vlan_create $swp3 111
vlan_create $swp3 222
- ip link add name br111 up type bridge vlan_filtering 0
+ ip link add name br111 type bridge vlan_filtering 0
+ ip link set dev br111 addrgenmode none
+ ip link set dev br111 up
ip link set dev $swp1.111 master br111
ip link set dev $swp3.111 master br111
- ip link add name br222 up type bridge vlan_filtering 0
+ ip link add name br222 type bridge vlan_filtering 0
+ ip link set dev br222 addrgenmode none
+ ip link set dev br222 up
ip link set dev $swp2.222 master br222
ip link set dev $swp3.222 master br222
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
index c8e55fa91660..6d892de43fa8 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
@@ -135,11 +135,13 @@ switch_create()
prio bands 8 priomap 7 7 7 7 7 7 7 7
ip link add name br1 type bridge vlan_filtering 0
+ ip link set dev br1 addrgenmode none
ip link set dev br1 up
ip link set dev $swp1 master br1
ip link set dev $swp3 master br1
ip link add name br111 type bridge vlan_filtering 0
+ ip link set dev br111 addrgenmode none
ip link set dev br111 up
ip link set dev $swp2.111 master br111
ip link set dev $swp3.111 master br111
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/q_in_vni_veto.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/q_in_vni_veto.sh
index f0443b1b05b9..60753d46a2d4 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/q_in_vni_veto.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/q_in_vni_veto.sh
@@ -34,6 +34,7 @@ create_vxlan_on_top_of_8021ad_bridge()
ip link add dev br0 type bridge vlan_filtering 1 vlan_protocol 802.1ad \
vlan_default_pvid 0 mcast_snooping 0
+ ip link set dev br0 addrgenmode none
ip link set dev br0 up
ip link add name vx100 type vxlan id 1000 local 192.0.2.17 dstport \
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
index 99a332b712f0..4687b0a7dffb 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
@@ -444,8 +444,12 @@ offload_indication_setup_create()
{
# Create a simple setup with two bridges, each with a VxLAN device
# and one local port
- ip link add name br0 up type bridge mcast_snooping 0
- ip link add name br1 up type bridge mcast_snooping 0
+ ip link add name br0 type bridge mcast_snooping 0
+ ip link set dev br0 addrgenmode none
+ ip link set dev br0 up
+ ip link add name br1 type bridge mcast_snooping 0
+ ip link set dev br1 addrgenmode none
+ ip link set dev br1 up
ip link set dev $swp1 master br0
ip link set dev $swp2 master br1
@@ -646,8 +650,12 @@ offload_indication_decap_route_test()
RET=0
- ip link add name br0 up type bridge mcast_snooping 0
- ip link add name br1 up type bridge mcast_snooping 0
+ ip link add name br0 type bridge mcast_snooping 0
+ ip link set dev br0 addrgenmode none
+ ip link set dev br0 up
+ ip link add name br1 type bridge mcast_snooping 0
+ ip link set dev br1 addrgenmode none
+ ip link set dev br1 up
ip link set dev $swp1 master br0
ip link set dev $swp2 master br1
ip link set dev vxlan0 master br0
@@ -780,7 +788,9 @@ __offload_indication_join_vxlan_first()
offload_indication_join_vxlan_first()
{
- ip link add dev br0 up type bridge mcast_snooping 0
+ ip link add dev br0 type bridge mcast_snooping 0
+ ip link set dev br0 addrgenmode none
+ ip link set dev br0 up
ip link add name vxlan0 up type vxlan id 10 nolearning $UDPCSUM_FLAFS \
ttl 20 tos inherit local $LOCAL_IP_1 dstport 4789
@@ -815,7 +825,9 @@ __offload_indication_join_vxlan_last()
offload_indication_join_vxlan_last()
{
- ip link add dev br0 up type bridge mcast_snooping 0
+ ip link add dev br0 type bridge mcast_snooping 0
+ ip link set dev br0 addrgenmode none
+ ip link set dev br0 up
ip link add name vxlan0 up type vxlan id 10 nolearning $UDPCSUM_FLAFS \
ttl 20 tos inherit local $LOCAL_IP_1 dstport 4789
@@ -842,6 +854,7 @@ sanitization_vlan_aware_test()
RET=0
ip link add dev br0 type bridge mcast_snooping 0 vlan_filtering 1
+ ip link set dev br0 addrgenmode none
ip link add name vxlan10 up master br0 type vxlan id 10 nolearning \
$UDPCSUM_FLAFS ttl 20 tos inherit local $LOCAL_IP_1 dstport 4789
@@ -915,8 +928,10 @@ offload_indication_vlan_aware_setup_create()
{
# Create a simple setup with two VxLAN devices and a single VLAN-aware
# bridge
- ip link add name br0 up type bridge mcast_snooping 0 vlan_filtering 1 \
+ ip link add name br0 type bridge mcast_snooping 0 vlan_filtering 1 \
vlan_default_pvid 0
+ ip link set dev br0 addrgenmode none
+ ip link set dev br0 up
ip link set dev $swp1 master br0
@@ -1060,8 +1075,10 @@ offload_indication_vlan_aware_decap_route_test()
offload_indication_vlan_aware_join_vxlan_first()
{
- ip link add dev br0 up type bridge mcast_snooping 0 \
+ ip link add dev br0 type bridge mcast_snooping 0 \
vlan_filtering 1 vlan_default_pvid 1
+ ip link set dev br0 addrgenmode none
+ ip link set dev br0 up
ip link add name vxlan0 up type vxlan id 10 nolearning $UDPCSUM_FLAFS \
ttl 20 tos inherit local $LOCAL_IP_1 dstport 4789
@@ -1073,8 +1090,10 @@ offload_indication_vlan_aware_join_vxlan_first()
offload_indication_vlan_aware_join_vxlan_last()
{
- ip link add dev br0 up type bridge mcast_snooping 0 \
+ ip link add dev br0 type bridge mcast_snooping 0 \
vlan_filtering 1 vlan_default_pvid 1
+ ip link set dev br0 addrgenmode none
+ ip link set dev br0 up
ip link add name vxlan0 up type vxlan id 10 nolearning $UDPCSUM_FLAFS \
ttl 20 tos inherit local $LOCAL_IP_1 dstport 4789
@@ -1091,8 +1110,10 @@ offload_indication_vlan_aware_l3vni_test()
RET=0
sysctl_set net.ipv6.conf.default.disable_ipv6 1
- ip link add dev br0 up type bridge mcast_snooping 0 \
+ ip link add dev br0 type bridge mcast_snooping 0 \
vlan_filtering 1 vlan_default_pvid 0
+ ip link set dev br0 addrgenmode none
+ ip link set dev br0 up
ip link add name vxlan0 up type vxlan id 10 nolearning $UDPCSUM_FLAFS \
ttl 20 tos inherit local $LOCAL_IP_1 dstport 4789
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index f27a7338b60e..501854a89cc0 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -29,6 +29,7 @@ reuseport_bpf_numa
reuseport_dualstack
rxtimestamp
sctp_hello
+scm_pidfd
sk_bind_sendto_listen
sk_connect_zero_addr
socket
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index c12df57d5539..7f3ab2a93ed6 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -84,6 +84,7 @@ TEST_GEN_FILES += ip_local_port_range
TEST_GEN_FILES += bind_wildcard
TEST_PROGS += test_vxlan_mdb.sh
TEST_PROGS += test_bridge_neigh_suppress.sh
+TEST_PROGS += test_vxlan_nolocalbypass.sh
TEST_FILES := settings
diff --git a/tools/testing/selftests/net/af_unix/Makefile b/tools/testing/selftests/net/af_unix/Makefile
index 1e4b397cece6..221c387a7d7f 100644
--- a/tools/testing/selftests/net/af_unix/Makefile
+++ b/tools/testing/selftests/net/af_unix/Makefile
@@ -1,3 +1,4 @@
-TEST_GEN_PROGS := diag_uid test_unix_oob unix_connect
+CFLAGS += $(KHDR_INCLUDES)
+TEST_GEN_PROGS := diag_uid test_unix_oob unix_connect scm_pidfd
include ../../lib.mk
diff --git a/tools/testing/selftests/net/af_unix/scm_pidfd.c b/tools/testing/selftests/net/af_unix/scm_pidfd.c
new file mode 100644
index 000000000000..a86222143d79
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/scm_pidfd.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+#define _GNU_SOURCE
+#include <error.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+#include <linux/socket.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/un.h>
+#include <sys/signal.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include "../../kselftest_harness.h"
+
+#define clean_errno() (errno == 0 ? "None" : strerror(errno))
+#define log_err(MSG, ...) \
+ fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", __FILE__, __LINE__, \
+ clean_errno(), ##__VA_ARGS__)
+
+#ifndef SCM_PIDFD
+#define SCM_PIDFD 0x04
+#endif
+
+static void child_die()
+{
+ exit(1);
+}
+
+static int safe_int(const char *numstr, int *converted)
+{
+ char *err = NULL;
+ long sli;
+
+ errno = 0;
+ sli = strtol(numstr, &err, 0);
+ if (errno == ERANGE && (sli == LONG_MAX || sli == LONG_MIN))
+ return -ERANGE;
+
+ if (errno != 0 && sli == 0)
+ return -EINVAL;
+
+ if (err == numstr || *err != '\0')
+ return -EINVAL;
+
+ if (sli > INT_MAX || sli < INT_MIN)
+ return -ERANGE;
+
+ *converted = (int)sli;
+ return 0;
+}
+
+static int char_left_gc(const char *buffer, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (buffer[i] == ' ' || buffer[i] == '\t')
+ continue;
+
+ return i;
+ }
+
+ return 0;
+}
+
+static int char_right_gc(const char *buffer, size_t len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ if (buffer[i] == ' ' || buffer[i] == '\t' ||
+ buffer[i] == '\n' || buffer[i] == '\0')
+ continue;
+
+ return i + 1;
+ }
+
+ return 0;
+}
+
+static char *trim_whitespace_in_place(char *buffer)
+{
+ buffer += char_left_gc(buffer, strlen(buffer));
+ buffer[char_right_gc(buffer, strlen(buffer))] = '\0';
+ return buffer;
+}
+
+/* borrowed (with all helpers) from pidfd/pidfd_open_test.c */
+static pid_t get_pid_from_fdinfo_file(int pidfd, const char *key, size_t keylen)
+{
+ int ret;
+ char path[512];
+ FILE *f;
+ size_t n = 0;
+ pid_t result = -1;
+ char *line = NULL;
+
+ snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", pidfd);
+
+ f = fopen(path, "re");
+ if (!f)
+ return -1;
+
+ while (getline(&line, &n, f) != -1) {
+ char *numstr;
+
+ if (strncmp(line, key, keylen))
+ continue;
+
+ numstr = trim_whitespace_in_place(line + 4);
+ ret = safe_int(numstr, &result);
+ if (ret < 0)
+ goto out;
+
+ break;
+ }
+
+out:
+ free(line);
+ fclose(f);
+ return result;
+}
+
+static int cmsg_check(int fd)
+{
+ struct msghdr msg = { 0 };
+ struct cmsghdr *cmsg;
+ struct iovec iov;
+ struct ucred *ucred = NULL;
+ int data = 0;
+ char control[CMSG_SPACE(sizeof(struct ucred)) +
+ CMSG_SPACE(sizeof(int))] = { 0 };
+ int *pidfd = NULL;
+ pid_t parent_pid;
+ int err;
+
+ iov.iov_base = &data;
+ iov.iov_len = sizeof(data);
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = control;
+ msg.msg_controllen = sizeof(control);
+
+ err = recvmsg(fd, &msg, 0);
+ if (err < 0) {
+ log_err("recvmsg");
+ return 1;
+ }
+
+ if (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
+ log_err("recvmsg: truncated");
+ return 1;
+ }
+
+ for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
+ cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+ if (cmsg->cmsg_level == SOL_SOCKET &&
+ cmsg->cmsg_type == SCM_PIDFD) {
+ if (cmsg->cmsg_len < sizeof(*pidfd)) {
+ log_err("CMSG parse: SCM_PIDFD wrong len");
+ return 1;
+ }
+
+ pidfd = (void *)CMSG_DATA(cmsg);
+ }
+
+ if (cmsg->cmsg_level == SOL_SOCKET &&
+ cmsg->cmsg_type == SCM_CREDENTIALS) {
+ if (cmsg->cmsg_len < sizeof(*ucred)) {
+ log_err("CMSG parse: SCM_CREDENTIALS wrong len");
+ return 1;
+ }
+
+ ucred = (void *)CMSG_DATA(cmsg);
+ }
+ }
+
+ /* send(pfd, "x", sizeof(char), 0) */
+ if (data != 'x') {
+ log_err("recvmsg: data corruption");
+ return 1;
+ }
+
+ if (!pidfd) {
+ log_err("CMSG parse: SCM_PIDFD not found");
+ return 1;
+ }
+
+ if (!ucred) {
+ log_err("CMSG parse: SCM_CREDENTIALS not found");
+ return 1;
+ }
+
+ /* pidfd from SCM_PIDFD should point to the parent process PID */
+ parent_pid =
+ get_pid_from_fdinfo_file(*pidfd, "Pid:", sizeof("Pid:") - 1);
+ if (parent_pid != getppid()) {
+ log_err("wrong SCM_PIDFD %d != %d", parent_pid, getppid());
+ return 1;
+ }
+
+ return 0;
+}
+
+struct sock_addr {
+ char sock_name[32];
+ struct sockaddr_un listen_addr;
+ socklen_t addrlen;
+};
+
+FIXTURE(scm_pidfd)
+{
+ int server;
+ pid_t client_pid;
+ int startup_pipe[2];
+ struct sock_addr server_addr;
+ struct sock_addr *client_addr;
+};
+
+FIXTURE_VARIANT(scm_pidfd)
+{
+ int type;
+ bool abstract;
+};
+
+FIXTURE_VARIANT_ADD(scm_pidfd, stream_pathname)
+{
+ .type = SOCK_STREAM,
+ .abstract = 0,
+};
+
+FIXTURE_VARIANT_ADD(scm_pidfd, stream_abstract)
+{
+ .type = SOCK_STREAM,
+ .abstract = 1,
+};
+
+FIXTURE_VARIANT_ADD(scm_pidfd, dgram_pathname)
+{
+ .type = SOCK_DGRAM,
+ .abstract = 0,
+};
+
+FIXTURE_VARIANT_ADD(scm_pidfd, dgram_abstract)
+{
+ .type = SOCK_DGRAM,
+ .abstract = 1,
+};
+
+FIXTURE_SETUP(scm_pidfd)
+{
+ self->client_addr = mmap(NULL, sizeof(*self->client_addr), PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(MAP_FAILED, self->client_addr);
+}
+
+FIXTURE_TEARDOWN(scm_pidfd)
+{
+ close(self->server);
+
+ kill(self->client_pid, SIGKILL);
+ waitpid(self->client_pid, NULL, 0);
+
+ if (!variant->abstract) {
+ unlink(self->server_addr.sock_name);
+ unlink(self->client_addr->sock_name);
+ }
+}
+
+static void fill_sockaddr(struct sock_addr *addr, bool abstract)
+{
+ char *sun_path_buf = (char *)&addr->listen_addr.sun_path;
+
+ addr->listen_addr.sun_family = AF_UNIX;
+ addr->addrlen = offsetof(struct sockaddr_un, sun_path);
+ snprintf(addr->sock_name, sizeof(addr->sock_name), "scm_pidfd_%d", getpid());
+ addr->addrlen += strlen(addr->sock_name);
+ if (abstract) {
+ *sun_path_buf = '\0';
+ addr->addrlen++;
+ sun_path_buf++;
+ } else {
+ unlink(addr->sock_name);
+ }
+ memcpy(sun_path_buf, addr->sock_name, strlen(addr->sock_name));
+}
+
+static void client(FIXTURE_DATA(scm_pidfd) *self,
+ const FIXTURE_VARIANT(scm_pidfd) *variant)
+{
+ int err;
+ int cfd;
+ socklen_t len;
+ struct ucred peer_cred;
+ int peer_pidfd;
+ pid_t peer_pid;
+ int on = 0;
+
+ cfd = socket(AF_UNIX, variant->type, 0);
+ if (cfd < 0) {
+ log_err("socket");
+ child_die();
+ }
+
+ if (variant->type == SOCK_DGRAM) {
+ fill_sockaddr(self->client_addr, variant->abstract);
+
+ if (bind(cfd, (struct sockaddr *)&self->client_addr->listen_addr, self->client_addr->addrlen)) {
+ log_err("bind");
+ child_die();
+ }
+ }
+
+ if (connect(cfd, (struct sockaddr *)&self->server_addr.listen_addr,
+ self->server_addr.addrlen) != 0) {
+ log_err("connect");
+ child_die();
+ }
+
+ on = 1;
+ if (setsockopt(cfd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on))) {
+ log_err("Failed to set SO_PASSCRED");
+ child_die();
+ }
+
+ if (setsockopt(cfd, SOL_SOCKET, SO_PASSPIDFD, &on, sizeof(on))) {
+ log_err("Failed to set SO_PASSPIDFD");
+ child_die();
+ }
+
+ close(self->startup_pipe[1]);
+
+ if (cmsg_check(cfd)) {
+ log_err("cmsg_check failed");
+ child_die();
+ }
+
+ /* skip further for SOCK_DGRAM as it's not applicable */
+ if (variant->type == SOCK_DGRAM)
+ return;
+
+ len = sizeof(peer_cred);
+ if (getsockopt(cfd, SOL_SOCKET, SO_PEERCRED, &peer_cred, &len)) {
+ log_err("Failed to get SO_PEERCRED");
+ child_die();
+ }
+
+ len = sizeof(peer_pidfd);
+ if (getsockopt(cfd, SOL_SOCKET, SO_PEERPIDFD, &peer_pidfd, &len)) {
+ log_err("Failed to get SO_PEERPIDFD");
+ child_die();
+ }
+
+ /* pid from SO_PEERCRED should point to the parent process PID */
+ if (peer_cred.pid != getppid()) {
+ log_err("peer_cred.pid != getppid(): %d != %d", peer_cred.pid, getppid());
+ child_die();
+ }
+
+ peer_pid = get_pid_from_fdinfo_file(peer_pidfd,
+ "Pid:", sizeof("Pid:") - 1);
+ if (peer_pid != peer_cred.pid) {
+ log_err("peer_pid != peer_cred.pid: %d != %d", peer_pid, peer_cred.pid);
+ child_die();
+ }
+}
+
+TEST_F(scm_pidfd, test)
+{
+ int err;
+ int pfd;
+ int child_status = 0;
+
+ self->server = socket(AF_UNIX, variant->type, 0);
+ ASSERT_NE(-1, self->server);
+
+ fill_sockaddr(&self->server_addr, variant->abstract);
+
+ err = bind(self->server, (struct sockaddr *)&self->server_addr.listen_addr, self->server_addr.addrlen);
+ ASSERT_EQ(0, err);
+
+ if (variant->type == SOCK_STREAM) {
+ err = listen(self->server, 1);
+ ASSERT_EQ(0, err);
+ }
+
+ err = pipe(self->startup_pipe);
+ ASSERT_NE(-1, err);
+
+ self->client_pid = fork();
+ ASSERT_NE(-1, self->client_pid);
+ if (self->client_pid == 0) {
+ close(self->server);
+ close(self->startup_pipe[0]);
+ client(self, variant);
+ exit(0);
+ }
+ close(self->startup_pipe[1]);
+
+ if (variant->type == SOCK_STREAM) {
+ pfd = accept(self->server, NULL, NULL);
+ ASSERT_NE(-1, pfd);
+ } else {
+ pfd = self->server;
+ }
+
+ /* wait until the child arrives at checkpoint */
+ read(self->startup_pipe[0], &err, sizeof(int));
+ close(self->startup_pipe[0]);
+
+ if (variant->type == SOCK_DGRAM) {
+ err = sendto(pfd, "x", sizeof(char), 0, (struct sockaddr *)&self->client_addr->listen_addr, self->client_addr->addrlen);
+ ASSERT_NE(-1, err);
+ } else {
+ err = send(pfd, "x", sizeof(char), 0);
+ ASSERT_NE(-1, err);
+ }
+
+ close(pfd);
+ waitpid(self->client_pid, &child_status, 0);
+ ASSERT_EQ(0, WIFEXITED(child_status) ? WEXITSTATUS(child_status) : 1);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index ee6880ac3e5e..d32a14ba069a 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -592,6 +592,20 @@ ipv4_ping_novrf()
done
#
+ # out, but don't use gateway if peer is not on link
+ #
+ a=${NSB_IP}
+ log_start
+ run_cmd ping -c 1 -w 1 -r ${a}
+ log_test_addr ${a} $? 0 "ping out (don't route), peer on link"
+
+ a=${NSB_LO_IP}
+ log_start
+ show_hint "Fails since peer is not on link"
+ run_cmd ping -c 1 -w 1 -r ${a}
+ log_test_addr ${a} $? 1 "ping out (don't route), peer not on link"
+
+ #
# in
#
for a in ${NSA_IP} ${NSA_LO_IP}
@@ -1105,6 +1119,59 @@ test_ipv4_md5_vrf__global_server__bind_ifindex0()
set_sysctl net.ipv4.tcp_l3mdev_accept="$old_tcp_l3mdev_accept"
}
+ipv4_tcp_dontroute()
+{
+ local syncookies=$1
+ local nsa_syncookies
+ local nsb_syncookies
+ local a
+
+ #
+ # Link local connection tests (SO_DONTROUTE).
+ # Connections should succeed only when the remote IP address is
+ # on link (doesn't need to be routed through a gateway).
+ #
+
+ nsa_syncookies=$(ip netns exec "${NSA}" sysctl -n net.ipv4.tcp_syncookies)
+ nsb_syncookies=$(ip netns exec "${NSB}" sysctl -n net.ipv4.tcp_syncookies)
+ ip netns exec "${NSA}" sysctl -wq net.ipv4.tcp_syncookies=${syncookies}
+ ip netns exec "${NSB}" sysctl -wq net.ipv4.tcp_syncookies=${syncookies}
+
+ # Test with eth1 address (on link).
+
+ a=${NSB_IP}
+ log_start
+ do_run_cmd nettest -B -N "${NSA}" -O "${NSB}" -r ${a} --client-dontroute
+ log_test_addr ${a} $? 0 "SO_DONTROUTE client, syncookies=${syncookies}"
+
+ a=${NSB_IP}
+ log_start
+ do_run_cmd nettest -B -N "${NSA}" -O "${NSB}" -r ${a} --server-dontroute
+ log_test_addr ${a} $? 0 "SO_DONTROUTE server, syncookies=${syncookies}"
+
+ # Test with loopback address (routed).
+ #
+ # The client would use the eth1 address as source IP by default.
+ # Therefore, we need to use the -c option here, to force the use of the
+ # routed (loopback) address as source IP (so that the server will try
+ # to respond to a routed address and not a link local one).
+
+ a=${NSB_LO_IP}
+ log_start
+ show_hint "Should fail 'Network is unreachable' since server is not on link"
+ do_run_cmd nettest -B -N "${NSA}" -O "${NSB}" -c "${NSA_LO_IP}" -r ${a} --client-dontroute
+ log_test_addr ${a} $? 1 "SO_DONTROUTE client, syncookies=${syncookies}"
+
+ a=${NSB_LO_IP}
+ log_start
+ show_hint "Should timeout since server cannot respond (client is not on link)"
+ do_run_cmd nettest -B -N "${NSA}" -O "${NSB}" -c "${NSA_LO_IP}" -r ${a} --server-dontroute
+ log_test_addr ${a} $? 2 "SO_DONTROUTE server, syncookies=${syncookies}"
+
+ ip netns exec "${NSB}" sysctl -wq net.ipv4.tcp_syncookies=${nsb_syncookies}
+ ip netns exec "${NSA}" sysctl -wq net.ipv4.tcp_syncookies=${nsa_syncookies}
+}
+
ipv4_tcp_novrf()
{
local a
@@ -1224,6 +1291,9 @@ ipv4_tcp_novrf()
log_test_addr ${a} $? 1 "No server, device client, local conn"
[ "$fips_enabled" = "1" ] || ipv4_tcp_md5_novrf
+
+ ipv4_tcp_dontroute 0
+ ipv4_tcp_dontroute 2
}
ipv4_tcp_vrf()
@@ -1594,6 +1664,23 @@ ipv4_udp_novrf()
log_start
run_cmd nettest -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 2 "No server, device client, local conn"
+
+ #
+ # Link local connection tests (SO_DONTROUTE).
+ # Connections should succeed only when the remote IP address is
+ # on link (doesn't need to be routed through a gateway).
+ #
+
+ a=${NSB_IP}
+ log_start
+ do_run_cmd nettest -B -D -N "${NSA}" -O "${NSB}" -r ${a} --client-dontroute
+ log_test_addr ${a} $? 0 "SO_DONTROUTE client"
+
+ a=${NSB_LO_IP}
+ log_start
+ show_hint "Should fail 'Network is unreachable' since server is not on link"
+ do_run_cmd nettest -B -D -N "${NSA}" -O "${NSB}" -r ${a} --client-dontroute
+ log_test_addr ${a} $? 1 "SO_DONTROUTE client"
}
ipv4_udp_vrf()
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index a474c60fe348..770efbe24f0d 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -83,6 +83,8 @@ TEST_PROGS = bridge_igmp.sh \
tc_chains.sh \
tc_flower_router.sh \
tc_flower.sh \
+ tc_flower_l2_miss.sh \
+ tc_flower_cfm.sh \
tc_mpls_l2vpn.sh \
tc_police.sh \
tc_shblocks.sh \
diff --git a/tools/testing/selftests/net/forwarding/dual_vxlan_bridge.sh b/tools/testing/selftests/net/forwarding/dual_vxlan_bridge.sh
index 5148d97a5df8..68ee92df3e07 100755
--- a/tools/testing/selftests/net/forwarding/dual_vxlan_bridge.sh
+++ b/tools/testing/selftests/net/forwarding/dual_vxlan_bridge.sh
@@ -132,6 +132,7 @@ switch_create()
#### BR1 ####
ip link add name br1 type bridge vlan_filtering 1 \
vlan_protocol 802.1ad vlan_default_pvid 0 mcast_snooping 0
+ ip link set dev br1 addrgenmode none
# Make sure the bridge uses the MAC address of the local port and not
# that of the VxLAN's device.
ip link set dev br1 address $(mac_get $swp1)
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh
index 360ca133bead..6c257ec03756 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh
@@ -98,6 +98,7 @@ switch_create()
# Bridge between H1 and H2.
ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 addrgenmode none
ip link set dev br1 up
ip link set dev $swp1 master br1
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
index aec752a22e9e..04fd14b0a9b7 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
@@ -65,7 +65,8 @@ setup_prepare()
vrf_prepare
mirror_gre_topo_create
- ip link add name br2 type bridge vlan_filtering 0
+ ip link add name br2 address $(mac_get $swp3) \
+ type bridge vlan_filtering 0
ip link set dev br2 up
ip link set dev $swp3 master br2
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
index 1b27f2b0f196..f35313c76fac 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
@@ -35,7 +35,8 @@ setup_prepare()
vrf_prepare
mirror_gre_topo_create
- ip link add name br2 type bridge vlan_filtering 0
+ ip link add name br2 address $(mac_get $swp3) \
+ type bridge vlan_filtering 0
ip link set dev br2 up
vlan_create $swp3 555
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
index 91e431cd919e..c53148b1dc63 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
@@ -140,7 +140,8 @@ switch_create()
ip link set dev $swp3 up
ip link set dev $swp4 up
- ip link add name br1 type bridge vlan_filtering 1
+ ip link add name br1 address $(mac_get $swp3) \
+ type bridge vlan_filtering 1
team_create lag loadbalance $swp3 $swp4
ip link set dev lag master br1
diff --git a/tools/testing/selftests/net/forwarding/mirror_topo_lib.sh b/tools/testing/selftests/net/forwarding/mirror_topo_lib.sh
index 04979e5962e7..bb1adbb7b98a 100644
--- a/tools/testing/selftests/net/forwarding/mirror_topo_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_topo_lib.sh
@@ -60,6 +60,7 @@ mirror_topo_switch_create()
ip link set dev $swp3 up
ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 addrgenmode none
ip link set dev br1 up
ip link set dev $swp1 master br1
diff --git a/tools/testing/selftests/net/forwarding/pedit_dsfield.sh b/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
index 64fbd211d907..af008fbf2725 100755
--- a/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
+++ b/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
@@ -60,7 +60,9 @@ h2_destroy()
switch_create()
{
- ip link add name br1 up type bridge vlan_filtering 1
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 addrgenmode none
+ ip link set dev br1 up
ip link set dev $swp1 master br1
ip link set dev $swp1 up
ip link set dev $swp2 master br1
diff --git a/tools/testing/selftests/net/forwarding/q_in_vni.sh b/tools/testing/selftests/net/forwarding/q_in_vni.sh
index 4c50c0234bce..798b13525c02 100755
--- a/tools/testing/selftests/net/forwarding/q_in_vni.sh
+++ b/tools/testing/selftests/net/forwarding/q_in_vni.sh
@@ -137,6 +137,7 @@ switch_create()
{
ip link add name br1 type bridge vlan_filtering 1 vlan_protocol 802.1ad \
vlan_default_pvid 0 mcast_snooping 0
+ ip link set dev br1 addrgenmode none
# Make sure the bridge uses the MAC address of the local port and not
# that of the VxLAN's device.
ip link set dev br1 address $(mac_get $swp1)
diff --git a/tools/testing/selftests/net/forwarding/router_bridge.sh b/tools/testing/selftests/net/forwarding/router_bridge.sh
index ebc596a272f7..8ce0aed54ece 100755
--- a/tools/testing/selftests/net/forwarding/router_bridge.sh
+++ b/tools/testing/selftests/net/forwarding/router_bridge.sh
@@ -38,7 +38,8 @@ h2_destroy()
router_create()
{
- ip link add name br1 type bridge vlan_filtering 1
+ ip link add name br1 address $(mac_get $swp1) \
+ type bridge vlan_filtering 1
ip link set dev br1 up
ip link set dev $swp1 master br1
diff --git a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
index fa6a88c50750..de2b2d5480dd 100755
--- a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
@@ -1,6 +1,28 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# +------------------------+ +----------------------+
+# | H1 (vrf) | | H2 (vrf) |
+# | + $h1.555 | | + $h2 |
+# | | 192.0.2.1/28 | | | 192.0.2.130/28 |
+# | | 2001:db8:1::1/64 | | | 2001:db8:2::2/64 |
+# | | | | | |
+# | + $h1 | | | |
+# +----|-------------------+ +--|-------------------+
+# | |
+# +----|--------------------------------------------------|-------------------+
+# | SW | | |
+# | +--|-------------------------------+ + $swp2 |
+# | | + $swp1 | 192.0.2.129/28 |
+# | | vid 555 | 2001:db8:2::1/64 |
+# | | | |
+# | | + BR1 (802.1q) | |
+# | | vid 555 pvid untagged | |
+# | | 192.0.2.2/28 | |
+# | | 2001:db8:1::2/64 | |
+# | +----------------------------------+ |
+# +---------------------------------------------------------------------------+
+
ALL_TESTS="
ping_ipv4
ping_ipv6
@@ -41,7 +63,7 @@ h2_destroy()
router_create()
{
- ip link add name br1 type bridge vlan_filtering 1
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0
ip link set dev br1 up
ip link set dev $swp1 master br1
diff --git a/tools/testing/selftests/net/forwarding/skbedit_priority.sh b/tools/testing/selftests/net/forwarding/skbedit_priority.sh
index bde11dc27873..3dd5fcbd3eaa 100755
--- a/tools/testing/selftests/net/forwarding/skbedit_priority.sh
+++ b/tools/testing/selftests/net/forwarding/skbedit_priority.sh
@@ -54,7 +54,9 @@ h2_destroy()
switch_create()
{
- ip link add name br1 up type bridge vlan_filtering 1
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 addrgenmode none
+ ip link set dev br1 up
ip link set dev $swp1 master br1
ip link set dev $swp1 up
ip link set dev $swp2 master br1
diff --git a/tools/testing/selftests/net/forwarding/tc_flower_cfm.sh b/tools/testing/selftests/net/forwarding/tc_flower_cfm.sh
new file mode 100755
index 000000000000..3ca20df952eb
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/tc_flower_cfm.sh
@@ -0,0 +1,206 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="match_cfm_opcode match_cfm_level match_cfm_level_and_opcode"
+NUM_NETIFS=2
+source tc_common.sh
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2
+ tc qdisc add dev $h2 clsact
+}
+
+h2_destroy()
+{
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2
+}
+
+u8_to_hex()
+{
+ local u8=$1; shift
+
+ printf "%02x" $u8
+}
+
+generate_cfm_hdr()
+{
+ local mdl=$1; shift
+ local op=$1; shift
+ local flags=$1; shift
+ local tlv_offset=$1; shift
+
+ local cfm_hdr=$(:
+ )"$(u8_to_hex $((mdl << 5))):"$( : MD level and Version
+ )"$(u8_to_hex $op):"$( : OpCode
+ )"$(u8_to_hex $flags):"$( : Flags
+ )"$(u8_to_hex $tlv_offset)"$( : TLV offset
+ )
+
+ echo $cfm_hdr
+}
+
+match_cfm_opcode()
+{
+ local ethtype="89 02"; readonly ethtype
+ RET=0
+
+ tc filter add dev $h2 ingress protocol cfm pref 1 handle 101 \
+ flower cfm op 47 action drop
+ tc filter add dev $h2 ingress protocol cfm pref 1 handle 102 \
+ flower cfm op 43 action drop
+
+ pkt="$ethtype $(generate_cfm_hdr 7 47 0 32)"
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac "$pkt" -q
+ pkt="$ethtype $(generate_cfm_hdr 6 5 0 4)"
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac "$pkt" -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Did not match on correct opcode"
+
+ tc_check_packets "dev $h2 ingress" 102 0
+ check_err $? "Matched on the wrong opcode"
+
+ pkt="$ethtype $(generate_cfm_hdr 0 43 0 12)"
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac "$pkt" -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Matched on the wrong opcode"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct opcode"
+
+ tc filter del dev $h2 ingress protocol cfm pref 1 handle 101 flower
+ tc filter del dev $h2 ingress protocol cfm pref 1 handle 102 flower
+
+ log_test "CFM opcode match test"
+}
+
+match_cfm_level()
+{
+ local ethtype="89 02"; readonly ethtype
+ RET=0
+
+ tc filter add dev $h2 ingress protocol cfm pref 1 handle 101 \
+ flower cfm mdl 5 action drop
+ tc filter add dev $h2 ingress protocol cfm pref 1 handle 102 \
+ flower cfm mdl 3 action drop
+ tc filter add dev $h2 ingress protocol cfm pref 1 handle 103 \
+ flower cfm mdl 0 action drop
+
+ pkt="$ethtype $(generate_cfm_hdr 5 42 0 12)"
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac "$pkt" -q
+ pkt="$ethtype $(generate_cfm_hdr 6 1 0 70)"
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac "$pkt" -q
+ pkt="$ethtype $(generate_cfm_hdr 0 1 0 70)"
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac "$pkt" -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Did not match on correct level"
+
+ tc_check_packets "dev $h2 ingress" 102 0
+ check_err $? "Matched on the wrong level"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_err $? "Did not match on correct level"
+
+ pkt="$ethtype $(generate_cfm_hdr 3 0 0 4)"
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac "$pkt" -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Matched on the wrong level"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct level"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_err $? "Matched on the wrong level"
+
+ tc filter del dev $h2 ingress protocol cfm pref 1 handle 101 flower
+ tc filter del dev $h2 ingress protocol cfm pref 1 handle 102 flower
+ tc filter del dev $h2 ingress protocol cfm pref 1 handle 103 flower
+
+ log_test "CFM level match test"
+}
+
+match_cfm_level_and_opcode()
+{
+ local ethtype="89 02"; readonly ethtype
+ RET=0
+
+ tc filter add dev $h2 ingress protocol cfm pref 1 handle 101 \
+ flower cfm mdl 5 op 41 action drop
+ tc filter add dev $h2 ingress protocol cfm pref 1 handle 102 \
+ flower cfm mdl 7 op 42 action drop
+
+ pkt="$ethtype $(generate_cfm_hdr 5 41 0 4)"
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac "$pkt" -q
+ pkt="$ethtype $(generate_cfm_hdr 7 3 0 4)"
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac "$pkt" -q
+ pkt="$ethtype $(generate_cfm_hdr 3 42 0 12)"
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac "$pkt" -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Did not match on correct level and opcode"
+
+ tc_check_packets "dev $h2 ingress" 102 0
+ check_err $? "Matched on the wrong level and opcode"
+
+ pkt="$ethtype $(generate_cfm_hdr 7 42 0 12)"
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac "$pkt" -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Matched on the wrong level and opcode"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct level and opcode"
+
+ tc filter del dev $h2 ingress protocol cfm pref 1 handle 101 flower
+ tc filter del dev $h2 ingress protocol cfm pref 1 handle 102 flower
+
+ log_test "CFM opcode and level match test"
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ h2=${NETIFS[p2]}
+ h1mac=$(mac_get $h1)
+ h2mac=$(mac_get $h2)
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
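
The CFM header helper added above builds its payload one octet at a time with u8_to_hex, which can be hard to follow at a glance. Here is a small worked example; it is illustrative only and assumes nothing beyond the u8_to_hex and generate_cfm_hdr definitions in the file above.

    # First packet sent by match_cfm_opcode():
    generate_cfm_hdr 7 47 0 32
    # -> "e0:2f:00:20"
    #    first octet: MD level 7 in the top three bits (0x07 << 5 = 0xe0),
    #                 version 0 in the low five bits
    #    then opcode 47 (0x2f), flags 0 (0x00) and TLV offset 32 (0x20)
    # The test prepends the CFM EtherType, giving pkt="89 02 e0:2f:00:20".
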
diff --git a/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh b/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh
new file mode 100755
index 000000000000..e22c2d28b6eb
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh
@@ -0,0 +1,350 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +-----------------------+ +----------------------+
+# | H1 (vrf) | | H2 (vrf) |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# | | 2001:db8:1::1/64 | | 2001:db8:1::2/64 | |
+# +----|------------------+ +------------------|---+
+# | |
+# +----|-------------------------------------------------------------------|---+
+# | SW | | |
+# | +-|-------------------------------------------------------------------|-+ |
+# | | + $swp1 BR $swp2 + | |
+# | +-----------------------------------------------------------------------+ |
+# +----------------------------------------------------------------------------+
+
+ALL_TESTS="
+ test_l2_miss_unicast
+ test_l2_miss_multicast
+ test_l2_miss_ll_multicast
+ test_l2_miss_broadcast
+"
+
+NUM_NETIFS=4
+source lib.sh
+source tc_common.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/28 2001:db8:1::2/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 192.0.2.2/28 2001:db8:1::2/64
+}
+
+switch_create()
+{
+ ip link add name br1 up type bridge
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+
+ tc qdisc add dev $swp2 clsact
+}
+
+switch_destroy()
+{
+ tc qdisc del dev $swp2 clsact
+
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+ ip link del dev br1
+}
+
+test_l2_miss_unicast()
+{
+ local dmac=00:01:02:03:04:05
+ local dip=192.0.2.2
+ local sip=192.0.2.1
+
+ RET=0
+
+ # Unknown unicast.
+ tc filter add dev $swp2 egress protocol ipv4 handle 101 pref 1 \
+ flower indev $swp1 l2_miss 1 dst_mac $dmac src_ip $sip \
+ dst_ip $dip action pass
+ # Known unicast.
+ tc filter add dev $swp2 egress protocol ipv4 handle 102 pref 1 \
+ flower indev $swp1 l2_miss 0 dst_mac $dmac src_ip $sip \
+ dst_ip $dip action pass
+
+ # Before adding FDB entry.
+ $MZ $h1 -a own -b $dmac -t ip -A $sip -B $dip -c 1 -p 100 -q
+
+ tc_check_packets "dev $swp2 egress" 101 1
+ check_err $? "Unknown unicast filter was not hit before adding FDB entry"
+
+ tc_check_packets "dev $swp2 egress" 102 0
+ check_err $? "Known unicast filter was hit before adding FDB entry"
+
+ # Adding FDB entry.
+ bridge fdb replace $dmac dev $swp2 master static
+
+ $MZ $h1 -a own -b $dmac -t ip -A $sip -B $dip -c 1 -p 100 -q
+
+ tc_check_packets "dev $swp2 egress" 101 1
+ check_err $? "Unknown unicast filter was hit after adding FDB entry"
+
+ tc_check_packets "dev $swp2 egress" 102 1
+ check_err $? "Known unicast filter was not hit after adding FDB entry"
+
+ # Deleting FDB entry.
+ bridge fdb del $dmac dev $swp2 master static
+
+ $MZ $h1 -a own -b $dmac -t ip -A $sip -B $dip -c 1 -p 100 -q
+
+ tc_check_packets "dev $swp2 egress" 101 2
+ check_err $? "Unknown unicast filter was not hit after deleting FDB entry"
+
+ tc_check_packets "dev $swp2 egress" 102 1
+ check_err $? "Known unicast filter was hit after deleting FDB entry"
+
+ tc filter del dev $swp2 egress protocol ipv4 pref 1 handle 102 flower
+ tc filter del dev $swp2 egress protocol ipv4 pref 1 handle 101 flower
+
+ log_test "L2 miss - Unicast"
+}
+
+test_l2_miss_multicast_common()
+{
+ local proto=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local mode=$1; shift
+ local name=$1; shift
+
+ RET=0
+
+ # Unregistered multicast.
+ tc filter add dev $swp2 egress protocol $proto handle 101 pref 1 \
+ flower indev $swp1 l2_miss 1 src_ip $sip dst_ip $dip \
+ action pass
+ # Registered multicast.
+ tc filter add dev $swp2 egress protocol $proto handle 102 pref 1 \
+ flower indev $swp1 l2_miss 0 src_ip $sip dst_ip $dip \
+ action pass
+
+ # Before adding MDB entry.
+ $MZ $mode $h1 -t ip -A $sip -B $dip -c 1 -p 100 -q
+
+ tc_check_packets "dev $swp2 egress" 101 1
+ check_err $? "Unregistered multicast filter was not hit before adding MDB entry"
+
+ tc_check_packets "dev $swp2 egress" 102 0
+ check_err $? "Registered multicast filter was hit before adding MDB entry"
+
+ # Adding MDB entry.
+ bridge mdb replace dev br1 port $swp2 grp $dip permanent
+
+ $MZ $mode $h1 -t ip -A $sip -B $dip -c 1 -p 100 -q
+
+ tc_check_packets "dev $swp2 egress" 101 1
+ check_err $? "Unregistered multicast filter was hit after adding MDB entry"
+
+ tc_check_packets "dev $swp2 egress" 102 1
+ check_err $? "Registered multicast filter was not hit after adding MDB entry"
+
+ # Deleting MDB entry.
+ bridge mdb del dev br1 port $swp2 grp $dip
+
+ $MZ $mode $h1 -t ip -A $sip -B $dip -c 1 -p 100 -q
+
+ tc_check_packets "dev $swp2 egress" 101 2
+ check_err $? "Unregistered multicast filter was not hit after deleting MDB entry"
+
+ tc_check_packets "dev $swp2 egress" 102 1
+ check_err $? "Registered multicast filter was hit after deleting MDB entry"
+
+ tc filter del dev $swp2 egress protocol $proto pref 1 handle 102 flower
+ tc filter del dev $swp2 egress protocol $proto pref 1 handle 101 flower
+
+ log_test "L2 miss - Multicast ($name)"
+}
+
+test_l2_miss_multicast_ipv4()
+{
+ local proto="ipv4"
+ local sip=192.0.2.1
+ local dip=239.1.1.1
+ local mode="-4"
+ local name="IPv4"
+
+ test_l2_miss_multicast_common $proto $sip $dip $mode $name
+}
+
+test_l2_miss_multicast_ipv6()
+{
+ local proto="ipv6"
+ local sip=2001:db8:1::1
+ local dip=ff0e::1
+ local mode="-6"
+ local name="IPv6"
+
+ test_l2_miss_multicast_common $proto $sip $dip $mode $name
+}
+
+test_l2_miss_multicast()
+{
+ # Configure $swp2 as a multicast router port so that it will forward
+ # both registered and unregistered multicast traffic.
+ bridge link set dev $swp2 mcast_router 2
+
+ # Forwarding according to MDB entries only takes place when the bridge
+ # detects that there is a valid querier in the network. Set the bridge
+ # as the querier and assign it a valid IPv6 link-local address to be
+ # used as the source address for MLD queries.
+ ip link set dev br1 type bridge mcast_querier 1
+ ip -6 address add fe80::1/64 nodad dev br1
+ # Wait the default Query Response Interval (10 seconds) for the bridge
+ # to determine that there are no other queriers in the network.
+ sleep 10
+
+ test_l2_miss_multicast_ipv4
+ test_l2_miss_multicast_ipv6
+
+ ip -6 address del fe80::1/64 dev br1
+ ip link set dev br1 type bridge mcast_querier 0
+ bridge link set dev $swp2 mcast_router 1
+}
+
+test_l2_miss_multicast_common2()
+{
+ local name=$1; shift
+ local dmac=$1; shift
+ local dip=224.0.0.1
+ local sip=192.0.2.1
+
+}
+
+test_l2_miss_ll_multicast_common()
+{
+ local proto=$1; shift
+ local dmac=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local mode=$1; shift
+ local name=$1; shift
+
+ RET=0
+
+ tc filter add dev $swp2 egress protocol $proto handle 101 pref 1 \
+ flower indev $swp1 l2_miss 1 dst_mac $dmac src_ip $sip \
+ dst_ip $dip action pass
+
+ $MZ $mode $h1 -a own -b $dmac -t ip -A $sip -B $dip -c 1 -p 100 -q
+
+ tc_check_packets "dev $swp2 egress" 101 1
+ check_err $? "Filter was not hit"
+
+ tc filter del dev $swp2 egress protocol $proto pref 1 handle 101 flower
+
+ log_test "L2 miss - Link-local multicast ($name)"
+}
+
+test_l2_miss_ll_multicast_ipv4()
+{
+ local proto=ipv4
+ local dmac=01:00:5e:00:00:01
+ local sip=192.0.2.1
+ local dip=224.0.0.1
+ local mode="-4"
+ local name="IPv4"
+
+ test_l2_miss_ll_multicast_common $proto $dmac $sip $dip $mode $name
+}
+
+test_l2_miss_ll_multicast_ipv6()
+{
+ local proto=ipv6
+ local dmac=33:33:00:00:00:01
+ local sip=2001:db8:1::1
+ local dip=ff02::1
+ local mode="-6"
+ local name="IPv6"
+
+ test_l2_miss_ll_multicast_common $proto $dmac $sip $dip $mode $name
+}
+
+test_l2_miss_ll_multicast()
+{
+ test_l2_miss_ll_multicast_ipv4
+ test_l2_miss_ll_multicast_ipv6
+}
+
+test_l2_miss_broadcast()
+{
+ local dmac=ff:ff:ff:ff:ff:ff
+ local smac=00:01:02:03:04:05
+
+ RET=0
+
+ tc filter add dev $swp2 egress protocol all handle 101 pref 1 \
+ flower l2_miss 1 dst_mac $dmac src_mac $smac \
+ action pass
+ tc filter add dev $swp2 egress protocol all handle 102 pref 1 \
+ flower l2_miss 0 dst_mac $dmac src_mac $smac \
+ action pass
+
+ $MZ $h1 -a $smac -b $dmac -c 1 -p 100 -q
+
+ tc_check_packets "dev $swp2 egress" 101 0
+ check_err $? "L2 miss filter was hit when should not"
+
+ tc_check_packets "dev $swp2 egress" 102 1
+ check_err $? "L2 no miss filter was not hit when should"
+
+ tc filter del dev $swp2 egress protocol all pref 1 handle 102 flower
+ tc filter del dev $swp2 egress protocol all pref 1 handle 101 flower
+
+ log_test "L2 miss - Broadcast"
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
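
All of the l2_miss cases above follow the same structure: two otherwise identical flower filters, one with l2_miss 1 and one with l2_miss 0, are installed on $swp2 egress, a packet is sent from $h1, and the test checks which counter advanced. A condensed sketch of that pairing, reusing the unicast test's variables (illustrative only, not an additional hunk of the patch):

    # Counter expected to advance while the FDB entry for $dmac is absent.
    tc filter add dev $swp2 egress protocol ipv4 pref 1 handle 101 \
            flower indev $swp1 l2_miss 1 dst_mac $dmac action pass
    # Counter expected to advance once "bridge fdb replace $dmac dev $swp2
    # master static" has installed the entry.
    tc filter add dev $swp2 egress protocol ipv4 pref 1 handle 102 \
            flower indev $swp1 l2_miss 0 dst_mac $dmac action pass
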
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 773dd770a567..13561e5bc0cd 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -809,7 +809,7 @@ run_tests_disconnect()
cat $cin $cin $cin > "$cin".disconnect
- # force do_transfer to cope with the multiple tranmissions
+ # force do_transfer to cope with the multiple transmissions
sin="$cin.disconnect"
cin="$cin.disconnect"
cin_disconnect="$old_cin"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 0ae8cafde439..e6c9d5451c5b 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -39,6 +39,7 @@ evts_ns1=""
evts_ns2=""
evts_ns1_pid=0
evts_ns2_pid=0
+stats_dumped=0
declare -A all_tests
declare -a only_tests_ids
@@ -49,6 +50,10 @@ TEST_NAME=""
nr_blank=40
export FAILING_LINKS=""
+export test_linkfail=0
+export addr_nr_ns1=0
+export addr_nr_ns2=0
+export sflags=""
# generated using "nfbpf_compile '(ip && (ip[54] & 0xf0) == 0x30) ||
# (ip6 && (ip6[74] & 0xf0) == 0x30)'"
@@ -92,6 +97,7 @@ init_partial()
fi
done
+ stats_dumped=0
check_invert=0
validate_checksum=$checksum
FAILING_LINKS=""
@@ -434,6 +440,9 @@ fail_test()
{
ret=1
failed_tests[${TEST_COUNT}]="${TEST_NAME}"
+
+ [ "${stats_dumped}" = 0 ] && dump_stats
+ stats_dumped=1
}
get_failed_tests_ids()
@@ -584,6 +593,26 @@ wait_rm_addr()
done
}
+rm_sf_count()
+{
+ get_counter "${1}" "MPTcpExtRmSubflow"
+}
+
+# $1: ns, $2: old rm_sf counter in $ns
+wait_rm_sf()
+{
+ local ns="${1}"
+ local old_cnt="${2}"
+ local cnt
+
+ local i
+ for i in $(seq 10); do
+ cnt=$(rm_sf_count ${ns})
+ [ "$cnt" = "${old_cnt}" ] || break
+ sleep 0.1
+ done
+}
+
wait_mpj()
{
local ns="${1}"
@@ -793,135 +822,11 @@ pm_nl_check_endpoint()
fi
}
-do_transfer()
+pm_nl_set_endpoint()
{
local listener_ns="$1"
local connector_ns="$2"
- local cl_proto="$3"
- local srv_proto="$4"
- local connect_addr="$5"
- local test_link_fail="$6"
- local addr_nr_ns1="$7"
- local addr_nr_ns2="$8"
- local speed="$9"
- local sflags="${10}"
-
- local port=$((10000 + TEST_COUNT - 1))
- local cappid
- local userspace_pm=0
-
- :> "$cout"
- :> "$sout"
- :> "$capout"
-
- if [ $capture -eq 1 ]; then
- local capuser
- if [ -z $SUDO_USER ] ; then
- capuser=""
- else
- capuser="-Z $SUDO_USER"
- fi
-
- capfile=$(printf "mp_join-%02u-%s.pcap" "$TEST_COUNT" "${listener_ns}")
-
- echo "Capturing traffic for test $TEST_COUNT into $capfile"
- ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
- cappid=$!
-
- sleep 1
- fi
-
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat -n
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat -n
-
- local extra_args
- if [ $speed = "fast" ]; then
- extra_args="-j"
- elif [ $speed = "slow" ]; then
- extra_args="-r 50"
- elif [[ $speed = "speed_"* ]]; then
- extra_args="-r ${speed:6}"
- fi
-
- if [[ "${addr_nr_ns1}" = "userspace_"* ]]; then
- userspace_pm=1
- addr_nr_ns1=${addr_nr_ns1:10}
- fi
-
- local flags="subflow"
- local extra_cl_args=""
- local extra_srv_args=""
- local trunc_size=""
- if [[ "${addr_nr_ns2}" = "fastclose_"* ]]; then
- if [ ${test_link_fail} -le 1 ]; then
- echo "fastclose tests need test_link_fail argument"
- fail_test
- return 1
- fi
-
- # disconnect
- trunc_size=${test_link_fail}
- local side=${addr_nr_ns2:10}
-
- if [ ${side} = "client" ]; then
- extra_cl_args="-f ${test_link_fail}"
- extra_srv_args="-f -1"
- elif [ ${side} = "server" ]; then
- extra_srv_args="-f ${test_link_fail}"
- extra_cl_args="-f -1"
- else
- echo "wrong/unknown fastclose spec ${side}"
- fail_test
- return 1
- fi
- addr_nr_ns2=0
- elif [[ "${addr_nr_ns2}" = "userspace_"* ]]; then
- userspace_pm=1
- addr_nr_ns2=${addr_nr_ns2:10}
- elif [[ "${addr_nr_ns2}" = "fullmesh_"* ]]; then
- flags="${flags},fullmesh"
- addr_nr_ns2=${addr_nr_ns2:9}
- fi
-
- extra_srv_args="$extra_args $extra_srv_args"
- if [ "$test_link_fail" -gt 1 ];then
- timeout ${timeout_test} \
- ip netns exec ${listener_ns} \
- ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
- $extra_srv_args "::" < "$sinfail" > "$sout" &
- else
- timeout ${timeout_test} \
- ip netns exec ${listener_ns} \
- ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
- $extra_srv_args "::" < "$sin" > "$sout" &
- fi
- local spid=$!
-
- wait_local_port_listen "${listener_ns}" "${port}"
-
- extra_cl_args="$extra_args $extra_cl_args"
- if [ "$test_link_fail" -eq 0 ];then
- timeout ${timeout_test} \
- ip netns exec ${connector_ns} \
- ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
- $extra_cl_args $connect_addr < "$cin" > "$cout" &
- elif [ "$test_link_fail" -eq 1 ] || [ "$test_link_fail" -eq 2 ];then
- ( cat "$cinfail" ; sleep 2; link_failure $listener_ns ; cat "$cinfail" ) | \
- tee "$cinsent" | \
- timeout ${timeout_test} \
- ip netns exec ${connector_ns} \
- ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
- $extra_cl_args $connect_addr > "$cout" &
- else
- tee "$cinsent" < "$cinfail" | \
- timeout ${timeout_test} \
- ip netns exec ${connector_ns} \
- ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
- $extra_cl_args $connect_addr > "$cout" &
- fi
- local cpid=$!
+ local connect_addr="$3"
# let the mptcp subflow be established in background before
# do endpoint manipulation
@@ -933,7 +838,6 @@ do_transfer()
local counter=2
local add_nr_ns1=${addr_nr_ns1}
local id=10
- local tk
while [ $add_nr_ns1 -gt 0 ]; do
local addr
if is_v6 "${connect_addr}"; then
@@ -941,24 +845,7 @@ do_transfer()
else
addr="10.0.$counter.1"
fi
- if [ $userspace_pm -eq 0 ]; then
- pm_nl_add_endpoint $ns1 $addr flags signal
- else
- tk=$(grep "type:1," "$evts_ns1" |
- sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
- ip netns exec ${listener_ns} ./pm_nl_ctl ann $addr token $tk id $id
- sleep 1
- sp=$(grep "type:10" "$evts_ns1" |
- sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
- da=$(grep "type:10" "$evts_ns1" |
- sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
- dp=$(grep "type:10" "$evts_ns1" |
- sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
- ip netns exec ${listener_ns} ./pm_nl_ctl rem token $tk id $id
- ip netns exec ${listener_ns} ./pm_nl_ctl dsf lip "::ffff:$addr" \
- lport $sp rip $da rport $dp token $tk
- fi
-
+ pm_nl_add_endpoint $ns1 $addr flags signal
counter=$((counter + 1))
add_nr_ns1=$((add_nr_ns1 - 1))
id=$((id + 1))
@@ -1003,7 +890,6 @@ do_transfer()
local add_nr_ns2=${addr_nr_ns2}
local counter=3
local id=20
- local tk da dp sp
while [ $add_nr_ns2 -gt 0 ]; do
local addr
if is_v6 "${connect_addr}"; then
@@ -1011,21 +897,7 @@ do_transfer()
else
addr="10.0.$counter.2"
fi
- if [ $userspace_pm -eq 0 ]; then
- pm_nl_add_endpoint $ns2 $addr flags $flags
- else
- tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
- da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
- dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
- ip netns exec ${connector_ns} ./pm_nl_ctl csf lip $addr lid $id \
- rip $da rport $dp token $tk
- sleep 1
- sp=$(grep "type:10" "$evts_ns2" |
- sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
- ip netns exec ${connector_ns} ./pm_nl_ctl rem token $tk id $id
- ip netns exec ${connector_ns} ./pm_nl_ctl dsf lip $addr lport $sp \
- rip $da rport $dp token $tk
- fi
+ pm_nl_add_endpoint $ns2 $addr flags $flags
counter=$((counter + 1))
add_nr_ns2=$((add_nr_ns2 - 1))
id=$((id + 1))
@@ -1094,6 +966,126 @@ do_transfer()
done
done
fi
+}
+
+do_transfer()
+{
+ local listener_ns="$1"
+ local connector_ns="$2"
+ local cl_proto="$3"
+ local srv_proto="$4"
+ local connect_addr="$5"
+ local speed="$6"
+
+ local port=$((10000 + TEST_COUNT - 1))
+ local cappid
+
+ :> "$cout"
+ :> "$sout"
+ :> "$capout"
+
+ if [ $capture -eq 1 ]; then
+ local capuser
+ if [ -z $SUDO_USER ] ; then
+ capuser=""
+ else
+ capuser="-Z $SUDO_USER"
+ fi
+
+ capfile=$(printf "mp_join-%02u-%s.pcap" "$TEST_COUNT" "${listener_ns}")
+
+ echo "Capturing traffic for test $TEST_COUNT into $capfile"
+ ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
+ cappid=$!
+
+ sleep 1
+ fi
+
+ NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
+ nstat -n
+ NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
+ nstat -n
+
+ local extra_args
+ if [ $speed = "fast" ]; then
+ extra_args="-j"
+ elif [ $speed = "slow" ]; then
+ extra_args="-r 50"
+ elif [[ $speed = "speed_"* ]]; then
+ extra_args="-r ${speed:6}"
+ fi
+
+ local flags="subflow"
+ local extra_cl_args=""
+ local extra_srv_args=""
+ local trunc_size=""
+ if [[ "${addr_nr_ns2}" = "fastclose_"* ]]; then
+ if [ ${test_linkfail} -le 1 ]; then
+ echo "fastclose tests need test_linkfail argument"
+ fail_test
+ return 1
+ fi
+
+ # disconnect
+ trunc_size=${test_linkfail}
+ local side=${addr_nr_ns2:10}
+
+ if [ ${side} = "client" ]; then
+ extra_cl_args="-f ${test_linkfail}"
+ extra_srv_args="-f -1"
+ elif [ ${side} = "server" ]; then
+ extra_srv_args="-f ${test_linkfail}"
+ extra_cl_args="-f -1"
+ else
+ echo "wrong/unknown fastclose spec ${side}"
+ fail_test
+ return 1
+ fi
+ addr_nr_ns2=0
+ elif [[ "${addr_nr_ns2}" = "fullmesh_"* ]]; then
+ flags="${flags},fullmesh"
+ addr_nr_ns2=${addr_nr_ns2:9}
+ fi
+
+ extra_srv_args="$extra_args $extra_srv_args"
+ if [ "$test_linkfail" -gt 1 ];then
+ timeout ${timeout_test} \
+ ip netns exec ${listener_ns} \
+ ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
+ $extra_srv_args "::" < "$sinfail" > "$sout" &
+ else
+ timeout ${timeout_test} \
+ ip netns exec ${listener_ns} \
+ ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
+ $extra_srv_args "::" < "$sin" > "$sout" &
+ fi
+ local spid=$!
+
+ wait_local_port_listen "${listener_ns}" "${port}"
+
+ extra_cl_args="$extra_args $extra_cl_args"
+ if [ "$test_linkfail" -eq 0 ];then
+ timeout ${timeout_test} \
+ ip netns exec ${connector_ns} \
+ ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $extra_cl_args $connect_addr < "$cin" > "$cout" &
+ elif [ "$test_linkfail" -eq 1 ] || [ "$test_linkfail" -eq 2 ];then
+ ( cat "$cinfail" ; sleep 2; link_failure $listener_ns ; cat "$cinfail" ) | \
+ tee "$cinsent" | \
+ timeout ${timeout_test} \
+ ip netns exec ${connector_ns} \
+ ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $extra_cl_args $connect_addr > "$cout" &
+ else
+ tee "$cinsent" < "$cinfail" | \
+ timeout ${timeout_test} \
+ ip netns exec ${connector_ns} \
+ ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $extra_cl_args $connect_addr > "$cout" &
+ fi
+ local cpid=$!
+
+ pm_nl_set_endpoint $listener_ns $connector_ns $connect_addr
wait $cpid
local retc=$?
@@ -1124,13 +1116,13 @@ do_transfer()
return 1
fi
- if [ "$test_link_fail" -gt 1 ];then
+ if [ "$test_linkfail" -gt 1 ];then
check_transfer $sinfail $cout "file received by client" $trunc_size
else
check_transfer $sin $cout "file received by client" $trunc_size
fi
retc=$?
- if [ "$test_link_fail" -eq 0 ];then
+ if [ "$test_linkfail" -eq 0 ];then
check_transfer $cin $sout "file received by server" $trunc_size
else
check_transfer $cinsent $sout "file received by server" $trunc_size
@@ -1163,11 +1155,7 @@ run_tests()
local listener_ns="$1"
local connector_ns="$2"
local connect_addr="$3"
- local test_linkfail="${4:-0}"
- local addr_nr_ns1="${5:-0}"
- local addr_nr_ns2="${6:-0}"
- local speed="${7:-fast}"
- local sflags="${8:-""}"
+ local speed="${4:-fast}"
local size
@@ -1211,8 +1199,7 @@ run_tests()
make_file "$sinfail" "server" $size
fi
- do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr} \
- ${test_linkfail} ${addr_nr_ns1} ${addr_nr_ns2} ${speed} ${sflags}
+ do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr} ${speed}
}
dump_stats()
@@ -1228,7 +1215,6 @@ chk_csum_nr()
local csum_ns1=${1:-0}
local csum_ns2=${2:-0}
local count
- local dump_stats
local extra_msg=""
local allow_multi_errors_ns1=0
local allow_multi_errors_ns2=0
@@ -1253,7 +1239,6 @@ chk_csum_nr()
{ [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
echo "[fail] got $count data checksum error[s] expected $csum_ns1"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1268,11 +1253,9 @@ chk_csum_nr()
{ [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
echo "[fail] got $count data checksum error[s] expected $csum_ns2"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
- [ "${dump_stats}" = 1 ] && dump_stats
echo "$extra_msg"
}
@@ -1283,7 +1266,6 @@ chk_fail_nr()
local fail_rx=$2
local ns_invert=${3:-""}
local count
- local dump_stats
local ns_tx=$ns1
local ns_rx=$ns2
local extra_msg=""
@@ -1316,7 +1298,6 @@ chk_fail_nr()
{ [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
echo "[fail] got $count MP_FAIL[s] TX expected $fail_tx"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1332,13 +1313,10 @@ chk_fail_nr()
{ [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
echo "[fail] got $count MP_FAIL[s] RX expected $fail_rx"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
- [ "${dump_stats}" = 1 ] && dump_stats
-
echo "$extra_msg"
}
@@ -1348,7 +1326,6 @@ chk_fclose_nr()
local fclose_rx=$2
local ns_invert=$3
local count
- local dump_stats
local ns_tx=$ns2
local ns_rx=$ns1
local extra_msg=" "
@@ -1367,7 +1344,6 @@ chk_fclose_nr()
extra_msg="$extra_msg,tx=$count"
echo "[fail] got $count MP_FASTCLOSE[s] TX expected $fclose_tx"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1380,13 +1356,10 @@ chk_fclose_nr()
extra_msg="$extra_msg,rx=$count"
echo "[fail] got $count MP_FASTCLOSE[s] RX expected $fclose_rx"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
- [ "${dump_stats}" = 1 ] && dump_stats
-
echo "$extra_msg"
}
@@ -1396,7 +1369,6 @@ chk_rst_nr()
local rst_rx=$2
local ns_invert=${3:-""}
local count
- local dump_stats
local ns_tx=$ns1
local ns_rx=$ns2
local extra_msg=""
@@ -1414,7 +1386,6 @@ chk_rst_nr()
elif [ $count -lt $rst_tx ]; then
echo "[fail] got $count MP_RST[s] TX expected $rst_tx"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1426,13 +1397,10 @@ chk_rst_nr()
elif [ "$count" -lt "$rst_rx" ]; then
echo "[fail] got $count MP_RST[s] RX expected $rst_rx"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
- [ "${dump_stats}" = 1 ] && dump_stats
-
echo "$extra_msg"
}
@@ -1441,7 +1409,6 @@ chk_infi_nr()
local infi_tx=$1
local infi_rx=$2
local count
- local dump_stats
printf "%-${nr_blank}s %s" " " "itx"
count=$(get_counter ${ns2} "MPTcpExtInfiniteMapTx")
@@ -1450,7 +1417,6 @@ chk_infi_nr()
elif [ "$count" != "$infi_tx" ]; then
echo "[fail] got $count infinite map[s] TX expected $infi_tx"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1462,12 +1428,9 @@ chk_infi_nr()
elif [ "$count" != "$infi_rx" ]; then
echo "[fail] got $count infinite map[s] RX expected $infi_rx"
fail_test
- dump_stats=1
else
echo "[ ok ]"
fi
-
- [ "${dump_stats}" = 1 ] && dump_stats
}
chk_join_nr()
@@ -1482,7 +1445,6 @@ chk_join_nr()
local infi_nr=${8:-0}
local corrupted_pkts=${9:-0}
local count
- local dump_stats
local with_cookie
local title="${TEST_NAME}"
@@ -1497,7 +1459,6 @@ chk_join_nr()
elif [ "$count" != "$syn_nr" ]; then
echo "[fail] got $count JOIN[s] syn expected $syn_nr"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1516,7 +1477,6 @@ chk_join_nr()
else
echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
fail_test
- dump_stats=1
fi
else
echo -n "[ ok ]"
@@ -1529,11 +1489,9 @@ chk_join_nr()
elif [ "$count" != "$ack_nr" ]; then
echo "[fail] got $count JOIN[s] ack expected $ack_nr"
fail_test
- dump_stats=1
else
echo "[ ok ]"
fi
- [ "${dump_stats}" = 1 ] && dump_stats
if [ $validate_checksum -eq 1 ]; then
chk_csum_nr $csum_ns1 $csum_ns2
chk_fail_nr $fail_nr $fail_nr
@@ -1593,7 +1551,6 @@ chk_add_nr()
local mis_syn_nr=${7:-0}
local mis_ack_nr=${8:-0}
local count
- local dump_stats
local timeout
timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
@@ -1607,7 +1564,6 @@ chk_add_nr()
elif [ "$count" != "$add_nr" ] && { [ "$timeout" -gt 1 ] || [ "$count" -lt "$add_nr" ]; }; then
echo "[fail] got $count ADD_ADDR[s] expected $add_nr"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1619,7 +1575,6 @@ chk_add_nr()
elif [ "$count" != "$echo_nr" ]; then
echo "[fail] got $count ADD_ADDR echo[s] expected $echo_nr"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1632,7 +1587,6 @@ chk_add_nr()
elif [ "$count" != "$port_nr" ]; then
echo "[fail] got $count ADD_ADDR[s] with a port-number expected $port_nr"
fail_test
- dump_stats=1
else
echo "[ ok ]"
fi
@@ -1645,7 +1599,6 @@ chk_add_nr()
echo "[fail] got $count JOIN[s] syn with a different \
port-number expected $syn_nr"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1658,7 +1611,6 @@ chk_add_nr()
echo "[fail] got $count JOIN[s] synack with a different \
port-number expected $syn_ack_nr"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1671,7 +1623,6 @@ chk_add_nr()
echo "[fail] got $count JOIN[s] ack with a different \
port-number expected $ack_nr"
fail_test
- dump_stats=1
else
echo "[ ok ]"
fi
@@ -1684,7 +1635,6 @@ chk_add_nr()
echo "[fail] got $count JOIN[s] syn with a mismatched \
port-number expected $mis_syn_nr"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1697,15 +1647,46 @@ chk_add_nr()
echo "[fail] got $count JOIN[s] ack with a mismatched \
port-number expected $mis_ack_nr"
fail_test
- dump_stats=1
else
echo "[ ok ]"
fi
else
echo ""
fi
+}
- [ "${dump_stats}" = 1 ] && dump_stats
+chk_add_tx_nr()
+{
+ local add_tx_nr=$1
+ local echo_tx_nr=$2
+ local timeout
+ local count
+
+ timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
+
+ printf "%-${nr_blank}s %s" " " "add TX"
+ count=$(get_counter ${ns1} "MPTcpExtAddAddrTx")
+ if [ -z "$count" ]; then
+ echo -n "[skip]"
+ # if the test configured a short timeout, tolerate more ADD_ADDR options
+ # than expected, due to retransmissions
+ elif [ "$count" != "$add_tx_nr" ] && { [ "$timeout" -gt 1 ] || [ "$count" -lt "$add_tx_nr" ]; }; then
+ echo "[fail] got $count ADD_ADDR[s] TX, expected $add_tx_nr"
+ fail_test
+ else
+ echo -n "[ ok ]"
+ fi
+
+ echo -n " - echo TX "
+ count=$(get_counter ${ns2} "MPTcpExtEchoAddTx")
+ if [ -z "$count" ]; then
+ echo "[skip]"
+ elif [ "$count" != "$echo_tx_nr" ]; then
+ echo "[fail] got $count ADD_ADDR echo[s] TX, expected $echo_tx_nr"
+ fail_test
+ else
+ echo "[ ok ]"
+ fi
}
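The new TX-side helper complements the receive-side counters checked by chk_add_nr; a typical pairing, as used in the signal address test further below (counts are illustrative):

	chk_add_tx_nr 1 1	# ADD_ADDR sent by ns1, echo sent by ns2
	chk_add_nr 1 1		# the same options, counted on the receive side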
chk_rm_nr()
@@ -1715,7 +1696,6 @@ chk_rm_nr()
local invert
local simult
local count
- local dump_stats
local addr_ns=$ns1
local subflow_ns=$ns2
local extra_msg=""
@@ -1743,7 +1723,6 @@ chk_rm_nr()
elif [ "$count" != "$rm_addr_nr" ]; then
echo "[fail] got $count RM_ADDR[s] expected $rm_addr_nr"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1767,27 +1746,38 @@ chk_rm_nr()
else
echo "[fail] got $count RM_SUBFLOW[s] expected in range [$rm_subflow_nr:$((rm_subflow_nr*2))]"
fail_test
- dump_stats=1
fi
elif [ "$count" != "$rm_subflow_nr" ]; then
echo "[fail] got $count RM_SUBFLOW[s] expected $rm_subflow_nr"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
- [ "${dump_stats}" = 1 ] && dump_stats
-
echo "$extra_msg"
}
+chk_rm_tx_nr()
+{
+ local rm_addr_tx_nr=$1
+
+ printf "%-${nr_blank}s %s" " " "rm TX "
+ count=$(get_counter ${ns2} "MPTcpExtRmAddrTx")
+ if [ -z "$count" ]; then
+ echo "[skip]"
+ elif [ "$count" != "$rm_addr_tx_nr" ]; then
+ echo "[fail] got $count RM_ADDR[s] expected $rm_addr_tx_nr"
+ fail_test
+ else
+ echo "[ ok ]"
+ fi
+}
+
chk_prio_nr()
{
local mp_prio_nr_tx=$1
local mp_prio_nr_rx=$2
local count
- local dump_stats
printf "%-${nr_blank}s %s" " " "ptx"
count=$(get_counter ${ns1} "MPTcpExtMPPrioTx")
@@ -1796,7 +1786,6 @@ chk_prio_nr()
elif [ "$count" != "$mp_prio_nr_tx" ]; then
echo "[fail] got $count MP_PRIO[s] TX expected $mp_prio_nr_tx"
fail_test
- dump_stats=1
else
echo -n "[ ok ]"
fi
@@ -1808,12 +1797,9 @@ chk_prio_nr()
elif [ "$count" != "$mp_prio_nr_rx" ]; then
echo "[fail] got $count MP_PRIO[s] RX expected $mp_prio_nr_rx"
fail_test
- dump_stats=1
else
echo "[ ok ]"
fi
-
- [ "${dump_stats}" = 1 ] && dump_stats
}
chk_subflow_nr()
@@ -1845,37 +1831,31 @@ chk_subflow_nr()
ss -N $ns1 -tOni
ss -N $ns1 -tOni | grep token
ip -n $ns1 mptcp endpoint
- dump_stats
fi
}
chk_mptcp_info()
{
- local nr_info=$1
- local info
+ local info1=$1
+ local exp1=$2
+ local info2=$3
+ local exp2=$4
local cnt1
local cnt2
local dump_stats
- if [[ $nr_info = "subflows_"* ]]; then
- info="subflows"
- nr_info=${nr_info:9}
- else
- echo "[fail] unsupported argument: $nr_info"
- fail_test
- return 1
- fi
-
- printf "%-${nr_blank}s %-30s" " " "mptcp_info $info=$nr_info"
+ printf "%-${nr_blank}s %-30s" " " "mptcp_info $info1:$info2=$exp1:$exp2"
- cnt1=$(ss -N $ns1 -inmHM | grep "$info:" |
- sed -n 's/.*\('"$info"':\)\([[:digit:]]*\).*$/\2/p;q')
+ cnt1=$(ss -N $ns1 -inmHM | grep "$info1:" |
+ sed -n 's/.*\('"$info1"':\)\([[:digit:]]*\).*$/\2/p;q')
+ cnt2=$(ss -N $ns2 -inmHM | grep "$info2:" |
+ sed -n 's/.*\('"$info2"':\)\([[:digit:]]*\).*$/\2/p;q')
+ # 'ss' only displays active connections and counters that are not 0.
[ -z "$cnt1" ] && cnt1=0
- cnt2=$(ss -N $ns2 -inmHM | grep "$info:" |
- sed -n 's/.*\('"$info"':\)\([[:digit:]]*\).*$/\2/p;q')
[ -z "$cnt2" ] && cnt2=0
- if [ "$cnt1" != "$nr_info" ] || [ "$cnt2" != "$nr_info" ]; then
- echo "[fail] got $cnt1:$cnt2 $info expected $nr_info"
+
+ if [ "$cnt1" != "$exp1" ] || [ "$cnt2" != "$exp2" ]; then
+ echo "[fail] got $cnt1:$cnt2 $info1:$info2 expected $exp1:$exp2"
fail_test
dump_stats=1
else
@@ -1885,7 +1865,6 @@ chk_mptcp_info()
if [ "$dump_stats" = 1 ]; then
ss -N $ns1 -inmHM
ss -N $ns2 -inmHM
- dump_stats
fi
}
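chk_mptcp_info now takes an (info, expected) pair per namespace, read via ss from ns1 and ns2 respectively. A short sketch of how the four-argument form reads, using invocations that appear later in this patch:

	# counter 'subflows' expected to be 1 on ns1 and 1 on ns2
	chk_mptcp_info subflows 1 subflows 1
	# different counters per side are possible as well
	chk_mptcp_info add_addr_signal 1 add_addr_accepted 1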
@@ -2009,7 +1988,7 @@ subflows_error_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 0 0 0
fi
@@ -2020,7 +1999,7 @@ subflows_error_tests()
pm_nl_set_limits $ns2 0 2
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
fi
@@ -2031,7 +2010,7 @@ subflows_error_tests()
pm_nl_set_limits $ns2 0 2
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
fi
@@ -2043,7 +2022,7 @@ subflows_error_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow &
+ run_tests $ns1 $ns2 10.0.1.1 slow &
# mpj subflow will be in TW after the reset
wait_attempt_fail $ns2
@@ -2063,6 +2042,7 @@ signal_address_tests()
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
+ chk_add_tx_nr 1 1
chk_add_nr 1 1
fi
@@ -2141,7 +2121,7 @@ signal_address_tests()
# the peer could possibly miss some addr notification, allow retransmission
ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ run_tests $ns1 $ns2 10.0.1.1 slow
# It is not directly linked to the commit introducing this
# symbol but for the parent one which is linked anyway.
@@ -2173,7 +2153,8 @@ link_failure_tests()
pm_nl_set_limits $ns2 1 3
pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow
pm_nl_add_endpoint $ns2 10.0.4.2 dev ns2eth4 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 1
+ test_linkfail=1 \
+ run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 3 3 3
chk_add_nr 1 1
chk_stale_nr $ns2 1 5 1
@@ -2188,7 +2169,8 @@ link_failure_tests()
pm_nl_set_limits $ns2 1 3
pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow
pm_nl_add_endpoint $ns2 10.0.4.2 dev ns2eth4 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 2
+ test_linkfail=2 \
+ run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 3 3 3
chk_add_nr 1 1
chk_stale_nr $ns2 1 -1 1
@@ -2201,9 +2183,9 @@ link_failure_tests()
pm_nl_set_limits $ns1 0 2
pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
pm_nl_set_limits $ns2 1 2
- FAILING_LINKS="1"
pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
- run_tests $ns1 $ns2 10.0.1.1 1
+ FAILING_LINKS="1" test_linkfail=1 \
+ run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 2 2 2
chk_add_nr 1 1
chk_link_usage $ns2 ns2eth3 $cinsent 0
@@ -2217,8 +2199,8 @@ link_failure_tests()
pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
pm_nl_set_limits $ns2 1 2
pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
- FAILING_LINKS="1 2"
- run_tests $ns1 $ns2 10.0.1.1 1
+ FAILING_LINKS="1 2" test_linkfail=1 \
+ run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 2 2 2
chk_add_nr 1 1
chk_stale_nr $ns2 2 4 2
@@ -2233,8 +2215,8 @@ link_failure_tests()
pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
pm_nl_set_limits $ns2 1 3
pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
- FAILING_LINKS="1 2"
- run_tests $ns1 $ns2 10.0.1.1 2
+ FAILING_LINKS="1 2" test_linkfail=2 \
+ run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 2 2 2
chk_add_nr 1 1
chk_stale_nr $ns2 1 -1 2
@@ -2249,8 +2231,9 @@ add_addr_timeout_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 1 1
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
+ chk_add_tx_nr 4 4
chk_add_nr 4 0
fi
@@ -2259,7 +2242,7 @@ add_addr_timeout_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 1 1
pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
- run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
+ run_tests $ns1 $ns2 dead:beef:1::1 slow
chk_join_nr 1 1 1
chk_add_nr 4 0
fi
@@ -2270,7 +2253,7 @@ add_addr_timeout_tests()
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
pm_nl_set_limits $ns2 2 2
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 speed_10
+ run_tests $ns1 $ns2 10.0.1.1 speed_10
chk_join_nr 2 2 2
chk_add_nr 8 0
fi
@@ -2281,7 +2264,7 @@ add_addr_timeout_tests()
pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
pm_nl_set_limits $ns2 2 2
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 speed_10
+ run_tests $ns1 $ns2 10.0.1.1 speed_10
chk_join_nr 1 1 1
chk_add_nr 8 0
fi
@@ -2294,8 +2277,10 @@ remove_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 0 -1 slow
+ addr_nr_ns2=-1 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
+ chk_rm_tx_nr 1
chk_rm_nr 1 1
fi
@@ -2305,7 +2290,8 @@ remove_tests()
pm_nl_set_limits $ns2 0 2
pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 0 -2 slow
+ addr_nr_ns2=-2 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 2 2 2
chk_rm_nr 2 2
fi
@@ -2315,7 +2301,8 @@ remove_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
pm_nl_set_limits $ns2 1 1
- run_tests $ns1 $ns2 10.0.1.1 0 -1 0 slow
+ addr_nr_ns1=-1 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
chk_add_nr 1 1
chk_rm_nr 1 1 invert
@@ -2327,7 +2314,8 @@ remove_tests()
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
pm_nl_set_limits $ns2 1 2
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 -1 -1 slow
+ addr_nr_ns1=-1 addr_nr_ns2=-1 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 2 2 2
chk_add_nr 1 1
chk_rm_nr 1 1
@@ -2340,7 +2328,8 @@ remove_tests()
pm_nl_set_limits $ns2 1 3
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 -1 -2 speed_10
+ addr_nr_ns1=-1 addr_nr_ns2=-2 \
+ run_tests $ns1 $ns2 10.0.1.1 speed_10
chk_join_nr 3 3 3
chk_add_nr 1 1
chk_rm_nr 2 2
@@ -2353,7 +2342,8 @@ remove_tests()
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
pm_nl_set_limits $ns2 3 3
- run_tests $ns1 $ns2 10.0.1.1 0 -3 0 speed_10
+ addr_nr_ns1=-3 \
+ run_tests $ns1 $ns2 10.0.1.1 speed_10
chk_join_nr 3 3 3
chk_add_nr 3 3
chk_rm_nr 3 3 invert
@@ -2366,7 +2356,8 @@ remove_tests()
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
pm_nl_set_limits $ns2 3 3
- run_tests $ns1 $ns2 10.0.1.1 0 -3 0 speed_10
+ addr_nr_ns1=-3 \
+ run_tests $ns1 $ns2 10.0.1.1 speed_10
chk_join_nr 1 1 1
chk_add_nr 3 3
chk_rm_nr 3 1 invert
@@ -2379,7 +2370,8 @@ remove_tests()
pm_nl_set_limits $ns2 1 3
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
+ addr_nr_ns1=-8 addr_nr_ns2=-8 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 3 3 3
chk_add_nr 1 1
chk_rm_nr 1 3 invert simult
@@ -2392,10 +2384,12 @@ remove_tests()
pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow id 150
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
+ addr_nr_ns1=-8 addr_nr_ns2=-8 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 3 3 3
if mptcp_lib_kversion_ge 5.18; then
+ chk_rm_tx_nr 0
chk_rm_nr 0 3 simult
else
chk_rm_nr 3 3
@@ -2409,7 +2403,8 @@ remove_tests()
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
pm_nl_set_limits $ns2 3 3
- run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
+ addr_nr_ns1=-8 addr_nr_ns2=-8 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 3 3 3
chk_add_nr 3 3
chk_rm_nr 3 3 invert simult
@@ -2422,7 +2417,8 @@ remove_tests()
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
pm_nl_set_limits $ns2 3 3
- run_tests $ns1 $ns2 10.0.1.1 0 -8 0 slow
+ addr_nr_ns1=-8 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
chk_add_nr 3 3
chk_rm_nr 3 1 invert
@@ -2433,7 +2429,8 @@ remove_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 0 -9 slow
+ addr_nr_ns2=-9 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
chk_rm_nr 1 1
fi
@@ -2443,7 +2440,8 @@ remove_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
pm_nl_set_limits $ns2 1 1
- run_tests $ns1 $ns2 10.0.1.1 0 -9 0 slow
+ addr_nr_ns1=-9 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
chk_add_nr 1 1
chk_rm_nr 1 1 invert
@@ -2456,7 +2454,8 @@ add_tests()
if reset "add single subflow"; then
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
- run_tests $ns1 $ns2 10.0.1.1 0 0 1 slow
+ addr_nr_ns2=1 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
fi
@@ -2464,7 +2463,8 @@ add_tests()
if reset "add signal address"; then
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 1 1
- run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
+ addr_nr_ns1=1 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
chk_add_nr 1 1
fi
@@ -2473,7 +2473,8 @@ add_tests()
if reset "add multiple subflows"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 0 2
- run_tests $ns1 $ns2 10.0.1.1 0 0 2 slow
+ addr_nr_ns2=2 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 2 2 2
fi
@@ -2481,7 +2482,8 @@ add_tests()
if reset "add multiple subflows IPv6"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 0 2
- run_tests $ns1 $ns2 dead:beef:1::1 0 0 2 slow
+ addr_nr_ns2=2 \
+ run_tests $ns1 $ns2 dead:beef:1::1 slow
chk_join_nr 2 2 2
fi
@@ -2489,7 +2491,8 @@ add_tests()
if reset "add multiple addresses IPv6"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 2 2
- run_tests $ns1 $ns2 dead:beef:1::1 0 2 0 slow
+ addr_nr_ns1=2 \
+ run_tests $ns1 $ns2 dead:beef:1::1 slow
chk_join_nr 2 2 2
chk_add_nr 2 2
fi
@@ -2502,14 +2505,14 @@ ipv6_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
pm_nl_add_endpoint $ns2 dead:beef:3::2 dev ns2eth3 flags subflow
- run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
+ run_tests $ns1 $ns2 dead:beef:1::1 slow
chk_join_nr 1 1 1
fi
# add_address, unused IPv6
if reset "unused signal address IPv6"; then
pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
- run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
+ run_tests $ns1 $ns2 dead:beef:1::1 slow
chk_join_nr 0 0 0
chk_add_nr 1 1
fi
@@ -2519,7 +2522,7 @@ ipv6_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
pm_nl_set_limits $ns2 1 1
- run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
+ run_tests $ns1 $ns2 dead:beef:1::1 slow
chk_join_nr 1 1 1
chk_add_nr 1 1
fi
@@ -2529,7 +2532,8 @@ ipv6_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
pm_nl_set_limits $ns2 1 1
- run_tests $ns1 $ns2 dead:beef:1::1 0 -1 0 slow
+ addr_nr_ns1=-1 \
+ run_tests $ns1 $ns2 dead:beef:1::1 slow
chk_join_nr 1 1 1
chk_add_nr 1 1
chk_rm_nr 1 1 invert
@@ -2541,7 +2545,8 @@ ipv6_tests()
pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
pm_nl_set_limits $ns2 1 2
pm_nl_add_endpoint $ns2 dead:beef:3::2 dev ns2eth3 flags subflow
- run_tests $ns1 $ns2 dead:beef:1::1 0 -1 -1 slow
+ addr_nr_ns1=-1 addr_nr_ns2=-1 \
+ run_tests $ns1 $ns2 dead:beef:1::1 slow
chk_join_nr 2 2 2
chk_add_nr 1 1
chk_rm_nr 1 1
@@ -2642,7 +2647,7 @@ mixed_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 1 1
pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 0 0 0
fi
@@ -2652,7 +2657,7 @@ mixed_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 1 1
pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
- run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
+ run_tests $ns1 $ns2 dead:beef:2::1 slow
chk_join_nr 1 1 1
fi
@@ -2663,7 +2668,7 @@ mixed_tests()
pm_nl_set_limits $ns2 1 4
pm_nl_add_endpoint $ns2 dead:beef:2::2 flags subflow,fullmesh
pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
- run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
+ run_tests $ns1 $ns2 dead:beef:2::1 slow
chk_join_nr 1 1 1
fi
@@ -2675,7 +2680,8 @@ mixed_tests()
pm_nl_set_limits $ns2 2 4
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
- run_tests $ns1 $ns2 dead:beef:1::1 0 0 fullmesh_1 slow
+ addr_nr_ns2=fullmesh_1 \
+ run_tests $ns1 $ns2 dead:beef:1::1 slow
chk_join_nr 4 4 4
fi
}
@@ -2688,7 +2694,8 @@ backup_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow nobackup
+ sflags=nobackup \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
chk_prio_nr 0 1
fi
@@ -2699,7 +2706,8 @@ backup_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
pm_nl_set_limits $ns2 1 1
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ sflags=backup \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
chk_add_nr 1 1
chk_prio_nr 1 1
@@ -2711,7 +2719,8 @@ backup_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
pm_nl_set_limits $ns2 1 1
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ sflags=backup \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
chk_add_nr 1 1
chk_prio_nr 1 1
@@ -2720,7 +2729,7 @@ backup_tests()
if reset "mpc backup" &&
continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 0 0 0
chk_prio_nr 0 1
fi
@@ -2729,7 +2738,7 @@ backup_tests()
continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 0 0 0
chk_prio_nr 1 1
fi
@@ -2737,7 +2746,8 @@ backup_tests()
if reset "mpc switch to backup" &&
continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ sflags=backup \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 0 0 0
chk_prio_nr 0 1
fi
@@ -2746,7 +2756,8 @@ backup_tests()
continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow
pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ sflags=backup \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 0 0 0
chk_prio_nr 1 1
fi
@@ -2835,7 +2846,8 @@ add_addr_ports_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
pm_nl_set_limits $ns2 1 1
- run_tests $ns1 $ns2 10.0.1.1 0 -1 0 slow
+ addr_nr_ns1=-1 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 1
chk_add_nr 1 1 1
chk_rm_nr 1 1 invert
@@ -2851,7 +2863,8 @@ add_addr_ports_tests()
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
pm_nl_set_limits $ns2 1 2
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 -1 -1 slow
+ addr_nr_ns1=-1 addr_nr_ns2=-1 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 2 2 2
chk_add_nr 1 1 1
chk_rm_nr 1 1
@@ -2864,7 +2877,8 @@ add_addr_ports_tests()
pm_nl_set_limits $ns2 1 3
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 -8 -2 slow
+ addr_nr_ns1=-8 addr_nr_ns2=-2 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 3 3 3
chk_add_nr 1 1
chk_rm_nr 1 3 invert simult
@@ -3066,7 +3080,8 @@ fullmesh_tests()
pm_nl_set_limits $ns2 1 4
pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,fullmesh
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,fullmesh
- run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
+ addr_nr_ns1=1 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 4 4 4
chk_add_nr 1 1
fi
@@ -3078,7 +3093,8 @@ fullmesh_tests()
pm_nl_set_limits $ns1 1 3
pm_nl_set_limits $ns2 1 3
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
- run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_1 slow
+ addr_nr_ns2=fullmesh_1 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 3 3 3
chk_add_nr 1 1
fi
@@ -3090,7 +3106,8 @@ fullmesh_tests()
pm_nl_set_limits $ns1 2 5
pm_nl_set_limits $ns2 1 5
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
- run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_2 slow
+ addr_nr_ns2=fullmesh_2 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 5 5 5
chk_add_nr 1 1
fi
@@ -3103,7 +3120,8 @@ fullmesh_tests()
pm_nl_set_limits $ns1 2 4
pm_nl_set_limits $ns2 1 4
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
- run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_2 slow
+ addr_nr_ns2=fullmesh_2 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 4 4 4
chk_add_nr 1 1
fi
@@ -3114,7 +3132,8 @@ fullmesh_tests()
pm_nl_set_limits $ns1 4 4
pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow
pm_nl_set_limits $ns2 4 4
- run_tests $ns1 $ns2 10.0.1.1 0 0 1 slow fullmesh
+ addr_nr_ns2=1 sflags=fullmesh \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 2 2 2
chk_rm_nr 0 1
fi
@@ -3125,7 +3144,8 @@ fullmesh_tests()
pm_nl_set_limits $ns1 4 4
pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow,fullmesh
pm_nl_set_limits $ns2 4 4
- run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_1 slow nofullmesh
+ addr_nr_ns2=fullmesh_1 sflags=nofullmesh \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 2 2 2
chk_rm_nr 0 1
fi
@@ -3136,7 +3156,8 @@ fullmesh_tests()
pm_nl_set_limits $ns1 4 4
pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow
pm_nl_set_limits $ns2 4 4
- run_tests $ns1 $ns2 10.0.1.1 0 0 1 slow backup,fullmesh
+ addr_nr_ns2=1 sflags=backup,fullmesh \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 2 2 2
chk_prio_nr 0 1
chk_rm_nr 0 1
@@ -3148,7 +3169,8 @@ fullmesh_tests()
pm_nl_set_limits $ns1 4 4
pm_nl_set_limits $ns2 4 4
pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,backup,fullmesh
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow nobackup,nofullmesh
+ sflags=nobackup,nofullmesh \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 2 2 2
chk_prio_nr 0 1
chk_rm_nr 0 1
@@ -3158,14 +3180,16 @@ fullmesh_tests()
fastclose_tests()
{
if reset_check_counter "fastclose test" "MPTcpExtMPFastcloseTx"; then
- run_tests $ns1 $ns2 10.0.1.1 1024 0 fastclose_client
+ test_linkfail=1024 addr_nr_ns2=fastclose_client \
+ run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
chk_fclose_nr 1 1
chk_rst_nr 1 1 invert
fi
if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
- run_tests $ns1 $ns2 10.0.1.1 1024 0 fastclose_server
+ test_linkfail=1024 addr_nr_ns2=fastclose_server \
+ run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
chk_fclose_nr 1 1 invert
chk_rst_nr 1 1
@@ -3183,7 +3207,8 @@ fail_tests()
{
# single subflow
if reset_with_fail "Infinite map" 1; then
- run_tests $ns1 $ns2 10.0.1.1 128
+ test_linkfail=128 \
+ run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0 +1 +0 1 0 1 "$(pedit_action_pkts)"
chk_fail_nr 1 -1 invert
fi
@@ -3194,11 +3219,77 @@ fail_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 1024
+ test_linkfail=1024 \
+ run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1 1 0 1 1 0 "$(pedit_action_pkts)"
fi
}
+userspace_pm_add_addr()
+{
+ local addr=$1
+ local id=$2
+ local tk
+
+ tk=$(grep "type:1," "$evts_ns1" |
+ sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
+ ip netns exec $ns1 ./pm_nl_ctl ann $addr token $tk id $id
+ sleep 1
+}
+
+userspace_pm_rm_sf_addr_ns1()
+{
+ local addr=$1
+ local id=$2
+ local tk sp da dp
+
+ tk=$(grep "type:1," "$evts_ns1" |
+ sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
+ sp=$(grep "type:10" "$evts_ns1" |
+ sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+ da=$(grep "type:10" "$evts_ns1" |
+ sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
+ dp=$(grep "type:10" "$evts_ns1" |
+ sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
+ ip netns exec $ns1 ./pm_nl_ctl rem token $tk id $id
+ ip netns exec $ns1 ./pm_nl_ctl dsf lip "::ffff:$addr" \
+ lport $sp rip $da rport $dp token $tk
+ wait_rm_addr $ns1 1
+ wait_rm_sf $ns1 1
+}
+
+userspace_pm_add_sf()
+{
+ local addr=$1
+ local id=$2
+ local tk da dp
+
+ tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+ da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
+ dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+ ip netns exec $ns2 ./pm_nl_ctl csf lip $addr lid $id \
+ rip $da rport $dp token $tk
+ sleep 1
+}
+
+userspace_pm_rm_sf_addr_ns2()
+{
+ local addr=$1
+ local id=$2
+ local tk da dp sp
+
+ tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+ da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
+ dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+ sp=$(grep "type:10" "$evts_ns2" |
+ sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+ ip netns exec $ns2 ./pm_nl_ctl rem token $tk id $id
+ ip netns exec $ns2 ./pm_nl_ctl dsf lip $addr lport $sp \
+ rip $da rport $dp token $tk
+ wait_rm_addr $ns2 1
+ wait_rm_sf $ns2 1
+}
+
userspace_tests()
{
# userspace pm type prevents add_addr
@@ -3254,7 +3345,8 @@ userspace_tests()
pm_nl_set_limits $ns1 1 1
pm_nl_set_limits $ns2 1 1
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ sflags=backup \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 1 1 0
chk_prio_nr 0 0
fi
@@ -3267,7 +3359,8 @@ userspace_tests()
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 0 0 -1 slow
+ addr_nr_ns2=-1 \
+ run_tests $ns1 $ns2 10.0.1.1 slow
chk_join_nr 0 0 0
chk_rm_nr 0 0
fi
@@ -3277,11 +3370,19 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns1
pm_nl_set_limits $ns2 1 1
- run_tests $ns1 $ns2 10.0.1.1 0 userspace_1 0 slow
+ run_tests $ns1 $ns2 10.0.1.1 speed_10 &
+ local tests_pid=$!
+ wait_mpj $ns1
+ userspace_pm_add_addr 10.0.2.1 10
chk_join_nr 1 1 1
chk_add_nr 1 1
+ chk_mptcp_info subflows 1 subflows 1
+ chk_mptcp_info add_addr_signal 1 add_addr_accepted 1
+ userspace_pm_rm_sf_addr_ns1 10.0.2.1 10
chk_rm_nr 1 1 invert
+ chk_mptcp_info subflows 0 subflows 0
kill_events_pids
+ wait $tests_pid
fi
# userspace pm create destroy subflow
@@ -3289,10 +3390,17 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns2
pm_nl_set_limits $ns1 0 1
- run_tests $ns1 $ns2 10.0.1.1 0 0 userspace_1 slow
+ run_tests $ns1 $ns2 10.0.1.1 speed_10 &
+ local tests_pid=$!
+ wait_mpj $ns2
+ userspace_pm_add_sf 10.0.3.2 20
chk_join_nr 1 1 1
+ chk_mptcp_info subflows 1 subflows 1
+ userspace_pm_rm_sf_addr_ns2 10.0.3.2 20
chk_rm_nr 1 1
+ chk_mptcp_info subflows 0 subflows 0
kill_events_pids
+ wait $tests_pid
fi
}
@@ -3305,11 +3413,13 @@ endpoint_tests()
pm_nl_set_limits $ns1 2 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow 2>/dev/null &
+ run_tests $ns1 $ns2 10.0.1.1 slow 2>/dev/null &
wait_mpj $ns1
pm_nl_check_endpoint 1 "creation" \
$ns2 10.0.2.2 id 1 flags implicit
+ chk_mptcp_info subflows 1 subflows 1
+ chk_mptcp_info add_addr_signal 1 add_addr_accepted 1
pm_nl_add_endpoint $ns2 10.0.2.2 id 33
pm_nl_check_endpoint 0 "ID change is prevented" \
@@ -3326,21 +3436,22 @@ endpoint_tests()
pm_nl_set_limits $ns1 1 1
pm_nl_set_limits $ns2 1 1
pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
+ test_linkfail=4 \
+ run_tests $ns1 $ns2 10.0.1.1 speed_20 2>/dev/null &
wait_mpj $ns2
chk_subflow_nr needtitle "before delete" 2
- chk_mptcp_info subflows_1
+ chk_mptcp_info subflows 1 subflows 1
pm_nl_del_endpoint $ns2 2 10.0.2.2
sleep 0.5
chk_subflow_nr "" "after delete" 1
- chk_mptcp_info subflows_0
+ chk_mptcp_info subflows 0 subflows 0
pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
wait_mpj $ns2
chk_subflow_nr "" "after re-add" 2
- chk_mptcp_info subflows_1
+ chk_mptcp_info subflows 1 subflows 1
kill_tests_wait
fi
}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
index b35148edbf02..926b0be87c99 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
@@ -51,6 +51,11 @@ struct mptcp_info {
__u8 mptcpi_local_addr_used;
__u8 mptcpi_local_addr_max;
__u8 mptcpi_csum_enabled;
+ __u32 mptcpi_retransmits;
+ __u64 mptcpi_bytes_retrans;
+ __u64 mptcpi_bytes_sent;
+ __u64 mptcpi_bytes_received;
+ __u64 mptcpi_bytes_acked;
};
struct mptcp_subflow_data {
@@ -81,10 +86,41 @@ struct mptcp_subflow_addrs {
#define MPTCP_SUBFLOW_ADDRS 3
#endif
+#ifndef MPTCP_FULL_INFO
+struct mptcp_subflow_info {
+ __u32 id;
+ struct mptcp_subflow_addrs addrs;
+};
+
+struct mptcp_full_info {
+ __u32 size_tcpinfo_kernel; /* must be 0, set by kernel */
+ __u32 size_tcpinfo_user;
+ __u32 size_sfinfo_kernel; /* must be 0, set by kernel */
+ __u32 size_sfinfo_user;
+ __u32 num_subflows; /* must be 0, set by kernel (real subflow count) */
+ __u32 size_arrays_user; /* max subflows that userspace is interested in;
+ * the buffers at subflow_info/tcp_info
+ * are respectively at least:
+ * size_arrays * size_sfinfo_user
+ * size_arrays * size_tcpinfo_user
+ * bytes wide
+ */
+ __aligned_u64 subflow_info;
+ __aligned_u64 tcp_info;
+ struct mptcp_info mptcp_info;
+};
+
+#define MPTCP_FULL_INFO 4
+#endif
+
struct so_state {
struct mptcp_info mi;
+ struct mptcp_info last_sample;
+ struct tcp_info tcp_info;
+ struct mptcp_subflow_addrs addrs;
uint64_t mptcpi_rcv_delta;
uint64_t tcpi_rcv_delta;
+ bool pkt_stats_avail;
};
#ifndef MIN
@@ -322,8 +358,9 @@ static void do_getsockopt_mptcp_info(struct so_state *s, int fd, size_t w)
if (ret < 0)
die_perror("getsockopt MPTCP_INFO");
- assert(olen == sizeof(i));
+ s->pkt_stats_avail = olen >= sizeof(i);
+ s->last_sample = i;
if (s->mi.mptcpi_write_seq == 0)
s->mi = i;
@@ -362,6 +399,8 @@ static void do_getsockopt_tcp_info(struct so_state *s, int fd, size_t r, size_t
olen -= sizeof(struct mptcp_subflow_data);
assert(olen == ti.d.size_user);
+ s->tcp_info = ti.ti[0];
+
if (ti.ti[0].tcpi_bytes_sent == w &&
ti.ti[0].tcpi_bytes_received == r)
goto done;
@@ -383,7 +422,7 @@ done:
do_getsockopt_bogus_sf_data(fd, MPTCP_TCPINFO);
}
-static void do_getsockopt_subflow_addrs(int fd)
+static void do_getsockopt_subflow_addrs(struct so_state *s, int fd)
{
struct sockaddr_storage remote, local;
socklen_t olen, rlen, llen;
@@ -431,6 +470,7 @@ static void do_getsockopt_subflow_addrs(int fd)
assert(memcmp(&local, &addrs.addr[0].ss_local, sizeof(local)) == 0);
assert(memcmp(&remote, &addrs.addr[0].ss_remote, sizeof(remote)) == 0);
+ s->addrs = addrs.addr[0];
memset(&addrs, 0, sizeof(addrs));
@@ -451,13 +491,70 @@ static void do_getsockopt_subflow_addrs(int fd)
do_getsockopt_bogus_sf_data(fd, MPTCP_SUBFLOW_ADDRS);
}
+static void do_getsockopt_mptcp_full_info(struct so_state *s, int fd)
+{
+ size_t data_size = sizeof(struct mptcp_full_info);
+ struct mptcp_subflow_info sfinfo[2];
+ struct tcp_info tcp_info[2];
+ struct mptcp_full_info mfi;
+ socklen_t olen;
+ int ret;
+
+ memset(&mfi, 0, data_size);
+ memset(tcp_info, 0, sizeof(tcp_info));
+ memset(sfinfo, 0, sizeof(sfinfo));
+
+ mfi.size_tcpinfo_user = sizeof(struct tcp_info);
+ mfi.size_sfinfo_user = sizeof(struct mptcp_subflow_info);
+ mfi.size_arrays_user = 2;
+ mfi.subflow_info = (unsigned long)&sfinfo[0];
+ mfi.tcp_info = (unsigned long)&tcp_info[0];
+ olen = data_size;
+
+ ret = getsockopt(fd, SOL_MPTCP, MPTCP_FULL_INFO, &mfi, &olen);
+ if (ret < 0) {
+ if (errno == EOPNOTSUPP) {
+ perror("MPTCP_FULL_INFO test skipped");
+ return;
+ }
+ xerror("getsockopt MPTCP_FULL_INFO");
+ }
+
+ assert(olen <= data_size);
+ assert(mfi.size_tcpinfo_kernel > 0);
+ assert(mfi.size_tcpinfo_user ==
+ MIN(mfi.size_tcpinfo_kernel, sizeof(struct tcp_info)));
+ assert(mfi.size_sfinfo_kernel > 0);
+ assert(mfi.size_sfinfo_user ==
+ MIN(mfi.size_sfinfo_kernel, sizeof(struct mptcp_subflow_info)));
+ assert(mfi.num_subflows == 1);
+
+ /* Tolerate future extensions of the mptcp_info struct and running a
+ * newer test on top of an older kernel.
+ * In any case, any kernel supporting MPTCP_FULL_INFO must at least
+ * include the following mptcp_info fields.
+ */
+ assert(olen > (socklen_t)__builtin_offsetof(struct mptcp_full_info, tcp_info));
+ assert(mfi.mptcp_info.mptcpi_subflows == 0);
+ assert(mfi.mptcp_info.mptcpi_bytes_sent == s->last_sample.mptcpi_bytes_sent);
+ assert(mfi.mptcp_info.mptcpi_bytes_received == s->last_sample.mptcpi_bytes_received);
+
+ assert(sfinfo[0].id == 1);
+ assert(tcp_info[0].tcpi_bytes_sent == s->tcp_info.tcpi_bytes_sent);
+ assert(tcp_info[0].tcpi_bytes_received == s->tcp_info.tcpi_bytes_received);
+ assert(!memcmp(&sfinfo->addrs, &s->addrs, sizeof(struct mptcp_subflow_addrs)));
+}
+
static void do_getsockopts(struct so_state *s, int fd, size_t r, size_t w)
{
do_getsockopt_mptcp_info(s, fd, w);
do_getsockopt_tcp_info(s, fd, r, w);
- do_getsockopt_subflow_addrs(fd);
+ do_getsockopt_subflow_addrs(s, fd);
+
+ if (r)
+ do_getsockopt_mptcp_full_info(s, fd);
}
static void connect_one_server(int fd, int pipefd)
@@ -562,6 +659,23 @@ static void process_one_client(int fd, int pipefd)
do_getsockopts(&s, fd, ret, ret2);
if (s.mptcpi_rcv_delta != (uint64_t)ret + 1)
xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret);
+
+ /* be nice when running on top of older kernel */
+ if (s.pkt_stats_avail) {
+ if (s.last_sample.mptcpi_bytes_sent != ret2)
+ xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64,
+ s.last_sample.mptcpi_bytes_sent, ret2,
+ s.last_sample.mptcpi_bytes_sent - ret2);
+ if (s.last_sample.mptcpi_bytes_received != ret)
+ xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64,
+ s.last_sample.mptcpi_bytes_received, ret,
+ s.last_sample.mptcpi_bytes_received - ret);
+ if (s.last_sample.mptcpi_bytes_acked != ret)
+ xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64,
+ s.last_sample.mptcpi_bytes_acked, ret,
+ s.last_sample.mptcpi_bytes_acked - ret);
+ }
+
close(fd);
}
diff --git a/tools/testing/selftests/net/nettest.c b/tools/testing/selftests/net/nettest.c
index ee9a72982705..39a0e01f8554 100644
--- a/tools/testing/selftests/net/nettest.c
+++ b/tools/testing/selftests/net/nettest.c
@@ -76,7 +76,9 @@ struct sock_args {
has_grp:1,
has_expected_laddr:1,
has_expected_raddr:1,
- bind_test_only:1;
+ bind_test_only:1,
+ client_dontroute:1,
+ server_dontroute:1;
unsigned short port;
@@ -611,6 +613,18 @@ static int set_dsfield(int sd, int version, int dsfield)
return 0;
}
+static int set_dontroute(int sd)
+{
+ unsigned int one = 1;
+
+ if (setsockopt(sd, SOL_SOCKET, SO_DONTROUTE, &one, sizeof(one)) < 0) {
+ log_err_errno("setsockopt(SO_DONTROUTE)");
+ return -1;
+ }
+
+ return 0;
+}
+
static int str_to_uint(const char *str, int min, int max, unsigned int *value)
{
int number;
@@ -1351,6 +1365,14 @@ static int msock_init(struct sock_args *args, int server)
if (set_dsfield(sd, AF_INET, args->dsfield) != 0)
goto out_err;
+ if (server) {
+ if (args->server_dontroute && set_dontroute(sd) != 0)
+ goto out_err;
+ } else {
+ if (args->client_dontroute && set_dontroute(sd) != 0)
+ goto out_err;
+ }
+
if (args->dev && bind_to_device(sd, args->dev) != 0)
goto out_err;
else if (args->use_setsockopt &&
@@ -1482,6 +1504,9 @@ static int lsock_init(struct sock_args *args)
if (set_dsfield(sd, args->version, args->dsfield) != 0)
goto err;
+ if (args->server_dontroute && set_dontroute(sd) != 0)
+ goto err;
+
if (args->dev && bind_to_device(sd, args->dev) != 0)
goto err;
else if (args->use_setsockopt &&
@@ -1698,6 +1723,9 @@ static int connectsock(void *addr, socklen_t alen, struct sock_args *args)
if (set_dsfield(sd, args->version, args->dsfield) != 0)
goto err;
+ if (args->client_dontroute && set_dontroute(sd) != 0)
+ goto err;
+
if (args->dev && bind_to_device(sd, args->dev) != 0)
goto err;
else if (args->use_setsockopt &&
@@ -1905,10 +1933,14 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
#define GETOPT_STR "sr:l:c:Q:p:t:g:P:DRn:M:X:m:d:I:BN:O:SUCi6xL:0:1:2:3:Fbqf"
#define OPT_FORCE_BIND_KEY_IFINDEX 1001
#define OPT_NO_BIND_KEY_IFINDEX 1002
+#define OPT_CLIENT_DONTROUTE 1003
+#define OPT_SERVER_DONTROUTE 1004
static struct option long_opts[] = {
{"force-bind-key-ifindex", 0, 0, OPT_FORCE_BIND_KEY_IFINDEX},
{"no-bind-key-ifindex", 0, 0, OPT_NO_BIND_KEY_IFINDEX},
+ {"client-dontroute", 0, 0, OPT_CLIENT_DONTROUTE},
+ {"server-dontroute", 0, 0, OPT_SERVER_DONTROUTE},
{0, 0, 0, 0}
};
@@ -1954,6 +1986,12 @@ static void print_usage(char *prog)
" --no-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX off\n"
" --force-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX on\n"
" (default: only if -I is passed)\n"
+ " --client-dontroute: don't use gateways for client socket: send\n"
+ " packets only if destination is on link (see\n"
+ " SO_DONTROUTE in socket(7))\n"
+ " --server-dontroute: don't use gateways for server socket: send\n"
+ " packets only if destination is on link (see\n"
+ " SO_DONTROUTE in socket(7))\n"
"\n"
" -g grp multicast group (e.g., 239.1.1.1)\n"
" -i interactive mode (default is echo and terminate)\n"
@@ -2076,6 +2114,12 @@ int main(int argc, char *argv[])
case OPT_NO_BIND_KEY_IFINDEX:
args.bind_key_ifindex = -1;
break;
+ case OPT_CLIENT_DONTROUTE:
+ args.client_dontroute = 1;
+ break;
+ case OPT_SERVER_DONTROUTE:
+ args.server_dontroute = 1;
+ break;
case 'X':
args.client_pw = optarg;
break;
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 383ac6fc037d..ba286d680fd9 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -860,6 +860,7 @@ EOF
fi
# clean up any leftovers
+ echo 0 > /sys/bus/netdevsim/del_device
$probed && rmmod netdevsim
if [ $ret -ne 0 ]; then
diff --git a/tools/testing/selftests/net/test_vxlan_nolocalbypass.sh b/tools/testing/selftests/net/test_vxlan_nolocalbypass.sh
new file mode 100755
index 000000000000..f75212bf142c
--- /dev/null
+++ b/tools/testing/selftests/net/test_vxlan_nolocalbypass.sh
@@ -0,0 +1,240 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test is for checking the [no]localbypass VXLAN device option. The test
+# configures two VXLAN devices in the same network namespace and a tc filter on
+# the loopback device that drops encapsulated packets. The test sends packets
+# from the first VXLAN device and verifies that by default these packets are
+# received by the second VXLAN device. The test then enables the nolocalbypass
+# option and verifies that packets are no longer received by the second VXLAN
+# device.
+
+ret=0
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+TESTS="
+ nolocalbypass
+"
+VERBOSE=0
+PAUSE_ON_FAIL=no
+PAUSE=no
+
+################################################################################
+# Utilities
+
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ printf "TEST: %-60s [ OK ]\n" "${msg}"
+ nsuccess=$((nsuccess+1))
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "$VERBOSE" = "1" ]; then
+ echo " rc=$rc, expected $expected"
+ fi
+
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+
+ if [ "${PAUSE}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+
+ [ "$VERBOSE" = "1" ] && echo
+}
+
+run_cmd()
+{
+ local cmd="$1"
+ local out
+ local stderr="2>/dev/null"
+
+ if [ "$VERBOSE" = "1" ]; then
+ printf "COMMAND: $cmd\n"
+ stderr=
+ fi
+
+ out=$(eval $cmd $stderr)
+ rc=$?
+ if [ "$VERBOSE" = "1" -a -n "$out" ]; then
+ echo " $out"
+ fi
+
+ return $rc
+}
+
+tc_check_packets()
+{
+ local ns=$1; shift
+ local id=$1; shift
+ local handle=$1; shift
+ local count=$1; shift
+ local pkts
+
+ sleep 0.1
+ pkts=$(tc -n $ns -j -s filter show $id \
+ | jq ".[] | select(.options.handle == $handle) | \
+ .options.actions[0].stats.packets")
+ [[ $pkts == $count ]]
+}
+
+################################################################################
+# Setup
+
+setup()
+{
+ ip netns add ns1
+
+ ip -n ns1 link set dev lo up
+ ip -n ns1 address add 192.0.2.1/32 dev lo
+ ip -n ns1 address add 198.51.100.1/32 dev lo
+
+ ip -n ns1 link add name vx0 up type vxlan id 100 local 198.51.100.1 \
+ dstport 4789 nolearning
+ ip -n ns1 link add name vx1 up type vxlan id 100 dstport 4790
+}
+
+cleanup()
+{
+ ip netns del ns1 &> /dev/null
+}
+
+################################################################################
+# Tests
+
+nolocalbypass()
+{
+ local smac=00:01:02:03:04:05
+ local dmac=00:0a:0b:0c:0d:0e
+
+ run_cmd "bridge -n ns1 fdb add $dmac dev vx0 self static dst 192.0.2.1 port 4790"
+
+ run_cmd "tc -n ns1 qdisc add dev vx1 clsact"
+ run_cmd "tc -n ns1 filter add dev vx1 ingress pref 1 handle 101 proto all flower src_mac $smac dst_mac $dmac action pass"
+
+ run_cmd "tc -n ns1 qdisc add dev lo clsact"
+ run_cmd "tc -n ns1 filter add dev lo ingress pref 1 handle 101 proto ip flower ip_proto udp dst_port 4790 action drop"
+
+ run_cmd "ip -n ns1 -d -j link show dev vx0 | jq -e '.[][\"linkinfo\"][\"info_data\"][\"localbypass\"] == true'"
+ log_test $? 0 "localbypass enabled"
+
+ run_cmd "ip netns exec ns1 mausezahn vx0 -a $smac -b $dmac -c 1 -p 100 -q"
+
+ tc_check_packets "ns1" "dev vx1 ingress" 101 1
+ log_test $? 0 "Packet received by local VXLAN device - localbypass"
+
+ run_cmd "ip -n ns1 link set dev vx0 type vxlan nolocalbypass"
+
+ run_cmd "ip -n ns1 -d -j link show dev vx0 | jq -e '.[][\"linkinfo\"][\"info_data\"][\"localbypass\"] == false'"
+ log_test $? 0 "localbypass disabled"
+
+ run_cmd "ip netns exec ns1 mausezahn vx0 -a $smac -b $dmac -c 1 -p 100 -q"
+
+ tc_check_packets "ns1" "dev vx1 ingress" 101 1
+ log_test $? 0 "Packet not received by local VXLAN device - nolocalbypass"
+
+ run_cmd "ip -n ns1 link set dev vx0 type vxlan localbypass"
+
+ run_cmd "ip -n ns1 -d -j link show dev vx0 | jq -e '.[][\"linkinfo\"][\"info_data\"][\"localbypass\"] == true'"
+ log_test $? 0 "localbypass enabled"
+
+ run_cmd "ip netns exec ns1 mausezahn vx0 -a $smac -b $dmac -c 1 -p 100 -q"
+
+ tc_check_packets "ns1" "dev vx1 ingress" 101 2
+ log_test $? 0 "Packet received by local VXLAN device - localbypass"
+}
+
+################################################################################
+# Usage
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/} OPTS
+
+ -t <test> Test(s) to run (default: all)
+ (options: $TESTS)
+ -p Pause on fail
+ -P Pause after each test before cleanup
+ -v Verbose mode (show commands and output)
+EOF
+}
+
+################################################################################
+# Main
+
+trap cleanup EXIT
+
+while getopts ":t:pPvh" opt; do
+ case $opt in
+ t) TESTS=$OPTARG ;;
+ p) PAUSE_ON_FAIL=yes;;
+ P) PAUSE=yes;;
+ v) VERBOSE=$(($VERBOSE + 1));;
+ h) usage; exit 0;;
+ *) usage; exit 1;;
+ esac
+done
+
+# Make sure we don't pause twice.
+[ "${PAUSE}" = "yes" ] && PAUSE_ON_FAIL=no
+
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit $ksft_skip;
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v bridge)" ]; then
+ echo "SKIP: Could not run test without bridge tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v mausezahn)" ]; then
+ echo "SKIP: Could not run test without mausezahn tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v jq)" ]; then
+ echo "SKIP: Could not run test without jq tool"
+ exit $ksft_skip
+fi
+
+ip link help vxlan 2>&1 | grep -q "localbypass"
+if [ $? -ne 0 ]; then
+ echo "SKIP: iproute2 ip too old, missing VXLAN nolocalbypass support"
+ exit $ksft_skip
+fi
+
+cleanup
+
+for t in $TESTS
+do
+ setup; $t; cleanup;
+done
+
+if [ "$TESTS" != "none" ]; then
+ printf "\nTests passed: %3d\n" ${nsuccess}
+ printf "Tests failed: %3d\n" ${nfail}
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index ff36844d14b4..a3c57004344c 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -15,6 +15,7 @@
#include <linux/tcp.h>
#include <linux/socket.h>
+#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
@@ -1646,6 +1647,136 @@ TEST_F(tls_err, timeo)
}
}
+TEST_F(tls_err, poll_partial_rec)
+{
+ struct pollfd pfd = { };
+ ssize_t rec_len;
+ char rec[256];
+ char buf[128];
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ pfd.fd = self->cfd2;
+ pfd.events = POLLIN;
+ EXPECT_EQ(poll(&pfd, 1, 1), 0);
+
+ memrnd(buf, sizeof(buf));
+ EXPECT_EQ(send(self->fd, buf, sizeof(buf), 0), sizeof(buf));
+ rec_len = recv(self->cfd, rec, sizeof(rec), 0);
+ EXPECT_GT(rec_len, sizeof(buf));
+
+ /* Write 100B, not the full record ... */
+ EXPECT_EQ(send(self->fd2, rec, 100, 0), 100);
+ /* ... no full record should mean no POLLIN */
+ pfd.fd = self->cfd2;
+ pfd.events = POLLIN;
+ EXPECT_EQ(poll(&pfd, 1, 1), 0);
+ /* Now write the rest, and it should all pop out of the other end. */
+ EXPECT_EQ(send(self->fd2, rec + 100, rec_len - 100, 0), rec_len - 100);
+ pfd.fd = self->cfd2;
+ pfd.events = POLLIN;
+ EXPECT_EQ(poll(&pfd, 1, 1), 1);
+ EXPECT_EQ(recv(self->cfd2, rec, sizeof(rec), 0), sizeof(buf));
+ EXPECT_EQ(memcmp(buf, rec, sizeof(buf)), 0);
+}
+
+TEST_F(tls_err, epoll_partial_rec)
+{
+ struct epoll_event ev, events[10];
+ ssize_t rec_len;
+ char rec[256];
+ char buf[128];
+ int epollfd;
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ epollfd = epoll_create1(0);
+ ASSERT_GE(epollfd, 0);
+
+ memset(&ev, 0, sizeof(ev));
+ ev.events = EPOLLIN;
+ ev.data.fd = self->cfd2;
+ ASSERT_GE(epoll_ctl(epollfd, EPOLL_CTL_ADD, self->cfd2, &ev), 0);
+
+ EXPECT_EQ(epoll_wait(epollfd, events, 10, 0), 0);
+
+ memrnd(buf, sizeof(buf));
+ EXPECT_EQ(send(self->fd, buf, sizeof(buf), 0), sizeof(buf));
+ rec_len = recv(self->cfd, rec, sizeof(rec), 0);
+ EXPECT_GT(rec_len, sizeof(buf));
+
+ /* Write 100B, not the full record ... */
+ EXPECT_EQ(send(self->fd2, rec, 100, 0), 100);
+ /* ... no full record should mean no POLLIN */
+ EXPECT_EQ(epoll_wait(epollfd, events, 10, 0), 0);
+ /* Now write the rest, and it should all pop out of the other end. */
+ EXPECT_EQ(send(self->fd2, rec + 100, rec_len - 100, 0), rec_len - 100);
+ EXPECT_EQ(epoll_wait(epollfd, events, 10, 0), 1);
+ EXPECT_EQ(recv(self->cfd2, rec, sizeof(rec), 0), sizeof(buf));
+ EXPECT_EQ(memcmp(buf, rec, sizeof(buf)), 0);
+
+ close(epollfd);
+}
+
+TEST_F(tls_err, poll_partial_rec_async)
+{
+ struct pollfd pfd = { };
+ ssize_t rec_len;
+ char rec[256];
+ char buf[128];
+ char token;
+ int p[2];
+ int ret;
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ ASSERT_GE(pipe(p), 0);
+
+ memrnd(buf, sizeof(buf));
+ EXPECT_EQ(send(self->fd, buf, sizeof(buf), 0), sizeof(buf));
+ rec_len = recv(self->cfd, rec, sizeof(rec), 0);
+ EXPECT_GT(rec_len, sizeof(buf));
+
+ ret = fork();
+ ASSERT_GE(ret, 0);
+
+ if (ret) {
+ int status, pid2;
+
+ close(p[1]);
+ usleep(1000); /* Give child a head start */
+
+ EXPECT_EQ(send(self->fd2, rec, 100, 0), 100);
+
+ EXPECT_EQ(read(p[0], &token, 1), 1); /* Barrier #1 */
+
+ EXPECT_EQ(send(self->fd2, rec + 100, rec_len - 100, 0),
+ rec_len - 100);
+
+ pid2 = wait(&status);
+ EXPECT_EQ(pid2, ret);
+ EXPECT_EQ(status, 0);
+ } else {
+ close(p[0]);
+
+ /* Child should sleep in poll(), never get a wake */
+ pfd.fd = self->cfd2;
+ pfd.events = POLLIN;
+ EXPECT_EQ(poll(&pfd, 1, 5), 0);
+
+ EXPECT_EQ(write(p[1], &token, 1), 1); /* Barrier #1 */
+
+ pfd.fd = self->cfd2;
+ pfd.events = POLLIN;
+ EXPECT_EQ(poll(&pfd, 1, 5), 1);
+
+ exit(!_metadata->passed);
+ }
+}
+
TEST(non_established) {
struct tls12_crypto_info_aes_gcm_256 tls12;
struct sockaddr_in addr;
diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index cfa9562f3cd8..e9438a1862ad 100644
--- a/tools/testing/selftests/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
@@ -110,7 +110,7 @@ static long ppb_to_scaled_ppm(int ppb)
static int64_t pctns(struct ptp_clock_time *t)
{
- return t->sec * 1000000000LL + t->nsec;
+ return t->sec * NSEC_PER_SEC + t->nsec;
}
static void usage(char *progname)
@@ -134,6 +134,7 @@ static void usage(char *progname)
" 1 - external time stamp\n"
" 2 - periodic output\n"
" -n val shift the ptp clock time by 'val' nanoseconds\n"
+ " -o val phase offset (in nanoseconds) to be provided to the PHC servo\n"
" -p val enable output with a period of 'val' nanoseconds\n"
" -H val set output phase to 'val' nanoseconds (requires -p)\n"
" -w val set output pulse width to 'val' nanoseconds (requires -p)\n"
@@ -167,6 +168,7 @@ int main(int argc, char *argv[])
int adjfreq = 0x7fffffff;
int adjtime = 0;
int adjns = 0;
+ int adjphase = 0;
int capabilities = 0;
int extts = 0;
int flagtest = 0;
@@ -188,7 +190,7 @@ int main(int argc, char *argv[])
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
- while (EOF != (c = getopt(argc, argv, "cd:e:f:ghH:i:k:lL:n:p:P:sSt:T:w:z"))) {
+ while (EOF != (c = getopt(argc, argv, "cd:e:f:ghH:i:k:lL:n:o:p:P:sSt:T:w:z"))) {
switch (c) {
case 'c':
capabilities = 1;
@@ -228,6 +230,9 @@ int main(int argc, char *argv[])
case 'n':
adjns = atoi(optarg);
break;
+ case 'o':
+ adjphase = atoi(optarg);
+ break;
case 'p':
perout = atoll(optarg);
break;
@@ -287,7 +292,8 @@ int main(int argc, char *argv[])
" %d pulse per second\n"
" %d programmable pins\n"
" %d cross timestamping\n"
- " %d adjust_phase\n",
+ " %d adjust_phase\n"
+ " %d maximum phase adjustment (ns)\n",
caps.max_adj,
caps.n_alarm,
caps.n_ext_ts,
@@ -295,7 +301,8 @@ int main(int argc, char *argv[])
caps.pps,
caps.n_pins,
caps.cross_timestamping,
- caps.adjust_phase);
+ caps.adjust_phase,
+ caps.max_phase_adj);
}
}
@@ -317,7 +324,7 @@ int main(int argc, char *argv[])
tx.time.tv_usec = adjns;
while (tx.time.tv_usec < 0) {
tx.time.tv_sec -= 1;
- tx.time.tv_usec += 1000000000;
+ tx.time.tv_usec += NSEC_PER_SEC;
}
if (clock_adjtime(clkid, &tx) < 0) {
@@ -327,6 +334,18 @@ int main(int argc, char *argv[])
}
}
+ if (adjphase) {
+ memset(&tx, 0, sizeof(tx));
+ tx.modes = ADJ_OFFSET | ADJ_NANO;
+ tx.offset = adjphase;
+
+ if (clock_adjtime(clkid, &tx) < 0) {
+ perror("clock_adjtime");
+ } else {
+ puts("phase adjustment okay");
+ }
+ }
+
if (gettime) {
if (clock_gettime(clkid, &ts)) {
perror("clock_gettime");
diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/filter.json b/tools/testing/selftests/tc-testing/tc-tests/infra/filter.json
new file mode 100644
index 000000000000..c4c778e83da2
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/infra/filter.json
@@ -0,0 +1,25 @@
+[
+ {
+ "id": "c2b4",
+ "name": "soft lockup alarm will be not generated after delete the prio 0 filter of the chain",
+ "category": [
+ "filter",
+ "chain"
+ ],
+ "setup": [
+ "$IP link add dev $DUMMY type dummy || /bin/true",
+ "$TC qdisc add dev $DUMMY root handle 1: htb default 1",
+ "$TC chain add dev $DUMMY",
+ "$TC filter del dev $DUMMY chain 0 parent 1: prio 0"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DUMMY chain 0 parent 1:",
+ "expExitCode": "2",
+ "verifyCmd": "$TC chain ls dev $DUMMY",
+ "matchPattern": "chain parent 1: chain 0",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY root handle 1: htb default 1",
+ "$IP link del dev $DUMMY type dummy"
+ ]
+ }
+]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json
index 44fbfc6caec7..e3d2de5c184f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json
@@ -155,5 +155,28 @@
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
]
- }
+ },
+ {
+ "id": "0531",
+ "name": "Replace mq with invalid parent ID",
+ "category": [
+ "qdisc",
+ "mq"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "echo \"1 1 16\" > /sys/bus/netdevsim/new_device",
+ "$TC qdisc add dev $ETH root handle ffff: mq"
+ ],
+ "cmdUnderTest": "$TC qdisc replace dev $ETH parent ffff:fff1 handle ffff: mq",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $ETH",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent ffff",
+ "matchCount": "16",
+ "teardown": [
+ "echo \"1\" > /sys/bus/netdevsim/del_device"
+ ]
+ }
]