path: root/drivers/net/ethernet/intel/ice
author     Jakub Kicinski <kuba@kernel.org>  2023-12-19 03:46:07 +0300
committer  Jakub Kicinski <kuba@kernel.org>  2023-12-19 03:46:08 +0300
commit     c49b292d031e385abf764ded32cd953c77e73f2d (patch)
tree       3f13748b32a3c273c5315286a3acaf45447a5437 /drivers/net/ethernet/intel/ice
parent     0ee28c9ae042e77100fae2cd82a54750668aafce (diff)
parent     8e432e6197cef6250dfd6fdffd41c06613c874ca (diff)
download   linux-c49b292d031e385abf764ded32cd953c77e73f2d.tar.xz
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2023-12-18

This PR is larger than usual and contains changes in various parts of the kernel.

The main changes are:

1) Fix kCFI bugs in BPF, from Peter Zijlstra.

   End result: all forms of indirect calls from BPF into kernel and from
   kernel into BPF work with CFI enabled. This allows BPF to work with
   CONFIG_FINEIBT=y.

2) Introduce BPF token object, from Andrii Nakryiko.

   It adds an ability to delegate a subset of BPF features from privileged
   daemon (e.g., systemd) through special mount options for userns-bound
   BPF FS to a trusted unprivileged application. The design accommodates
   suggestions from Christian Brauner and Paul Moore.

   Example:
   $ sudo mkdir -p /sys/fs/bpf/token
   $ sudo mount -t bpf bpffs /sys/fs/bpf/token \
     -o delegate_cmds=prog_load:MAP_CREATE \
     -o delegate_progs=kprobe \
     -o delegate_attachs=xdp

3) Various verifier improvements and fixes, from Andrii Nakryiko, Andrei Matei.

   - Complete precision tracking support for register spills
   - Fix verification of possibly-zero-sized stack accesses
   - Fix access to uninit stack slots
   - Track aligned STACK_ZERO cases as imprecise spilled registers. It
     improves the verifier "instructions processed" metric from single
     digit to 50-60% for some programs.
   - Fix verifier retval logic

4) Support for VLAN tag in XDP hints, from Larysa Zaremba.

5) Allocate BPF trampoline via bpf_prog_pack mechanism, from Song Liu.

   End result: better memory utilization and lower I$ miss for calls to
   BPF via BPF trampoline.

6) Fix race between BPF prog accessing inner map and parallel delete,
   from Hou Tao.

7) Add bpf_xdp_get_xfrm_state() kfunc, from Daniel Xu.

   It allows BPF interact with IPSEC infra. The intent is to support
   software RSS (via XDP) for the upcoming ipsec pcpu work. Experiments
   on AWS demonstrate single tunnel pcpu ipsec reaching line rate on
   100G ENA nics.

8) Expand bpf_cgrp_storage to support cgroup1 non-attach, from Yafang Shao.

9) BPF file verification via fsverity, from Song Liu.

   It allows BPF progs get fsverity digest.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (164 commits)
  bpf: Ensure precise is reset to false in __mark_reg_const_zero()
  selftests/bpf: Add more uprobe multi fail tests
  bpf: Fail uprobe multi link with negative offset
  selftests/bpf: Test the release of map btf
  s390/bpf: Fix indirect trampoline generation
  selftests/bpf: Temporarily disable dummy_struct_ops test on s390
  x86/cfi,bpf: Fix bpf_exception_cb() signature
  bpf: Fix dtor CFI
  cfi: Add CFI_NOSEAL()
  x86/cfi,bpf: Fix bpf_struct_ops CFI
  x86/cfi,bpf: Fix bpf_callback_t CFI
  x86/cfi,bpf: Fix BPF JIT call
  cfi: Flip headers
  selftests/bpf: Add test for abnormal cnt during multi-kprobe attachment
  selftests/bpf: Don't use libbpf_get_error() in kprobe_multi_test
  selftests/bpf: Add test for abnormal cnt during multi-uprobe attachment
  bpf: Limit the number of kprobes when attaching program to multiple kprobes
  bpf: Limit the number of uprobes when attaching program to multiple uprobes
  bpf: xdp: Register generic_kfunc_set with XDP programs
  selftests/bpf: utilize string values for delegate_xxx mount options
  ...
====================

Link: https://lore.kernel.org/r/20231219000520.34178-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
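The ice hunks pulled in below implement the driver side of item 4: the Rx ring now carries per-packet context (EOP descriptor, cached PHC time, stripped VLAN protocol) so the XDP Rx hints can be served through the new ice_xdp_md_ops. For orientation, a rough, untested sketch of the consuming XDP program follows; it assumes a bpf-next vmlinux.h, the kfunc externs mirror the declarations used by the in-tree selftests, and the program and variable names are illustrative only.

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: reads the Rx hints (hash, HW timestamp, VLAN tag) that
 * this series lets ice expose; not taken from this diff.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					__be16 *vlan_proto,
					__u16 *vlan_tci) __ksym;

SEC("xdp")
int rx_hints(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type;
	__be16 vlan_proto;
	__u16 vlan_tci;
	__u64 ts;
	__u32 hash;

	/* Each kfunc returns 0 on success or -ENODATA when the driver has
	 * nothing for this packet (e.g. VLAN stripping disabled).
	 */
	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		bpf_printk("rss hash 0x%x type %d", hash, rss_type);
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx hw timestamp %llu ns", ts);
	if (!bpf_xdp_metadata_rx_vlan_tag(ctx, &vlan_proto, &vlan_tci))
		bpf_printk("vlan proto 0x%x tci %u",
			   bpf_ntohs(vlan_proto), vlan_tci);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";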
Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h              2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c        15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h  412
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c        21
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c         22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h         16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c        19
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h        32
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c   207
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.h    18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c         17
11 files changed, 508 insertions, 273 deletions
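The ice_xsk.c hunks at the end of this diff stash the EOP descriptor per xdp_buff (via xsk_pool_fill_cb() and XSK_CHECK_PRIV_TYPE) so the same hints are reachable on the AF_XDP zero-copy path. A hedged sketch of how a redirecting program could forward those hints to AF_XDP userspace through the metadata area, loosely following the xdp_hw_metadata selftest pattern; struct rx_meta and the xsk map name are assumptions for illustration, not a kernel ABI.

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: prepend selected Rx hints to the frame's metadata area
 * and redirect into an XSKMAP so AF_XDP userspace can read them.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					__be16 *vlan_proto,
					__u16 *vlan_tci) __ksym;

struct rx_meta {		/* 16 bytes, multiple of 4 as metadata requires */
	__u64 rx_timestamp;
	__u16 vlan_tci;
	__be16 vlan_proto;
	__u32 pad;
};

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} xsk SEC(".maps");

SEC("xdp")
int rx_hints_to_xsk(struct xdp_md *ctx)
{
	struct rx_meta *meta;
	void *data, *data_meta;

	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	/* Re-read pointers after the adjust and bounds-check for the verifier. */
	data = (void *)(long)ctx->data;
	data_meta = (void *)(long)ctx->data_meta;
	meta = data_meta;
	if ((void *)(meta + 1) > data)
		return XDP_PASS;

	if (bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp))
		meta->rx_timestamp = 0;		/* hint not available */
	if (bpf_xdp_metadata_rx_vlan_tag(ctx, &meta->vlan_proto,
					 &meta->vlan_tci)) {
		meta->vlan_proto = 0;
		meta->vlan_tci = 0;
	}

	return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
}

char _license[] SEC("license") = "GPL";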
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 50304e4a4fb0..2defac6d9168 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -1008,4 +1008,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
+
+extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index edad5f9ab16c..b25b7f415965 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -527,6 +527,19 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
return 0;
}
+static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
+{
+ void *ctx_ptr = &ring->pkt_ctx;
+ struct xsk_cb_desc desc = {};
+
+ XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
+ desc.src = &ctx_ptr;
+ desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
+ sizeof(struct xdp_buff);
+ desc.bytes = sizeof(ctx_ptr);
+ xsk_pool_fill_cb(ring->xsk_pool, &desc);
+}
+
/**
* ice_vsi_cfg_rxq - Configure an Rx queue
* @ring: the ring being configured
@@ -561,6 +574,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
if (err)
return err;
xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ ice_xsk_pool_fill_cb(ring);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
@@ -583,6 +597,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
ring->xdp.data = NULL;
+ ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
err = ice_setup_rx_ctx(ring);
if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 89f986a75cc8..d384ddfcb83e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -673,6 +673,212 @@ struct ice_tlan_ctx {
* Use the enum ice_rx_l2_ptype to decode the packet type
* ENDIF
*/
+#define ICE_PTYPES \
+ /* L2 Packet types */ \
+ ICE_PTT_UNUSED_ENTRY(0), \
+ ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \
+ ICE_PTT_UNUSED_ENTRY(2), \
+ ICE_PTT_UNUSED_ENTRY(3), \
+ ICE_PTT_UNUSED_ENTRY(4), \
+ ICE_PTT_UNUSED_ENTRY(5), \
+ ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT_UNUSED_ENTRY(8), \
+ ICE_PTT_UNUSED_ENTRY(9), \
+ ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT_UNUSED_ENTRY(12), \
+ ICE_PTT_UNUSED_ENTRY(13), \
+ ICE_PTT_UNUSED_ENTRY(14), \
+ ICE_PTT_UNUSED_ENTRY(15), \
+ ICE_PTT_UNUSED_ENTRY(16), \
+ ICE_PTT_UNUSED_ENTRY(17), \
+ ICE_PTT_UNUSED_ENTRY(18), \
+ ICE_PTT_UNUSED_ENTRY(19), \
+ ICE_PTT_UNUSED_ENTRY(20), \
+ ICE_PTT_UNUSED_ENTRY(21), \
+ \
+ /* Non Tunneled IPv4 */ \
+ ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(25), \
+ ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \
+ ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \
+ ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> IPv4 */ \
+ ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(32), \
+ ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> IPv6 */ \
+ ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(39), \
+ ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT */ \
+ ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv4 --> GRE/NAT --> IPv4 */ \
+ ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(47), \
+ ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT --> IPv6 */ \
+ ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(54), \
+ ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT --> MAC */ \
+ ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \
+ ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(62), \
+ ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \
+ ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(69), \
+ ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */ \
+ ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \
+ ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(77), \
+ ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \
+ ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(84), \
+ ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* Non Tunneled IPv6 */ \
+ ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(91), \
+ ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \
+ ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \
+ ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> IPv4 */ \
+ ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(98), \
+ ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> IPv6 */ \
+ ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(105), \
+ ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT */ \
+ ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv6 --> GRE/NAT -> IPv4 */ \
+ ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(113), \
+ ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> IPv6 */ \
+ ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(120), \
+ ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC */ \
+ ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \
+ ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(128), \
+ ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \
+ ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(135), \
+ ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */ \
+ ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \
+ ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(143), \
+ ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \
+ ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(150), \
+ ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+#define ICE_NUM_DEFINED_PTYPES 154
/* macro to make the table lines short, use explicit indexing with [PTYPE] */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
@@ -695,212 +901,10 @@ struct ice_tlan_ctx {
/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = {
- /* L2 Packet types */
- ICE_PTT_UNUSED_ENTRY(0),
- ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- ICE_PTT_UNUSED_ENTRY(2),
- ICE_PTT_UNUSED_ENTRY(3),
- ICE_PTT_UNUSED_ENTRY(4),
- ICE_PTT_UNUSED_ENTRY(5),
- ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT_UNUSED_ENTRY(8),
- ICE_PTT_UNUSED_ENTRY(9),
- ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT_UNUSED_ENTRY(12),
- ICE_PTT_UNUSED_ENTRY(13),
- ICE_PTT_UNUSED_ENTRY(14),
- ICE_PTT_UNUSED_ENTRY(15),
- ICE_PTT_UNUSED_ENTRY(16),
- ICE_PTT_UNUSED_ENTRY(17),
- ICE_PTT_UNUSED_ENTRY(18),
- ICE_PTT_UNUSED_ENTRY(19),
- ICE_PTT_UNUSED_ENTRY(20),
- ICE_PTT_UNUSED_ENTRY(21),
-
- /* Non Tunneled IPv4 */
- ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(25),
- ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(32),
- ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(39),
- ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(47),
- ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(54),
- ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(62),
- ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(69),
- ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(77),
- ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(84),
- ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(91),
- ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(98),
- ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(105),
- ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(113),
- ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(120),
- ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(128),
- ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(135),
- ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(143),
- ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(150),
- ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+ ICE_PTYPES
/* unused entries */
- [154 ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 9b0c04d595ce..b97d116650be 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3426,6 +3426,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
netdev->netdev_ops = &ice_netdev_ops;
netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
+ netdev->xdp_metadata_ops = &ice_xdp_md_ops;
ice_set_ethtool_ops(netdev);
if (vsi->type != ICE_VSI_PF)
@@ -6094,6 +6095,23 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
}
/**
+ * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
+ * @vsi: PF's VSI
+ * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
+ *
+ * Store current stripped VLAN proto in ring packet context,
+ * so it can be accessed more efficiently by packet processing code.
+ */
+static void
+ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
+{
+ u16 i;
+
+ ice_for_each_alloc_rxq(vsi, i)
+ vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
+}
+
+/**
* ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
* @vsi: PF's VSI
* @features: features used to determine VLAN offload settings
@@ -6135,6 +6153,9 @@ ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
if (strip_err || insert_err)
return -EIO;
+ ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
+ htons(vlan_ethertype) : 0);
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index e9e59f4b5580..239cf8a2ee80 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -2129,30 +2129,26 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
}
/**
- * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
- * @rx_ring: Ring to get the VSI info
+ * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
* @rx_desc: Receive descriptor
- * @skb: Particular skb to send timestamp with
+ * @pkt_ctx: Packet context to get the cached time
*
* The driver receives a notification in the receive descriptor with timestamp.
- * The timestamp is in ns, so we must convert the result first.
*/
-void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
+u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx)
{
- struct skb_shared_hwtstamps *hwtstamps;
u64 ts_ns, cached_time;
u32 ts_high;
if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
- return;
+ return 0;
- cached_time = READ_ONCE(rx_ring->cached_phctime);
+ cached_time = READ_ONCE(pkt_ctx->cached_phctime);
/* Do not report a timestamp if we don't have a cached PHC time */
if (!cached_time)
- return;
+ return 0;
/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
* PHC value, rather than accessing the PF. This also allows us to
@@ -2163,9 +2159,7 @@ ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
- hwtstamps = skb_hwtstamps(skb);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
+ return ts_ns;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index d79281061409..032653a7a133 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -298,9 +298,8 @@ void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
-void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
+u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx);
void ice_ptp_reset(struct ice_pf *pf);
void ice_ptp_prepare_for_reset(struct ice_pf *pf);
void ice_ptp_init(struct ice_pf *pf);
@@ -329,9 +328,14 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf)
{
return true;
}
-static inline void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
+
+static inline u64
+ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx)
+{
+ return 0;
+}
+
static inline void ice_ptp_reset(struct ice_pf *pf) { }
static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 9e97ea863068..59617f055e35 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -557,13 +557,14 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
* @rx_buf: Rx buffer to store the XDP action
+ * @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static void
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf)
+ struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -571,6 +572,8 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (!xdp_prog)
goto exit;
+ ice_xdp_meta_set_desc(xdp, eop_desc);
+
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
@@ -1180,8 +1183,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
- u16 vlan_tag = 0;
- u16 rx_ptype;
+ u16 vlan_tci;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, ntc);
@@ -1241,7 +1243,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
+ ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
if (rx_buf->act == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
@@ -1276,7 +1278,7 @@ construct_skb:
continue;
}
- vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
+ vlan_tci = ice_get_vlan_tci(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
if (eth_skb_pad(skb))
@@ -1286,14 +1288,11 @@ construct_skb:
total_rx_bytes += skb->len;
/* populate checksum, VLAN, and protocol */
- rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
- ICE_RX_FLEX_DESC_PTYPE_M;
-
- ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ ice_process_skb_fields(rx_ring, rx_desc, skb);
ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
/* send completed skb up the stack */
- ice_receive_skb(rx_ring, skb, vlan_tag);
+ ice_receive_skb(rx_ring, skb, vlan_tci);
/* update budget accounting */
total_rx_pkts++;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index daf7b9dbb143..b3379ff73674 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -257,6 +257,20 @@ enum ice_rx_dtype {
ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};
+struct ice_pkt_ctx {
+ u64 cached_phctime;
+ __be16 vlan_proto;
+};
+
+struct ice_xdp_buff {
+ struct xdp_buff xdp_buff;
+ const union ice_32b_rx_flex_desc *eop_desc;
+ const struct ice_pkt_ctx *pkt_ctx;
+};
+
+/* Required for compatibility with xdp_buffs from xsk_pool */
+static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0);
+
/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
@@ -298,7 +312,6 @@ enum ice_dynamic_itr {
/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
- struct ice_rx_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -310,13 +323,24 @@ struct ice_rx_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 next_to_alloc;
- /* CL2 - 2nd cacheline starts here */
+
union {
struct ice_rx_buf *rx_buf;
struct xdp_buff **xdp_buf;
};
- struct xdp_buff xdp;
+ /* CL2 - 2nd cacheline starts here */
+ union {
+ struct ice_xdp_buff xdp_ext;
+ struct xdp_buff xdp;
+ };
/* CL3 - 3rd cacheline starts here */
+ union {
+ struct ice_pkt_ctx pkt_ctx;
+ struct {
+ u64 cached_phctime;
+ __be16 vlan_proto;
+ };
+ };
struct bpf_prog *xdp_prog;
u16 rx_offset;
@@ -332,9 +356,9 @@ struct ice_rx_ring {
/* CL4 - 4th cacheline starts here */
struct ice_channel *ch;
struct ice_tx_ring *xdp_ring;
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
dma_addr_t dma; /* physical address of ring */
- u64 cached_phctime;
u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 7e06373e14d9..839e5da24ad5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -63,28 +63,42 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
}
/**
- * ice_rx_hash - set the hash value in the skb
+ * ice_get_rx_hash - get RX hash value from descriptor
+ * @rx_desc: specific descriptor
+ *
+ * Returns hash, if present, 0 otherwise.
+ */
+static u32 ice_get_rx_hash(const union ice_32b_rx_flex_desc *rx_desc)
+{
+ const struct ice_32b_rx_flex_desc_nic *nic_mdid;
+
+ if (unlikely(rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC))
+ return 0;
+
+ nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ return le32_to_cpu(nic_mdid->rss_hash);
+}
+
+/**
+ * ice_rx_hash_to_skb - set the hash value in the skb
* @rx_ring: descriptor ring
* @rx_desc: specific descriptor
* @skb: pointer to current skb
* @rx_ptype: the ptype value from the descriptor
*/
static void
-ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 rx_ptype)
+ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u16 rx_ptype)
{
- struct ice_32b_rx_flex_desc_nic *nic_mdid;
u32 hash;
if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
return;
- if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
- return;
-
- nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
- hash = le32_to_cpu(nic_mdid->rss_hash);
- skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+ hash = ice_get_rx_hash(rx_desc);
+ if (likely(hash))
+ skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}
/**
@@ -171,11 +185,38 @@ checksum_fail:
}
/**
+ * ice_ptp_rx_hwts_to_skb - Put RX timestamp into skb
+ * @rx_ring: Ring to get the VSI info
+ * @rx_desc: Receive descriptor
+ * @skb: Particular skb to send timestamp with
+ *
+ * The timestamp is in ns, so we must convert the result first.
+ */
+static void
+ice_ptp_rx_hwts_to_skb(struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, &rx_ring->pkt_ctx);
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns);
+}
+
+/**
+ * ice_get_ptype - Read HW packet type from the descriptor
+ * @rx_desc: RX descriptor
+ */
+static u16 ice_get_ptype(const union ice_32b_rx_flex_desc *rx_desc)
+{
+ return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
+ ICE_RX_FLEX_DESC_PTYPE_M;
+}
+
+/**
* ice_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: Rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @ptype: the packet type decoded by hardware
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
@@ -184,9 +225,11 @@ checksum_fail:
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 ptype)
+ struct sk_buff *skb)
{
- ice_rx_hash(rx_ring, rx_desc, skb, ptype);
+ u16 ptype = ice_get_ptype(rx_desc);
+
+ ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -194,28 +237,24 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
if (rx_ring->ptp_rx)
- ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+ ice_ptp_rx_hwts_to_skb(rx_ring, rx_desc, skb);
}
/**
* ice_receive_skb - Send a completed packet up the stack
* @rx_ring: Rx ring in play
* @skb: packet to send up
- * @vlan_tag: VLAN tag for packet
+ * @vlan_tci: VLAN TCI for packet
*
* This function sends the completed packet (via. skb) up the stack using
* gro receive functions (with/without VLAN tag)
*/
void
-ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci)
{
- netdev_features_t features = rx_ring->netdev->features;
- bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
+ if ((vlan_tci & VLAN_VID_MASK) && rx_ring->vlan_proto)
+ __vlan_hwaccel_put_tag(skb, rx_ring->vlan_proto,
+ vlan_tci);
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -464,3 +503,125 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
spin_unlock(&xdp_ring->tx_lock);
}
}
+
+/**
+ * ice_xdp_rx_hw_ts - HW timestamp XDP hint handler
+ * @ctx: XDP buff pointer
+ * @ts_ns: destination address
+ *
+ * Copy HW timestamp (if available) to the destination address.
+ */
+static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc,
+ xdp_ext->pkt_ctx);
+ if (!*ts_ns)
+ return -ENODATA;
+
+ return 0;
+}
+
+/* Define a ptype index -> XDP hash type lookup table.
+ * It uses the same ptype definitions as ice_decode_rx_desc_ptype[],
+ * avoiding possible copy-paste errors.
+ */
+#undef ICE_PTT
+#undef ICE_PTT_UNUSED_ENTRY
+
+#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL
+
+#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0
+
+/* A few supplementary definitions for when XDP hash types do not coincide
+ * with what can be generated from ptype definitions
+ * by means of preprocessor concatenation.
+ */
+#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE
+#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2
+#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4
+
+static const enum xdp_rss_hash_type
+ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
+ ICE_PTYPES
+};
+
+#undef XDP_RSS_L3_NONE
+#undef XDP_RSS_L4_NONE
+#undef XDP_RSS_TYPE_PAY2
+#undef XDP_RSS_TYPE_PAY3
+#undef XDP_RSS_TYPE_PAY4
+
+#undef ICE_PTT
+#undef ICE_PTT_UNUSED_ENTRY
+
+/**
+ * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor
+ * @eop_desc: End of Packet descriptor
+ */
+static enum xdp_rss_hash_type
+ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
+{
+ u16 ptype = ice_get_ptype(eop_desc);
+
+ if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES))
+ return 0;
+
+ return ice_ptype_to_xdp_hash[ptype];
+}
+
+/**
+ * ice_xdp_rx_hash - RX hash XDP hint handler
+ * @ctx: XDP buff pointer
+ * @hash: hash destination address
+ * @rss_type: XDP hash type destination address
+ *
+ * Copy RX hash (if available) and its type to the destination address.
+ */
+static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *hash = ice_get_rx_hash(xdp_ext->eop_desc);
+ *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc);
+ if (!likely(*hash))
+ return -ENODATA;
+
+ return 0;
+}
+
+/**
+ * ice_xdp_rx_vlan_tag - VLAN tag XDP hint handler
+ * @ctx: XDP buff pointer
+ * @vlan_proto: destination address for VLAN protocol
+ * @vlan_tci: destination address for VLAN TCI
+ *
+ * Copy VLAN tag (if was stripped) and corresponding protocol
+ * to the destination address.
+ */
+static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
+ u16 *vlan_tci)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *vlan_proto = xdp_ext->pkt_ctx->vlan_proto;
+ if (!*vlan_proto)
+ return -ENODATA;
+
+ *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc);
+ if (!*vlan_tci)
+ return -ENODATA;
+
+ return 0;
+}
+
+const struct xdp_metadata_ops ice_xdp_md_ops = {
+ .xmo_rx_timestamp = ice_xdp_rx_hw_ts,
+ .xmo_rx_hash = ice_xdp_rx_hash,
+ .xmo_rx_vlan_tag = ice_xdp_rx_vlan_tag,
+};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 115969ecdf7b..762047508619 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -84,7 +84,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
- * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
+ * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
* @rx_desc: Rx 32b flex descriptor with RXDID=2
*
* The OS and current PF implementation only support stripping a single VLAN tag
@@ -92,7 +92,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
* one is found return the tag, else return 0 to mean no VLAN tag was found.
*/
static inline u16
-ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
+ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc)
{
u16 stat_err_bits;
@@ -148,7 +148,17 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 ptype);
+ struct sk_buff *skb);
void
-ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);
+
+static inline void
+ice_xdp_meta_set_desc(struct xdp_buff *xdp,
+ union ice_32b_rx_flex_desc *eop_desc)
+{
+ struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
+ xdp_buff);
+
+ xdp_ext->eop_desc = eop_desc;
+}
#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 99954508184f..5d1ae8e4058a 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -458,6 +458,11 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
+ /* Put private info that changes on a per-packet basis
+ * into xdp_buff_xsk->cb.
+ */
+ ice_xdp_meta_set_desc(*xdp, rx_desc);
+
rx_desc++;
xdp++;
}
@@ -863,8 +868,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
struct xdp_buff *xdp;
struct sk_buff *skb;
u16 stat_err_bits;
- u16 vlan_tag = 0;
- u16 rx_ptype;
+ u16 vlan_tci;
rx_desc = ICE_RX_DESC(rx_ring, ntc);
@@ -942,13 +946,10 @@ construct_skb:
total_rx_bytes += skb->len;
total_rx_packets++;
- vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
-
- rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
- ICE_RX_FLEX_DESC_PTYPE_M;
+ vlan_tci = ice_get_vlan_tci(rx_desc);
- ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
- ice_receive_skb(rx_ring, skb, vlan_tag);
+ ice_process_skb_fields(rx_ring, rx_desc, skb);
+ ice_receive_skb(rx_ring, skb, vlan_tci);
}
rx_ring->next_to_clean = ntc;