author      Jakub Kicinski <kuba@kernel.org>  2022-03-22 20:36:56 +0300
committer   Jakub Kicinski <kuba@kernel.org>  2022-03-22 21:18:49 +0300
commit      0db8640df59512dbd423c32077919f10cf35ebc6 (patch)
tree        a8e0d6806763ce5c3c84a5a2c29a1cbdb58f5129 /drivers
parent      4a0cb83ba6e0cd73a50fa4f84736846bf0029f2b (diff)
parent      7f0059b58f0257d895fafd2f2e3afe3bbdf21e64 (diff)
download    linux-0db8640df59512dbd423c32077919f10cf35ebc6.tar.xz
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2022-03-21 v2

We've added 137 non-merge commits during the last 17 day(s) which contain
a total of 143 files changed, 7123 insertions(+), 1092 deletions(-).

The main changes are:

1) Custom SEC() handling in libbpf, from Andrii.

2) subskeleton support, from Delyan.

3) Use btf_tag to recognize __percpu pointers in the verifier, from Hao.

4) Fix net.core.bpf_jit_harden race, from Hou.

5) Fix bpf_sk_lookup remote_port on big-endian, from Jakub.

6) Introduce fprobe (multi kprobe) _without_ arch bits, from Masami.
   The arch specific bits will come later.

7) Introduce multi_kprobe bpf programs on top of fprobe, from Jiri.

8) Enable non-atomic allocations in local storage, from Joanne.

9) Various var_off ptr_to_btf_id fixes, from Kumar.

10) bpf_ima_file_hash helper, from Roberto.

11) Add "live packet" mode for XDP in BPF_PROG_RUN, from Toke.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (137 commits)
  selftests/bpf: Fix kprobe_multi test.
  Revert "rethook: x86: Add rethook x86 implementation"
  Revert "arm64: rethook: Add arm64 rethook implementation"
  Revert "powerpc: Add rethook support"
  Revert "ARM: rethook: Add rethook arm implementation"
  bpftool: Fix a bug in subskeleton code generation
  bpf: Fix bpf_prog_pack when PMU_SIZE is not defined
  bpf: Fix bpf_prog_pack for multi-node setup
  bpf: Fix warning for cast from restricted gfp_t in verifier
  bpf, arm: Fix various typos in comments
  libbpf: Close fd in bpf_object__reuse_map
  bpftool: Fix print error when show bpf map
  bpf: Fix kprobe_multi return probe backtrace
  Revert "bpf: Add support to inline bpf_get_func_ip helper on x86"
  bpf: Simplify check in btf_parse_hdr()
  selftests/bpf/test_lirc_mode2.sh: Exit with proper code
  bpf: Check for NULL return from bpf_get_btf_vmlinux
  selftests/bpf: Test skipping stacktrace
  bpf: Adjust BPF stack helper functions to accommodate skip > 0
  bpf: Select proper size for bpf_prog_pack
  ...
====================

Link: https://lore.kernel.org/r/20220322050159.5507-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
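[Editor's note] Item 11 above, the "live packet" mode for XDP in BPF_PROG_RUN, is driven from user space through bpf_prog_test_run_opts(). A minimal sketch of invoking it follows; it assumes a libbpf recent enough to expose the .flags opts field, a prog_fd obtained elsewhere, and placeholder packet bytes:

    #include <string.h>
    #include <linux/bpf.h>        /* BPF_F_TEST_XDP_LIVE_FRAMES */
    #include <bpf/bpf.h>          /* bpf_prog_test_run_opts() */

    /* Run an already-loaded XDP program (prog_fd) on a synthetic frame.
     * In live-frames mode the kernel acts on the XDP verdict for real
     * (XDP_TX/XDP_REDIRECT transmit actual packets), so data_out cannot
     * be used to read the modified packet back.
     */
    static int run_xdp_live(int prog_fd)
    {
            unsigned char pkt[64];

            memset(pkt, 0, sizeof(pkt));    /* placeholder Ethernet frame */

            LIBBPF_OPTS(bpf_test_run_opts, opts,
                    .data_in = pkt,
                    .data_size_in = sizeof(pkt),
                    .repeat = 1,
                    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
            );

            return bpf_prog_test_run_opts(prog_fd, &opts);
    }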
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/veth.c  192
1 file changed, 131 insertions(+), 61 deletions(-)
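[Editor's note] The veth diff below is the multi-buffer XDP rework from this pull: veth_xdp_rcv_skb() is split so the skb is first converted into a (possibly fragmented) xdp_buff, and length checks switch from frame->len to xdp_get_frame_len(), which also counts bytes held in page frags. A sketch of that helper's semantics, using accessor names from this series; this is an illustration in kernel style, not necessarily the exact inline from include/net/xdp.h:

    /* Total frame length = linear bytes + bytes stored in page frags.
     * xdp_frags_size lives in the skb_shared_info stashed in the
     * buffer's tailroom. Assumes kernel context (<net/xdp.h>).
     */
    static unsigned int frame_total_len(struct xdp_frame *frame)
    {
            unsigned int len = frame->len;          /* linear part */

            if (xdp_frame_has_frags(frame))         /* frags flag set */
                    len += xdp_get_shared_info_from_frame(frame)->xdp_frags_size;

            return len;
    }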
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 58b20ea171dd..1b5714926d81 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -433,21 +433,6 @@ static void veth_set_multicast_list(struct net_device *dev)
{
}
-static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
- int buflen)
-{
- struct sk_buff *skb;
-
- skb = build_skb(head, buflen);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, headroom);
- skb_put(skb, len);
-
- return skb;
-}
-
static int veth_select_rxq(struct net_device *dev)
{
return smp_processor_id() % dev->real_num_rx_queues;
@@ -494,7 +479,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame *frame = frames[i];
void *ptr = veth_xdp_to_ptr(frame);
- if (unlikely(frame->len > max_len ||
+ if (unlikely(xdp_get_frame_len(frame) > max_len ||
__ptr_ring_produce(&rq->xdp_ring, ptr)))
break;
nxmit++;
@@ -695,72 +680,143 @@ static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
}
}
-static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
- struct sk_buff *skb,
- struct veth_xdp_tx_bq *bq,
- struct veth_stats *stats)
+static void veth_xdp_get(struct xdp_buff *xdp)
{
- u32 pktlen, headroom, act, metalen, frame_sz;
- void *orig_data, *orig_data_end;
- struct bpf_prog *xdp_prog;
- int mac_len, delta, off;
- struct xdp_buff xdp;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ int i;
- skb_prepare_for_gro(skb);
+ get_page(virt_to_page(xdp->data));
+ if (likely(!xdp_buff_has_frags(xdp)))
+ return;
- rcu_read_lock();
- xdp_prog = rcu_dereference(rq->xdp_prog);
- if (unlikely(!xdp_prog)) {
- rcu_read_unlock();
- goto out;
- }
+ for (i = 0; i < sinfo->nr_frags; i++)
+ __skb_frag_ref(&sinfo->frags[i]);
+}
- mac_len = skb->data - skb_mac_header(skb);
- pktlen = skb->len + mac_len;
- headroom = skb_headroom(skb) - mac_len;
+static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
+ struct xdp_buff *xdp,
+ struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ u32 frame_sz;
if (skb_shared(skb) || skb_head_is_locked(skb) ||
- skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
+ skb_shinfo(skb)->nr_frags) {
+ u32 size, len, max_head_size, off;
struct sk_buff *nskb;
- int size, head_off;
- void *head, *start;
struct page *page;
+ int i, head_off;
- size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (size > PAGE_SIZE)
+ /* We need a private copy of the skb and data buffers since
+ * the ebpf program can modify it. We segment the original skb
+ * into order-0 pages without linearizing it.
+ *
+ * Make sure we have enough space for linear and paged area
+ */
+ max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
+ VETH_XDP_HEADROOM);
+ if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
goto drop;
+ /* Allocate skb head */
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page)
goto drop;
- head = page_address(page);
- start = head + VETH_XDP_HEADROOM;
- if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
- page_frag_free(head);
+ nskb = build_skb(page_address(page), PAGE_SIZE);
+ if (!nskb) {
+ put_page(page);
goto drop;
}
- nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
- skb->len, PAGE_SIZE);
- if (!nskb) {
- page_frag_free(head);
+ skb_reserve(nskb, VETH_XDP_HEADROOM);
+ size = min_t(u32, skb->len, max_head_size);
+ if (skb_copy_bits(skb, 0, nskb->data, size)) {
+ consume_skb(nskb);
goto drop;
}
+ skb_put(nskb, size);
skb_copy_header(nskb, skb);
head_off = skb_headroom(nskb) - skb_headroom(skb);
skb_headers_offset_update(nskb, head_off);
+
+ /* Allocate paged area of new skb */
+ off = size;
+ len = skb->len - off;
+
+ for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page) {
+ consume_skb(nskb);
+ goto drop;
+ }
+
+ size = min_t(u32, len, PAGE_SIZE);
+ skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE);
+ if (skb_copy_bits(skb, off, page_address(page),
+ size)) {
+ consume_skb(nskb);
+ goto drop;
+ }
+
+ len -= size;
+ off += size;
+ }
+
consume_skb(skb);
skb = nskb;
+ } else if (skb_headroom(skb) < XDP_PACKET_HEADROOM &&
+ pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) {
+ goto drop;
}
/* SKB "head" area always has tailroom for skb_shared_info */
frame_sz = skb_end_pointer(skb) - skb->head;
frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
- xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);
+ xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(xdp, skb->head, skb_headroom(skb),
+ skb_headlen(skb), true);
+
+ if (skb_is_nonlinear(skb)) {
+ skb_shinfo(skb)->xdp_frags_size = skb->data_len;
+ xdp_buff_set_frags_flag(xdp);
+ } else {
+ xdp_buff_clear_frags_flag(xdp);
+ }
+ *pskb = skb;
+
+ return 0;
+drop:
+ consume_skb(skb);
+ *pskb = NULL;
+
+ return -ENOMEM;
+}
+
+static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
+ struct sk_buff *skb,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
+{
+ void *orig_data, *orig_data_end;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ u32 act, metalen;
+ int off;
+
+ skb_prepare_for_gro(skb);
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (unlikely(!xdp_prog)) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ __skb_push(skb, skb->data - skb_mac_header(skb));
+ if (veth_convert_skb_to_xdp_buff(rq, &xdp, &skb))
+ goto drop;
orig_data = xdp.data;
orig_data_end = xdp.data_end;
@@ -771,7 +827,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
case XDP_PASS:
break;
case XDP_TX:
- get_page(virt_to_page(xdp.data));
+ veth_xdp_get(&xdp);
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
@@ -783,7 +839,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
- get_page(virt_to_page(xdp.data));
+ veth_xdp_get(&xdp);
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
@@ -806,18 +862,27 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
/* check if bpf_xdp_adjust_head was used */
- delta = orig_data - xdp.data;
- off = mac_len + delta;
+ off = orig_data - xdp.data;
if (off > 0)
__skb_push(skb, off);
else if (off < 0)
__skb_pull(skb, -off);
- skb->mac_header -= delta;
+
+ skb_reset_mac_header(skb);
/* check if bpf_xdp_adjust_tail was used */
off = xdp.data_end - orig_data_end;
if (off != 0)
__skb_put(skb, off); /* positive on grow, negative on shrink */
+
+ /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
+ * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
+ */
+ if (xdp_buff_has_frags(&xdp))
+ skb->data_len = skb_shinfo(skb)->xdp_frags_size;
+ else
+ skb->data_len = 0;
+
skb->protocol = eth_type_trans(skb, rq->dev);
metalen = xdp.data - xdp.data_meta;
@@ -833,7 +898,7 @@ xdp_drop:
return NULL;
err_xdp:
rcu_read_unlock();
- page_frag_free(xdp.data);
+ xdp_return_buff(&xdp);
xdp_xmit:
return NULL;
}
@@ -855,7 +920,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
/* ndo_xdp_xmit */
struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
- stats->xdp_bytes += frame->len;
+ stats->xdp_bytes += xdp_get_frame_len(frame);
frame = veth_xdp_rcv_one(rq, frame, bq, stats);
if (frame) {
/* XDP_PASS */
@@ -1463,9 +1528,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
goto err;
}
- max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
- peer->hard_header_len -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
+ peer->hard_header_len;
+ /* Allow increasing the max_mtu if the program supports
+ * XDP fragments.
+ */
+ if (prog->aux->xdp_has_frags)
+ max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
+
if (peer->mtu > max_mtu) {
NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
err = -ERANGE;
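[Editor's note] The max_mtu relaxation in the last hunk keys off prog->aux->xdp_has_frags, which is set when the program is loaded with BPF_F_XDP_HAS_FRAGS. With the custom SEC() handling also in this pull, libbpf sets that flag for programs placed in the "xdp.frags" section, so a frags-aware program for this veth path can be as small as the sketch below (the 9000-byte cutoff is an arbitrary example value):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* "xdp.frags" tells libbpf to load with BPF_F_XDP_HAS_FRAGS,
     * declaring that the program copes with non-linear frames.
     */
    SEC("xdp.frags")
    int xdp_pass_mb(struct xdp_md *ctx)
    {
            /* Full length including frag bytes, not just the linear area */
            __u64 len = bpf_xdp_get_buff_len(ctx);

            return len > 9000 ? XDP_DROP : XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";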