author		Haiyang Zhang <haiyangz@microsoft.com>	2022-01-29 05:03:38 +0300
committer	David S. Miller <davem@davemloft.net>	2022-01-31 18:39:58 +0300
commit		a6bf5703f17bdbd775c0e6837dd2d5b1c344e28c
tree		efe4a10c13242abc0e09af9bb0b817ec063c2758
parent		d356abb95b9883198b1ba0db678659369701e17d
net: mana: Reuse XDP dropped page
Reuse the dropped page in RX path to save page allocation
overhead.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
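In effect, the patch adds a one-entry page cache per RX queue: the XDP
drop path parks the just-dropped page in rxq->xdp_save_page instead of
freeing it, the next buffer refill consumes it before calling the
allocator, and queue teardown frees any page still parked. Below is a
minimal standalone sketch of that pattern; struct one_page_cache and
its helpers are illustrative names, not part of the mana driver, and
as in the driver it assumes the drop and refill paths run serially in
the same receive-poll context, so no locking is needed.

  #include <linux/bug.h>	/* WARN_ON_ONCE() */
  #include <linux/mm.h>		/* virt_to_page(), alloc_page(), __free_page() */

  /* At most one parked page per RX queue. */
  struct one_page_cache {
  	struct page *saved;
  };

  /* Drop path: park the page instead of freeing it. */
  static void opc_park(struct one_page_cache *c, void *buf_va)
  {
  	WARN_ON_ONCE(c->saved);		/* slot should be empty here */
  	c->saved = virt_to_page(buf_va);
  }

  /* Refill path: prefer the parked page, else allocate a fresh one. */
  static struct page *opc_take(struct one_page_cache *c)
  {
  	struct page *page = c->saved;

  	if (page) {
  		c->saved = NULL;
  		return page;
  	}
  	return alloc_page(GFP_ATOMIC);
  }

  /* Teardown: release a page that was parked but never reused. */
  static void opc_drain(struct one_page_cache *c)
  {
  	if (c->saved)
  		__free_page(c->saved);
  }

A single slot suffices because each completion either drops one page or
consumes one replacement page before the next completion is processed,
so the cache never needs to hold more than one page at a time.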
Diffstat (limited to 'drivers/net/ethernet/microsoft')
-rw-r--r--	drivers/net/ethernet/microsoft/mana/mana.h	 1 +
-rw-r--r--	drivers/net/ethernet/microsoft/mana/mana_en.c	15 +++++++++++++--
2 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index 8ead960f898d..d36405af9432 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -310,6 +310,7 @@ struct mana_rxq {
 
 	struct bpf_prog __rcu *bpf_prog;
 	struct xdp_rxq_info xdp_rxq;
+	struct page *xdp_save_page;
 
 	/* MUST BE THE LAST MEMBER:
 	 * Each receive buffer has an associated mana_recv_buf_oob.
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 12067bf5b7d6..69e791e6abc4 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1059,7 +1059,9 @@ drop_xdp:
 	u64_stats_update_end(&rx_stats->syncp);
 
 drop:
-	free_page((unsigned long)buf_va);
+	WARN_ON_ONCE(rxq->xdp_save_page);
+	rxq->xdp_save_page = virt_to_page(buf_va);
+
 	++ndev->stats.rx_dropped;
 
 	return;
@@ -1116,7 +1118,13 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 	rxbuf_oob = &rxq->rx_oobs[curr];
 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
 
-	new_page = alloc_page(GFP_ATOMIC);
+	/* Reuse XDP dropped page if available */
+	if (rxq->xdp_save_page) {
+		new_page = rxq->xdp_save_page;
+		rxq->xdp_save_page = NULL;
+	} else {
+		new_page = alloc_page(GFP_ATOMIC);
+	}
 
 	if (new_page) {
 		da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
@@ -1403,6 +1411,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 
 	mana_deinit_cq(apc, &rxq->rx_cq);
 
+	if (rxq->xdp_save_page)
+		__free_page(rxq->xdp_save_page);
+
 	for (i = 0; i < rxq->num_rx_buf; i++) {
 		rx_oob = &rxq->rx_oobs[i];