author    Jakub Kicinski <kuba@kernel.org>	2023-08-23 00:51:42 +0300
committer David S. Miller <davem@davemloft.net>	2023-08-23 13:45:54 +0300
commit    e3b3a87967cef1fa157d93fd726960b1b812401d (patch)
tree      ac93b81fe6faa56646534c5570257a214021112d /drivers/net/ethernet/broadcom/bnxt/bnxt.c
parent    2e0c8ee2b56ffaf4ceac934d75f131a9c9becf72 (diff)
bnxt: use the NAPI skb allocation cache
All callers of build_skb() (*) in bnxt are in NAPI context.
The budget checking is somewhat convoluted because in the shared
completion queue cases Rx packets are discarded by netpoll
by forcing an error (E). But that happens before skb allocation.
Only a call chain starting at __bnxt_poll_work() can lead to
an skb allocation and it checks budget (b).

* bnxt_rx_multi_page_skb
* bnxt_rx_skb
  ` bp->rx_skb_func
* bnxt_tpa_end
  ` bnxt_rx_pkt
      E bnxt_force_rx_discard
      E bnxt_poll_nitroa0
      b __bnxt_poll_work

Use napi_build_skb() to take advantage of the skb cache.
In iperf tests with HW-GRO enabled it barely makes a difference
but in cases where HW-GRO is not as effective (or disabled)
it can give even a >10% boost (20.7Gbps -> 23.1Gbps).

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
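A minimal sketch of the allocation pattern the patch switches to (not code from the patch: example_ring and example_rx_to_skb are hypothetical; only napi_build_skb() and skb_reserve() are the real kernel API):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-ring context; only the fields the sketch needs. */
struct example_ring {
	struct napi_struct napi;	/* used by the poll sketch at the end */
	unsigned int rx_offset;		/* headroom in front of packet data */
};

static struct sk_buff *example_rx_to_skb(struct example_ring *ring,
					 void *data, unsigned int buf_size)
{
	struct sk_buff *skb;

	/* napi_build_skb() takes sk_buff heads from a per-CPU NAPI cache
	 * instead of going through the slab allocator each time; that is
	 * only safe when the caller runs in NAPI (softirq) context --
	 * the property the call-chain audit above establishes for bnxt.
	 */
	skb = napi_build_skb(data, buf_size);
	if (!skb)
		return NULL;	/* caller recycles the underlying buffer */

	skb_reserve(skb, ring->rx_offset);	/* skip past the headroom */
	return skb;
}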
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt.c	| 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5d6ea2782c2f..5cc0dbe12132 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -994,7 +994,7 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
 	dma_addr -= bp->rx_dma_offset;
 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
 				bp->rx_dir);
-	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
+	skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
 	if (!skb) {
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
@@ -1069,7 +1069,7 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
 		return NULL;
 	}
 
-	skb = build_skb(data, bp->rx_buf_size);
+	skb = napi_build_skb(data, bp->rx_buf_size);
 	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
 			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 	if (!skb) {
@@ -1677,7 +1677,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		tpa_info->data_ptr = new_data + bp->rx_offset;
 		tpa_info->mapping = new_mapping;
 
-		skb = build_skb(data, bp->rx_buf_size);
+		skb = napi_build_skb(data, bp->rx_buf_size);
 		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
 				       bp->rx_buf_use_size, bp->rx_dir,
 				       DMA_ATTR_WEAK_ORDERING);
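To close the sketch from above, here is where the budget check sits relative to the allocation. Names other than the NAPI core API (napi_gro_receive(), napi_complete_done(), container_of()) are hypothetical:

/* Hardware-specific rx dequeue, elided; this is where example_rx_to_skb()
 * above -- and with it napi_build_skb() -- would be reached.
 */
static struct sk_buff *example_fetch_rx_skb(struct example_ring *ring)
{
	return NULL;	/* placeholder: no real hardware behind the sketch */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_ring *ring =
		container_of(napi, struct example_ring, napi);
	int work_done = 0;

	/* netpoll invokes ->poll with budget == 0; in this simple sketch
	 * the loop is then skipped outright, so nothing below it can
	 * allocate an skb from netpoll context. bnxt's shared completion
	 * queues need the more involved scheme the commit message
	 * describes: Rx completions are discarded by forcing an error
	 * before any skb allocation is reached.
	 */
	while (work_done < budget) {
		struct sk_buff *skb = example_fetch_rx_skb(ring);

		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}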