author     Somnath Kotur <somnath.kotur@broadcom.com>  2024-04-02 12:37:49 +0300
committer  Jakub Kicinski <kuba@kernel.org>            2024-04-04 19:13:19 +0300
commit     fba2e4e5dbab399eb8801801471ac69f9baeba98 (patch)
tree       4adf6f69876da975d7f2d1a5310147d446e7f6ca /drivers/net/ethernet/broadcom
parent     8635ae8e99a670b38198b7561c6c57b13418f108 (diff)
download   linux-fba2e4e5dbab399eb8801801471ac69f9baeba98.tar.xz
bnxt_en: Allocate page pool per numa node
The driver's page pool allocation code looks at the node local to the PCIe device to determine where to allocate memory. In scenarios where the core count per NUMA node is low (less than the default ring count), it makes sense to exhaust page pool allocations on node 0 first and then move on to allocating page pools for the remaining rings from node 1.

With this patch and the following configuration on the NIC

$ ethtool -L ens1f0np0 combined 16

(core count/node = 12, so the first 12 rings land on node#0 and the last 4 rings on node#1), with traffic redirected to a ring on node#1, we see a performance improvement of ~20%.

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
Reviewed-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Link: https://lore.kernel.org/r/20240402093753.331120-4-pavan.chebbi@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
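For illustration, a minimal sketch of the node-selection pattern this patch introduces, assuming kernel context; pick_rx_ring_node() is a hypothetical helper, not the driver's code. cpumask_local_spread() returns the i-th CPU, preferring CPUs on the device's local node before spreading to remote nodes, and cpu_to_node() maps that CPU to the NUMA node used for the ring's page pool.

/*
 * Illustrative sketch only, not driver code: derive a per-ring NUMA node
 * from the node local to the PCIe device.
 */
#include <linux/cpumask.h>
#include <linux/pci.h>
#include <linux/topology.h>

static int pick_rx_ring_node(struct pci_dev *pdev, int ring_idx)
{
	/* Node the PCIe device is attached to (may be NUMA_NO_NODE). */
	int dev_node = dev_to_node(&pdev->dev);
	/* i-th CPU, preferring CPUs on dev_node, then other nodes. */
	unsigned int cpu = cpumask_local_spread(ring_idx, dev_node);

	/* Allocate this ring's page pool on the node owning that CPU. */
	return cpu_to_node(cpu);
}

With the configuration from the commit message (16 rings, 12 CPUs per node), rings 0-11 would resolve to node 0 and rings 12-15 to node 1.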
Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 44b9332c147e..9fca1dee6486 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3559,14 +3559,15 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
 }
 
 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
-				   struct bnxt_rx_ring_info *rxr)
+				   struct bnxt_rx_ring_info *rxr,
+				   int numa_node)
 {
 	struct page_pool_params pp = { 0 };
 
 	pp.pool_size = bp->rx_agg_ring_size;
 	if (BNXT_RX_PAGE_MODE(bp))
 		pp.pool_size += bp->rx_ring_size;
-	pp.nid = dev_to_node(&bp->pdev->dev);
+	pp.nid = numa_node;
 	pp.napi = &rxr->bnapi->napi;
 	pp.netdev = bp->dev;
 	pp.dev = &bp->pdev->dev;
@@ -3586,7 +3587,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 
 static int bnxt_alloc_rx_rings(struct bnxt *bp)
 {
-	int i, rc = 0, agg_rings = 0;
+	int numa_node = dev_to_node(&bp->pdev->dev);
+	int i, rc = 0, agg_rings = 0, cpu;
 
 	if (!bp->rx_ring)
 		return -ENOMEM;
@@ -3597,10 +3599,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 		struct bnxt_ring_struct *ring;
+		int cpu_node;
 
 		ring = &rxr->rx_ring_struct;
-		rc = bnxt_alloc_rx_page_pool(bp, rxr);
+		cpu = cpumask_local_spread(i, numa_node);
+		cpu_node = cpu_to_node(cpu);
+		netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+			   i, cpu_node);
+		rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
 		if (rc)
 			return rc;
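As context for the pp.nid assignment above, a minimal sketch of creating a page pool pinned to a chosen NUMA node. make_pool_on_node() and its parameters are illustrative, not driver code; the driver sets additional page_pool_params fields not visible in the truncated hunk.

/*
 * Illustrative sketch only: a page pool whose pages are allocated on a
 * caller-chosen NUMA node via page_pool_params.nid.
 */
#include <linux/dma-direction.h>
#include <net/page_pool/types.h>

static struct page_pool *make_pool_on_node(struct device *dev, int nid,
					   unsigned int pool_size)
{
	struct page_pool_params pp = { 0 };

	pp.pool_size = pool_size;	/* pages kept in the pool's cache */
	pp.nid = nid;			/* preferred node for page allocation */
	pp.dev = dev;			/* device the pages are DMA-mapped for */
	pp.flags = PP_FLAG_DMA_MAP;	/* let the pool handle DMA mapping */
	pp.dma_dir = DMA_FROM_DEVICE;	/* rx direction */

	return page_pool_create(&pp);	/* returns ERR_PTR() on failure */
}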