path: root/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
author     Ratheesh Kannoth <rkannoth@marvell.com>  2023-05-22 05:04:04 +0300
committer  Paolo Abeni <pabeni@redhat.com>          2023-05-23 11:47:50 +0300
commit     b2e3406a38f0f48b1dfb81e5bb73d243ff6af179 (patch)
tree       622a4581fc410076753dc96333710c8c59447ab8 /drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
parent     62a41dc7166385e3ebf1d5795103bc8e3794838b (diff)
download   linux-b2e3406a38f0f48b1dfb81e5bb73d243ff6af179.tar.xz
octeontx2-pf: Add support for page pool
Add a page pool for each RX queue. The page pool improves RX performance by reclaiming buffers back to the queue-specific pool, so DMA mapping is done only when a buffer is first allocated. Since subsequent buffer allocations skip the DMA mapping step, throughput improves:

Image            | Performance
---------------- | -----------
Vanilla          |  3 Mpps
With this change | 42 Mpps

Signed-off-by: Ratheesh Kannoth <rkannoth@marvell.com>
Link: https://lore.kernel.org/r/20230522020404.152020-1-rkannoth@marvell.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
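For reference, a minimal sketch of how a per-RX-queue pool could be set up with the kernel's generic page_pool API (as of v6.4). The diff below only shows the teardown side in otx2_pf.c; the function name example_pool_init and the numptrs parameter are illustrative assumptions, not code from this commit.

	#include <linux/err.h>
	#include <net/page_pool.h>

	/* Sketch: create one page pool per RX queue. With PP_FLAG_DMA_MAP the
	 * pool DMA-maps a page only when it first enters the pool; recycled
	 * pages keep their mapping, which is where the speed-up comes from.
	 * 'example_pool_init' and 'numptrs' are illustrative names only.
	 */
	static int example_pool_init(struct otx2_nic *pfvf, struct otx2_pool *pool,
				     int numptrs)
	{
		struct page_pool_params pp_params = { 0 };

		pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
		pp_params.pool_size = numptrs;
		pp_params.nid = NUMA_NO_NODE;
		pp_params.dev = pfvf->dev;
		pp_params.dma_dir = DMA_FROM_DEVICE;

		pool->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(pool->page_pool))
			return PTR_ERR(pool->page_pool);

		return 0;
	}

A pool created this way must later be released with page_pool_destroy(), which is exactly what the hunk in otx2_free_hw_resources() below adds.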
Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c')
-rw-r--r--	drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c	11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e1883c3edda3..db3fcab1c8cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1555,7 +1555,9 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
struct msg_req *req;
+ int pool_id;
int qidx;
/* Ensure all SQE are processed */
@@ -1584,7 +1586,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
cq = &qset->cq[qidx];
if (cq->cq_type == CQ_RX)
- otx2_cleanup_rx_cqes(pf, cq);
+ otx2_cleanup_rx_cqes(pf, cq, qidx);
else
otx2_cleanup_tx_cqes(pf, cq);
}
@@ -1594,6 +1596,13 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
/* Free RQ buffer pointers*/
otx2_free_aura_ptr(pf, AURA_NIX_RQ);
+ for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
+ pool = &pf->qset.pool[pool_id];
+ page_pool_destroy(pool->page_pool);
+ pool->page_pool = NULL;
+ }
+
otx2_free_cq_res(pf);
/* Free all ingress bandwidth profiles allocated */
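The hunk above only destroys the per-queue pools during teardown. For completeness, a hedged sketch of how buffers are typically taken from and returned to a pool created with PP_FLAG_DMA_MAP, using the generic page_pool helpers; the example_* names and exact call sites are assumptions, not part of this diff.

	#include <linux/errno.h>
	#include <net/page_pool.h>

	/* Sketch: allocate one RX buffer fragment from the queue's page pool.
	 * The page returned by page_pool_alloc_frag() is already DMA-mapped
	 * (the pool mapped it on first use), so no dma_map_page() is needed.
	 * 'example_pool_alloc' is an illustrative name; 'rbsize' mirrors the
	 * driver's receive-buffer size field.
	 */
	static int example_pool_alloc(struct otx2_pool *pool, dma_addr_t *dma,
				      gfp_t gfp)
	{
		unsigned int offset;
		struct page *page;

		page = page_pool_alloc_frag(pool->page_pool, &offset,
					    pool->rbsize, gfp);
		if (!page)
			return -ENOMEM;

		*dma = page_pool_get_dma_addr(page) + offset;
		return 0;
	}

	/* Sketch: on cleanup (e.g. from otx2_cleanup_rx_cqes(), which now
	 * receives qidx to locate the right pool), pending buffers are handed
	 * back to the pool instead of being unmapped and freed one by one.
	 */
	static void example_pool_put(struct otx2_pool *pool, struct page *page)
	{
		page_pool_put_full_page(pool->page_pool, page, true);
	}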