author    Subbaraya Sundeep <sbhatta@marvell.com>  2023-05-13 11:51:38 +0300
committer David S. Miller <davem@davemloft.net>  2023-05-15 11:31:07 +0300
commit    ab6dddd2a669a0ecc2ce07485c7a15fadbb5a0aa (patch)
tree      e311ed4fc54e616e385e927866c4e1a7b77c00b8 /drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
parent    508c58f76ca510956625c945f9b8eb104f2c8208 (diff)
download  linux-ab6dddd2a669a0ecc2ce07485c7a15fadbb5a0aa.tar.xz
octeontx2-pf: qos send queues management
In the current implementation, the number of send queues (SQs) is decided at device probe and is equal to the number of online CPUs. These SQs are allocated and deallocated in the interface open and close calls respectively. This patch defines new APIs for initializing and deinitializing send queues dynamically, and allocates additional transmit queues for the QoS feature.

Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
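The index arithmetic in the hunks below maps between three index spaces: completion queue (CQ) indices, slots in the driver's send queue (SQ) array, and netdev TX queue indices. The following standalone C sketch restates that mapping; struct hw_cfg and the helper names are hypothetical stand-ins for the otx2_hw fields used by the driver, so treat it as an illustration of the arithmetic inferred from the diff, not code from the patch.

	/*
	 * Illustrative sketch, not code from the patch. Assumed layout,
	 * inferred from the hunks below:
	 *
	 *   CQ index space:   [rx_queues RX CQs][TX/XDP/QoS CQs ...]
	 *   SQ array layout:  [tx_queues TX SQs][xdp_queues XDP SQs][QoS SQs ...]
	 *   netdev TX queues: [tx_queues TX][QoS ...]   (no XDP queues)
	 */
	struct hw_cfg {			/* hypothetical stand-in for otx2_hw */
		int rx_queues;
		int tx_queues;
		int xdp_queues;
	};

	/* TX/XDP/QoS CQs follow the RX CQs, so the SQ slot is cq_idx - rx_queues. */
	static int cq_to_sq_idx(const struct hw_cfg *hw, int cq_idx)
	{
		return cq_idx - hw->rx_queues;
	}

	/*
	 * QoS SQs sit after the XDP SQs, but the netdev has no XDP queues,
	 * so QoS completions skip back over the XDP range (second hunk).
	 */
	static int sq_to_txq_idx(const struct hw_cfg *hw, int sq_idx)
	{
		return sq_idx >= hw->tx_queues ? sq_idx - hw->xdp_queues : sq_idx;
	}

	/*
	 * Inverse direction on transmit: a QoS netdev queue drives the
	 * hardware SQ past the XDP range, as otx2_sqe_add_hdr() now does
	 * (third hunk).
	 */
	static int txq_to_hw_sq(const struct hw_cfg *hw, int txq_idx)
	{
		return txq_idx >= hw->tx_queues ? txq_idx + hw->xdp_queues : txq_idx;
	}

Under these assumptions, the n-th QoS queue has netdev index tx_queues + n but hardware SQ index tx_queues + xdp_queues + n, which is consistent with both adjustments made in the diff.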
Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c  24
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 7045fedfd73a..e288f46b23a8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -464,12 +464,13 @@ process_cqe:
break;
}
- if (cq->cq_type == CQ_XDP) {
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+
+ if (cq->cq_type == CQ_XDP)
otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
- } else {
- otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget,
- &tx_pkts, &tx_bytes);
- }
+ else
+ otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
+ cqe, budget, &tx_pkts, &tx_bytes);
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
@@ -486,7 +487,11 @@ process_cqe:
if (likely(tx_pkts)) {
struct netdev_queue *txq;
- txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+
+ if (qidx >= pfvf->hw.tx_queues)
+ qidx -= pfvf->hw.xdp_queues;
+ txq = netdev_get_tx_queue(pfvf->netdev, qidx);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
/* Check if queue was stopped earlier due to ring full */
smp_mb();
@@ -736,7 +741,8 @@ static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
sqe_hdr->aura = sq->aura_id;
/* Post a CQE Tx after pkt transmission */
sqe_hdr->pnc = 1;
- sqe_hdr->sq = qidx;
+ sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ?
+ qidx + pfvf->hw.xdp_queues : qidx;
}
sqe_hdr->total = skb->len;
/* Set SQE identifier which will be used later for freeing SKB */
@@ -1221,8 +1227,10 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
struct nix_cqe_tx_s *cqe;
int processed_cqe = 0;
struct sg_list *sg;
+ int qidx;
- sq = &pfvf->qset.sq[cq->cint_idx];
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
return;