Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 299
1 file changed, 279 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4ad707e758b9..0d745ae1cc9a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -191,6 +191,18 @@ struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
return NULL;
}
+int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
+{
+ if (hw->cap.nix_multiple_dwrr_mtu)
+ return NIX_AF_DWRR_MTUX(smq_link_type);
+
+ if (smq_link_type == SMQ_LINK_TYPE_SDP)
+ return NIX_AF_DWRR_SDP_MTU;
+
+ /* Here it's the same reg for RPM and LBK */
+ return NIX_AF_DWRR_RPM_MTU;
+}
+
u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
dwrr_mtu &= 0x1FULL;
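For reference, a minimal usage sketch of the new helper (illustrative only, not part of the applied diff; it mirrors the default-MTU programming done later in nix_setup_txschq()):

    /* Program an 8192-byte DWRR MTU for SDP links. On silicon with
     * per-link-type MTUs this resolves to NIX_AF_DWRR_MTUX(SDP),
     * otherwise to the legacy NIX_AF_DWRR_SDP_MTU register.
     */
    rvu_write64(rvu, blkaddr,
                nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
                convert_bytes_to_dwrr_mtu(8192));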
@@ -1691,6 +1703,42 @@ exit:
return true;
}
+static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ u64 tlx_parent = 0, tlx_schedule = 0;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL2:
+ tlx_parent = NIX_AF_TL2X_PARENT(schq);
+ tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ tlx_parent = NIX_AF_TL3X_PARENT(schq);
+ tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ tlx_parent = NIX_AF_TL4X_PARENT(schq);
+ tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ /* no need to reset SMQ_CFG as HW clears this CSR
+ * on SMQ flush
+ */
+ tlx_parent = NIX_AF_MDQX_PARENT(schq);
+ tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
+ break;
+ default:
+ return;
+ }
+
+ if (tlx_parent)
+ rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
+
+ if (tlx_schedule)
+ rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
+}
+
/* Disable shaping of pkts by a scheduler queue
* at a given scheduler level.
*/
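The helper above is wired into both the allocation and teardown paths later in this diff; a condensed sketch of the resulting per-queue reset sequence (call sites as shown in the later hunks):

    /* Per-queue state reset as done on txsch alloc and free */
    nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
    nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
    nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); /* new in this diff */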
@@ -1878,7 +1926,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
free_cnt = rvu_rsrc_free_count(&txsch->schq);
}
- if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
+ req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
return NIX_AF_ERR_TLX_ALLOC_FAIL;
/* If contiguous queues are needed, check for availability */
@@ -2039,6 +2088,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
}
for (idx = 0; idx < req->schq[lvl]; idx++) {
@@ -2048,6 +2098,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
}
}
@@ -2064,9 +2115,121 @@ exit:
return rc;
}
+static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
+ struct nix_smq_flush_ctx *smq_flush_ctx)
+{
+ struct nix_smq_tree_ctx *smq_tree_ctx;
+ u64 parent_off, regval;
+ u16 schq;
+ int lvl;
+
+ smq_flush_ctx->smq = smq;
+
+ schq = smq;
+ for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
+ smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+ smq_flush_ctx->tl1_schq = schq;
+ smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
+ smq_tree_ctx->pir_off = 0;
+ smq_tree_ctx->pir_val = 0;
+ parent_off = 0;
+ } else if (lvl == NIX_TXSCH_LVL_TL2) {
+ smq_flush_ctx->tl2_schq = schq;
+ smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
+ smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
+ parent_off = NIX_AF_TL2X_PARENT(schq);
+ } else if (lvl == NIX_TXSCH_LVL_TL3) {
+ smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
+ smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
+ parent_off = NIX_AF_TL3X_PARENT(schq);
+ } else if (lvl == NIX_TXSCH_LVL_TL4) {
+ smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
+ smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
+ parent_off = NIX_AF_TL4X_PARENT(schq);
+ } else if (lvl == NIX_TXSCH_LVL_MDQ) {
+ smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
+ smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
+ parent_off = NIX_AF_MDQX_PARENT(schq);
+ }
+ /* save cir/pir register values */
+ smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
+ if (smq_tree_ctx->pir_off)
+ smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
+
+ /* get parent txsch node */
+ if (parent_off) {
+ regval = rvu_read64(rvu, blkaddr, parent_off);
+ schq = (regval >> 16) & 0x1FF;
+ }
+ }
+}
+
+static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
+ struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
+{
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u64 regoff;
+ int tl2;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ /* loop through all TL2s with matching PF_FUNC */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
+ /* skip the smq(flush) TL2 */
+ if (tl2 == smq_flush_ctx->tl2_schq)
+ continue;
+ /* skip unused TL2s */
+ if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
+ continue;
+ /* skip if PF_FUNC doesn't match */
+ if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
+ (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq]) &
+ ~RVU_PFVF_FUNC_MASK))
+ continue;
+ /* enable/disable XOFF */
+ regoff = NIX_AF_TL2X_SW_XOFF(tl2);
+ if (enable)
+ rvu_write64(rvu, blkaddr, regoff, 0x1);
+ else
+ rvu_write64(rvu, blkaddr, regoff, 0x0);
+ }
+}
+
+static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
+ struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
+{
+ u64 cir_off, pir_off, cir_val, pir_val;
+ struct nix_smq_tree_ctx *smq_tree_ctx;
+ int lvl;
+
+ for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
+ smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+ cir_off = smq_tree_ctx->cir_off;
+ cir_val = smq_tree_ctx->cir_val;
+ pir_off = smq_tree_ctx->pir_off;
+ pir_val = smq_tree_ctx->pir_val;
+
+ if (enable) {
+ rvu_write64(rvu, blkaddr, cir_off, cir_val);
+ if (lvl != NIX_TXSCH_LVL_TL1)
+ rvu_write64(rvu, blkaddr, pir_off, pir_val);
+ } else {
+ rvu_write64(rvu, blkaddr, cir_off, 0x0);
+ if (lvl != NIX_TXSCH_LVL_TL1)
+ rvu_write64(rvu, blkaddr, pir_off, 0x0);
+ }
+ }
+}
+
static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int smq, u16 pcifunc, int nixlf)
{
+ struct nix_smq_flush_ctx *smq_flush_ctx;
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
int err, restore_tx_en = 0;
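Taken together, the three helpers above give nix_smq_flush() the following discipline (condensed sketch of the sequence assembled in the hunks below; error handling omitted):

    nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);      /* walk SMQ->TL1, save CIR/PIR */
    nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);  /* XOFF sibling TL2s */
    nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); /* zero shaper rates */
    /* ... set the flush bit and poll NIX_AF_SMQX_CFG bit 49 ... */
    nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);  /* restore CIR/PIR */
    nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); /* clear XOFF */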
@@ -2086,6 +2249,14 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
lmac_id, true);
}
+ /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
+ smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
+ if (!smq_flush_ctx)
+ return -ENOMEM;
+ nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
+ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
+ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
+
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
/* Do SMQ flush and set enqueue xoff */
cfg |= BIT_ULL(50) | BIT_ULL(49);
@@ -2100,8 +2271,14 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
if (err)
- dev_err(rvu->dev,
- "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
+ dev_info(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
+ nixlf, smq);
+
+ /* clear XOFF on TL2s */
+ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
+ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
+ kfree(smq_flush_ctx);
rvu_cgx_enadis_rx_bp(rvu, pf, true);
/* restore cgx tx state */
@@ -2143,6 +2320,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
continue;
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
}
}
nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
@@ -2181,6 +2359,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
rvu_free_rsrc(&txsch->schq, schq);
txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
@@ -2240,6 +2419,9 @@ static int nix_txschq_free_one(struct rvu *rvu,
*/
nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+
/* Flush if it is a SMQ. Onus of disabling
* TL2/3 queue links before SMQ flush is on user
*/
@@ -2249,6 +2431,8 @@ static int nix_txschq_free_one(struct rvu *rvu,
goto err;
}
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+
/* Free the resource */
rvu_free_rsrc(&txsch->schq, schq);
txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
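The single-queue free path now follows a strict teardown order; a condensed sketch assembled from this hunk:

    nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);           /* 1. lift SW_XOFF */
    nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);        /* 2. detach from links */
    nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); /* 3. stop shaping */
    /* 4. nix_smq_flush() if the queue is an SMQ */
    nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);       /* 5. clear PARENT/SCHEDULE */
    rvu_free_rsrc(&txsch->schq, schq);                    /* 6. return to the pool */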
@@ -2403,17 +2587,19 @@ static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
-static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
- u16 pcifunc, struct nix_txsch *txsch)
+void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
+ struct nix_txsch *txsch, bool enable)
{
struct rvu_hwinfo *hw = rvu->hw;
int lbk_link_start, lbk_links;
u8 pf = rvu_get_pf(pcifunc);
int schq;
+ u64 cfg;
if (!is_pf_cgxmapped(rvu, pf))
return;
+ cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
lbk_link_start = hw->cgx_links;
for (schq = 0; schq < txsch->schq.max; schq++) {
@@ -2427,8 +2613,7 @@ static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
rvu_write64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq,
lbk_link_start +
- lbk_links),
- BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
+ lbk_links), cfg);
}
}
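With the added enable flag (and static dropped), callers outside this file can now both set up and tear down the TL2-to-LBK link config; an assumed usage sketch (the external call site is not part of this diff):

    /* Enable: writes BIT_ULL(12) | RVU_SWITCH_LBK_CHAN per TL2 */
    rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
                       &nix_hw->txsch[NIX_TXSCH_LVL_TL2], true);
    /* Disable: clears the same link config */
    rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
                       &nix_hw->txsch[NIX_TXSCH_LVL_TL2], false);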
@@ -2534,8 +2719,6 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
rvu_write64(rvu, blkaddr, reg, regval);
}
- rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
- &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
return 0;
}
@@ -3146,10 +3329,16 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
}
/* Setup a default value of 8192 as DWRR MTU */
- if (rvu->hw->cap.nix_common_dwrr_mtu) {
- rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
+ if (rvu->hw->cap.nix_common_dwrr_mtu ||
+ rvu->hw->cap.nix_multiple_dwrr_mtu) {
+ rvu_write64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
convert_bytes_to_dwrr_mtu(8192));
- rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
+ rvu_write64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
+ convert_bytes_to_dwrr_mtu(8192));
+ rvu_write64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
convert_bytes_to_dwrr_mtu(8192));
}
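Judging by the 0x1F mask in convert_dwrr_mtu_to_bytes() earlier in this diff, the register field appears to be a 5-bit log2-of-bytes encoding, so the 8192-byte default should round-trip as follows (a hedged sketch; convert_bytes_to_dwrr_mtu() itself is outside this diff):

    u32 field = convert_bytes_to_dwrr_mtu(8192);       /* 2^13 -> expected 13 */
    WARN_ON(convert_dwrr_mtu_to_bytes(field) != 8192); /* should round-trip */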
@@ -3247,19 +3436,28 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
rsp->min_mtu = NIC_HW_MIN_FRS;
- if (!rvu->hw->cap.nix_common_dwrr_mtu) {
+ if (!rvu->hw->cap.nix_common_dwrr_mtu &&
+ !rvu->hw->cap.nix_multiple_dwrr_mtu) {
/* Return '1' on OTx2 */
rsp->rpm_dwrr_mtu = 1;
rsp->sdp_dwrr_mtu = 1;
+ rsp->lbk_dwrr_mtu = 1;
return 0;
}
- dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
+ /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
+ dwrr_mtu = rvu_read64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
- dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
+ dwrr_mtu = rvu_read64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+ dwrr_mtu = rvu_read64(rvu, blkaddr,
+ nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
+ rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
return 0;
}
@@ -3308,6 +3506,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
struct nix_rx_flowkey_alg *field;
struct nix_rx_flowkey_alg tmp;
u32 key_type, valid_key;
+ u32 l3_l4_src_dst;
int l4_key_offset = 0;
if (!alg)
@@ -3335,6 +3534,15 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
* group_member - Enabled when protocol is part of a group.
*/
+ /* Last 4 bits (31:28) are reserved to specify SRC, DST
+ * selection for L3 and L4, i.e. IPV[4,6]_SRC, IPV[4,6]_DST,
+ * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
+ * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
+ */
+ l3_l4_src_dst = flow_cfg;
+ /* Clear these 4 bits so that they are not part of the key */
+ flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
+
keyoff_marker = 0; max_key_off = 0; group_member = 0;
nr_field = 0; key_off = 0; field_marker = 1;
field = &tmp; max_bit_pos = fls(flow_cfg);
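A caller-side sketch of the new selector bits (all flag names appear elsewhere in this diff; the bit assignments follow the 31:28 comment above):

    /* Hash IPv4 on source address only, plus both TCP ports */
    u32 flow_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_TCP |
                   NIX_FLOW_KEY_TYPE_L3_SRC_ONLY;
    /* Bits 31:28 only steer SRC/DST selection; NIX_FLOW_KEY_TYPE_L3_L4_MASK
     * strips them so they never land in the key itself.
     */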
@@ -3372,6 +3580,22 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
}
field->hdr_offset = 12; /* SIP offset */
field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
+
+ /* Only SIP */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
+ field->bytesm1 = 3; /* SIP, 4 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
+ /* Both SIP + DIP */
+ if (field->bytesm1 == 3) {
+ field->bytesm1 = 7; /* SIP + DIP, 8B */
+ } else {
+ /* Only DIP */
+ field->hdr_offset = 16; /* DIP off */
+ field->bytesm1 = 3; /* DIP, 4 bytes */
+ }
+ }
+
field->ltype_mask = 0xF; /* Match only IPv4 */
keyoff_marker = false;
break;
@@ -3385,6 +3609,22 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
}
field->hdr_offset = 8; /* SIP offset */
field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
+
+ /* Only SIP */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
+ field->bytesm1 = 15; /* SIP, 16 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
+ /* Both SIP + DIP */
+ if (field->bytesm1 == 15) {
+ /* SIP + DIP, 32 bytes */
+ field->bytesm1 = 31;
+ } else {
+ /* Only DIP */
+ field->hdr_offset = 24; /* DIP off */
+ field->bytesm1 = 15; /* DIP, 16 bytes */
+ }
+ }
field->ltype_mask = 0xF; /* Match only IPv6 */
break;
case NIX_FLOW_KEY_TYPE_TCP:
@@ -3400,6 +3640,21 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->lid = NPC_LID_LH;
field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
+ field->bytesm1 = 1; /* SRC, 2 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
+ /* Both SRC + DST */
+ if (field->bytesm1 == 1) {
+ /* SRC + DST, 4 bytes */
+ field->bytesm1 = 3;
+ } else {
+ /* Only DST port */
+ field->hdr_offset = 2; /* DST off */
+ field->bytesm1 = 1; /* DST, 2 bytes */
+ }
+ }
+
/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
* so no need to change the ltype_match, just change
* the lid for inner protocols
@@ -4068,6 +4323,11 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
else
cfg &= ~BIT_ULL(40);
+ if (req->len_verify & NIX_RX_DROP_RE)
+ cfg |= BIT_ULL(32);
+ else
+ cfg &= ~BIT_ULL(32);
+
if (req->csum_verify & BIT(0))
cfg |= BIT_ULL(37);
else
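An assumed caller-side sketch of the new toggle (only the len_verify field is visible in this diff; the rest of struct nix_rx_cfg is elided):

    struct nix_rx_cfg req = { 0 };
    req.len_verify |= NIX_RX_DROP_RE; /* sets BIT_ULL(32): drop pkts with receive errors */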
@@ -4080,10 +4340,6 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
{
- /* CN10k supports 72KB FIFO size and max packet size of 64k */
- if (rvu->hw->lbk_bufsize == 0x12000)
- return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
-
return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
}
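A worked reading of the deleted branch: with the 72KB FIFO (0x12000 = 73728 bytes) and the 64KB max packet size named in the removed comment, the old formula yielded only (73728 - 65536) / 16 = 512 credits, well below the fixed 1600-credit value (16 * max LBK datarate of 100Gbps) that all silicon now uses.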
@@ -4269,8 +4525,11 @@ static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
* Check if HW uses a common MTU for all DWRR quantum configs.
* On OcteonTx2 this register field is '0'.
*/
- if (((hw_const >> 56) & 0x10) == 0x10)
+ if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
hw->cap.nix_common_dwrr_mtu = true;
+
+ if (hw_const & BIT_ULL(61))
+ hw->cap.nix_multiple_dwrr_mtu = true;
}
static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
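A decision sketch for the two capability bits (bit positions taken from the hunk above; bit 61 takes precedence and is assumed to mark newer silicon with per-link-type DWRR MTU registers):

    bool multiple = hw_const & BIT_ULL(61);            /* per-link-type DWRR MTUs */
    bool common = (((hw_const >> 56) & 0x10) == 0x10)  /* bit 60: one shared MTU */
                  && !multiple;
    hw->cap.nix_multiple_dwrr_mtu = multiple;
    hw->cap.nix_common_dwrr_mtu = common;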