author    Sunil Goutham <sgoutham@marvell.com>    2019-11-14 08:26:29 +0300
committer David S. Miller <davem@davemloft.net>   2019-11-15 05:09:16 +0300
commit    5d9b976d4480dc0dcfa3719b645636d2f0f9f156 (patch)
tree      1d38379c77e0f09aa22723f4a7fea767d56badc6 /drivers/net/ethernet/marvell/octeontx2/af/cgx.c
parent    206ff848a1abaa1755310fdb4b20a3303ccf23d9 (diff)
download  linux-5d9b976d4480dc0dcfa3719b645636d2f0f9f156.tar.xz
octeontx2-af: Support fixed transmit scheduler topology
CN96xx initial silicon doesn't support all features pertaining to NIX transmit scheduling and shaping:
- It supports a fixed topology of 1:1 mapped transmit limiters at all levels.
- Supports DWRR only at SMQ/MDQ and TL1.
- Doesn't support shaping and coloring.

This patch adds a HW capability structure by which each variant and skew of silicon can be differentiated by its supported features, and adds support for A0 silicon's transmit scheduler capabilities, or rather limitations.

Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
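The capability structure itself is added elsewhere in the AF driver, not in cgx.c. As a rough, hedged sketch of the idea only, with hypothetical type and field names that are not the ones introduced by this patch, a per-silicon capability record could look like this:

/* Hypothetical sketch: type and field names are illustrative, not the
 * ones added by this patch. The idea is that each silicon variant
 * advertises what its NIX transmit scheduler supports, and the rest of
 * the driver consults these flags instead of hard-coding CN96xx A0
 * behaviour.
 */
struct nix_txsch_hw_cap {
	bool fixed_txschq_mapping;  /* 1:1 mapped transmit limiters at all levels */
	bool dwrr_all_levels;       /* false on A0: DWRR only at SMQ/MDQ and TL1 */
	bool shaping;               /* false on A0: no shaping or coloring */
};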
Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/cgx.c')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c  |  50
1 file changed, 50 insertions, 0 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index d94e68254c43..5ca788691911 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -291,6 +291,35 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
}
EXPORT_SYMBOL(cgx_lmac_promisc_config);
+/* Enable or disable forwarding received pause frames to Tx block */
+void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
+}
+EXPORT_SYMBOL(cgx_lmac_enadis_rx_pause_fwding);
+
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
struct cgx *cgx = cgxd;
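For context only (this is not part of the patch), a caller in the AF driver could use the new helper to flip pause-frame forwarding for every LMAC on a CGX block. The sketch below assumes cgx_get_pdata() and cgx_get_lmac_cnt() are the existing lookup helpers in this file; the wrapper function itself is hypothetical:

/* Illustrative only: disable pause-frame forwarding on every LMAC of a
 * CGX block. cgx_get_pdata() and cgx_get_lmac_cnt() are assumed helpers
 * from the surrounding driver, not introduced by this patch.
 */
static void example_disable_rx_pause_fwding(int cgx_id)
{
	void *cgxd = cgx_get_pdata(cgx_id);
	int lmac_id;

	if (!cgxd)
		return;

	for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgxd); lmac_id++)
		cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}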
@@ -331,6 +360,27 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
}
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
+int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg, last;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ last = cfg;
+ if (enable)
+ cfg |= DATA_PKT_TX_EN;
+ else
+ cfg &= ~DATA_PKT_TX_EN;
+
+ if (cfg != last)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return !!(last & DATA_PKT_TX_EN);
+}
+EXPORT_SYMBOL(cgx_lmac_tx_enable);
+
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{