author     Vincent Whitchurch <vincent.whitchurch@axis.com>   2022-02-24 14:38:29 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2022-03-08 21:12:50 +0300
commit     bb20939bc8d1001ac07c71a2937e2160697da76d (patch)
tree       14ff4eccf5a21d059322fdc2d4054537dec1b779 /drivers/net/ethernet/stmicro
parent     e753b639363d06959120687183c4bd6bc1f4115b (diff)
net: stmmac: only enable DMA interrupts when ready
[ Upstream commit 087a7b944c5db409f7c1a68bf4896c56ba54eaff ]

In this driver's ->ndo_open() callback, it enables DMA interrupts, starts the DMA channels, then requests interrupts with request_irq(), and then finally enables napi.

If RX DMA interrupts are received before napi is enabled, no processing is done because napi_schedule_prep() will return false. If the network has a lot of broadcast/multicast traffic, then the RX ring could fill up completely before napi is enabled. When this happens, no further RX interrupts will be delivered, and the driver will fail to receive any packets.

Fix this by only enabling DMA interrupts after all other initialization is complete.

Fixes: 523f11b5d4fd72efb ("net: stmmac: move hardware setup for stmmac_open to new function")
Reported-by: Lars Persson <larper@axis.com>
Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
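For orientation, the sketch below condenses the bring-up ordering that the open path ends up with after this patch. It is illustrative only, not the driver's actual control flow: the real stmmac_open() does far more work (descriptor allocation, PHY bring-up, full error unwinding), the wrapper name stmmac_open_ordering_sketch() is made up for this example, and stmmac_request_irq() is assumed to be the driver's existing IRQ-request helper.

/*
 * Illustrative only: a condensed view of the post-patch bring-up order,
 * showing where stmmac_enable_all_dma_irq() now sits relative to napi.
 */
static int stmmac_open_ordering_sketch(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* Program the DMA engine; per-channel interrupts stay masked here
	 * because stmmac_init_dma_engine() now calls stmmac_disable_dma_irq().
	 */
	ret = stmmac_init_dma_engine(priv);
	if (ret)
		return ret;

	/* Install the interrupt handlers before anything can fire. */
	ret = stmmac_request_irq(dev);
	if (ret)
		return ret;

	/* napi_enable() runs for every channel inside this call. */
	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(dev);

	/* Only now unmask the DMA interrupts, so the first RX interrupt
	 * finds napi ready and napi_schedule_prep() succeeds.
	 */
	stmmac_enable_all_dma_irq(priv);

	return 0;
}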
Diffstat (limited to 'drivers/net/ethernet/stmicro')
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  |  28
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4595282a9790..73f66170829a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2268,6 +2268,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
 	stmmac_stop_tx(priv, priv->ioaddr, chan);
 }
 
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+	u32 chan;
+
+	for (chan = 0; chan < dma_csr_ch; chan++) {
+		struct stmmac_channel *ch = &priv->channel[chan];
+		unsigned long flags;
+
+		spin_lock_irqsave(&ch->lock, flags);
+		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+}
+
 /**
  * stmmac_start_all_dma - start all RX and TX DMA channels
  * @priv: driver private structure
@@ -2903,8 +2920,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
 
 	/* DMA CSR Channel configuration */
-	for (chan = 0; chan < dma_csr_ch; chan++)
+	for (chan = 0; chan < dma_csr_ch; chan++) {
 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+	}
 
 	/* DMA RX Channel Configuration */
 	for (chan = 0; chan < rx_channels_count; chan++) {
@@ -3756,6 +3775,7 @@ static int stmmac_open(struct net_device *dev)
 
 	stmmac_enable_all_queues(priv);
 	netif_tx_start_all_queues(priv->dev);
+	stmmac_enable_all_dma_irq(priv);
 
 	return 0;
 
@@ -6514,8 +6534,10 @@ int stmmac_xdp_open(struct net_device *dev)
 	}
 
 	/* DMA CSR Channel configuration */
-	for (chan = 0; chan < dma_csr_ch; chan++)
+	for (chan = 0; chan < dma_csr_ch; chan++) {
 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+	}
 
 	/* Adjust Split header */
 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
@@ -6575,6 +6597,7 @@ int stmmac_xdp_open(struct net_device *dev)
 
 	stmmac_enable_all_queues(priv);
 	netif_carrier_on(dev);
 	netif_tx_start_all_queues(dev);
+	stmmac_enable_all_dma_irq(priv);
 
 	return 0;
 
@@ -7453,6 +7476,7 @@ int stmmac_resume(struct device *dev)
 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
 
 	stmmac_enable_all_queues(priv);
+	stmmac_enable_all_dma_irq(priv);
 
 	mutex_unlock(&priv->lock);
 	rtnl_unlock();