author     Sujoy Ray <sujoy.ray@intel.com>    2022-05-04 21:01:19 +0300
committer  Sujoy Ray <sujoy.ray@intel.com>    2022-05-12 17:46:40 +0300
commit     efe6d9649b1d6b85b50cef64745df2e6749a8a45 (patch)
tree       e9aca55fa1fa29fea638ee52832fa9691fdd6f02 /drivers/net/ethernet/cadence
parent     ab95859fee776e58934d2b0cc1f4e93810e66508 (diff)
parent     49caedb668e476c100d727f2174724e0610a2b92 (diff)
download   linux-efe6d9649b1d6b85b50cef64745df2e6749a8a45.tar.xz
Merge commit '49caedb668e476c100d727f2174724e0610a2b92' of https://github.com/openbmc/linux into openbmc/dev-5.15-intel-bump_v5.15.36
Signed-off-by: Sujoy Ray <sujoy.ray@intel.com>
Diffstat (limited to 'drivers/net/ethernet/cadence')
-rw-r--r--   drivers/net/ethernet/cadence/macb_main.c   35
1 file changed, 33 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d13fb1d31821..217c1a0f8940 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1606,7 +1606,14 @@ static int macb_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete_done(napi, work_done);
- /* Packets received while interrupts were disabled */
+ /* RSR bits only seem to propagate to raise interrupts when
+ * interrupts are enabled at the time, so if bits are already
+ * set due to packets received while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here. This has been seen to happen
+ * around 30% of the time under heavy network load.
+ */
status = macb_readl(bp, RSR);
if (status) {
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1614,6 +1621,22 @@ static int macb_poll(struct napi_struct *napi, int budget)
napi_reschedule(napi);
} else {
queue_writel(queue, IER, bp->rx_intr_mask);
+
+ /* In rare cases, packets could have been received in
+ * the window between the check above and re-enabling
+ * interrupts. Therefore, a double-check is required
+ * to avoid losing a wakeup. This can potentially race
+ * with the interrupt handler doing the same actions
+ * if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+ status = macb_readl(bp, RSR);
+ if (unlikely(status)) {
+ queue_writel(queue, IDR, bp->rx_intr_mask);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
+ napi_schedule(napi);
+ }
}
}
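
To make the race-avoidance logic in the macb_poll() hunks above easier to follow, here is a minimal standalone sketch of the same check / re-enable / re-check pattern. The fake_mac structure and the register helpers (rsr_read, ier_write, idr_write) are illustrative stand-ins, not the driver's real macb_readl()/queue_writel() API.

/* Sketch only: mirrors the two RSR reads added to macb_poll() above. */
#include <stdbool.h>
#include <stdint.h>

struct fake_mac {
	uint32_t rsr;      /* receive status bits, set by "hardware"      */
	uint32_t irq_mask; /* currently enabled RX interrupt sources      */
};

static uint32_t rsr_read(struct fake_mac *mac)          { return mac->rsr; }
static void ier_write(struct fake_mac *mac, uint32_t m) { mac->irq_mask |= m; }
static void idr_write(struct fake_mac *mac, uint32_t m) { mac->irq_mask &= ~m; }

/* Called when a poll pass processed less than its budget. Returns true
 * if it is safe to stay idle until the next interrupt. */
static bool finish_poll(struct fake_mac *mac, uint32_t rx_mask,
			void (*reschedule)(void))
{
	/* Packets that arrived while interrupts were masked leave RSR set
	 * but raise no interrupt, so check before going back to sleep. */
	if (rsr_read(mac)) {
		reschedule();
		return false;
	}

	ier_write(mac, rx_mask);

	/* A packet may have slipped in between the check above and the
	 * IER write; re-check and, if so, mask again and reschedule so
	 * the wakeup is not lost. */
	if (rsr_read(mac)) {
		idr_write(mac, rx_mask);
		reschedule();
		return false;
	}

	return true;
}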
@@ -1666,6 +1689,7 @@ static void macb_tx_restart(struct macb_queue *queue)
unsigned int head = queue->tx_head;
unsigned int tail = queue->tx_tail;
struct macb *bp = queue->bp;
+ unsigned int head_idx, tbqp;
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1673,6 +1697,13 @@ static void macb_tx_restart(struct macb_queue *queue)
if (head == tail)
return;
+ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
+
+ if (tbqp == head_idx)
+ return;
+
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}
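
The macb_tx_restart() hunk above only issues TSTART when the hardware's TBQP descriptor pointer still lags the driver's head index. Below is a rough standalone sketch of that guard; ring_wrap() stands in for macb_tx_ring_wrap()/macb_adj_dma_desc_idx(), and the power-of-two ring size is an assumption for illustration.

/* Sketch only: the "is the hardware actually lagging?" check. */
#include <stdbool.h>

#define TX_RING_SIZE 64u  /* assumed power of two for this sketch */

static unsigned int ring_wrap(unsigned int idx)
{
	return idx & (TX_RING_SIZE - 1);
}

/* Return true if the DMA engine should be kicked with TSTART. */
static bool tx_restart_needed(unsigned int head, unsigned int tail,
			      unsigned int tbqp_desc_idx)
{
	if (head == tail)
		return false; /* nothing queued at all */

	/* If TBQP already points at the driver's head, the hardware has
	 * caught up with everything queued, so there is nothing left for
	 * a restart to kick off. */
	return ring_wrap(tbqp_desc_idx) != ring_wrap(head);
}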
@@ -4739,7 +4770,7 @@ static int macb_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif
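
The final hunk switches dma_set_mask() to dma_set_mask_and_coherent() so the coherent DMA mask is raised to 44 bits along with the streaming mask. A hedged sketch of the usual probe-time idiom follows; it is generic and not the exact macb error handling.

/* Sketch only: set both DMA masks together, with a 32-bit fallback. */
#include <linux/dma-mapping.h>
#include <linux/device.h>

static int example_set_dma_mask(struct device *dev)
{
	/* dma_set_mask() alone leaves the coherent mask at its earlier
	 * (typically 32-bit) value, so dma_alloc_coherent() buffers such
	 * as descriptor rings could still be confined below 4 GiB. */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(44)))
		return 0;

	dev_warn(dev, "44-bit DMA not available, falling back to 32-bit\n");
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}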