author		Mika Westerberg <mika.westerberg@linux.intel.com>	2022-08-30 18:32:48 +0300
committer	David S. Miller <davem@davemloft.net>	2022-08-31 16:05:12 +0300
commit		54669e2f17cb5a4c41ade89427f074dc22cecb17 (patch)
tree		f34b0fa20540c63d8ac6885cf2b61d805831e572
parent		f9cad07b840ec8a8eb54928908d694b6e262631c (diff)
download	linux-54669e2f17cb5a4c41ade89427f074dc22cecb17.tar.xz
thunderbolt: Add back Intel Falcon Ridge end-to-end flow control workaround
As we are now enabling full end-to-end flow control in the Thunderbolt
networking driver, we need to add back the workaround that was removed
with commit 53f13319d131 ("thunderbolt: Get rid of E2E workaround") so
that the feature works properly on second generation Thunderbolt
hardware (Falcon Ridge). However, this time we only apply it to Falcon
Ridge controllers, as an additional quirk; on non-Falcon Ridge
controllers it does nothing.

While there, fix a typo 'reqister' -> 'register' in the comment.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
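For context, the quirk only matters when a protocol driver actually requests
end-to-end flow control on its Rx ring. The sketch below is illustrative and
not part of this patch: it assumes the tb_ring_alloc_rx() prototype from
include/linux/thunderbolt.h (including its e2e_tx_hop argument), and the names
example_alloc_e2e_rx and EXAMPLE_RING_SIZE are hypothetical. With QUIRK_E2E
set, nhi_alloc_hop() in the patch below overrides the requested e2e_tx_hop
with RING_E2E_RESERVED_HOPID and starts HopID auto-allocation one higher, so
that HopID stays free for the credit flow.

#include <linux/thunderbolt.h>

#define EXAMPLE_RING_SIZE	256	/* hypothetical ring size */

/*
 * Hypothetical caller: allocate an Rx ring with end-to-end flow
 * control, asking that Rx credits be returned to the Tx ring at
 * tx_hop.  On Falcon Ridge the NHI driver replaces this HopID with
 * RING_E2E_RESERVED_HOPID because of QUIRK_E2E.
 */
static struct tb_ring *example_alloc_e2e_rx(struct tb_nhi *nhi, int tx_hop,
					     u16 sof_mask, u16 eof_mask)
{
	/* hop = -1 lets nhi_alloc_hop() pick a free HopID (>= start_hop) */
	return tb_ring_alloc_rx(nhi, -1, EXAMPLE_RING_SIZE,
				RING_FLAG_FRAME | RING_FLAG_E2E, tx_hop,
				sof_mask, eof_mask, NULL, NULL);
}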
-rw-r--r--	drivers/thunderbolt/nhi.c	49
1 file changed, 42 insertions, 7 deletions
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index cb8c9c4ae93a..b5cd9673e15d 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -28,7 +28,11 @@
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
#define RING_FIRST_USABLE_HOPID 1
-
+/*
+ * Used with QUIRK_E2E to specify an unused HopID to which the Rx
+ * credits are transferred.
+ */
+#define RING_E2E_RESERVED_HOPID RING_FIRST_USABLE_HOPID
/*
* Minimal number of vectors when we use MSI-X. Two for control channel
* Rx/Tx and the rest four are for cross domain DMA paths.
@@ -38,7 +42,9 @@
#define NHI_MAILBOX_TIMEOUT 500 /* ms */
+/* Host interface quirks */
#define QUIRK_AUTO_CLEAR_INT BIT(0)
+#define QUIRK_E2E BIT(1)
static int ring_interrupt_index(struct tb_ring *ring)
{
@@ -458,8 +464,18 @@ static void ring_release_msix(struct tb_ring *ring)
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
+	unsigned int start_hop = RING_FIRST_USABLE_HOPID;
	int ret = 0;
+	if (nhi->quirks & QUIRK_E2E) {
+		start_hop = RING_FIRST_USABLE_HOPID + 1;
+		if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
+			dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n",
+				ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID);
+			ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID;
+		}
+	}
+
spin_lock_irq(&nhi->lock);
if (ring->hop < 0) {
@@ -469,7 +485,7 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
		 * Automatically allocate HopID from the non-reserved
		 * range 1 .. hop_count - 1.
		 */
-		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
+		for (i = start_hop; i < nhi->hop_count; i++) {
			if (ring->is_tx) {
				if (!nhi->tx_rings[i]) {
					ring->hop = i;
@@ -484,6 +500,11 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
		}
	}
+	if (ring->hop > 0 && ring->hop < start_hop) {
+		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
+		ret = -EINVAL;
+		goto err_unlock;
+	}
	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
		ret = -EINVAL;
@@ -1097,12 +1118,26 @@ static void nhi_shutdown(struct tb_nhi *nhi)
static void nhi_check_quirks(struct tb_nhi *nhi)
{
-	/*
-	 * Intel hardware supports auto clear of the interrupt status
-	 * reqister right after interrupt is being issued.
-	 */
-	if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL)
+	if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
+		/*
+		 * Intel hardware supports auto clear of the interrupt
+		 * status register right after interrupt is being
+		 * issued.
+		 */
		nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
+
+		switch (nhi->pdev->device) {
+		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+			/*
+			 * Falcon Ridge controller needs the end-to-end
+			 * flow control workaround to avoid losing Rx
+			 * packets when RING_FLAG_E2E is set.
+			 */
+			nhi->quirks |= QUIRK_E2E;
+			break;
+		}
+	}
}
static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)