thunderbolt: Add back Intel Falcon Ridge end-to-end flow control workaround

[ Upstream commit 54669e2f17 ]

As we are now enabling full end-to-end flow control to the Thunderbolt
networking driver, in order for it to work properly on second generation
Thunderbolt hardware (Falcon Ridge), we need to add back the workaround
that was removed with commit 53f13319d1 ("thunderbolt: Get rid of E2E
workaround"). However, this time we only apply it for Falcon Ridge
controllers as a form of an additional quirk. For non-Falcon Ridge this
does nothing.

While there fix a typo 'reqister' -> 'register' in the comment.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Mika Westerberg 2022-08-30 18:32:48 +03:00 committed by Greg Kroah-Hartman
Parent b1b4144508
Commit 716c526d66
1 changed file with 42 additions and 7 deletions

View file

@ -25,7 +25,11 @@
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
#define RING_FIRST_USABLE_HOPID 1 #define RING_FIRST_USABLE_HOPID 1
/*
* Used with QUIRK_E2E to specify an unused HopID the Rx credits are
* transferred.
*/
#define RING_E2E_RESERVED_HOPID RING_FIRST_USABLE_HOPID
/* /*
* Minimal number of vectors when we use MSI-X. Two for control channel * Minimal number of vectors when we use MSI-X. Two for control channel
* Rx/Tx and the rest four are for cross domain DMA paths. * Rx/Tx and the rest four are for cross domain DMA paths.
@ -35,7 +39,9 @@
#define NHI_MAILBOX_TIMEOUT 500 /* ms */ #define NHI_MAILBOX_TIMEOUT 500 /* ms */
/* Host interface quirks */
#define QUIRK_AUTO_CLEAR_INT BIT(0) #define QUIRK_AUTO_CLEAR_INT BIT(0)
#define QUIRK_E2E BIT(1)
static int ring_interrupt_index(struct tb_ring *ring) static int ring_interrupt_index(struct tb_ring *ring)
{ {
@ -455,8 +461,18 @@ static void ring_release_msix(struct tb_ring *ring)
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{ {
unsigned int start_hop = RING_FIRST_USABLE_HOPID;
int ret = 0; int ret = 0;
if (nhi->quirks & QUIRK_E2E) {
start_hop = RING_FIRST_USABLE_HOPID + 1;
if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n",
ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID);
ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID;
}
}
spin_lock_irq(&nhi->lock); spin_lock_irq(&nhi->lock);
if (ring->hop < 0) { if (ring->hop < 0) {
@ -466,7 +482,7 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
* Automatically allocate HopID from the non-reserved * Automatically allocate HopID from the non-reserved
* range 1 .. hop_count - 1. * range 1 .. hop_count - 1.
*/ */
for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) { for (i = start_hop; i < nhi->hop_count; i++) {
if (ring->is_tx) { if (ring->is_tx) {
if (!nhi->tx_rings[i]) { if (!nhi->tx_rings[i]) {
ring->hop = i; ring->hop = i;
@ -481,6 +497,11 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
} }
} }
if (ring->hop > 0 && ring->hop < start_hop) {
dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
ret = -EINVAL;
goto err_unlock;
}
if (ring->hop < 0 || ring->hop >= nhi->hop_count) { if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop); dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
ret = -EINVAL; ret = -EINVAL;
@ -1094,12 +1115,26 @@ static void nhi_shutdown(struct tb_nhi *nhi)
static void nhi_check_quirks(struct tb_nhi *nhi) static void nhi_check_quirks(struct tb_nhi *nhi)
{ {
/* if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
* Intel hardware supports auto clear of the interrupt status /*
* reqister right after interrupt is being issued. * Intel hardware supports auto clear of the interrupt
*/ * status register right after interrupt is being
if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) * issued.
*/
nhi->quirks |= QUIRK_AUTO_CLEAR_INT; nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
switch (nhi->pdev->device) {
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
/*
* Falcon Ridge controller needs the end-to-end
* flow control workaround to avoid losing Rx
* packets when RING_FLAG_E2E is set.
*/
nhi->quirks |= QUIRK_E2E;
break;
}
}
} }
static int nhi_init_msi(struct tb_nhi *nhi) static int nhi_init_msi(struct tb_nhi *nhi)