Merge branches 'pci/enumeration', 'pci/hotplug', 'pci/misc', 'pci/ntb', 'pci/thunderbolt' and 'pci/virtualization' into next
* pci/enumeration:
  x86/PCI: Refine PCI support check in pcibios_init()

* pci/hotplug:
  PCI: acpiphp_ibm: Avoid uninitialized variable reference

* pci/misc:
  PCI: Fix spelling errors

* pci/ntb:
  PCI: Add DMA alias quirk for mic_x200_dma
  PCI: Add support for multiple DMA aliases
  PCI: Move informational printk to pci_add_dma_alias()
  PCI: Add pci_add_dma_alias() to abstract implementation

* pci/thunderbolt:
  thunderbolt: Support 1st gen Light Ridge controller
  thunderbolt: Fix typos and magic number
  PCI: Add Intel Thunderbolt device IDs

* pci/virtualization:
  PCI: Work around Intel Sunrise Point PCH incorrect ACS capability
  PCI: Reverse standard ACS vs device-specific ACS enabling
  PCI: Mark Intel i40e NIC INTx masking as broken
Commit 7f768544c2
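The pci/ntb series below replaces the single dma_alias_devfn field with a per-device devfn bitmap exposed through a new pci_add_dma_alias() helper. As a rough illustration of how a header fixup quirk would use that helper, here is a minimal sketch; quirk_example_dma_alias, the vendor/device pair 0x1234/0x5678, and the aliased devfns are invented for illustration and do not refer to real hardware:

/*
 * Hypothetical header fixup (sketch only): declare that this endpoint also
 * emits DMA with requester IDs 00.1 and 10.0 on its bus, so IOMMU mappings
 * cover those aliases too.  Assumes <linux/pci.h> and the pci_add_dma_alias()
 * helper added by this merge.
 */
static void quirk_example_dma_alias(struct pci_dev *pdev)
{
	pci_add_dma_alias(pdev, PCI_DEVFN(0x00, 0x1));
	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_example_dma_alias);

The real conversions in the diff below (quirk_dma_func0_alias, quirk_fixed_dma_alias, quirk_mic_x200_dma_alias) follow the same pattern.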
@@ -516,7 +516,7 @@ void __init pcibios_set_cache_line_size(void)
 int __init pcibios_init(void)
 {
-	if (!raw_pci_ops) {
+	if (!raw_pci_ops && !raw_pci_ext_ops) {
 		printk(KERN_WARNING "PCI: System does not support PCI\n");
 		return 0;
 	}
 
@@ -660,8 +660,8 @@ static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
 }
 
 /*
- * Look for aliases to or from the given device for exisiting groups. The
- * dma_alias_devfn only supports aliases on the same bus, therefore the search
+ * Look for aliases to or from the given device for existing groups. DMA
+ * aliases are only supported on the same bus, therefore the search
  * space is quite small (especially since we're really only looking at pcie
  * device, and therefore only expect multiple slots on the root complex or
  * downstream switch ports).  It's conceivable though that a pair of
@@ -686,11 +686,7 @@ static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
 			continue;
 
 		/* We alias them or they alias us */
-		if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
-		     pdev->dma_alias_devfn == tmp->devfn) ||
-		    ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
-		     tmp->dma_alias_devfn == pdev->devfn)) {
-
+		if (pci_devs_are_dma_aliases(pdev, tmp)) {
 			group = get_pci_alias_group(tmp, devfns);
 			if (group) {
 				pci_dev_put(tmp);
@@ -819,7 +819,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
 
 	err = nwl_pcie_bridge_init(pcie);
 	if (err) {
-		dev_err(pcie->dev, "HW Initalization failed\n");
+		dev_err(pcie->dev, "HW Initialization failed\n");
 		return err;
 	}
 
@@ -138,6 +138,8 @@ static union apci_descriptor *ibm_slot_from_id(int id)
 	char *table;
 
 	size = ibm_get_table_from_acpi(&table);
+	if (size < 0)
+		return NULL;
 	des = (union apci_descriptor *)table;
 	if (memcmp(des->header.sig, "aPCI", 4) != 0)
 		goto ibm_slot_done;
@@ -2389,7 +2389,7 @@ out:
 	return offset + ent_size;
 }
 
-/* Enhanced Allocation Initalization */
+/* Enhanced Allocation Initialization */
 void pci_ea_init(struct pci_dev *dev)
 {
 	int ea;
@@ -2547,7 +2547,7 @@ void pci_request_acs(void)
  * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites
  * @dev: the PCI device
  */
-static int pci_std_enable_acs(struct pci_dev *dev)
+static void pci_std_enable_acs(struct pci_dev *dev)
 {
 	int pos;
 	u16 cap;
@@ -2555,7 +2555,7 @@ static int pci_std_enable_acs(struct pci_dev *dev)
 
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
 	if (!pos)
-		return -ENODEV;
+		return;
 
 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
@@ -2573,8 +2573,6 @@ static int pci_std_enable_acs(struct pci_dev *dev)
 	ctrl |= (cap & PCI_ACS_UF);
 
 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
-
-	return 0;
 }
 
 /**
@@ -2586,10 +2584,10 @@ void pci_enable_acs(struct pci_dev *dev)
 	if (!pci_acs_enable)
 		return;
 
-	if (!pci_std_enable_acs(dev))
+	if (!pci_dev_specific_enable_acs(dev))
 		return;
 
-	pci_dev_specific_enable_acs(dev);
+	pci_std_enable_acs(dev);
 }
 
 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
@@ -4578,6 +4576,37 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
 	return 0;
 }
 
+/**
+ * pci_add_dma_alias - Add a DMA devfn alias for a device
+ * @dev: the PCI device for which alias is added
+ * @devfn: alias slot and function
+ *
+ * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
+ * It should be called early, preferably as PCI fixup header quirk.
+ */
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
+{
+	if (!dev->dma_alias_mask)
+		dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
+					      sizeof(long), GFP_KERNEL);
+	if (!dev->dma_alias_mask) {
+		dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
+		return;
+	}
+
+	set_bit(devfn, dev->dma_alias_mask);
+	dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
+		 PCI_SLOT(devfn), PCI_FUNC(devfn));
+}
+
+bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
+{
+	return (dev1->dma_alias_mask &&
+		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
+	       (dev2->dma_alias_mask &&
+		test_bit(dev1->devfn, dev2->dma_alias_mask));
+}
+
 bool pci_device_is_present(struct pci_dev *pdev)
 {
 	u32 v;
@@ -1537,6 +1537,7 @@ static void pci_release_dev(struct device *dev)
 	pcibios_release_device(pci_dev);
 	pci_bus_put(pci_dev->bus);
 	kfree(pci_dev->driver_override);
+	kfree(pci_dev->dma_alias_mask);
 	kfree(pci_dev);
 }
 
@@ -3150,6 +3150,39 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
 			 quirk_broken_intx_masking);
 
+/*
+ * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
+ * DisINTx can be set but the interrupt status bit is non-functional.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2,
+			 quirk_broken_intx_masking);
+
 static void quirk_no_bus_reset(struct pci_dev *dev)
 {
 	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
@@ -3185,6 +3218,29 @@ static void quirk_no_pm_reset(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
 			       PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
 
+/*
+ * Thunderbolt controllers with broken MSI hotplug signaling:
+ * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part
+ * of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge).
+ */
+static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
+{
+	if (pdev->is_hotplug_bridge &&
+	    (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
+	     pdev->revision <= 1))
+		pdev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
+			quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
+			quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
+			quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+			quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
+			quirk_thunderbolt_hotplug_msi);
+
 #ifdef CONFIG_ACPI
 /*
  * Apple: Shutdown Cactus Ridge Thunderbolt controller.
@@ -3232,7 +3288,8 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
 	acpi_execute_simple_method(SXIO, NULL, 0);
 	acpi_execute_simple_method(SXLV, NULL, 0);
 }
-DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1547,
+DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
+			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
 			       quirk_apple_poweroff_thunderbolt);
 
 /*
@@ -3266,9 +3323,11 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
 	if (!nhi)
 		goto out;
 	if (nhi->vendor != PCI_VENDOR_ID_INTEL
-		|| (nhi->device != 0x1547 && nhi->device != 0x156c)
-		|| nhi->subsystem_vendor != 0x2222
-		|| nhi->subsystem_device != 0x1111)
+		    || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
+			nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+			nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
+		    || nhi->subsystem_vendor != 0x2222
+		    || nhi->subsystem_device != 0x1111)
 		goto out;
 	dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
 	device_pm_wait_for_dev(&dev->dev, &nhi->dev);
@@ -3276,9 +3335,14 @@ out:
 	pci_dev_put(nhi);
 	pci_dev_put(sibling);
 }
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1547,
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+			       PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
 			       quirk_apple_wait_for_thunderbolt);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x156d,
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+			       quirk_apple_wait_for_thunderbolt);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
 			       quirk_apple_wait_for_thunderbolt);
 #endif
 
@@ -3610,10 +3674,8 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
 
 static void quirk_dma_func0_alias(struct pci_dev *dev)
 {
-	if (PCI_FUNC(dev->devfn) != 0) {
-		dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
-		dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
-	}
+	if (PCI_FUNC(dev->devfn) != 0)
+		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
 }
 
 /*
@@ -3626,10 +3688,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
 
 static void quirk_dma_func1_alias(struct pci_dev *dev)
 {
-	if (PCI_FUNC(dev->devfn) != 1) {
-		dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1);
-		dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
-	}
+	if (PCI_FUNC(dev->devfn) != 1)
+		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
 }
 
 /*
@@ -3695,13 +3755,8 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev)
 	const struct pci_device_id *id;
 
 	id = pci_match_id(fixed_dma_alias_tbl, dev);
-	if (id) {
-		dev->dma_alias_devfn = id->driver_data;
-		dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
-		dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
-			 PCI_SLOT(dev->dma_alias_devfn),
-			 PCI_FUNC(dev->dma_alias_devfn));
-	}
+	if (id)
+		pci_add_dma_alias(dev, id->driver_data);
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
@@ -3733,6 +3788,21 @@ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
 /* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
 DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
 
+/*
+ * MIC x200 NTB forwards PCIe traffic using multiple alien RIDs. They have to
+ * be added as aliases to the DMA device in order to allow buffer access
+ * when IOMMU is enabled. Following devfns have to match RIT-LUT table
+ * programmed in the EEPROM.
+ */
+static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
+{
+	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
+	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
+	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
+
 /*
  * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
  * class code. Fix it.
@@ -3936,6 +4006,55 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
 	return acs_flags & ~flags ? 0 : 1;
 }
 
+/*
+ * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in
+ * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2,
+ * 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and
+ * control registers whereas the PCIe spec packs them into words (Rev 3.0,
+ * 7.16 ACS Extended Capability).  The bit definitions are correct, but the
+ * control register is at offset 8 instead of 6 and we should probably use
+ * dword accesses to them.  This applies to the following PCI Device IDs, as
+ * found in volume 1 of the datasheet[2]:
+ *
+ * 0xa110-0xa11f Sunrise Point-H PCI Express Root Port #{0-16}
+ * 0xa167-0xa16a Sunrise Point-H PCI Express Root Port #{17-20}
+ *
+ * N.B. This doesn't fix what lspci shows.
+ *
+ * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
+ * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
+ */
+static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
+{
+	return pci_is_pcie(dev) &&
+	       pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
+	       ((dev->device & ~0xf) == 0xa110 ||
+		(dev->device >= 0xa167 && dev->device <= 0xa16a));
+}
+
+#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
+
+static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
+{
+	int pos;
+	u32 cap, ctrl;
+
+	if (!pci_quirk_intel_spt_pch_acs_match(dev))
+		return -ENOTTY;
+
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+	if (!pos)
+		return -ENOTTY;
+
+	/* see pci_acs_flags_enabled() */
+	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
+	acs_flags &= (cap | PCI_ACS_EC);
+
+	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
+
+	return acs_flags & ~ctrl ? 0 : 1;
+}
+
 static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
 {
 	/*
@@ -4024,6 +4143,7 @@ static const struct pci_dev_acs_enabled {
 	{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
 	/* Intel PCH root ports */
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
+	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
 	{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
 	{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
 	/* Cavium ThunderX */
@@ -4159,16 +4279,44 @@ static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
 	return 0;
 }
 
+static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
+{
+	int pos;
+	u32 cap, ctrl;
+
+	if (!pci_quirk_intel_spt_pch_acs_match(dev))
+		return -ENOTTY;
+
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+	if (!pos)
+		return -ENOTTY;
+
+	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
+	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
+
+	ctrl |= (cap & PCI_ACS_SV);
+	ctrl |= (cap & PCI_ACS_RR);
+	ctrl |= (cap & PCI_ACS_CR);
+	ctrl |= (cap & PCI_ACS_UF);
+
+	pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
+
+	dev_info(&dev->dev, "Intel SPT PCH root port ACS workaround enabled\n");
+
+	return 0;
+}
+
 static const struct pci_dev_enable_acs {
 	u16 vendor;
 	u16 device;
 	int (*enable_acs)(struct pci_dev *dev);
 } pci_dev_enable_acs[] = {
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
+	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs },
 	{ 0 }
 };
 
-void pci_dev_specific_enable_acs(struct pci_dev *dev)
+int pci_dev_specific_enable_acs(struct pci_dev *dev)
 {
 	const struct pci_dev_enable_acs *i;
 	int ret;
@@ -4180,9 +4328,11 @@ void pci_dev_specific_enable_acs(struct pci_dev *dev)
 		    i->device == (u16)PCI_ANY_ID)) {
 			ret = i->enable_acs(dev);
 			if (ret >= 0)
-				return;
+				return ret;
 		}
 	}
+
+	return -ENOTTY;
 }
 
 /*
@@ -40,11 +40,15 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
 	 * If the device is broken and uses an alias requester ID for
 	 * DMA, iterate over that too.
 	 */
-	if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) {
-		ret = fn(pdev, PCI_DEVID(pdev->bus->number,
-					 pdev->dma_alias_devfn), data);
-		if (ret)
-			return ret;
+	if (unlikely(pdev->dma_alias_mask)) {
+		u8 devfn;
+
+		for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX) {
+			ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn),
+				 data);
+			if (ret)
+				return ret;
+		}
 	}
 
 	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
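For context on the hunk above: pci_for_each_dma_alias() now hands the device's own requester ID, every devfn set in dma_alias_mask, and any bridge-generated aliases to a caller-supplied callback, stopping as soon as the callback returns non-zero. A minimal sketch of such a callback follows; the consumer (alias_collector, collect_alias) is hypothetical and only illustrates the callback signature shown in the diff:

struct alias_collector {
	u16 rids[16];
	int count;
};

/* Called once per requester ID the device may use for DMA. */
static int collect_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	struct alias_collector *c = data;

	if (c->count < ARRAY_SIZE(c->rids))
		c->rids[c->count++] = alias;
	return 0;	/* returning non-zero would abort the walk */
}

/* Usage sketch: pci_for_each_dma_alias(pdev, collect_alias, &collector); */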
@@ -249,7 +249,7 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
 		 * cfg_read/cfg_write.
 		 */
 		tb_ctl_WARN(ctl,
-			"CFG_ERROR(%llx:%x): Invalid config space of offset\n",
+			"CFG_ERROR(%llx:%x): Invalid config space or offset\n",
 			res->response_route, res->response_port);
 		return;
 	case TB_CFG_ERROR_NO_SUCH_PORT:
@@ -221,7 +221,7 @@ struct tb_drom_entry_port {
 	u8 micro1:4;
 	u8 micro3;
 
-	/* BYTES 5-6, TODO: verify (find hardware that has these set) */
+	/* BYTES 6-7, TODO: verify (find hardware that has these set) */
 	u8 peer_port_rid:4;
 	u8 unknown3:3;
 	bool has_peer_port:1;
@@ -388,6 +388,11 @@ int tb_drom_read(struct tb_switch *sw)
 		sw->ports[4].link_nr = 1;
 		sw->ports[3].dual_link_port = &sw->ports[4];
 		sw->ports[4].dual_link_port = &sw->ports[3];
+
+		/* Port 5 is inaccessible on this gen 1 controller */
+		if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
+			sw->ports[5].disabled = true;
+
 		return 0;
 	}
 
@@ -37,7 +37,8 @@ static int ring_interrupt_index(struct tb_ring *ring)
  */
 static void ring_interrupt_active(struct tb_ring *ring, bool active)
 {
-	int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32;
+	int reg = REG_RING_INTERRUPT_BASE +
+		  ring_interrupt_index(ring) / 32 * 4;
 	int bit = ring_interrupt_index(ring) & 31;
 	int mask = 1 << bit;
 	u32 old, new;
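The arithmetic behind the change above: each ring-interrupt mask register is 32 bits wide and the registers sit at consecutive 4-byte offsets, so the byte offset must scale the word index by 4; the old code added the word index directly. A worked example, with the ring index of 40 assumed purely for illustration:

/*
 * ring_interrupt_index(ring) == 40 (assumed for illustration):
 *   old: reg = REG_RING_INTERRUPT_BASE + 40 / 32     -> base + 1 (wrong, still inside register 0)
 *   new: reg = REG_RING_INTERRUPT_BASE + 40 / 32 * 4 -> base + 4 (second 32-bit register)
 *   bit = 40 & 31 = 8, so the code toggles bit 8 of that register.
 */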
@@ -564,7 +565,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* cannot fail - table is allocated bin pcim_iomap_regions */
 	nhi->iobase = pcim_iomap_table(pdev)[0];
 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
-	if (nhi->hop_count != 12)
+	if (nhi->hop_count != 12 && nhi->hop_count != 32)
 		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
 			 nhi->hop_count);
 	INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
@@ -633,16 +634,24 @@ static const struct dev_pm_ops nhi_pm_ops = {
 static struct pci_device_id nhi_ids[] = {
 	/*
 	 * We have to specify class, the TB bridges use the same device and
-	 * vendor (sub)id.
+	 * vendor (sub)id on gen 1 and gen 2 controllers.
 	 */
 	{
 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
-		.vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547,
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
 		.subvendor = 0x2222, .subdevice = 0x1111,
 	},
 	{
 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
-		.vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+		.subvendor = 0x2222, .subdevice = 0x1111,
+	},
+	{
+		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
 		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 	},
 	{ 0,}
@@ -293,9 +293,9 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
 	if (active) {
 		data = data & 0xFFFFFF83;
 		switch (sw->config.device_id) {
-		case 0x1513:
-		case 0x151a:
-		case 0x1549:
+		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
+		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
 			break;
 		default:
 			data |= 4;
@@ -350,7 +350,7 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
 		return NULL;
 
 	sw->tb = tb;
-	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, 2, 0, 5))
+	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
 		goto err;
 	tb_info(tb,
 		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
@@ -370,7 +370,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
 		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
 			   sw->config.vendor_id);
 
-	if (sw->config.device_id != 0x1547 && sw->config.device_id != 0x1549)
+	if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
+	    sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+	    sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE)
 		tb_sw_warn(sw, "unsupported switch device id %#x\n",
 			   sw->config.device_id);
 
@@ -425,9 +427,9 @@ err:
 }
 
 /**
- * tb_sw_set_unpplugged() - set is_unplugged on switch and downstream switches
+ * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
  */
-void tb_sw_set_unpplugged(struct tb_switch *sw)
+void tb_sw_set_unplugged(struct tb_switch *sw)
 {
 	int i;
 	if (sw == sw->tb->root_switch) {
@@ -441,7 +443,7 @@ void tb_sw_set_unpplugged(struct tb_switch *sw)
 	sw->is_unplugged = true;
 	for (i = 0; i <= sw->config.max_port_number; i++) {
 		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
-			tb_sw_set_unpplugged(sw->ports[i].remote->sw);
+			tb_sw_set_unplugged(sw->ports[i].remote->sw);
 	}
 }
 
@@ -483,7 +485,7 @@ int tb_switch_resume(struct tb_switch *sw)
 		    || tb_switch_resume(port->remote->sw)) {
 			tb_port_warn(port,
 				     "lost during suspend, disconnecting\n");
-			tb_sw_set_unpplugged(port->remote->sw);
+			tb_sw_set_unplugged(port->remote->sw);
 		}
 	}
 	return 0;
@@ -246,7 +246,7 @@ static void tb_handle_hotplug(struct work_struct *work)
 	if (ev->unplug) {
 		if (port->remote) {
 			tb_port_info(port, "unplugged\n");
-			tb_sw_set_unpplugged(port->remote->sw);
+			tb_sw_set_unplugged(port->remote->sw);
 			tb_free_invalid_tunnels(tb);
 			tb_switch_free(port->remote->sw);
 			port->remote = NULL;
@@ -226,7 +226,7 @@ void tb_switch_free(struct tb_switch *sw);
 void tb_switch_suspend(struct tb_switch *sw);
 int tb_switch_resume(struct tb_switch *sw);
 int tb_switch_reset(struct tb *tb, u64 route);
-void tb_sw_set_unpplugged(struct tb_switch *sw);
+void tb_sw_set_unplugged(struct tb_switch *sw);
 struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
 
 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
@@ -30,7 +30,7 @@ enum tb_cap {
 	TB_CAP_I2C		= 0x0005,
 	TB_CAP_PLUG_EVENTS	= 0x0105, /* also EEPROM */
 	TB_CAP_TIME2		= 0x0305,
-	TB_CAL_IECS		= 0x0405,
+	TB_CAP_IECS		= 0x0405,
 	TB_CAP_LINK_CONTROLLER	= 0x0605, /* also IECS */
 };
 
@@ -166,8 +166,6 @@ enum pci_dev_flags {
 	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
 	/* Flag for quirk use to store if quirk-specific ACS is enabled */
 	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
-	/* Flag to indicate the device uses dma_alias_devfn */
-	PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
 	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
 	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
 	/* Do not use bus resets for device */
@@ -273,7 +271,7 @@ struct pci_dev {
 	u8		rom_base_reg;	/* which config register controls the ROM */
 	u8		pin;		/* which interrupt pin this device uses */
 	u16		pcie_flags_reg;	/* cached PCIe Capabilities Register */
-	u8		dma_alias_devfn;/* devfn of DMA alias, if any */
+	unsigned long	*dma_alias_mask;/* mask of enabled devfn aliases */
 
 	struct pci_driver *driver;	/* which driver has allocated this device */
 	u64		dma_mask;	/* Mask of the bits of bus address this
@@ -1663,7 +1661,7 @@ enum pci_fixup_pass {
 #ifdef CONFIG_PCI_QUIRKS
 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
-void pci_dev_specific_enable_acs(struct pci_dev *dev);
+int pci_dev_specific_enable_acs(struct pci_dev *dev);
 #else
 static inline void pci_fixup_device(enum pci_fixup_pass pass,
 				    struct pci_dev *dev) { }
@@ -1672,7 +1670,10 @@ static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
 {
 	return -ENOTTY;
 }
-static inline void pci_dev_specific_enable_acs(struct pci_dev *dev) { }
+static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
+{
+	return -ENOTTY;
+}
 #endif
 
 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
@@ -1988,6 +1989,8 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
 }
 #endif
 
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
+bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
 int pci_for_each_dma_alias(struct pci_dev *pdev,
 			   int (*fn)(struct pci_dev *pdev,
 				     u16 alias, void *data), void *data);
@@ -2604,6 +2604,24 @@
 #define PCI_DEVICE_ID_INTEL_82441	0x1237
 #define PCI_DEVICE_ID_INTEL_82380FB	0x124b
 #define PCI_DEVICE_ID_INTEL_82439	0x1250
+#define PCI_DEVICE_ID_INTEL_LIGHT_RIDGE             0x1513 /* Tbt 1 Gen 1 */
+#define PCI_DEVICE_ID_INTEL_EAGLE_RIDGE             0x151a
+#define PCI_DEVICE_ID_INTEL_LIGHT_PEAK              0x151b
+#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C         0x1547 /* Tbt 1 Gen 2 */
+#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C         0x1548
+#define PCI_DEVICE_ID_INTEL_PORT_RIDGE              0x1549
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_NHI    0x1566 /* Tbt 1 Gen 3 */
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE 0x1567
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_NHI    0x1568
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE 0x1569
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI     0x156a /* Thunderbolt 2 */
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE  0x156b
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI     0x156c
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE  0x156d
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI     0x1575 /* Thunderbolt 3 */
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE  0x1576
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI     0x1577
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE  0x1578
 #define PCI_DEVICE_ID_INTEL_80960_RP	0x1960
 #define PCI_DEVICE_ID_INTEL_82840_HB	0x1a21
 #define PCI_DEVICE_ID_INTEL_82845_HB	0x1a30