Merge branches 'pci/host-rcar', 'pci/hotplug', 'pci/iommu', 'pci/misc' and 'pci/msi' into next
* pci/host-rcar:
  PCI: rcar: Remove rcar_pcie_setup_window() resource argument
  PCI: rcar: Cleanup style and formatting
  PCI: rcar: Use correct initial HW settings
  PCI: rcar: Remove redundant config accessor register number checks

* pci/hotplug:
  PCI: cpqphp: Remove unnecessary null test before debugfs_remove()
  PCI: pciehp: Clear Data Link Layer State Changed during init
  PCI: pciehp: Remove struct controller.no_cmd_complete
  PCI: pciehp: Remove assumptions about which commands cause completion events
  PCI: pciehp: Compute timeout from hotplug command start time
  PCI: pciehp: Wait for hotplug command completion lazily
  PCI: pciehp: Make pcie_wait_cmd() self-contained
  PCI: Prevent NULL dereference during pciehp probe

* pci/iommu:
  PCI: Add bridge DMA alias quirk for Intel 82801 bridge

* pci/misc:
  ACPI / PCI: Fix sysfs acpi_index and label errors
  PCI/portdrv: Remove warning about invalid IRQ for hot-added PCIe ports

* pci/msi:
  PCI/MSI: Cache Multiple Message Capable in struct msi_desc
  PCI/MSI: Remove unused msi_enabled_mask()
  PCI/MSI: Add internal msix_clear_and_set_ctrl() function
Commit 1d0df48692
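A note on the msix_clear_and_set_ctrl() helper introduced on the pci/msi branch above: it centralizes the read-modify-write of the MSI-X Message Control word, so callers pass the bits to clear and the bits to set in one call. The stand-alone C sketch below illustrates only that pattern against a plain uint16_t; the clear_and_set_ctrl() name and the flag values here are illustrative stand-ins, not the kernel's API (the real helper operates on PCI config space via pci_read_config_word()/pci_write_config_word(), as the MSI hunks later in this diff show).

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the MSI-X Message Control flags. */
#define MSIX_FLAGS_ENABLE  0x8000u
#define MSIX_FLAGS_MASKALL 0x4000u

/* Read-modify-write a 16-bit control word: clear some bits, then set others. */
static void clear_and_set_ctrl(uint16_t *ctrl, uint16_t clear, uint16_t set)
{
	uint16_t v = *ctrl;	/* stands in for pci_read_config_word() */

	v &= ~clear;
	v |= set;
	*ctrl = v;		/* stands in for pci_write_config_word() */
}

int main(void)
{
	uint16_t ctrl = 0;

	/* Enable MSI-X with every vector masked while the table is programmed... */
	clear_and_set_ctrl(&ctrl, 0, MSIX_FLAGS_ENABLE | MSIX_FLAGS_MASKALL);

	/* ...then drop the function-wide mask once setup is done. */
	clear_and_set_ctrl(&ctrl, MSIX_FLAGS_MASKALL, 0);

	printf("ctrl = %#06x\n", (unsigned int)ctrl);
	return 0;
}

The two calls in main() mirror how the patched msix_capability_init() and __pci_restore_msix_state() use the helper in the hunks below.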
@@ -105,7 +105,7 @@
 #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
 #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
 
-#define PCI_MAX_RESOURCES 4
+#define RCAR_PCI_MAX_RESOURCES 4
 #define MAX_NR_INBOUND_MAPS 6
 
 struct rcar_msi {
@@ -127,7 +127,7 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_chip *chip)
 struct rcar_pcie {
 	struct device *dev;
 	void __iomem *base;
-	struct resource res[PCI_MAX_RESOURCES];
+	struct resource res[RCAR_PCI_MAX_RESOURCES];
 	struct resource busn;
 	int root_bus_nr;
 	struct clk *clk;
@@ -140,36 +140,37 @@ static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys)
 	return sys->private_data;
 }
 
-static void pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
-			  unsigned long reg)
+static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
+			       unsigned long reg)
 {
 	writel(val, pcie->base + reg);
 }
 
-static unsigned long pci_read_reg(struct rcar_pcie *pcie, unsigned long reg)
+static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
+				       unsigned long reg)
 {
 	return readl(pcie->base + reg);
 }
 
 enum {
-	PCI_ACCESS_READ,
-	PCI_ACCESS_WRITE,
+	RCAR_PCI_ACCESS_READ,
+	RCAR_PCI_ACCESS_WRITE,
 };
 
 static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
 {
 	int shift = 8 * (where & 3);
-	u32 val = pci_read_reg(pcie, where & ~3);
+	u32 val = rcar_pci_read_reg(pcie, where & ~3);
 
 	val &= ~(mask << shift);
 	val |= data << shift;
-	pci_write_reg(pcie, val, where & ~3);
+	rcar_pci_write_reg(pcie, val, where & ~3);
 }
 
 static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
 {
 	int shift = 8 * (where & 3);
-	u32 val = pci_read_reg(pcie, where & ~3);
+	u32 val = rcar_pci_read_reg(pcie, where & ~3);
 
 	return val >> shift;
 }
@@ -205,14 +206,14 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie,
 		if (dev != 0)
 			return PCIBIOS_DEVICE_NOT_FOUND;
 
-		if (access_type == PCI_ACCESS_READ) {
-			*data = pci_read_reg(pcie, PCICONF(index));
+		if (access_type == RCAR_PCI_ACCESS_READ) {
+			*data = rcar_pci_read_reg(pcie, PCICONF(index));
 		} else {
 			/* Keep an eye out for changes to the root bus number */
 			if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
 				pcie->root_bus_nr = *data & 0xff;
 
-			pci_write_reg(pcie, *data, PCICONF(index));
+			rcar_pci_write_reg(pcie, *data, PCICONF(index));
 		}
 
 		return PCIBIOS_SUCCESSFUL;
@@ -222,20 +223,20 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie,
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	/* Clear errors */
-	pci_write_reg(pcie, pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
+	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
 
 	/* Set the PIO address */
-	pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | PCIE_CONF_DEV(dev) |
-		PCIE_CONF_FUNC(func) | reg, PCIECAR);
+	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
+		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
 
 	/* Enable the configuration access */
 	if (bus->parent->number == pcie->root_bus_nr)
-		pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
+		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
 	else
-		pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
+		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
 
 	/* Check for errors */
-	if (pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
+	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	/* Check for master and target aborts */
@@ -243,13 +244,13 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie,
 	    (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
-	if (access_type == PCI_ACCESS_READ)
-		*data = pci_read_reg(pcie, PCIECDR);
+	if (access_type == RCAR_PCI_ACCESS_READ)
+		*data = rcar_pci_read_reg(pcie, PCIECDR);
 	else
-		pci_write_reg(pcie, *data, PCIECDR);
+		rcar_pci_write_reg(pcie, *data, PCIECDR);
 
 	/* Disable the configuration access */
-	pci_write_reg(pcie, 0, PCIECCTLR);
+	rcar_pci_write_reg(pcie, 0, PCIECCTLR);
 
 	return PCIBIOS_SUCCESSFUL;
 }
@@ -260,12 +261,7 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
 	struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
 	int ret;
 
-	if ((size == 2) && (where & 1))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-	else if ((size == 4) && (where & 3))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-
-	ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
+	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, val);
 	if (ret != PCIBIOS_SUCCESSFUL) {
 		*val = 0xffffffff;
@@ -291,12 +287,7 @@ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
 	int shift, ret;
 	u32 data;
 
-	if ((size == 2) && (where & 1))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-	else if ((size == 4) && (where & 3))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-
-	ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
+	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, &data);
 	if (ret != PCIBIOS_SUCCESSFUL)
 		return ret;
@@ -315,7 +306,7 @@ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
 	} else
 		data = val;
 
-	ret = rcar_pcie_config_access(pcie, PCI_ACCESS_WRITE,
+	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
				      bus, devfn, where, &data);
 
 	return ret;
@@ -326,14 +317,15 @@ static struct pci_ops rcar_pcie_ops = {
 	.write = rcar_pcie_write_conf,
 };
 
-static void rcar_pcie_setup_window(int win, struct resource *res,
-				   struct rcar_pcie *pcie)
+static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
 {
+	struct resource *res = &pcie->res[win];
+
 	/* Setup PCIe address space mappings for each resource */
 	resource_size_t size;
 	u32 mask;
 
-	pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
+	rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
 
 	/*
	 * The PAMR mask is calculated in units of 128Bytes, which
@@ -341,17 +333,17 @@ static void rcar_pcie_setup_window(int win, struct resource *res,
 	 */
 	size = resource_size(res);
 	mask = (roundup_pow_of_two(size) / SZ_128) - 1;
-	pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
+	rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
 
-	pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win));
-	pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win));
+	rcar_pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win));
+	rcar_pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win));
 
 	/* First resource is for IO */
 	mask = PAR_ENABLE;
 	if (res->flags & IORESOURCE_IO)
 		mask |= IO_SPACE;
 
-	pci_write_reg(pcie, mask, PCIEPTCTLR(win));
+	rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
 }
 
 static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
@@ -363,13 +355,13 @@ static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
 	pcie->root_bus_nr = -1;
 
 	/* Setup PCI resources */
-	for (i = 0; i < PCI_MAX_RESOURCES; i++) {
+	for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
 
 		res = &pcie->res[i];
 		if (!res->flags)
 			continue;
 
-		rcar_pcie_setup_window(i, res, pcie);
+		rcar_pcie_setup_window(i, pcie);
 
 		if (res->flags & IORESOURCE_IO)
 			pci_ioremap_io(nr * SZ_64K, res->start);
@@ -415,7 +407,7 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie)
 	unsigned int timeout = 100;
 
 	while (timeout--) {
-		if (pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
+		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
 			return 0;
 
 		udelay(100);
@@ -438,15 +430,15 @@ static void phy_write_reg(struct rcar_pcie *pcie,
 		((addr & 0xff) << ADR_POS);
 
 	/* Set write data */
-	pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
-	pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
+	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
+	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
 
 	/* Ignore errors as they will be dealt with if the data link is down */
 	phy_wait_for_ack(pcie);
 
 	/* Clear command */
-	pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
-	pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
+	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
+	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
 
 	/* Ignore errors as they will be dealt with if the data link is down */
 	phy_wait_for_ack(pcie);
@@ -457,7 +449,7 @@ static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
 	unsigned int timeout = 10;
 
 	while (timeout--) {
-		if ((pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+		if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
 			return 0;
 
 		msleep(5);
@@ -471,17 +463,17 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 	int err;
 
 	/* Begin initialization */
-	pci_write_reg(pcie, 0, PCIETCTLR);
+	rcar_pci_write_reg(pcie, 0, PCIETCTLR);
 
 	/* Set mode */
-	pci_write_reg(pcie, 1, PCIEMSR);
+	rcar_pci_write_reg(pcie, 1, PCIEMSR);
 
 	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
 	 */
-	pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
+	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
 
 	/*
	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
@@ -491,33 +483,31 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
 
 	/* Initialize default capabilities. */
-	rcar_rmw32(pcie, REXPCAP(0), 0, PCI_CAP_ID_EXP);
+	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
 	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
 	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		PCI_HEADER_TYPE_BRIDGE);
 
 	/* Enable data link layer active state reporting */
-	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), 0, PCI_EXP_LNKCAP_DLLLARC);
+	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		PCI_EXP_LNKCAP_DLLLARC);
 
 	/* Write out the physical slot number = 0 */
 	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
 
 	/* Set the completion timer timeout to the maximum 50ms. */
-	rcar_rmw32(pcie, TLCTLR+1, 0x3f, 50);
+	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
 
 	/* Terminate list of capabilities (Next Capability Offset=0) */
-	rcar_rmw32(pcie, RVCCAP(0), 0xfff0, 0);
-
-	/* Enable MAC data scrambling. */
-	rcar_rmw32(pcie, MACCTLR, SCRAMBLE_DISABLE, 0);
+	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
 
 	/* Enable MSI */
 	if (IS_ENABLED(CONFIG_PCI_MSI))
-		pci_write_reg(pcie, 0x101f0000, PCIEMSITXR);
+		rcar_pci_write_reg(pcie, 0x101f0000, PCIEMSITXR);
 
 	/* Finish initialization - establish a PCI Express link */
-	pci_write_reg(pcie, CFINIT, PCIETCTLR);
+	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 
 	/* This will timeout if we don't have a link. */
 	err = rcar_pcie_wait_for_dl(pcie);
@@ -527,11 +517,6 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
 	/* Enable INTx interrupts */
 	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
 
-	/* Enable slave Bus Mastering */
-	rcar_rmw32(pcie, RCONF(PCI_STATUS), PCI_STATUS_DEVSEL_MASK,
-		PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
-		PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST);
-
 	wmb();
 
 	return 0;
@@ -560,7 +545,7 @@ static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
 	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 
 	while (timeout--) {
-		if (pci_read_reg(pcie, H1_PCIEPHYSR))
+		if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
 			return rcar_pcie_hw_init(pcie);
 
 		msleep(5);
@@ -599,7 +584,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 	struct rcar_msi *msi = &pcie->msi;
 	unsigned long reg;
 
-	reg = pci_read_reg(pcie, PCIEMSIFR);
+	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 
 	/* MSI & INTx share an interrupt - we only handle MSI here */
 	if (!reg)
@@ -610,7 +595,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 		unsigned int irq;
 
 		/* clear the interrupt */
-		pci_write_reg(pcie, 1 << index, PCIEMSIFR);
+		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
 
 		irq = irq_find_mapping(msi->domain, index);
 		if (irq) {
@@ -624,7 +609,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
 		}
 
 		/* see if there's any more pending in this vector */
-		reg = pci_read_reg(pcie, PCIEMSIFR);
+		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 	}
 
 	return IRQ_HANDLED;
@@ -651,8 +636,8 @@ static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
 
 	irq_set_msi_desc(irq, desc);
 
-	msg.address_lo = pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
-	msg.address_hi = pci_read_reg(pcie, PCIEMSIAUR);
+	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
 	msg.data = hwirq;
 
 	write_msi_msg(irq, &msg);
@@ -729,11 +714,11 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
 	msi->pages = __get_free_pages(GFP_KERNEL, 0);
 	base = virt_to_phys((void *)msi->pages);
 
-	pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
-	pci_write_reg(pcie, 0, PCIEMSIAUR);
+	rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
+	rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
 
 	/* enable all MSI interrupts */
-	pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
 
 	return 0;
 
@@ -826,6 +811,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
 	if (cpu_addr > 0) {
 		unsigned long nr_zeros = __ffs64(cpu_addr);
 		u64 alignment = 1ULL << nr_zeros;
+
 		size = min(range->size, alignment);
 	} else {
 		size = range->size;
@@ -841,13 +827,13 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
	 * Set up 64-bit inbound regions as the range parser doesn't
	 * distinguish between 32 and 64-bit types.
 	 */
-	pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
-	pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
-	pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));
+	rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
+	rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
+	rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));
 
-	pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
-	pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
-	pci_write_reg(pcie, 0, PCIELAMR(idx+1));
+	rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
+	rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
+	rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
 
 	pci_addr += size;
 	cpu_addr += size;
@@ -952,7 +938,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 		of_pci_range_to_resource(&range, pdev->dev.of_node,
					&pcie->res[win++]);
 
-		if (win > PCI_MAX_RESOURCES)
+		if (win > RCAR_PCI_MAX_RESOURCES)
 			break;
 	}
 
@@ -982,7 +968,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 		return 0;
 	}
 
-	data = pci_read_reg(pcie, MACSR);
+	data = rcar_pci_read_reg(pcie, MACSR);
 	dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 
 	rcar_pcie_enable(pcie);

@@ -216,8 +216,7 @@ void cpqhp_create_debugfs_files(struct controller *ctrl)
 
 void cpqhp_remove_debugfs_files(struct controller *ctrl)
 {
-	if (ctrl->dentry)
-		debugfs_remove(ctrl->dentry);
+	debugfs_remove(ctrl->dentry);
 	ctrl->dentry = NULL;
 }
 
@@ -92,9 +92,10 @@ struct controller {
 	struct slot *slot;
 	wait_queue_head_t queue; /* sleep & wake process */
 	u32 slot_cap;
+	u32 slot_ctrl;
 	struct timer_list poll_timer;
+	unsigned long cmd_started; /* jiffies */
 	unsigned int cmd_busy:1;
-	unsigned int no_cmd_complete:1;
 	unsigned int link_active_reporting:1;
 	unsigned int notification_enabled:1;
 	unsigned int power_fault_detected;

@@ -255,6 +255,13 @@ static int pciehp_probe(struct pcie_device *dev)
 	else if (pciehp_acpi_slot_detection_check(dev->port))
 		goto err_out_none;
 
+	if (!dev->port->subordinate) {
+		/* Can happen if we run out of bus numbers during probe */
+		dev_err(&dev->device,
+			"Hotplug bridge without secondary bus, ignoring\n");
+		goto err_out_none;
+	}
+
 	ctrl = pcie_init(dev);
 	if (!ctrl) {
 		dev_err(&dev->device, "Controller initialization failed\n");

@@ -104,11 +104,10 @@ static inline void pciehp_free_irq(struct controller *ctrl)
 		free_irq(ctrl->pcie->irq, ctrl);
 }
 
-static int pcie_poll_cmd(struct controller *ctrl)
+static int pcie_poll_cmd(struct controller *ctrl, int timeout)
 {
 	struct pci_dev *pdev = ctrl_dev(ctrl);
 	u16 slot_status;
-	int timeout = 1000;
 
 	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 	if (slot_status & PCI_EXP_SLTSTA_CC) {
@@ -129,18 +128,52 @@ static int pcie_poll_cmd(struct controller *ctrl)
 	return 0; /* timeout */
 }
 
-static void pcie_wait_cmd(struct controller *ctrl, int poll)
+static void pcie_wait_cmd(struct controller *ctrl)
 {
 	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
-	unsigned long timeout = msecs_to_jiffies(msecs);
+	unsigned long duration = msecs_to_jiffies(msecs);
+	unsigned long cmd_timeout = ctrl->cmd_started + duration;
+	unsigned long now, timeout;
 	int rc;
 
-	if (poll)
-		rc = pcie_poll_cmd(ctrl);
-	else
-		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
+	/*
+	 * If the controller does not generate notifications for command
+	 * completions, we never need to wait between writes.
+	 */
+	if (NO_CMD_CMPL(ctrl))
+		return;
+
+	if (!ctrl->cmd_busy)
+		return;
+
+	/*
+	 * Even if the command has already timed out, we want to call
+	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
+	 */
+	now = jiffies;
+	if (time_before_eq(cmd_timeout, now))
+		timeout = 1;
+	else
+		timeout = cmd_timeout - now;
+
+	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
+	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
+		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
+	else
+		rc = pcie_poll_cmd(ctrl, timeout);
+
+	/*
+	 * Controllers with errata like Intel CF118 don't generate
+	 * completion notifications unless the power/indicator/interlock
+	 * control bits are changed. On such controllers, we'll emit this
+	 * timeout message when we wait for completion of commands that
+	 * don't change those bits, e.g., commands that merely enable
+	 * interrupts.
+	 */
 	if (!rc)
-		ctrl_dbg(ctrl, "Command not completed in 1000 msec\n");
+		ctrl_info(ctrl, "Timeout on hotplug command %#010x (issued %u msec ago)\n",
+			  ctrl->slot_ctrl,
+			  jiffies_to_msecs(now - ctrl->cmd_started));
 }
 
 /**
@@ -152,34 +185,12 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll)
 static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 {
 	struct pci_dev *pdev = ctrl_dev(ctrl);
-	u16 slot_status;
 	u16 slot_ctrl;
 
 	mutex_lock(&ctrl->ctrl_lock);
 
-	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
-	if (slot_status & PCI_EXP_SLTSTA_CC) {
-		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
-					   PCI_EXP_SLTSTA_CC);
-		if (!ctrl->no_cmd_complete) {
-			/*
-			 * After 1 sec and CMD_COMPLETED still not set, just
-			 * proceed forward to issue the next command according
-			 * to spec. Just print out the error message.
-			 */
-			ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n");
-		} else if (!NO_CMD_CMPL(ctrl)) {
-			/*
-			 * This controller seems to notify of command completed
-			 * event even though it supports none of power
-			 * controller, attention led, power led and EMI.
-			 */
-			ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to wait for command completed event\n");
-			ctrl->no_cmd_complete = 0;
-		} else {
-			ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe the controller is broken\n");
-		}
-	}
+	/* Wait for any previous command that might still be in progress */
+	pcie_wait_cmd(ctrl);
 
 	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 	slot_ctrl &= ~mask;
@@ -187,22 +198,9 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 	ctrl->cmd_busy = 1;
 	smp_mb();
 	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
+	ctrl->cmd_started = jiffies;
+	ctrl->slot_ctrl = slot_ctrl;
 
-	/*
-	 * Wait for command completion.
-	 */
-	if (!ctrl->no_cmd_complete) {
-		int poll = 0;
-		/*
-		 * if hotplug interrupt is not enabled or command
-		 * completed interrupt is not enabled, we need to poll
-		 * command completed event.
-		 */
-		if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) ||
-		    !(slot_ctrl & PCI_EXP_SLTCTL_CCIE))
-			poll = 1;
-		pcie_wait_cmd(ctrl, poll);
-	}
 	mutex_unlock(&ctrl->ctrl_lock);
 }
 
@@ -773,15 +771,6 @@ struct controller *pcie_init(struct pcie_device *dev)
 	mutex_init(&ctrl->ctrl_lock);
 	init_waitqueue_head(&ctrl->queue);
 	dbg_ctrl(ctrl);
-	/*
-	 * Controller doesn't notify of command completion if the "No
-	 * Command Completed Support" bit is set in Slot Capability
-	 * register or the controller supports none of power
-	 * controller, attention led, power led and EMI.
-	 */
-	if (NO_CMD_CMPL(ctrl) ||
-	    !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
-		ctrl->no_cmd_complete = 1;
 
 	/* Check if Data Link Layer Link Active Reporting is implemented */
 	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
@@ -794,7 +783,7 @@ struct controller *pcie_init(struct pcie_device *dev)
 	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
-		PCI_EXP_SLTSTA_CC);
+		PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
 
 	/* Disable software notification */
 	pcie_disable_notification(ctrl);

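An aside on the "compute timeout from hotplug command start time" change shown above: pcie_write_cmd() now records ctrl->cmd_started when it issues a command, and pcie_wait_cmd() only waits for whatever remains of the 1-second budget (2.5 s in poll mode), still checking once even when the budget has already expired so the Command Completed status can be cleared. The user-space C sketch below illustrates only that remaining-time computation with a monotonic clock; it is not kernel code, and the names and values are illustrative.

#include <stdio.h>
#include <time.h>

/* Millisecond timestamp from a monotonic clock (stands in for jiffies). */
static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

int main(void)
{
	long cmd_started = now_ms();	/* recorded when the command was written */
	long budget_ms = 1000;		/* 2500 when polling, as in the hunks above */
	long remaining;

	/* ... unrelated work runs here; we only wait when the next command is due ... */

	remaining = budget_ms - (now_ms() - cmd_started);
	if (remaining <= 0)
		remaining = 1;	/* already timed out, but still check once to clear status */

	printf("wait up to %ld ms for the previous command to complete\n", remaining);
	return 0;
}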
@@ -149,15 +149,14 @@ static void msi_set_enable(struct pci_dev *dev, int enable)
 	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
 }
 
-static void msix_set_enable(struct pci_dev *dev, int enable)
+static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
 {
-	u16 control;
+	u16 ctrl;
 
-	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
-	control &= ~PCI_MSIX_FLAGS_ENABLE;
-	if (enable)
-		control |= PCI_MSIX_FLAGS_ENABLE;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+	ctrl &= ~clear;
+	ctrl |= set;
+	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
 }
 
 static inline __attribute_const__ u32 msi_mask(unsigned x)
@@ -168,16 +167,6 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
 	return (1 << (1 << x)) - 1;
 }
 
-static inline __attribute_const__ u32 msi_capable_mask(u16 control)
-{
-	return msi_mask((control >> 1) & 7);
-}
-
-static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
-{
-	return msi_mask((control >> 4) & 7);
-}
-
 /*
  * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
  * mask all MSI interrupts by clearing the MSI enable bit does not work
@@ -460,7 +449,8 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
 	arch_restore_msi_irqs(dev);
 
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
-	msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
+	msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
+		     entry->masked);
 	control &= ~PCI_MSI_FLAGS_QSIZE;
 	control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
 	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
@@ -469,26 +459,23 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
 static void __pci_restore_msix_state(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
-	u16 control;
 
 	if (!dev->msix_enabled)
 		return;
 	BUG_ON(list_empty(&dev->msi_list));
-	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
-	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
 
 	/* route the table */
 	pci_intx_for_msi(dev, 0);
-	control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, 0,
+				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
 
 	arch_restore_msi_irqs(dev);
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		msix_mask_irq(entry, entry->masked);
 	}
 
-	control &= ~PCI_MSIX_FLAGS_MASKALL;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
 
 void pci_restore_msi_state(struct pci_dev *dev)
@@ -626,6 +613,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
 	entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
 	entry->msi_attrib.pos = dev->msi_cap;
+	entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
 
 	if (control & PCI_MSI_FLAGS_64BIT)
 		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -634,7 +622,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	/* All MSIs are unmasked by default, Mask them all */
 	if (entry->msi_attrib.maskbit)
 		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
-	mask = msi_capable_mask(control);
+	mask = msi_mask(entry->msi_attrib.multi_cap);
 	msi_mask_irq(entry, mask, mask);
 
 	list_add_tail(&entry->list, &dev->msi_list);
@@ -743,12 +731,10 @@ static int msix_capability_init(struct pci_dev *dev,
 	u16 control;
 	void __iomem *base;
 
-	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
-
 	/* Ensure MSI-X is disabled while it is set up */
-	control &= ~PCI_MSIX_FLAGS_ENABLE;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 
+	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
 	/* Request & Map MSI-X table region */
 	base = msix_map_region(dev, msix_table_size(control));
 	if (!base)
@@ -767,8 +753,8 @@ static int msix_capability_init(struct pci_dev *dev,
	 * MSI-X registers. We need to mask all the vectors to prevent
	 * interrupts coming in before they're fully set up.
 	 */
-	control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, 0,
+				PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
 
 	msix_program_entries(dev, entries);
 
@@ -780,8 +766,7 @@ static int msix_capability_init(struct pci_dev *dev,
 	pci_intx_for_msi(dev, 0);
 	dev->msix_enabled = 1;
 
-	control &= ~PCI_MSIX_FLAGS_MASKALL;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 
 	return 0;
 
@@ -882,7 +867,6 @@ void pci_msi_shutdown(struct pci_dev *dev)
 {
 	struct msi_desc *desc;
 	u32 mask;
-	u16 ctrl;
 
 	if (!pci_msi_enable || !dev || !dev->msi_enabled)
 		return;
@@ -895,8 +879,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
 	dev->msi_enabled = 0;
 
 	/* Return the device with MSI unmasked as initial states */
-	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
-	mask = msi_capable_mask(ctrl);
+	mask = msi_mask(desc->msi_attrib.multi_cap);
 	/* Keep cached state to be restored */
 	arch_msi_mask_irq(desc, mask, ~mask);
 
@@ -1001,7 +984,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
 		arch_msix_mask_irq(entry, 1);
 	}
 
-	msix_set_enable(dev, 0);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 	pci_intx_for_msi(dev, 1);
 	dev->msix_enabled = 0;
 }
@@ -1065,7 +1048,7 @@ void pci_msi_init_pci_dev(struct pci_dev *dev)
 
 	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
 	if (dev->msix_cap)
-		msix_set_enable(dev, 0);
+		msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 }
 
 /**

@@ -161,8 +161,8 @@ enum acpi_attr_enum {
 static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
 {
 	int len;
-	len = utf16s_to_utf8s((const wchar_t *)obj->string.pointer,
-			      obj->string.length,
+	len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer,
+			      obj->buffer.length,
			      UTF16_LITTLE_ENDIAN,
			      buf, PAGE_SIZE);
 	buf[len] = '\n';
@@ -187,16 +187,22 @@ static int dsm_get_label(struct device *dev, char *buf,
 	tmp = obj->package.elements;
 	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 &&
	    tmp[0].type == ACPI_TYPE_INTEGER &&
-	    tmp[1].type == ACPI_TYPE_STRING) {
+	    (tmp[1].type == ACPI_TYPE_STRING ||
+	     tmp[1].type == ACPI_TYPE_BUFFER)) {
 		/*
		 * The second string element is optional even when
		 * this _DSM is implemented; when not implemented,
		 * this entry must return a null string.
 		 */
-		if (attr == ACPI_ATTR_INDEX_SHOW)
+		if (attr == ACPI_ATTR_INDEX_SHOW) {
 			scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value);
-		else if (attr == ACPI_ATTR_LABEL_SHOW)
-			dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+		} else if (attr == ACPI_ATTR_LABEL_SHOW) {
+			if (tmp[1].type == ACPI_TYPE_STRING)
+				scnprintf(buf, PAGE_SIZE, "%s\n",
+					  tmp[1].string.pointer);
+			else if (tmp[1].type == ACPI_TYPE_BUFFER)
+				dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+		}
 		len = strlen(buf) > 0 ? strlen(buf) : -1;
 	}
 
@@ -203,10 +203,6 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
	     (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
 		return -ENODEV;
 
-	if (!dev->irq && dev->pin) {
-		dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; check vendor BIOS\n",
-			 dev->vendor, dev->device);
-	}
 	status = pcie_port_device_register(dev);
 	if (status)
 		return status;

@@ -3405,6 +3405,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
 DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
 /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
 DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
+/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
+DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
 
 static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
 {

@@ -25,7 +25,8 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg);
 struct msi_desc {
 	struct {
 		__u8 is_msix : 1;
-		__u8 multiple: 3; /* log2 number of messages */
+		__u8 multiple: 3; /* log2 num of messages allocated */
+		__u8 multi_cap : 3; /* log2 num of messages supported */
 		__u8 maskbit : 1; /* mask-pending bit supported ? */
 		__u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
 		__u8 pos; /* Location of the msi capability */