Merge branch 'pci/yijing-pci_is_pcie-v2' into next
* pci/yijing-pci_is_pcie-v2:
  powerpc/pci: Use pci_is_pcie() to simplify code
  [SCSI] qla2xxx: Use pcie_is_pcie() to simplify code
  [SCSI] csiostor: Use pcie_capability_clear_and_set_word() to simplify code
  [SCSI] bfa: Use pcie_set()/get_readrq() to simplify code
  x86/pci: Use cached pci_dev->pcie_cap to simplify code
  PCI: Use pci_is_pcie() to simplify code
Commit 63495fff27
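The common thread of the series: drivers stop open-coding PCIe capability lookups and raw config-space read-modify-write sequences and use the generic PCI core helpers instead. As a rough, hypothetical sketch only (this function is not part of the merge; the name is made up), the converted call sites end up following a pattern like this:

#include <linux/pci.h>

/* Hypothetical illustration of the helper-based style; the drivers in this
 * merge each do their own variant of these steps. */
static void example_pcie_tuning(struct pci_dev *pdev)
{
	/* pci_is_pcie() tests the cached pdev->pcie_cap instead of probing
	 * the capability list with pci_find_capability(pdev, PCI_CAP_ID_EXP). */
	if (!pci_is_pcie(pdev))
		return;

	/* Max_Read_Request_Size: pcie_set_readrq() validates the size and
	 * performs the Device Control read-modify-write internally. */
	pcie_set_readrq(pdev, 2048);

	/* Completion Timeout range in Device Control 2, clearing only the
	 * timeout field and leaving the other DEVCTL2 bits untouched. */
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
}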
@@ -189,8 +189,7 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
 	}
 
 	/* If PCI-E capable, dump PCI-E cap 10, and the AER */
-	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
-	if (cap) {
+	if (pci_is_pcie(dev)) {
 		n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
 		printk(KERN_WARNING
 		       "EEH: PCI-E capabilities and status follow:\n");
@@ -45,7 +45,7 @@ static void quirk_fsl_pcie_header(struct pci_dev *dev)
 	u8 hdr_type;
 
 	/* if we aren't a PCIe don't bother */
-	if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
+	if (!pci_is_pcie(dev))
 		return;
 
 	/* if we aren't in host mode don't bother */
@@ -231,7 +231,7 @@ static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int wh
 	offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];
 
 	if ((offset) && (where == offset))
-		value = value & 0xfffffffc;
+		value = value & ~PCI_EXP_LNKCTL_ASPMC;
 
 	return raw_pci_write(pci_domain_nr(bus), bus->number,
 			devfn, where, size, value);
@@ -252,7 +252,7 @@ static struct pci_ops quirk_pcie_aspm_ops = {
  */
 static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
 {
-	int cap_base, i;
+	int i;
 	struct pci_bus *pbus;
 	struct pci_dev *dev;
 
@@ -278,7 +278,7 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
 		for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
 			quirk_aspm_offset[i] = 0;
 
-		pbus->ops = pbus->parent->ops;
+		pci_bus_set_ops(pbus, pbus->parent->ops);
 	} else {
 		/*
 		 * If devices are attached to the root port at power-up or
@@ -286,13 +286,15 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
 		 * each root port to save the register offsets and replace the
 		 * bus ops.
 		 */
-		list_for_each_entry(dev, &pbus->devices, bus_list) {
+		list_for_each_entry(dev, &pbus->devices, bus_list)
 			/* There are 0 to 8 devices attached to this bus */
-			cap_base = pci_find_capability(dev, PCI_CAP_ID_EXP);
-			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] = cap_base + 0x10;
-		}
-		pbus->ops = &quirk_pcie_aspm_ops;
+			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
+				dev->pcie_cap + PCI_EXP_LNKCTL;
+
+		pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
 		dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
 	}
 
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk);
@@ -641,8 +641,7 @@ static void pci_set_bus_speed(struct pci_bus *bus)
 		return;
 	}
 
-	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
-	if (pos) {
+	if (pci_is_pcie(bridge)) {
 		u32 linkcap;
 		u16 linksta;
 
@@ -766,49 +766,20 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 	bfad->pcidev = pdev;
 
 	/* Adjust PCIe Maximum Read Request Size */
-	if (pcie_max_read_reqsz > 0) {
-		int pcie_cap_reg;
-		u16 pcie_dev_ctl;
-		u16 mask = 0xffff;
-
-		switch (pcie_max_read_reqsz) {
-		case 128:
-			mask = 0x0;
-			break;
-		case 256:
-			mask = 0x1000;
-			break;
-		case 512:
-			mask = 0x2000;
-			break;
-		case 1024:
-			mask = 0x3000;
-			break;
-		case 2048:
-			mask = 0x4000;
-			break;
-		case 4096:
-			mask = 0x5000;
-			break;
-		default:
-			break;
-		}
-
-		pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-		if (mask != 0xffff && pcie_cap_reg) {
-			pcie_cap_reg += 0x08;
-			pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
-			if ((pcie_dev_ctl & 0x7000) != mask) {
-				printk(KERN_WARNING "BFA[%s]: "
+	if (pci_is_pcie(pdev) && pcie_max_read_reqsz) {
+		if (pcie_max_read_reqsz >= 128 &&
+		    pcie_max_read_reqsz <= 4096 &&
+		    is_power_of_2(pcie_max_read_reqsz)) {
+			int max_rq = pcie_get_readrq(pdev);
+			printk(KERN_WARNING "BFA[%s]: "
 				"pcie_max_read_request_size is %d, "
-				"reset to %d\n", bfad->pci_name,
-				(1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
+				"reset to %d\n", bfad->pci_name, max_rq,
 				pcie_max_read_reqsz);
-
-				pcie_dev_ctl &= ~0x7000;
-				pci_write_config_word(pdev, pcie_cap_reg,
-					pcie_dev_ctl | mask);
-			}
+			pcie_set_readrq(pdev, pcie_max_read_reqsz);
+		} else {
+			printk(KERN_WARNING "BFA[%s]: invalid "
+			       "pcie_max_read_request_size %d ignored\n",
+			       bfad->pci_name, pcie_max_read_reqsz);
 		}
 	}
 
@@ -852,22 +852,6 @@ csio_hw_get_flash_params(struct csio_hw *hw)
 	return 0;
 }
 
-static void
-csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
-{
-	uint16_t val;
-	int pcie_cap;
-
-	if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
-		pci_read_config_word(hw->pdev,
-				     pcie_cap + PCI_EXP_DEVCTL2, &val);
-		val &= 0xfff0;
-		val |= range ;
-		pci_write_config_word(hw->pdev,
-				      pcie_cap + PCI_EXP_DEVCTL2, val);
-	}
-}
-
 /*****************************************************************************/
 /* HW State machine assists                                                  */
 /*****************************************************************************/
@@ -2069,8 +2053,10 @@ csio_hw_configure(struct csio_hw *hw)
 		goto out;
 	}
 
-	/* Set pci completion timeout value to 4 seconds. */
-	csio_set_pcie_completion_timeout(hw, 0xd);
+	/* Set PCIe completion timeout to 4 seconds */
+	if (pci_is_pcie(hw->pdev))
+		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
+				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
 
 	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
 
@@ -507,7 +507,7 @@ qlafx00_pci_config(scsi_qla_host_t *vha)
 	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
 
 	/* PCIe -- adjust Maximum Read Request Size (2048). */
-	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+	if (pci_is_pcie(ha->pdev))
 		pcie_set_readrq(ha->pdev, 2048);
 
 	ha->chip_revision = ha->pdev->revision;
@@ -660,10 +660,8 @@ char *
 qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
 {
 	struct qla_hw_data *ha = vha->hw;
-	int pcie_reg;
 
-	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
-	if (pcie_reg) {
+	if (pci_is_pcie(ha->pdev)) {
 		strcpy(str, "PCIe iSA");
 		return str;
 	}
@@ -558,7 +558,8 @@
 #define  PCI_EXP_DEVCAP2_OBFF_MSG	0x00040000 /* New message signaling */
 #define  PCI_EXP_DEVCAP2_OBFF_WAKE	0x00080000 /* Re-use WAKE# for OBFF */
 #define PCI_EXP_DEVCTL2		40	/* Device Control 2 */
-#define  PCI_EXP_DEVCTL2_ARI		0x20	/* Alternative Routing-ID */
+#define  PCI_EXP_DEVCTL2_COMP_TIMEOUT	0x000f	/* Completion Timeout Value */
+#define  PCI_EXP_DEVCTL2_ARI		0x0020	/* Alternative Routing-ID */
 #define  PCI_EXP_DEVCTL2_IDO_REQ_EN	0x0100	/* Allow IDO for requests */
 #define  PCI_EXP_DEVCTL2_IDO_CMP_EN	0x0200	/* Allow IDO for completions */
 #define  PCI_EXP_DEVCTL2_LTR_EN		0x0400	/* Enable LTR mechanism */