Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6

* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (98 commits)
  PCI PM: Put PM callbacks in the order of execution
  PCI PM: Run default PM callbacks for all devices using new framework
  PCI PM: Register power state of devices during initialization
  PCI PM: Call pci_fixup_device from legacy routines
  PCI PM: Rearrange code in pci-driver.c
  PCI PM: Avoid touching devices behind bridges in unknown state
  PCI PM: Move pci_has_legacy_pm_support
  PCI PM: Power-manage devices without drivers during suspend-resume
  PCI PM: Add suspend counterpart of pci_reenable_device
  PCI PM: Fix poweroff and restore callbacks
  PCI: Use msleep instead of cpu_relax during ASPM link retraining
  PCI: PCIe portdrv: Add kerneldoc comments to remaining core functions
  PCI: PCIe portdrv: Rearrange code so that related things are together
  PCI: PCIe portdrv: Fix suspend and resume of PCI Express port services
  PCI: PCIe portdrv: Add kerneldoc comments to some core functions
  x86/PCI: Do not use interrupt links for devices using MSI-X
  net: sfc: Use pci_clear_master() to disable bus mastering
  PCI: Add pci_clear_master() as opposite of pci_set_master()
  PCI hotplug: remove redundant test in cpq hotplug
  PCI: pciehp: cleanup register and field definitions
  ...
Linus Torvalds 2009-01-07 15:41:01 -08:00
Parent 7c7758f99d f6dc1e5e3d
Commit b424e8d3b4
74 changed files with 2193 additions and 1375 deletions


@ -294,7 +294,8 @@ NOTE: pci_enable_device() can fail! Check the return value.
pci_set_master() will enable DMA by setting the bus master bit
in the PCI_COMMAND register. It also fixes the latency timer value if
it's set to something bogus by the BIOS.
it's set to something bogus by the BIOS. pci_clear_master() will
disable DMA by clearing the bus master bit.
If the PCI device can use the PCI Memory-Write-Invalidate transaction,
call pci_set_mwi(). This enables the PCI_COMMAND bit for Mem-Wr-Inval
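As a quick illustration of how these calls are typically paired in a driver, here is a hedged sketch; example_probe()/example_remove() and the messages are hypothetical, not taken from this document:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);	/* can fail -- always check the result */
	if (err)
		return err;

	pci_set_master(pdev);		/* enable bus mastering so the device can DMA */

	if (pci_set_mwi(pdev))		/* optional: Memory-Write-Invalidate */
		dev_info(&pdev->dev, "MWI not available, continuing without it\n");

	/* ... map BARs, request the IRQ, etc. ... */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_clear_master(pdev);		/* stop DMA before tearing the device down */
	pci_disable_device(pdev);
}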


@ -919,6 +919,10 @@ and is between 256 and 4096 characters. It is defined in the file
inttest= [IA64]
iomem= Disable strict checking of access to MMIO memory
strict regions from userspace.
relaxed
iommu= [x86]
off
force


@ -320,24 +320,6 @@ pcibios_update_irq(struct pci_dev *dev, int irq)
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
/* Most Alphas have straight-forward swizzling needs. */
u8 __init
common_swizzle(struct pci_dev *dev, u8 *pinp)
{
u8 pin = *pinp;
while (dev->bus->parent) {
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
/* Move up the chain of bridges. */
dev = dev->bus->self;
}
*pinp = pin;
/* The slot is the slot of the last bridge. */
return PCI_SLOT(dev->devfn);
}
void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)


@ -106,16 +106,11 @@ struct pci_iommu_arena;
* Where A = pin 1, B = pin 2 and so on and pin=0 = default = A.
* Thus, each swizzle is ((pin-1) + (device#-4)) % 4
*
* The following code swizzles for exactly one bridge. The routine
* common_swizzle below handles multiple bridges. But there are a
* couple boards that do strange things, so we define this here.
* pci_swizzle_interrupt_pin() swizzles for exactly one bridge. The routine
* pci_common_swizzle() handles multiple bridges. But there are a
* couple boards that do strange things.
*/
static inline u8 bridge_swizzle(u8 pin, u8 slot)
{
return (((pin-1) + slot) % 4) + 1;
}
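For reference, a sketch of the two generic helpers the comment now refers to, reconstructed from the swizzle formula above; the real definitions live in the PCI core and are not part of this hunk:

/* Swizzle the interrupt pin across exactly one PCI-PCI bridge (pins are 1-4). */
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
	return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
}

/*
 * Walk up to the root bus, swizzling at every bridge.  Updates *pinp to the
 * swizzled pin and returns the slot number of the last bridge crossed.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (dev->bus->parent) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	return PCI_SLOT(dev->devfn);
}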
/* The following macro is used to implement the table-based irq mapping
function for all single-bus Alphas. */
@ -184,7 +179,7 @@ extern int pci_probe_only;
extern unsigned long alpha_agpgart_size;
extern void common_init_pci(void);
extern u8 common_swizzle(struct pci_dev *, u8 *);
#define common_swizzle pci_common_swizzle
extern struct pci_controller *alloc_pci_controller(void);
extern struct resource *alloc_resource(void);


@ -481,7 +481,7 @@ monet_swizzle(struct pci_dev *dev, u8 *pinp)
slot = PCI_SLOT(dev->devfn);
break;
}
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ;
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;


@ -204,7 +204,7 @@ eiger_swizzle(struct pci_dev *dev, u8 *pinp)
break;
}
/* Must be a card-based bridge. */
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;


@ -219,7 +219,7 @@ miata_swizzle(struct pci_dev *dev, u8 *pinp)
slot = PCI_SLOT(dev->devfn) + 9;
break;
}
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;


@ -257,7 +257,7 @@ noritake_swizzle(struct pci_dev *dev, u8 *pinp)
slot = PCI_SLOT(dev->devfn) + 15;
break;
}
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ;
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;


@ -160,7 +160,7 @@ ruffian_swizzle(struct pci_dev *dev, u8 *pinp)
slot = PCI_SLOT(dev->devfn) + 10;
break;
}
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;


@ -425,7 +425,7 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
slot = PCI_SLOT(dev->devfn) + 11;
break;
}
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ;
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;


@ -42,7 +42,7 @@ struct pci_sys_data {
/*
* This is the standard PCI-PCI bridge swizzling algorithm.
*/
u8 pci_std_swizzle(struct pci_dev *dev, u8 *pinp);
#define pci_std_swizzle pci_common_swizzle
/*
* Call this with your hw_pci struct to initialise the PCI system.


@ -479,33 +479,6 @@ EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
#endif
/*
* This is the standard PCI-PCI bridge swizzling algorithm:
*
* Dev:  0   1   2   3
*  A    A   B   C   D
*  B    B   C   D   A
*  C    C   D   A   B
*  D    D   A   B   C
* ^^^^^^^^^^ irq pin on bridge
*/
u8 __devinit pci_std_swizzle(struct pci_dev *dev, u8 *pinp)
{
int pin = *pinp - 1;
while (dev->bus->self) {
pin = (pin + PCI_SLOT(dev->devfn)) & 3;
/*
* move up the chain of bridges,
* swizzling as we go.
*/
dev = dev->bus->self;
}
*pinp = pin + 1;
return PCI_SLOT(dev->devfn);
}
/*
* Swizzle the device pin each time we cross a bridge.
* This might update pin and returns the slot number.


@ -63,13 +63,7 @@
*
* Where A = pin 1, B = pin 2 and so on and pin=0 = default = A.
* Thus, each swizzle is ((pin-1) + (device#-4)) % 4
*
* The following code swizzles for exactly one bridge.
*/
static inline int bridge_swizzle(int pin, unsigned int slot)
{
return (pin + slot) & 3;
}
/*
* This routine handles multiple bridges.
@ -81,15 +75,14 @@ static u8 __init integrator_swizzle(struct pci_dev *dev, u8 *pinp)
if (pin == 0)
pin = 1;
pin -= 1;
while (dev->bus->self) {
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
pin = pci_swizzle_interrupt_pin(dev, pin);
/*
* move up the chain of bridges, swizzling as we go.
*/
dev = dev->bus->self;
}
*pinp = pin + 1;
*pinp = pin;
return PCI_SLOT(dev->devfn);
}


@ -146,12 +146,6 @@ int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return 0;
}
/* Most MIPS systems have straight-forward swizzling needs. */
static inline u8 bridge_swizzle(u8 pin, u8 slot)
{
return (((pin - 1) + slot) % 4) + 1;
}
static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev)
{
while (dev->bus->parent) {


@ -149,28 +149,6 @@ out:
"Skipping PCI bus scan due to resource conflict\n");
}
/* Most MIPS systems have straight-forward swizzling needs. */
static inline u8 bridge_swizzle(u8 pin, u8 slot)
{
return (((pin - 1) + slot) % 4) + 1;
}
static u8 __init common_swizzle(struct pci_dev *dev, u8 *pinp)
{
u8 pin = *pinp;
while (dev->bus->parent) {
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
/* Move up the chain of bridges. */
dev = dev->bus->self;
}
*pinp = pin;
/* The slot is the slot of the last bridge. */
return PCI_SLOT(dev->devfn);
}
static int __init pcibios_init(void)
{
struct pci_controller *hose;
@ -179,7 +157,7 @@ static int __init pcibios_init(void)
for (hose = hose_head; hose; hose = hose->next)
pcibios_scanbus(hose);
pci_fixup_irqs(common_swizzle, pcibios_map_irq);
pci_fixup_irqs(pci_common_swizzle, pcibios_map_irq);
pci_initialized = 1;


@ -232,11 +232,6 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
{
return (((pin - 1) + slot) % 4) + 1;
}
int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
{
struct device_node *dn, *ppnode;
@ -306,7 +301,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
/* We can only get here if we hit a P2P bridge with no node,
* let's do standard swizzling and try again
*/
lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
lspec = pci_swizzle_interrupt_pin(pdev, lspec);
pdev = ppdev;
}


@ -5,11 +5,6 @@
#include <cpu/irq.h>
#include "pci-sh5.h"
static inline u8 bridge_swizzle(u8 pin, u8 slot)
{
return (((pin - 1) + slot) % 4) + 1;
}
int __init pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
int result = -1;
@ -42,7 +37,7 @@ int __init pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin)
while (dev->bus->number > 0) {
slot = path[i].slot = PCI_SLOT(dev->devfn);
pin = path[i].pin = bridge_swizzle(pin, slot);
pin = path[i].pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
i++;
if (i > 3) panic("PCI path to root bus too long!\n");
@ -56,7 +51,7 @@ int __init pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin)
if ((slot < 3) || (i == 0)) {
/* Bus 0 (incl. PCI-PCI bridge itself) : perform the final
swizzle now. */
result = IRQ_INTA + bridge_swizzle(pin, slot) - 1;
result = IRQ_INTA + pci_swizzle_interrupt_pin(dev, pin) - 1;
} else {
i--;
slot = path[i].slot;


@ -21,26 +21,6 @@
#include <linux/init.h>
#include <asm/io.h>
static inline u8 bridge_swizzle(u8 pin, u8 slot)
{
return (((pin - 1) + slot) % 4) + 1;
}
static u8 __init simple_swizzle(struct pci_dev *dev, u8 *pinp)
{
u8 pin = *pinp;
while (dev->bus->parent) {
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
/* Move up the chain of bridges. */
dev = dev->bus->self;
}
*pinp = pin;
/* The slot is the slot of the last bridge. */
return PCI_SLOT(dev->devfn);
}
static int __init pcibios_init(void)
{
struct pci_channel *p;
@ -61,7 +41,7 @@ static int __init pcibios_init(void)
busno = bus->subordinate + 1;
}
pci_fixup_irqs(simple_swizzle, pcibios_map_platform_irq);
pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq);
return 0;
}


@ -38,7 +38,7 @@ EXPORT_SYMBOL(bad_dma_address);
be probably a smaller DMA mask, but this is bug-to-bug compatible
to older i386. */
struct device x86_dma_fallback_dev = {
.bus_id = "fallback device",
.init_name = "fallback device",
.coherent_dma_mask = DMA_32BIT_MASK,
.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};


@ -328,6 +328,8 @@ int devmem_is_allowed(unsigned long pagenr)
{
if (pagenr <= 256)
return 1;
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
return 0;
if (!page_is_ram(pagenr))
return 1;
return 0;


@ -888,6 +888,8 @@ int devmem_is_allowed(unsigned long pagenr)
{
if (pagenr <= 256)
return 1;
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
return 0;
if (!page_is_ram(pagenr))
return 1;
return 0;
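Both devmem_is_allowed() hunks defer to iomem_is_exclusive(). A simplified sketch of what that check is assumed to do, tying it to the iomem=strict documentation earlier in this merge (an approximation for illustration, not the function body from this series; the real implementation also walks nested resources):

int iomem_is_exclusive(u64 addr)
{
	struct resource *p;
	int err = 0;

	if (!strict_iomem_checks)	/* honour iomem=relaxed */
		return 0;

	addr &= PAGE_MASK;

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p; p = p->sibling) {
		if (p->start >= addr + PAGE_SIZE)
			break;
		if (p->end < addr)
			continue;
		/* a driver claimed this region busy and exclusive */
		if ((p->flags & IORESOURCE_BUSY) &&
		    (p->flags & IORESOURCE_EXCLUSIVE)) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}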


@ -210,11 +210,10 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
if (pxm >= 0)
printk(KERN_DEBUG "bus %02x -> pxm %d -> node %d\n",
busnum, pxm, node);
dev_printk(KERN_DEBUG, &bus->dev,
"on NUMA node %d (pxm %d)\n", node, pxm);
#else
printk(KERN_DEBUG "bus %02x -> node %d\n",
busnum, node);
dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
}


@ -551,17 +551,25 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
if ((err = pci_enable_resources(dev, mask)) < 0)
return err;
if (!dev->msi_enabled)
if (!pci_dev_msi_enabled(dev))
return pcibios_enable_irq(dev);
return 0;
}
void pcibios_disable_device (struct pci_dev *dev)
{
if (!dev->msi_enabled && pcibios_disable_irq)
if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
pcibios_disable_irq(dev);
}
int pci_ext_cfg_avail(struct pci_dev *dev)
{
if (raw_pci_ext_ops)
return 1;
else
return 0;
}
struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node)
{
struct pci_bus *bus = NULL;


@ -129,7 +129,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
pr = pci_find_parent_resource(dev, r);
if (!r->start || !pr ||
request_resource(pr, r) < 0) {
dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);
dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
/*
* Something is wrong with the region.
* Invalidate the resource to prevent
@ -170,7 +170,7 @@ static void __init pcibios_allocate_resources(int pass)
r->flags, disabled, pass);
pr = pci_find_parent_resource(dev, r);
if (!pr || request_resource(pr, r) < 0) {
dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);
dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
/* We'll assign a new address later */
r->end -= r->start;
r->start = 0;


@ -12,6 +12,7 @@ static __init int pci_arch_init(void)
type = pci_direct_probe();
#endif
if (!(pci_probe & PCI_PROBE_NOEARLY))
pci_mmcfg_early_init();
#ifdef CONFIG_PCI_OLPC


@ -533,7 +533,7 @@ static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
{
struct pci_dev *bridge;
int pin = pci_get_interrupt_pin(dev, &bridge);
return pcibios_set_irq_routing(bridge, pin, irq);
return pcibios_set_irq_routing(bridge, pin - 1, irq);
}
#endif
@ -887,7 +887,6 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
dev_dbg(&dev->dev, "no interrupt pin\n");
return 0;
}
pin = pin - 1;
/* Find IRQ routing entry */
@ -897,17 +896,17 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
info = pirq_get_info(dev);
if (!info) {
dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n",
'A' + pin);
'A' + pin - 1);
return 0;
}
pirq = info->irq[pin].link;
mask = info->irq[pin].bitmap;
pirq = info->irq[pin - 1].link;
mask = info->irq[pin - 1].bitmap;
if (!pirq) {
dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin);
dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin - 1);
return 0;
}
dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x",
'A' + pin, pirq, mask, pirq_table->exclusive_irqs);
'A' + pin - 1, pirq, mask, pirq_table->exclusive_irqs);
mask &= pcibios_irq_mask;
/* Work around broken HP Pavilion Notebooks which assign USB to
@ -949,7 +948,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
newirq = i;
}
}
dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin, newirq);
dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin - 1, newirq);
/* Check if it is hardcoded */
if ((pirq & 0xf0) == 0xf0) {
@ -977,18 +976,18 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
return 0;
}
}
dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin, irq);
dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin - 1, irq);
/* Update IRQ for all devices with the same pirq value */
while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
if (!pin)
continue;
pin--;
info = pirq_get_info(dev2);
if (!info)
continue;
if (info->irq[pin].link == pirq) {
if (info->irq[pin - 1].link == pirq) {
/*
* We refuse to override the dev->irq
* information. Give a warning!
@ -1042,6 +1041,9 @@ static void __init pcibios_fixup_irqs(void)
dev = NULL;
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (!pin)
continue;
#ifdef CONFIG_X86_IO_APIC
/*
* Recalculate IRQ numbers if we use the I/O APIC.
@ -1049,15 +1051,11 @@ static void __init pcibios_fixup_irqs(void)
if (io_apic_assign_pci_irqs) {
int irq;
if (!pin)
continue;
/*
* interrupt pins are numbered starting from 1
*/
pin--;
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
PCI_SLOT(dev->devfn), pin);
PCI_SLOT(dev->devfn), pin - 1);
/*
* Busses behind bridges are typically not listed in the
* MP-table. In this case we have to look up the IRQ
@ -1070,22 +1068,22 @@ static void __init pcibios_fixup_irqs(void)
struct pci_dev *bridge = dev->bus->self;
int bus;
pin = (pin + PCI_SLOT(dev->devfn)) % 4;
pin = pci_swizzle_interrupt_pin(dev, pin);
bus = bridge->bus->number;
irq = IO_APIC_get_PCI_irq_vector(bus,
PCI_SLOT(bridge->devfn), pin);
PCI_SLOT(bridge->devfn), pin - 1);
if (irq >= 0)
dev_warn(&dev->dev,
"using bridge %s INT %c to "
"get IRQ %d\n",
pci_name(bridge),
'A' + pin, irq);
'A' + pin - 1, irq);
}
if (irq >= 0) {
dev_info(&dev->dev,
"PCI->APIC IRQ transform: INT %c "
"-> IRQ %d\n",
'A' + pin, irq);
'A' + pin - 1, irq);
dev->irq = irq;
}
}
@ -1093,7 +1091,7 @@ static void __init pcibios_fixup_irqs(void)
/*
* Still no IRQ? Try to lookup one...
*/
if (pin && !dev->irq)
if (!dev->irq)
pcibios_lookup_irq(dev, 0);
}
}
@ -1220,12 +1218,10 @@ static int pirq_enable_irq(struct pci_dev *dev)
if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
char *msg = "";
pin--; /* interrupt pins are numbered starting from 1 */
if (io_apic_assign_pci_irqs) {
int irq;
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin - 1);
/*
* Busses behind bridges are typically not listed in the MP-table.
* In this case we have to look up the IRQ based on the parent bus,
@ -1236,20 +1232,20 @@ static int pirq_enable_irq(struct pci_dev *dev)
while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
struct pci_dev *bridge = dev->bus->self;
pin = (pin + PCI_SLOT(dev->devfn)) % 4;
pin = pci_swizzle_interrupt_pin(dev, pin);
irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
PCI_SLOT(bridge->devfn), pin);
PCI_SLOT(bridge->devfn), pin - 1);
if (irq >= 0)
dev_warn(&dev->dev, "using bridge %s "
"INT %c to get IRQ %d\n",
pci_name(bridge), 'A' + pin,
pci_name(bridge), 'A' + pin - 1,
irq);
dev = bridge;
}
dev = temp_dev;
if (irq >= 0) {
dev_info(&dev->dev, "PCI->APIC IRQ transform: "
"INT %c -> IRQ %d\n", 'A' + pin, irq);
"INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
dev->irq = irq;
return 0;
} else
@ -1268,7 +1264,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
return 0;
dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n",
'A' + pin, msg);
'A' + pin - 1, msg);
}
return 0;
}
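The recurring pattern in this file: the value read from PCI_INTERRUPT_PIN stays in its raw 1-based form (1 = INTA ... 4 = INTD), and 1 is subtracted only where a 0-based table index or an interrupt letter is needed. A minimal hypothetical snippet illustrating the convention (info and link are placeholders, not code from the patch):

u8 pin;

pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (pin) {
	dev_dbg(&dev->dev, "PCI INT %c\n", 'A' + pin - 1);	/* letter needs 0-based value */
	link = info->irq[pin - 1].link;				/* table index is 0-based */
	pin = pci_swizzle_interrupt_pin(dev, pin);		/* stays 1-based across a bridge */
}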


@ -24,24 +24,6 @@ static void pci_visws_disable_irq(struct pci_dev *dev) { }
unsigned int pci_bus0, pci_bus1;
static inline u8 bridge_swizzle(u8 pin, u8 slot)
{
return (((pin - 1) + slot) % 4) + 1;
}
static u8 __init visws_swizzle(struct pci_dev *dev, u8 *pinp)
{
u8 pin = *pinp;
while (dev->bus->self) { /* Move up the chain of bridges. */
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
dev = dev->bus->self;
}
*pinp = pin;
return PCI_SLOT(dev->devfn);
}
static int __init visws_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
int irq, bus = dev->bus->number;
@ -106,7 +88,7 @@ int __init pci_visws_init(void)
raw_pci_ops = &pci_direct_conf1;
pci_scan_bus_with_sysdata(pci_bus0);
pci_scan_bus_with_sysdata(pci_bus1);
pci_fixup_irqs(visws_swizzle, visws_map_irq);
pci_fixup_irqs(pci_common_swizzle, visws_map_irq);
pcibios_resource_survey();
return 0;
}


@ -31,6 +31,7 @@
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@ -193,6 +194,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
unsigned long long value = 0;
acpi_handle handle = NULL;
struct acpi_device *child;
u32 flags, base_flags;
if (!device)
@ -210,6 +212,13 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
device->ops.bind = acpi_pci_bind;
/*
* All supported architectures that use ACPI have support for
* PCI domains, so we indicate this in _OSC support capabilities.
*/
flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
pci_acpi_osc_support(device->handle, flags);
/*
* Segment
* -------
@ -335,6 +344,17 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
list_for_each_entry(child, &device->children, node)
acpi_pci_bridge_scan(child);
/* Indicate support for various _OSC capabilities. */
if (pci_ext_cfg_avail(root->bus->self))
flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
if (pcie_aspm_enabled())
flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
if (pci_msi_enabled())
flags |= OSC_MSI_SUPPORT;
if (flags != base_flags)
pci_acpi_osc_support(device->handle, flags);
end:
if (result) {
if (!list_empty(&root->node))


@ -4807,7 +4807,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
}
}
err = pci_request_selected_regions(pdev,
err = pci_request_selected_regions_exclusive(pdev,
pci_select_bars(pdev, IORESOURCE_MEM),
e1000e_driver_name);
if (err)


@ -1403,9 +1403,9 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
}
/* Disable both devices */
pci_disable_device(efx->pci_dev);
pci_clear_master(efx->pci_dev);
if (FALCON_IS_DUAL_FUNC(efx))
pci_disable_device(nic_data->pci_dev2);
pci_clear_master(nic_data->pci_dev2);
falcon_disable_interrupts(efx);
if (++n_int_errors < FALCON_MAX_INT_ERRORS) {


@ -547,7 +547,7 @@ dino_card_fixup(struct pci_dev *dev)
** The additional "-1" adjusts for skewing the IRQ<->slot.
*/
dino_cfg_read(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 1, &irq_pin);
dev->irq = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ;
dev->irq = pci_swizzle_interrupt_pin(dev, irq_pin) - 1;
/* Shouldn't really need to do this but it's in case someone tries
** to bypass PCI services and look at the card themselves.
@ -672,7 +672,7 @@ dino_fixup_bus(struct pci_bus *bus)
dino_cfg_read(dev->bus, dev->devfn,
PCI_INTERRUPT_PIN, 1, &irq_pin);
irq_pin = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ;
irq_pin = pci_swizzle_interrupt_pin(dev, irq_pin) - 1;
printk(KERN_WARNING "Device %s has undefined IRQ, "
"setting to %d\n", pci_name(dev), irq_pin);
dino_cfg_write(dev->bus, dev->devfn,


@ -519,8 +519,7 @@ iosapic_xlate_pin(struct iosapic_info *isi, struct pci_dev *pcidev)
**
** Advantage is it's really easy to implement.
*/
intr_pin = ((intr_pin-1)+PCI_SLOT(pcidev->devfn)) % 4;
intr_pin++; /* convert back to INTA-D (1-4) */
intr_pin = pci_swizzle_interrupt_pin(pcidev, intr_pin);
#endif /* PCI_BRIDGE_FUNCS */
/*


@ -42,6 +42,15 @@ config PCI_DEBUG
When in doubt, say N.
config PCI_STUB
tristate "PCI Stub driver"
depends on PCI
help
Say Y or M here if you want to be able to reserve a PCI device
when it is going to be assigned to a guest operating system.
When in doubt, say N.
config HT_IRQ
bool "Interrupts on hypertransport devices"
default y


@ -53,6 +53,8 @@ obj-$(CONFIG_HOTPLUG) += setup-bus.o
obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
ifeq ($(CONFIG_PCI_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif


@ -66,6 +66,39 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
/**
* pci_read_vpd - Read one entry from Vital Product Data
* @dev: pci device struct
* @pos: offset in vpd space
* @count: number of bytes to read
* @buf: pointer to where to store result
*
*/
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
if (!dev->vpd || !dev->vpd->ops)
return -ENODEV;
return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);
/**
* pci_write_vpd - Write entry to Vital Product Data
* @dev: pci device struct
* @pos: offset in vpd space
* @count: number of bytes to write
* @buf: buffer containing the data to write
*
*/
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
if (!dev->vpd || !dev->vpd->ops)
return -ENODEV;
return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);
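A minimal usage sketch for the two exported accessors above, assuming pdev is a driver's struct pci_dev and the device exposes at least 64 bytes of VPD (hypothetical sizes, abbreviated error handling):

u8 vpd_buf[64];
ssize_t n;

n = pci_read_vpd(pdev, 0, sizeof(vpd_buf), vpd_buf);	/* read 64 bytes at offset 0 */
if (n < 0)
	dev_warn(&pdev->dev, "VPD read failed: %zd\n", n);

/* writes must be dword aligned and a multiple of 4 bytes long */
n = pci_write_vpd(pdev, 0, sizeof(vpd_buf), vpd_buf);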
/*
* The following routines are to prevent the user from accessing PCI config
* space when it's unsafe to do so. Some devices require this during BIST and
@ -133,125 +166,145 @@ PCI_USER_WRITE_CONFIG(dword, u32)
struct pci_vpd_pci22 {
struct pci_vpd base;
spinlock_t lock; /* controls access to hardware and the flags */
u8 cap;
struct mutex lock;
u16 flag;
bool busy;
bool flag; /* value of F bit to wait for */
u8 cap;
};
/* Wait for last operation to complete */
/*
* Wait for last operation to complete.
* This code has to spin since there is no other notification from the PCI
* hardware. Since the VPD is often implemented by serial attachment to an
* EEPROM, it may take many milliseconds to complete.
*/
static int pci_vpd_pci22_wait(struct pci_dev *dev)
{
struct pci_vpd_pci22 *vpd =
container_of(dev->vpd, struct pci_vpd_pci22, base);
u16 flag, status;
int wait;
unsigned long timeout = jiffies + HZ/20 + 2;
u16 status;
int ret;
if (!vpd->busy)
return 0;
flag = vpd->flag ? PCI_VPD_ADDR_F : 0;
wait = vpd->flag ? 10 : 1000; /* read: 100 us; write: 10 ms */
for (;;) {
ret = pci_user_read_config_word(dev,
vpd->cap + PCI_VPD_ADDR,
ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
&status);
if (ret < 0)
if (ret)
return ret;
if ((status & PCI_VPD_ADDR_F) == flag) {
if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
vpd->busy = false;
return 0;
}
if (wait-- == 0)
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
if (fatal_signal_pending(current))
return -EINTR;
if (!cond_resched())
udelay(10);
}
}
static int pci_vpd_pci22_read(struct pci_dev *dev, int pos, int size,
char *buf)
static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
void *arg)
{
struct pci_vpd_pci22 *vpd =
container_of(dev->vpd, struct pci_vpd_pci22, base);
u32 val;
int ret;
int begin, end, i;
loff_t end = pos + count;
u8 *buf = arg;
if (pos < 0 || pos > vpd->base.len || size > vpd->base.len - pos)
if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
return -EINVAL;
if (size == 0)
return 0;
spin_lock_irq(&vpd->lock);
if (mutex_lock_killable(&vpd->lock))
return -EINTR;
ret = pci_vpd_pci22_wait(dev);
if (ret < 0)
goto out;
while (pos < end) {
u32 val;
unsigned int i, skip;
ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
pos & ~3);
if (ret < 0)
goto out;
break;
vpd->busy = true;
vpd->flag = 1;
vpd->flag = PCI_VPD_ADDR_F;
ret = pci_vpd_pci22_wait(dev);
if (ret < 0)
goto out;
ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA,
&val);
out:
spin_unlock_irq(&vpd->lock);
if (ret < 0)
return ret;
break;
/* Convert to bytes */
begin = pos & 3;
end = min(4, begin + size);
for (i = 0; i < end; ++i) {
if (i >= begin)
ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
if (ret < 0)
break;
skip = pos & 3;
for (i = 0; i < sizeof(u32); i++) {
if (i >= skip) {
*buf++ = val;
if (++pos == end)
break;
}
val >>= 8;
}
return end - begin;
}
out:
mutex_unlock(&vpd->lock);
return ret ? ret : count;
}
static int pci_vpd_pci22_write(struct pci_dev *dev, int pos, int size,
const char *buf)
static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
const void *arg)
{
struct pci_vpd_pci22 *vpd =
container_of(dev->vpd, struct pci_vpd_pci22, base);
u32 val;
int ret;
const u8 *buf = arg;
loff_t end = pos + count;
int ret = 0;
if (pos < 0 || pos > vpd->base.len || pos & 3 ||
size > vpd->base.len - pos || size < 4)
if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
return -EINVAL;
val = (u8) *buf++;
val |= ((u8) *buf++) << 8;
val |= ((u8) *buf++) << 16;
val |= ((u32)(u8) *buf++) << 24;
if (mutex_lock_killable(&vpd->lock))
return -EINTR;
spin_lock_irq(&vpd->lock);
ret = pci_vpd_pci22_wait(dev);
if (ret < 0)
goto out;
ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA,
val);
while (pos < end) {
u32 val;
val = *buf++;
val |= *buf++ << 8;
val |= *buf++ << 16;
val |= *buf++ << 24;
ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
if (ret < 0)
goto out;
break;
ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
pos | PCI_VPD_ADDR_F);
if (ret < 0)
goto out;
break;
vpd->busy = true;
vpd->flag = 0;
ret = pci_vpd_pci22_wait(dev);
out:
spin_unlock_irq(&vpd->lock);
if (ret < 0)
return ret;
return 4;
pos += sizeof(u32);
}
out:
mutex_unlock(&vpd->lock);
return ret ? ret : count;
}
static void pci_vpd_pci22_release(struct pci_dev *dev)
@ -259,7 +312,7 @@ static void pci_vpd_pci22_release(struct pci_dev *dev)
kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
}
static struct pci_vpd_ops pci_vpd_pci22_ops = {
static const struct pci_vpd_ops pci_vpd_pci22_ops = {
.read = pci_vpd_pci22_read,
.write = pci_vpd_pci22_write,
.release = pci_vpd_pci22_release,
@ -279,13 +332,36 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
vpd->base.len = PCI_VPD_PCI22_SIZE;
vpd->base.ops = &pci_vpd_pci22_ops;
spin_lock_init(&vpd->lock);
mutex_init(&vpd->lock);
vpd->cap = cap;
vpd->busy = false;
dev->vpd = &vpd->base;
return 0;
}
/**
* pci_vpd_truncate - Set available Vital Product Data size
* @dev: pci device struct
* @size: available memory in bytes
*
* Adjust size of available VPD area.
*/
int pci_vpd_truncate(struct pci_dev *dev, size_t size)
{
if (!dev->vpd)
return -EINVAL;
/* limited by the access method */
if (size > dev->vpd->len)
return -EINVAL;
dev->vpd->len = size;
dev->vpd->attr->size = size;
return 0;
}
EXPORT_SYMBOL(pci_vpd_truncate);
/**
* pci_block_user_cfg_access - Block userspace PCI config reads/writes
* @dev: pci device struct


@ -71,7 +71,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
}
/**
* add a single device
* pci_bus_add_device - add a single device
* @dev: device to add
*
* This adds a single pci device to the global
@ -90,6 +90,37 @@ int pci_bus_add_device(struct pci_dev *dev)
return 0;
}
/**
* pci_bus_add_child - add a child bus
* @bus: bus to add
*
* This adds sysfs entries for a single bus
*/
int pci_bus_add_child(struct pci_bus *bus)
{
int retval;
if (bus->bridge)
bus->dev.parent = bus->bridge;
retval = device_register(&bus->dev);
if (retval)
return retval;
bus->is_added = 1;
retval = device_create_file(&bus->dev, &dev_attr_cpuaffinity);
if (retval)
return retval;
retval = device_create_file(&bus->dev, &dev_attr_cpulistaffinity);
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(bus);
return retval;
}
/**
* pci_bus_add_devices - insert newly discovered PCI devices
* @bus: bus to check for new devices
@ -105,7 +136,7 @@ int pci_bus_add_device(struct pci_dev *dev)
void pci_bus_add_devices(struct pci_bus *bus)
{
struct pci_dev *dev;
struct pci_bus *child_bus;
struct pci_bus *child;
int retval;
list_for_each_entry(dev, &bus->devices, bus_list) {
@ -120,45 +151,29 @@ void pci_bus_add_devices(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
BUG_ON(!dev->is_added);
child = dev->subordinate;
/*
* If there is an unattached subordinate bus, attach
* it and then scan for unattached PCI devices.
*/
if (dev->subordinate) {
if (list_empty(&dev->subordinate->node)) {
if (!child)
continue;
if (list_empty(&child->node)) {
down_write(&pci_bus_sem);
list_add_tail(&dev->subordinate->node,
&dev->bus->children);
list_add_tail(&child->node, &dev->bus->children);
up_write(&pci_bus_sem);
}
pci_bus_add_devices(dev->subordinate);
pci_bus_add_devices(child);
/* register the bus with sysfs as the parent is now
* properly registered. */
child_bus = dev->subordinate;
if (child_bus->is_added)
/*
* register the bus with sysfs as the parent is now
* properly registered.
*/
if (child->is_added)
continue;
child_bus->dev.parent = child_bus->bridge;
retval = device_register(&child_bus->dev);
retval = pci_bus_add_child(child);
if (retval)
dev_err(&dev->dev, "Error registering pci_bus,"
" continuing...\n");
else {
child_bus->is_added = 1;
retval = device_create_file(&child_bus->dev,
&dev_attr_cpuaffinity);
}
if (retval)
dev_err(&dev->dev, "Error creating cpuaffinity"
" file, continuing...\n");
retval = device_create_file(&child_bus->dev,
&dev_attr_cpulistaffinity);
if (retval)
dev_err(&dev->dev,
"Error creating cpulistaffinity"
" file, continuing...\n");
}
dev_err(&dev->dev, "Error adding bus, continuing\n");
}
}


@ -55,6 +55,9 @@ pciehp-objs := pciehp_core.o \
pciehp_ctrl.o \
pciehp_pci.o \
pciehp_hpc.o
ifdef CONFIG_ACPI
pciehp-objs += pciehp_acpi.o
endif
shpchp-objs := shpchp_core.o \
shpchp_ctrl.o \


@ -501,5 +501,74 @@ int acpi_root_bridge(acpi_handle handle)
}
EXPORT_SYMBOL_GPL(acpi_root_bridge);
static int is_ejectable(acpi_handle handle)
{
acpi_status status;
acpi_handle tmp;
unsigned long long removable;
status = acpi_get_handle(handle, "_ADR", &tmp);
if (ACPI_FAILURE(status))
return 0;
status = acpi_get_handle(handle, "_EJ0", &tmp);
if (ACPI_SUCCESS(status))
return 1;
status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable);
if (ACPI_SUCCESS(status) && removable)
return 1;
return 0;
}
/**
* acpi_pci_check_ejectable - check if handle is ejectable ACPI PCI slot
* @pbus: the PCI bus of the PCI slot corresponding to 'handle'
* @handle: ACPI handle to check
*
* Return 1 if handle is ejectable PCI slot, 0 otherwise.
*/
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle)
{
acpi_handle bridge_handle, parent_handle;
if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus)))
return 0;
if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle))))
return 0;
if (bridge_handle != parent_handle)
return 0;
return is_ejectable(handle);
}
EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable);
static acpi_status
check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int *found = (int *)context;
if (is_ejectable(handle)) {
*found = 1;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
/**
* acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots
* @pbus: PCI bus to scan
*
* Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise.
*/
int acpi_pci_detect_ejectable(struct pci_bus *pbus)
{
acpi_handle handle;
int found = 0;
if (!(handle = acpi_pci_get_bridge_handle(pbus)))
return 0;
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
check_hotplug, (void *)&found, NULL);
return found;
}
EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable);
module_param(debug_acpi, bool, 0644);
MODULE_PARM_DESC(debug_acpi, "Debugging mode for ACPI enabled or not");


@ -46,6 +46,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/pci-acpi.h>
#include <linux/mutex.h>
#include "../pci.h"
@ -62,61 +63,6 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus);
static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
/*
* initialization & termination routines
*/
/**
* is_ejectable - determine if a slot is ejectable
* @handle: handle to acpi namespace
*
* Ejectable slot should satisfy at least these conditions:
*
* 1. has _ADR method
* 2. has _EJ0 method
*
* optionally
*
* 1. has _STA method
* 2. has _PS0 method
* 3. has _PS3 method
* 4. ..
*/
static int is_ejectable(acpi_handle handle)
{
acpi_status status;
acpi_handle tmp;
status = acpi_get_handle(handle, "_ADR", &tmp);
if (ACPI_FAILURE(status)) {
return 0;
}
status = acpi_get_handle(handle, "_EJ0", &tmp);
if (ACPI_FAILURE(status)) {
return 0;
}
return 1;
}
/* callback routine to check for the existence of ejectable slots */
static acpi_status
is_ejectable_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int *count = (int *)context;
if (is_ejectable(handle)) {
(*count)++;
/* only one ejectable slot is enough */
return AE_CTRL_TERMINATE;
} else {
return AE_OK;
}
}
/* callback routine to check for the existence of a pci dock device */
static acpi_status
is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
@ -131,9 +77,6 @@ is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
}
}
/*
* the _DCK method can do funny things... and sometimes not
* hah-hah funny.
@ -184,17 +127,12 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
acpi_status status = AE_OK;
unsigned long long adr, sun;
int device, function, retval;
struct pci_bus *pbus = bridge->pci_bus;
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status))
return AE_OK;
status = acpi_get_handle(handle, "_EJ0", &tmp);
if (ACPI_FAILURE(status) && !(is_dock_device(handle)))
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
@ -205,7 +143,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
INIT_LIST_HEAD(&newfunc->sibling);
newfunc->handle = handle;
newfunc->function = function;
if (ACPI_SUCCESS(status))
if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
newfunc->flags = FUNC_HAS_EJ0;
if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp)))
@ -256,8 +195,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
bridge->nr_slots++;
dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
slot->sun, pci_domain_nr(bridge->pci_bus),
bridge->pci_bus->number, slot->device);
slot->sun, pci_domain_nr(pbus), pbus->number, device);
retval = acpiphp_register_hotplug_slot(slot);
if (retval) {
if (retval == -EBUSY)
@ -274,8 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
list_add_tail(&newfunc->sibling, &slot->funcs);
/* associate corresponding pci_dev */
newfunc->pci_dev = pci_get_slot(bridge->pci_bus,
PCI_DEVFN(device, function));
newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function));
if (newfunc->pci_dev) {
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
}
@ -324,27 +261,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
/* see if it's worth looking at this bridge */
static int detect_ejectable_slots(acpi_handle *bridge_handle)
static int detect_ejectable_slots(struct pci_bus *pbus)
{
acpi_status status;
int count;
count = 0;
/* only check slots defined directly below bridge object */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1,
is_ejectable_slot, (void *)&count, NULL);
/*
* we also need to add this bridge if there is a dock bridge or
* other pci device on a dock station (removable)
*/
if (!count)
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle,
(u32)1, is_pci_dock_device, (void *)&count,
NULL);
return count;
int found = acpi_pci_detect_ejectable(pbus);
if (!found) {
acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus);
acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1,
is_pci_dock_device, (void *)&found, NULL);
}
return found;
}
@ -554,7 +479,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
goto out;
/* check if this bridge has ejectable slots */
if ((detect_ejectable_slots(handle) > 0)) {
if ((detect_ejectable_slots(dev->subordinate) > 0)) {
dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev));
add_p2p_bridge(handle, dev);
}
@ -615,7 +540,7 @@ static int add_bridge(acpi_handle handle)
}
/* check if this bridge has ejectable slots */
if (detect_ejectable_slots(handle) > 0) {
if (detect_ejectable_slots(pci_bus) > 0) {
dbg("found PCI host-bus bridge with hot-pluggable slots\n");
add_host_bridge(handle, pci_bus);
}


@ -271,7 +271,7 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
dbg("%s: generationg bus event\n", __func__);
acpi_bus_generate_proc_event(note->device, note->event, detail);
acpi_bus_generate_netlink_event(note->device->pnp.device_class,
note->device->dev.bus_id,
dev_name(&note->device->dev),
note->event, detail);
} else
note->event = event;


@ -1954,7 +1954,7 @@ void cpqhp_pushbutton_thread(unsigned long slot)
return ;
}
if (func != NULL && ctrl != NULL) {
if (ctrl != NULL) {
if (cpqhp_process_SI(ctrl, func) != 0) {
amber_LED_on(ctrl, hp_slot);
green_LED_off(ctrl, hp_slot);
@ -2604,7 +2604,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
for (cloop = 0; cloop < 4; cloop++) {
if (irqs.valid_INT & (0x01 << cloop)) {
rc = cpqhp_set_irq(func->bus, func->device,
0x0A + cloop, irqs.interrupt[cloop]);
cloop + 1, irqs.interrupt[cloop]);
if (rc)
goto free_and_out;
}
@ -2945,7 +2945,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
}
if (!behind_bridge) {
rc = cpqhp_set_irq(func->bus, func->device, temp_byte + 0x09, IRQ);
rc = cpqhp_set_irq(func->bus, func->device, temp_byte, IRQ);
if (rc)
return 1;
} else {


@ -171,7 +171,7 @@ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
fakebus->number = bus_num;
dbg("%s: dev %d, bus %d, pin %d, num %d\n",
__func__, dev_num, bus_num, int_pin, irq_num);
rc = pcibios_set_irq_routing(fakedev, int_pin - 0x0a, irq_num);
rc = pcibios_set_irq_routing(fakedev, int_pin - 1, irq_num);
kfree(fakedev);
kfree(fakebus);
dbg("%s: rc %d\n", __func__, rc);


@ -324,6 +324,7 @@ static int disable_slot(struct hotplug_slot *slot)
if (test_and_set_bit(0, &dslot->removed)) {
dbg("Slot already scheduled for removal\n");
pci_dev_put(dev);
return -ENODEV;
}


@ -220,11 +220,23 @@ struct hpc_ops {
#include <acpi/actypes.h>
#include <linux/pci-acpi.h>
extern void __init pciehp_acpi_slot_detection_init(void);
extern int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
static inline void pciehp_firmware_init(void)
{
pciehp_acpi_slot_detection_init();
}
static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
{
int retval;
u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
return acpi_get_hp_hw_control_from_firmware(dev, flags);
retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
if (retval)
return retval;
return pciehp_acpi_slot_detection_check(dev);
}
static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
@ -235,6 +247,7 @@ static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
return 0;
}
#else
#define pciehp_firmware_init() do {} while (0)
#define pciehp_get_hp_hw_control_from_firmware(dev) 0
#define pciehp_get_hp_params_from_firmware(dev, hpp) (-ENODEV)
#endif /* CONFIG_ACPI */


@ -0,0 +1,141 @@
/*
* ACPI related functions for PCI Express Hot Plug driver.
*
* Copyright (C) 2008 Kenji Kaneshige
* Copyright (C) 2008 Fujitsu Limited.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include "pciehp.h"
#define PCIEHP_DETECT_PCIE (0)
#define PCIEHP_DETECT_ACPI (1)
#define PCIEHP_DETECT_AUTO (2)
#define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO
static int slot_detection_mode;
static char *pciehp_detect_mode;
module_param(pciehp_detect_mode, charp, 0444);
MODULE_PARM_DESC(pciehp_detect_mode,
"Slot detection mode: pcie, acpi, auto\n"
" pcie - Use PCIe based slot detection\n"
" acpi - Use ACPI for slot detection\n"
" auto(default) - Auto select mode. Use acpi option if duplicate\n"
" slot ids are found. Otherwise, use pcie option\n");
int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
{
if (slot_detection_mode != PCIEHP_DETECT_ACPI)
return 0;
if (acpi_pci_detect_ejectable(dev->subordinate))
return 0;
return -ENODEV;
}
static int __init parse_detect_mode(void)
{
if (!pciehp_detect_mode)
return PCIEHP_DETECT_DEFAULT;
if (!strcmp(pciehp_detect_mode, "pcie"))
return PCIEHP_DETECT_PCIE;
if (!strcmp(pciehp_detect_mode, "acpi"))
return PCIEHP_DETECT_ACPI;
if (!strcmp(pciehp_detect_mode, "auto"))
return PCIEHP_DETECT_AUTO;
warn("bad specifier '%s' for pciehp_detect_mode. Use default\n",
pciehp_detect_mode);
return PCIEHP_DETECT_DEFAULT;
}
static struct pcie_port_service_id __initdata port_pci_ids[] = {
{
.vendor = PCI_ANY_ID,
.device = PCI_ANY_ID,
.port_type = PCIE_ANY_PORT,
.service_type = PCIE_PORT_SERVICE_HP,
.driver_data = 0,
}, { /* end: all zeroes */ }
};
static int __initdata dup_slot_id;
static int __initdata acpi_slot_detected;
static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
/* Dummy driver for duplicate name detection */
static int __init dummy_probe(struct pcie_device *dev,
const struct pcie_port_service_id *id)
{
int pos;
u32 slot_cap;
struct slot *slot, *tmp;
struct pci_dev *pdev = dev->port;
struct pci_bus *pbus = pdev->subordinate;
if (!(slot = kzalloc(sizeof(*slot), GFP_KERNEL)))
return -ENOMEM;
/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
if (pciehp_get_hp_hw_control_from_firmware(pdev))
return -ENODEV;
if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP)))
return -ENODEV;
pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
slot->number = slot_cap >> 19;
list_for_each_entry(tmp, &dummy_slots, slot_list) {
if (tmp->number == slot->number)
dup_slot_id++;
}
list_add_tail(&slot->slot_list, &dummy_slots);
if (!acpi_slot_detected && acpi_pci_detect_ejectable(pbus))
acpi_slot_detected = 1;
return -ENODEV; /* dummy driver always returns error */
}
static struct pcie_port_service_driver __initdata dummy_driver = {
.name = "pciehp_dummy",
.id_table = port_pci_ids,
.probe = dummy_probe,
};
static int __init select_detection_mode(void)
{
struct slot *slot, *tmp;
pcie_port_service_register(&dummy_driver);
pcie_port_service_unregister(&dummy_driver);
list_for_each_entry_safe(slot, tmp, &dummy_slots, slot_list) {
list_del(&slot->slot_list);
kfree(slot);
}
if (acpi_slot_detected && dup_slot_id)
return PCIEHP_DETECT_ACPI;
return PCIEHP_DETECT_PCIE;
}
void __init pciehp_acpi_slot_detection_init(void)
{
slot_detection_mode = parse_detect_mode();
if (slot_detection_mode != PCIEHP_DETECT_AUTO)
goto out;
slot_detection_mode = select_detection_mode();
out:
if (slot_detection_mode == PCIEHP_DETECT_ACPI)
info("Using ACPI for slot detection.\n");
}


@ -522,6 +522,7 @@ static int __init pcied_init(void)
{
int retval = 0;
pciehp_firmware_init();
retval = pcie_port_service_register(&hpdriver_portdrv);
dbg("pcie_port_service_register = %d\n", retval);
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");


@ -178,14 +178,13 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
"Issue of Slot Power Off command failed\n");
return;
}
}
/*
* After turning power off, we must wait for at least 1 second
* before taking any action that relies on power having been
* removed from the slot/adapter.
*/
msleep(1000);
}
if (PWR_LED(ctrl))
pslot->hpc_ops->green_led_off(pslot);
@ -286,14 +285,13 @@ static int remove_board(struct slot *p_slot)
"Issue of Slot Disable command failed\n");
return retval;
}
}
/*
* After turning power off, we must wait for at least 1 second
* before taking any action that relies on power having been
* removed from the slot/adapter.
*/
msleep(1000);
}
if (PWR_LED(ctrl))
/* turn off Green LED */


@ -42,42 +42,6 @@
static atomic_t pciehp_num_controllers = ATOMIC_INIT(0);
struct ctrl_reg {
u8 cap_id;
u8 nxt_ptr;
u16 cap_reg;
u32 dev_cap;
u16 dev_ctrl;
u16 dev_status;
u32 lnk_cap;
u16 lnk_ctrl;
u16 lnk_status;
u32 slot_cap;
u16 slot_ctrl;
u16 slot_status;
u16 root_ctrl;
u16 rsvp;
u32 root_status;
} __attribute__ ((packed));
/* offsets to the controller registers based on the above structure layout */
enum ctrl_offsets {
PCIECAPID = offsetof(struct ctrl_reg, cap_id),
NXTCAPPTR = offsetof(struct ctrl_reg, nxt_ptr),
CAPREG = offsetof(struct ctrl_reg, cap_reg),
DEVCAP = offsetof(struct ctrl_reg, dev_cap),
DEVCTRL = offsetof(struct ctrl_reg, dev_ctrl),
DEVSTATUS = offsetof(struct ctrl_reg, dev_status),
LNKCAP = offsetof(struct ctrl_reg, lnk_cap),
LNKCTRL = offsetof(struct ctrl_reg, lnk_ctrl),
LNKSTATUS = offsetof(struct ctrl_reg, lnk_status),
SLOTCAP = offsetof(struct ctrl_reg, slot_cap),
SLOTCTRL = offsetof(struct ctrl_reg, slot_ctrl),
SLOTSTATUS = offsetof(struct ctrl_reg, slot_status),
ROOTCTRL = offsetof(struct ctrl_reg, root_ctrl),
ROOTSTATUS = offsetof(struct ctrl_reg, root_status),
};
static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
{
struct pci_dev *dev = ctrl->pci_dev;
@ -102,95 +66,9 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
return pci_write_config_dword(dev, ctrl->cap_base + reg, value);
}
/* Field definitions in PCI Express Capabilities Register */
#define CAP_VER 0x000F
#define DEV_PORT_TYPE 0x00F0
#define SLOT_IMPL 0x0100
#define MSG_NUM 0x3E00
/* Device or Port Type */
#define NAT_ENDPT 0x00
#define LEG_ENDPT 0x01
#define ROOT_PORT 0x04
#define UP_STREAM 0x05
#define DN_STREAM 0x06
#define PCIE_PCI_BRDG 0x07
#define PCI_PCIE_BRDG 0x10
/* Field definitions in Device Capabilities Register */
#define DATTN_BUTTN_PRSN 0x1000
#define DATTN_LED_PRSN 0x2000
#define DPWR_LED_PRSN 0x4000
/* Field definitions in Link Capabilities Register */
#define MAX_LNK_SPEED 0x000F
#define MAX_LNK_WIDTH 0x03F0
#define LINK_ACTIVE_REPORTING 0x00100000
/* Link Width Encoding */
#define LNK_X1 0x01
#define LNK_X2 0x02
#define LNK_X4 0x04
#define LNK_X8 0x08
#define LNK_X12 0x0C
#define LNK_X16 0x10
#define LNK_X32 0x20
/*Field definitions of Link Status Register */
#define LNK_SPEED 0x000F
#define NEG_LINK_WD 0x03F0
#define LNK_TRN_ERR 0x0400
#define LNK_TRN 0x0800
#define SLOT_CLK_CONF 0x1000
#define LINK_ACTIVE 0x2000
/* Field definitions in Slot Capabilities Register */
#define ATTN_BUTTN_PRSN 0x00000001
#define PWR_CTRL_PRSN 0x00000002
#define MRL_SENS_PRSN 0x00000004
#define ATTN_LED_PRSN 0x00000008
#define PWR_LED_PRSN 0x00000010
#define HP_SUPR_RM_SUP 0x00000020
#define HP_CAP 0x00000040
#define SLOT_PWR_VALUE 0x000003F8
#define SLOT_PWR_LIMIT 0x00000C00
#define PSN 0xFFF80000 /* PSN: Physical Slot Number */
/* Field definitions in Slot Control Register */
#define ATTN_BUTTN_ENABLE 0x0001
#define PWR_FAULT_DETECT_ENABLE 0x0002
#define MRL_DETECT_ENABLE 0x0004
#define PRSN_DETECT_ENABLE 0x0008
#define CMD_CMPL_INTR_ENABLE 0x0010
#define HP_INTR_ENABLE 0x0020
#define ATTN_LED_CTRL 0x00C0
#define PWR_LED_CTRL 0x0300
#define PWR_CTRL 0x0400
#define EMI_CTRL 0x0800
/* Attention indicator and Power indicator states */
#define LED_ON 0x01
#define LED_BLINK 0x10
#define LED_OFF 0x11
/* Power Control Command */
#define POWER_ON 0
#define POWER_OFF 0x0400
/* EMI Status defines */
#define EMI_DISENGAGED 0
#define EMI_ENGAGED 1
/* Field definitions in Slot Status Register */
#define ATTN_BUTTN_PRESSED 0x0001
#define PWR_FAULT_DETECTED 0x0002
#define MRL_SENS_CHANGED 0x0004
#define PRSN_DETECT_CHANGED 0x0008
#define CMD_COMPLETED 0x0010
#define MRL_STATE 0x0020
#define PRSN_STATE 0x0040
#define EMI_STATE 0x0080
#define EMI_STATUS_BIT 7
#define POWER_OFF PCI_EXP_SLTCTL_PCC
static irqreturn_t pcie_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
@ -253,24 +131,22 @@ static inline void pciehp_free_irq(struct controller *ctrl)
static int pcie_poll_cmd(struct controller *ctrl)
{
u16 slot_status;
int timeout = 1000;
int err, timeout = 1000;
if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
if (slot_status & CMD_COMPLETED) {
pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) {
pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC);
return 1;
}
}
while (timeout > 0) {
msleep(10);
timeout -= 10;
if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
if (slot_status & CMD_COMPLETED) {
pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) {
pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC);
return 1;
}
}
}
return 0; /* timeout */
}
@ -302,14 +178,14 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
mutex_lock(&ctrl->ctrl_lock);
retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
__func__);
goto out;
}
if (slot_status & CMD_COMPLETED) {
if (slot_status & PCI_EXP_SLTSTA_CC) {
if (!ctrl->no_cmd_complete) {
/*
* After 1 sec and CMD_COMPLETED still not set, just
@ -332,7 +208,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
}
}
retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
goto out;
@ -342,7 +218,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
slot_ctrl |= (cmd & mask);
ctrl->cmd_busy = 1;
smp_mb();
retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
retval = pciehp_writew(ctrl, PCI_EXP_SLTCTL, slot_ctrl);
if (retval)
ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n");
@ -356,8 +232,8 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
* completed interrupt is not enabled, we need to poll
* command completed event.
*/
if (!(slot_ctrl & HP_INTR_ENABLE) ||
!(slot_ctrl & CMD_CMPL_INTR_ENABLE))
if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) ||
!(slot_ctrl & PCI_EXP_SLTCTL_CCIE))
poll = 1;
pcie_wait_cmd(ctrl, poll);
}
@ -370,9 +246,9 @@ static inline int check_link_active(struct controller *ctrl)
{
u16 link_status;
if (pciehp_readw(ctrl, LNKSTATUS, &link_status))
if (pciehp_readw(ctrl, PCI_EXP_LNKSTA, &link_status))
return 0;
return !!(link_status & LINK_ACTIVE);
return !!(link_status & PCI_EXP_LNKSTA_DLLLA);
}
static void pcie_wait_link_active(struct controller *ctrl)
@ -412,15 +288,15 @@ static int hpc_check_lnk_status(struct controller *ctrl)
} else
msleep(1000);
retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
if (retval) {
ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
return retval;
}
ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) ||
!(lnk_status & NEG_LINK_WD)) {
if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
!(lnk_status & PCI_EXP_LNKSTA_NLW)) {
ctrl_err(ctrl, "Link Training Error occurs \n");
retval = -1;
return retval;
@ -436,16 +312,16 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
u8 atten_led_state;
int retval = 0;
retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
return retval;
}
ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n",
__func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl);
atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6;
atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6;
switch (atten_led_state) {
case 0:
@ -475,15 +351,15 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
u8 pwr_state;
int retval = 0;
retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
return retval;
}
ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n",
__func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl);
pwr_state = (slot_ctrl & PWR_CTRL) >> 10;
pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10;
switch (pwr_state) {
case 0:
@ -504,17 +380,15 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u16 slot_status;
int retval = 0;
int retval;
retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
__func__);
return retval;
}
*status = (((slot_status & MRL_STATE) >> 5) == 0) ? 0 : 1;
*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
return 0;
}
@ -522,18 +396,15 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u16 slot_status;
u8 card_state;
int retval = 0;
int retval;
retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
__func__);
return retval;
}
card_state = (u8)((slot_status & PRSN_STATE) >> 6);
*status = (card_state == 1) ? 1 : 0;
*status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
return 0;
}
@ -541,32 +412,28 @@ static int hpc_query_power_fault(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_status;
u8 pwr_fault;
int retval = 0;
int retval;
retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
if (retval) {
ctrl_err(ctrl, "Cannot check for power fault\n");
return retval;
}
pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1);
return pwr_fault;
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
static int hpc_get_emi_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u16 slot_status;
int retval = 0;
int retval;
retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
if (retval) {
ctrl_err(ctrl, "Cannot check EMI status\n");
return retval;
}
*status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT;
*status = !!(slot_status & PCI_EXP_SLTSTA_EIS);
return retval;
}
@ -576,8 +443,8 @@ static int hpc_toggle_emi(struct slot *slot)
u16 cmd_mask;
int rc;
slot_cmd = EMI_CTRL;
cmd_mask = EMI_CTRL;
slot_cmd = PCI_EXP_SLTCTL_EIC;
cmd_mask = PCI_EXP_SLTCTL_EIC;
rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
slot->last_emi_toggle = get_seconds();
@ -591,7 +458,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
u16 cmd_mask;
int rc;
cmd_mask = ATTN_LED_CTRL;
cmd_mask = PCI_EXP_SLTCTL_AIC;
switch (value) {
case 0 : /* turn off */
slot_cmd = 0x00C0;
@ -607,7 +474,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
}
rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
__func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
return rc;
}
@ -619,10 +486,10 @@ static void hpc_set_green_led_on(struct slot *slot)
u16 cmd_mask;
slot_cmd = 0x0100;
cmd_mask = PWR_LED_CTRL;
cmd_mask = PCI_EXP_SLTCTL_PIC;
pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
__func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
}
static void hpc_set_green_led_off(struct slot *slot)
@ -632,10 +499,10 @@ static void hpc_set_green_led_off(struct slot *slot)
u16 cmd_mask;
slot_cmd = 0x0300;
cmd_mask = PWR_LED_CTRL;
cmd_mask = PCI_EXP_SLTCTL_PIC;
pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
__func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
}
static void hpc_set_green_led_blink(struct slot *slot)
@ -645,10 +512,10 @@ static void hpc_set_green_led_blink(struct slot *slot)
u16 cmd_mask;
slot_cmd = 0x0200;
cmd_mask = PWR_LED_CTRL;
cmd_mask = PCI_EXP_SLTCTL_PIC;
pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
__func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
}
static int hpc_power_on_slot(struct slot * slot)
@ -662,15 +529,15 @@ static int hpc_power_on_slot(struct slot * slot)
ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
/* Clear sticky power-fault bit from previous power failures */
retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
__func__);
return retval;
}
slot_status &= PWR_FAULT_DETECTED;
slot_status &= PCI_EXP_SLTSTA_PFD;
if (slot_status) {
retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status);
retval = pciehp_writew(ctrl, PCI_EXP_SLTSTA, slot_status);
if (retval) {
ctrl_err(ctrl,
"%s: Cannot write to SLOTSTATUS register\n",
@ -680,13 +547,13 @@ static int hpc_power_on_slot(struct slot * slot)
}
slot_cmd = POWER_ON;
cmd_mask = PWR_CTRL;
cmd_mask = PCI_EXP_SLTCTL_PCC;
/* Enable detection that we turned off at slot power-off time */
if (!pciehp_poll_mode) {
slot_cmd |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
PRSN_DETECT_ENABLE);
cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
PRSN_DETECT_ENABLE);
slot_cmd |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
PCI_EXP_SLTCTL_PDCE);
cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
PCI_EXP_SLTCTL_PDCE);
}
retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
@ -696,7 +563,7 @@ static int hpc_power_on_slot(struct slot * slot)
return -1;
}
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
__func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
return retval;
}
@ -753,7 +620,7 @@ static int hpc_power_off_slot(struct slot * slot)
changed = pcie_mask_bad_dllp(ctrl);
slot_cmd = POWER_OFF;
cmd_mask = PWR_CTRL;
cmd_mask = PCI_EXP_SLTCTL_PCC;
/*
* If we get MRL or presence detect interrupts now, the isr
* will notice the sticky power-fault bit too and issue power
@ -762,10 +629,10 @@ static int hpc_power_off_slot(struct slot * slot)
* till the slot is powered on again.
*/
if (!pciehp_poll_mode) {
slot_cmd &= ~(PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
PRSN_DETECT_ENABLE);
cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
PRSN_DETECT_ENABLE);
slot_cmd &= ~(PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
PCI_EXP_SLTCTL_PDCE);
cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
PCI_EXP_SLTCTL_PDCE);
}
retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
@ -775,7 +642,7 @@ static int hpc_power_off_slot(struct slot * slot)
goto out;
}
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
__func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
out:
if (changed)
pcie_unmask_bad_dllp(ctrl);
@ -796,19 +663,19 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
*/
intr_loc = 0;
do {
if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) {
if (pciehp_readw(ctrl, PCI_EXP_SLTSTA, &detected)) {
ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n",
__func__);
return IRQ_NONE;
}
detected &= (ATTN_BUTTN_PRESSED | PWR_FAULT_DETECTED |
MRL_SENS_CHANGED | PRSN_DETECT_CHANGED |
CMD_COMPLETED);
detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
PCI_EXP_SLTSTA_CC);
intr_loc |= detected;
if (!intr_loc)
return IRQ_NONE;
if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) {
if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, detected)) {
ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
__func__);
return IRQ_NONE;
@ -818,31 +685,31 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);
/* Check Command Complete Interrupt Pending */
if (intr_loc & CMD_COMPLETED) {
if (intr_loc & PCI_EXP_SLTSTA_CC) {
ctrl->cmd_busy = 0;
smp_mb();
wake_up(&ctrl->queue);
}
if (!(intr_loc & ~CMD_COMPLETED))
if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
return IRQ_HANDLED;
p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
/* Check MRL Sensor Changed */
if (intr_loc & MRL_SENS_CHANGED)
if (intr_loc & PCI_EXP_SLTSTA_MRLSC)
pciehp_handle_switch_change(p_slot);
/* Check Attention Button Pressed */
if (intr_loc & ATTN_BUTTN_PRESSED)
if (intr_loc & PCI_EXP_SLTSTA_ABP)
pciehp_handle_attention_button(p_slot);
/* Check Presence Detect Changed */
if (intr_loc & PRSN_DETECT_CHANGED)
if (intr_loc & PCI_EXP_SLTSTA_PDC)
pciehp_handle_presence_change(p_slot);
/* Check Power Fault Detected */
if (intr_loc & PWR_FAULT_DETECTED)
if (intr_loc & PCI_EXP_SLTSTA_PFD)
pciehp_handle_power_fault(p_slot);
return IRQ_HANDLED;
@ -855,7 +722,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
u32 lnk_cap;
int retval = 0;
retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
return retval;
@ -884,13 +751,13 @@ static int hpc_get_max_lnk_width(struct slot *slot,
u32 lnk_cap;
int retval = 0;
retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
return retval;
}
switch ((lnk_cap & 0x03F0) >> 4){
switch ((lnk_cap & PCI_EXP_LNKSTA_NLW) >> 4){
case 0:
lnk_wdth = PCIE_LNK_WIDTH_RESRV;
break;
@ -933,14 +800,14 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
int retval = 0;
u16 lnk_status;
retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
__func__);
return retval;
}
switch (lnk_status & 0x0F) {
switch (lnk_status & PCI_EXP_LNKSTA_CLS) {
case 1:
lnk_speed = PCIE_2PT5GB;
break;
@ -963,14 +830,14 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
int retval = 0;
u16 lnk_status;
retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
__func__);
return retval;
}
switch ((lnk_status & 0x03F0) >> 4){
switch ((lnk_status & PCI_EXP_LNKSTA_NLW) >> 4){
case 0:
lnk_wdth = PCIE_LNK_WIDTH_RESRV;
break;
@ -1036,18 +903,19 @@ int pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
cmd = PRSN_DETECT_ENABLE;
cmd = PCI_EXP_SLTCTL_PDCE;
if (ATTN_BUTTN(ctrl))
cmd |= ATTN_BUTTN_ENABLE;
cmd |= PCI_EXP_SLTCTL_ABPE;
if (POWER_CTRL(ctrl))
cmd |= PWR_FAULT_DETECT_ENABLE;
cmd |= PCI_EXP_SLTCTL_PFDE;
if (MRL_SENS(ctrl))
cmd |= MRL_DETECT_ENABLE;
cmd |= PCI_EXP_SLTCTL_MRLSCE;
if (!pciehp_poll_mode)
cmd |= HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;
mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
if (pcie_write_cmd(ctrl, cmd, mask)) {
ctrl_err(ctrl, "Cannot enable software notification\n");
@ -1059,8 +927,9 @@ int pcie_enable_notification(struct controller *ctrl)
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;
mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
if (pcie_write_cmd(ctrl, 0, mask))
ctrl_warn(ctrl, "Cannot disable software notification\n");
}
@ -1157,9 +1026,9 @@ static inline void dbg_ctrl(struct controller *ctrl)
EMI(ctrl) ? "yes" : "no");
ctrl_info(ctrl, " Command Completed : %3s\n",
NO_CMD_CMPL(ctrl) ? "no" : "yes");
pciehp_readw(ctrl, SLOTSTATUS, &reg16);
pciehp_readw(ctrl, PCI_EXP_SLTSTA, &reg16);
ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
pciehp_readw(ctrl, SLOTCTRL, &reg16);
pciehp_readw(ctrl, PCI_EXP_SLTCTL, &reg16);
ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
}
@ -1183,7 +1052,7 @@ struct controller *pcie_init(struct pcie_device *dev)
ctrl_err(ctrl, "Cannot find PCI Express capability\n");
goto abort_ctrl;
}
if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) {
if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) {
ctrl_err(ctrl, "Cannot read SLOTCAP register\n");
goto abort_ctrl;
}
@ -1208,17 +1077,17 @@ struct controller *pcie_init(struct pcie_device *dev)
ctrl->no_cmd_complete = 1;
/* Check if Data Link Layer Link Active Reporting is implemented */
if (pciehp_readl(ctrl, LNKCAP, &link_cap)) {
if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) {
ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
goto abort_ctrl;
}
if (link_cap & LINK_ACTIVE_REPORTING) {
if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
ctrl_dbg(ctrl, "Link Active Reporting supported\n");
ctrl->link_active_reporting = 1;
}
/* Clear all remaining event bits in Slot Status register */
if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f))
if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f))
goto abort_ctrl;
/* Disable software notification */

View file

@ -15,7 +15,7 @@ static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
dev_printk(KERN_ERR, &pdev->dev,
"Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
parent->dev.bus_id, parent->vendor, parent->device);
dev_name(&parent->dev), parent->vendor, parent->device);
dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason);
dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n");
WARN_ON(1);

View file

@ -776,28 +776,19 @@ void pci_no_msi(void)
pci_msi_enable = 0;
}
/**
* pci_msi_enabled - is MSI enabled?
*
* Returns true if MSI has not been disabled by the command-line option
* pci=nomsi.
**/
int pci_msi_enabled(void)
{
return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);
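As a usage illustration, a driver might consult the new predicate before attempting MSI setup; this is only a sketch and the example_* name is hypothetical:

/* Hypothetical probe helper: prefer MSI unless it was disabled globally
 * (for instance by booting with pci=nomsi), else stay on legacy INTx. */
static int example_setup_irq(struct pci_dev *pdev)
{
	if (pci_msi_enabled() && !pci_enable_msi(pdev))
		dev_info(&pdev->dev, "using MSI, irq %d\n", pdev->irq);
	else
		dev_info(&pdev->dev, "using legacy INTx, irq %d\n", pdev->irq);
	return 0;
}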
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
INIT_LIST_HEAD(&dev->msi_list);
}
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
static void __devinit msi_acpi_init(void)
{
if (acpi_pci_disabled)
return;
pci_osc_support_set(OSC_MSI_SUPPORT);
pcie_osc_support_set(OSC_MSI_SUPPORT);
}
#else
static inline void msi_acpi_init(void) { }
#endif /* CONFIG_ACPI */
void __devinit msi_init(void)
{
if (!pci_msi_enable)
return;
msi_acpi_init();
}

View file

@ -24,13 +24,14 @@ struct acpi_osc_data {
acpi_handle handle;
u32 support_set;
u32 control_set;
u32 control_query;
int is_queried;
struct list_head sibiling;
};
static LIST_HEAD(acpi_osc_data_list);
struct acpi_osc_args {
u32 capbuf[3];
u32 ctrl_result;
};
static DEFINE_MUTEX(pci_acpi_lock);
@ -56,7 +57,7 @@ static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
static acpi_status acpi_run_osc(acpi_handle handle,
struct acpi_osc_args *osc_args)
struct acpi_osc_args *osc_args, u32 *retval)
{
acpi_status status;
struct acpi_object_list input;
@ -112,8 +113,7 @@ static acpi_status acpi_run_osc(acpi_handle handle,
goto out_kfree;
}
out_success:
osc_args->ctrl_result =
*((u32 *)(out_obj->buffer.pointer + 8));
*retval = *((u32 *)(out_obj->buffer.pointer + 8));
status = AE_OK;
out_kfree:
@ -121,11 +121,10 @@ out_kfree:
return status;
}
static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data,
u32 *result)
static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data)
{
acpi_status status;
u32 support_set;
u32 support_set, result;
struct acpi_osc_args osc_args;
/* do _OSC query for all possible controls */
@ -134,56 +133,45 @@ static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data,
osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
status = acpi_run_osc(osc_data->handle, &osc_args);
status = acpi_run_osc(osc_data->handle, &osc_args, &result);
if (ACPI_SUCCESS(status)) {
osc_data->support_set = support_set;
*result = osc_args.ctrl_result;
osc_data->control_query = result;
osc_data->is_queried = 1;
}
return status;
}
static acpi_status acpi_query_osc(acpi_handle handle,
u32 level, void *context, void **retval)
/*
* pci_acpi_osc_support: Invoke _OSC indicating support for the given feature
* @flags: Bitmask of flags to support
*
* See the ACPI spec for the definition of the flags
*/
int pci_acpi_osc_support(acpi_handle handle, u32 flags)
{
acpi_status status;
struct acpi_osc_data *osc_data;
u32 flags = (unsigned long)context, dummy;
acpi_handle tmp;
struct acpi_osc_data *osc_data;
int rc = 0;
status = acpi_get_handle(handle, "_OSC", &tmp);
if (ACPI_FAILURE(status))
return AE_OK;
return -ENOTTY;
mutex_lock(&pci_acpi_lock);
osc_data = acpi_get_osc_data(handle);
if (!osc_data) {
printk(KERN_ERR "acpi osc data array is full\n");
rc = -ENOMEM;
goto out;
}
__acpi_query_osc(flags, osc_data, &dummy);
__acpi_query_osc(flags, osc_data);
out:
mutex_unlock(&pci_acpi_lock);
return AE_OK;
}
/**
* __pci_osc_support_set - register OS support to Firmware
* @flags: OS support bits
* @hid: hardware ID
*
* Update OS support fields and do an _OSC Query to obtain an update
* from Firmware on supported control bits.
**/
acpi_status __pci_osc_support_set(u32 flags, const char *hid)
{
if (!(flags & OSC_SUPPORT_MASKS))
return AE_TYPE;
acpi_get_devices(hid, acpi_query_osc,
(void *)(unsigned long)flags, NULL);
return AE_OK;
return rc;
}
/**
@ -196,7 +184,7 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid)
acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
{
acpi_status status;
u32 ctrlset, control_set, result;
u32 control_req, control_set, result;
acpi_handle tmp;
struct acpi_osc_data *osc_data;
struct acpi_osc_args osc_args;
@ -213,28 +201,34 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
goto out;
}
ctrlset = (flags & OSC_CONTROL_MASKS);
if (!ctrlset) {
control_req = (flags & OSC_CONTROL_MASKS);
if (!control_req) {
status = AE_TYPE;
goto out;
}
status = __acpi_query_osc(osc_data->support_set, osc_data, &result);
if (ACPI_FAILURE(status))
/* No need to evaluate _OSC if the control was already granted. */
if ((osc_data->control_set & control_req) == control_req)
goto out;
if ((result & ctrlset) != ctrlset) {
if (!osc_data->is_queried) {
status = __acpi_query_osc(osc_data->support_set, osc_data);
if (ACPI_FAILURE(status))
goto out;
}
if ((osc_data->control_query & control_req) != control_req) {
status = AE_SUPPORT;
goto out;
}
control_set = osc_data->control_set | ctrlset;
control_set = osc_data->control_set | control_req;
osc_args.capbuf[OSC_QUERY_TYPE] = 0;
osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set;
osc_args.capbuf[OSC_CONTROL_TYPE] = control_set;
status = acpi_run_osc(handle, &osc_args);
status = acpi_run_osc(handle, &osc_args, &result);
if (ACPI_SUCCESS(status))
osc_data->control_set = control_set;
osc_data->control_set = result;
out:
mutex_unlock(&pci_acpi_lock);
return status;
@ -375,7 +369,7 @@ static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle)
* The string should be the same as root bridge's name
* Please look at 'pci_scan_bus_parented'
*/
num = sscanf(dev->bus_id, "pci%04x:%02x", &seg, &bus);
num = sscanf(dev_name(dev), "pci%04x:%02x", &seg, &bus);
if (num != 2)
return -ENODEV;
*handle = acpi_get_pci_rootbridge_handle(seg, bus);

View file

@ -16,6 +16,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include "pci.h"
/*
@ -48,7 +49,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
subdevice=PCI_ANY_ID, class=0, class_mask=0;
unsigned long driver_data=0;
int fields=0;
int retval;
int retval=0;
fields = sscanf(buf, "%x %x %x %x %x %x %lx",
&vendor, &device, &subvendor, &subdevice,
@ -58,6 +59,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
/* Only accept driver_data values that match an existing id_table
entry */
if (ids) {
retval = -EINVAL;
while (ids->vendor || ids->subvendor || ids->class_mask) {
if (driver_data == ids->driver_data) {
@ -68,6 +70,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
}
if (retval) /* No match */
return retval;
}
dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
if (!dynid)
@ -183,32 +186,43 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
return pci_match_id(drv->id_table, dev);
}
struct drv_dev_and_id {
struct pci_driver *drv;
struct pci_dev *dev;
const struct pci_device_id *id;
};
static long local_pci_probe(void *_ddi)
{
struct drv_dev_and_id *ddi = _ddi;
return ddi->drv->probe(ddi->dev, ddi->id);
}
static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
const struct pci_device_id *id)
{
int error;
#ifdef CONFIG_NUMA
/* Execute driver initialization on node where the
device's bus is attached to. This way the driver likely
allocates its local memory on the right node without
any need to change it. */
struct mempolicy *oldpol;
cpumask_t oldmask = current->cpus_allowed;
int node = dev_to_node(&dev->dev);
int error, node;
struct drv_dev_and_id ddi = { drv, dev, id };
/* Execute driver initialization on node where the device's
bus is attached to. This way the driver likely allocates
its local memory on the right node without any need to
change it. */
node = dev_to_node(&dev->dev);
if (node >= 0) {
int cpu;
node_to_cpumask_ptr(nodecpumask, node);
set_cpus_allowed_ptr(current, nodecpumask);
}
/* And set default memory allocation policy */
oldpol = current->mempolicy;
current->mempolicy = NULL; /* fall back to system default policy */
#endif
error = drv->probe(dev, id);
#ifdef CONFIG_NUMA
set_cpus_allowed_ptr(current, &oldmask);
current->mempolicy = oldpol;
#endif
get_online_cpus();
cpu = cpumask_any_and(nodecpumask, cpu_online_mask);
if (cpu < nr_cpu_ids)
error = work_on_cpu(cpu, local_pci_probe, &ddi);
else
error = local_pci_probe(&ddi);
put_online_cpus();
} else
error = local_pci_probe(&ddi);
return error;
}
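The rework above funnels the probe call through local_pci_probe() so it can be handed to work_on_cpu() and run on a CPU belonging to the device's NUMA node, which makes the driver's GFP_KERNEL allocations during probe default to node-local memory. A reduced sketch of that pattern, with a hypothetical callback and using the same cpumask helpers as the hunk above:

/* Sketch: run example_fn(arg) on some online CPU of NUMA node 'node',
 * falling back to the current CPU if the node is unknown or has no
 * online CPUs. */
static long example_run_on_node(int node, long (*example_fn)(void *), void *arg)
{
	long ret;

	if (node >= 0) {
		int cpu;
		node_to_cpumask_ptr(nodecpumask, node);

		get_online_cpus();
		cpu = cpumask_any_and(nodecpumask, cpu_online_mask);
		if (cpu < nr_cpu_ids)
			ret = work_on_cpu(cpu, example_fn, arg);
		else
			ret = example_fn(arg);	/* node has no online CPUs */
		put_online_cpus();
	} else {
		ret = example_fn(arg);		/* no node affinity known */
	}
	return ret;
}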
@ -300,21 +314,12 @@ static void pci_device_shutdown(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
struct pci_driver *drv = pci_dev->driver;
return drv && (drv->suspend || drv->suspend_late || drv->resume
|| drv->resume_early);
}
/*
* Default "suspend" method for devices that have no driver provided suspend,
* or not even a driver at all.
* or not even a driver at all (second part).
*/
static void pci_default_pm_suspend(struct pci_dev *pci_dev)
static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
{
pci_save_state(pci_dev);
/*
* mark its power state as "unknown", since we don't know if
* e.g. the BIOS will change its device state when we suspend.
@ -323,21 +328,11 @@ static void pci_default_pm_suspend(struct pci_dev *pci_dev)
pci_dev->current_state = PCI_UNKNOWN;
}
/*
* Default "resume" method for devices that have no driver provided resume,
* or not even a driver at all (first part).
*/
static void pci_default_pm_resume_early(struct pci_dev *pci_dev)
{
/* restore the PCI config space */
pci_restore_state(pci_dev);
}
/*
* Default "resume" method for devices that have no driver provided resume,
* or not even a driver at all (second part).
*/
static int pci_default_pm_resume_late(struct pci_dev *pci_dev)
static int pci_pm_reenable_device(struct pci_dev *pci_dev)
{
int retval;
@ -363,8 +358,16 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
i = drv->suspend(pci_dev, state);
suspend_report_result(drv->suspend, i);
} else {
pci_default_pm_suspend(pci_dev);
pci_save_state(pci_dev);
/*
* This is for compatibility with existing code with legacy PM
* support.
*/
pci_pm_set_unknown_state(pci_dev);
}
pci_fixup_device(pci_fixup_suspend, pci_dev);
return i;
}
@ -381,32 +384,130 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
return i;
}
static int pci_legacy_resume(struct device *dev)
{
int error;
struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver;
if (drv && drv->resume) {
error = drv->resume(pci_dev);
} else {
pci_default_pm_resume_early(pci_dev);
error = pci_default_pm_resume_late(pci_dev);
}
return error;
}
static int pci_legacy_resume_early(struct device *dev)
{
int error = 0;
struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver;
pci_fixup_device(pci_fixup_resume_early, pci_dev);
if (drv && drv->resume_early)
error = drv->resume_early(pci_dev);
return error;
}
static int pci_legacy_resume(struct device *dev)
{
int error;
struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver;
pci_fixup_device(pci_fixup_resume, pci_dev);
if (drv && drv->resume) {
error = drv->resume(pci_dev);
} else {
/* restore the PCI config space */
pci_restore_state(pci_dev);
error = pci_pm_reenable_device(pci_dev);
}
return error;
}
/* Auxiliary functions used by the new power management framework */
static int pci_restore_standard_config(struct pci_dev *pci_dev)
{
struct pci_dev *parent = pci_dev->bus->self;
int error = 0;
/* Check if the device's bus is operational */
if (!parent || parent->current_state == PCI_D0) {
pci_restore_state(pci_dev);
pci_update_current_state(pci_dev, PCI_D0);
} else {
dev_warn(&pci_dev->dev, "unable to restore config, "
"bridge %s in low power state D%d\n", pci_name(parent),
parent->current_state);
pci_dev->current_state = PCI_UNKNOWN;
error = -EAGAIN;
}
return error;
}
static bool pci_is_bridge(struct pci_dev *pci_dev)
{
return !!(pci_dev->subordinate);
}
static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
{
if (pci_restore_standard_config(pci_dev))
pci_fixup_device(pci_fixup_resume_early, pci_dev);
}
static int pci_pm_default_resume(struct pci_dev *pci_dev)
{
/*
* pci_restore_standard_config() should have been called once already,
* but it would have failed if the device's parent bridge had not been
* in power state D0 at that time. Check it and try again if necessary.
*/
if (pci_dev->current_state == PCI_UNKNOWN) {
int error = pci_restore_standard_config(pci_dev);
if (error)
return error;
}
pci_fixup_device(pci_fixup_resume, pci_dev);
if (!pci_is_bridge(pci_dev))
pci_enable_wake(pci_dev, PCI_D0, false);
return pci_pm_reenable_device(pci_dev);
}
static void pci_pm_default_suspend_generic(struct pci_dev *pci_dev)
{
/* If device is enabled at this point, disable it */
pci_disable_enabled_device(pci_dev);
/*
* Save state with interrupts enabled, because in principle the bus the
* device is on may be put into a low power state after this code runs.
*/
pci_save_state(pci_dev);
}
static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
pci_pm_default_suspend_generic(pci_dev);
if (!pci_is_bridge(pci_dev))
pci_prepare_to_sleep(pci_dev);
pci_fixup_device(pci_fixup_suspend, pci_dev);
}
static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
struct pci_driver *drv = pci_dev->driver;
bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
|| drv->resume_early);
/*
* Legacy PM support is used by default, so warn if the new framework is
* supported as well. Drivers are supposed to support either the
* former, or the latter, but not both at the same time.
*/
WARN_ON(ret && drv->driver.pm);
return ret;
}
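For context, the two driver-side styles that this check tells apart look roughly as follows. This is a hedged sketch: every example_* name and the device ID are hypothetical, and the dev_pm_ops wiring shown is an assumption about the new framework referenced through drv->pm in the hunks below.

#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical vendor/device */
	{ }
};

/* Legacy style: callbacks live in struct pci_driver, take a pci_dev and
 * do their own PCI state save/restore. */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return 0;
}

static struct pci_driver example_legacy_driver = {
	.name		= "example-legacy",
	.id_table	= example_ids,
	.suspend	= example_suspend,
	.resume		= example_resume,
};

/* New framework: device-type-agnostic callbacks take a struct device and
 * hang off the embedded device_driver; the PCI core supplies the default
 * save/restore around them, as implemented in the hunks below. */
static int example_dev_suspend(struct device *dev) { return 0; }
static int example_dev_resume(struct device *dev) { return 0; }

static struct dev_pm_ops example_pm_ops = {
	.suspend	= example_dev_suspend,
	.resume		= example_dev_resume,
};

static struct pci_driver example_new_driver = {
	.name		= "example-new",
	.id_table	= example_ids,
	.driver		= { .pm = &example_pm_ops },
};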
/* New power management framework */
static int pci_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
@ -434,15 +535,16 @@ static int pci_pm_suspend(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->suspend) {
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_SUSPEND);
if (drv && drv->pm && drv->pm->suspend) {
error = drv->pm->suspend(dev);
suspend_report_result(drv->pm->suspend, error);
}
} else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_suspend(dev, PMSG_SUSPEND);
}
pci_fixup_device(pci_fixup_suspend, pci_dev);
if (!error)
pci_pm_default_suspend(pci_dev);
return error;
}
@ -453,36 +555,16 @@ static int pci_pm_suspend_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->suspend_noirq) {
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
if (drv && drv->pm && drv->pm->suspend_noirq) {
error = drv->pm->suspend_noirq(dev);
suspend_report_result(drv->pm->suspend_noirq, error);
}
} else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_suspend_late(dev, PMSG_SUSPEND);
} else {
pci_default_pm_suspend(pci_dev);
}
return error;
}
static int pci_pm_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error = 0;
pci_fixup_device(pci_fixup_resume, pci_dev);
if (drv && drv->pm) {
if (drv->pm->resume)
error = drv->pm->resume(dev);
} else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_resume(dev);
} else {
error = pci_default_pm_resume_late(pci_dev);
}
if (!error)
pci_pm_set_unknown_state(pci_dev);
return error;
}
@ -493,17 +575,31 @@ static int pci_pm_resume_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev));
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
if (drv && drv->pm) {
if (drv->pm->resume_noirq)
pci_pm_default_resume_noirq(pci_dev);
if (drv && drv->pm && drv->pm->resume_noirq)
error = drv->pm->resume_noirq(dev);
} else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_resume_early(dev);
} else {
pci_default_pm_resume_early(pci_dev);
return error;
}
static int pci_pm_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error = 0;
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
error = pci_pm_default_resume(pci_dev);
if (!error && drv && drv->pm && drv->pm->resume)
error = drv->pm->resume(dev);
return error;
}
@ -524,15 +620,16 @@ static int pci_pm_freeze(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->freeze) {
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_FREEZE);
if (drv && drv->pm && drv->pm->freeze) {
error = drv->pm->freeze(dev);
suspend_report_result(drv->pm->freeze, error);
}
} else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_suspend(dev, PMSG_FREEZE);
pci_fixup_device(pci_fixup_suspend, pci_dev);
}
if (!error)
pci_pm_default_suspend_generic(pci_dev);
return error;
}
@ -543,33 +640,16 @@ static int pci_pm_freeze_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->freeze_noirq) {
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_FREEZE);
if (drv && drv->pm && drv->pm->freeze_noirq) {
error = drv->pm->freeze_noirq(dev);
suspend_report_result(drv->pm->freeze_noirq, error);
}
} else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_suspend_late(dev, PMSG_FREEZE);
} else {
pci_default_pm_suspend(pci_dev);
}
return error;
}
static int pci_pm_thaw(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->thaw)
error = drv->pm->thaw(dev);
} else if (pci_has_legacy_pm_support(pci_dev)) {
pci_fixup_device(pci_fixup_resume, pci_dev);
error = pci_legacy_resume(dev);
}
if (!error)
pci_pm_set_unknown_state(pci_dev);
return error;
}
@ -580,14 +660,31 @@ static int pci_pm_thaw_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->thaw_noirq)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
pci_update_current_state(pci_dev, PCI_D0);
if (drv && drv->pm && drv->pm->thaw_noirq)
error = drv->pm->thaw_noirq(dev);
} else if (pci_has_legacy_pm_support(pci_dev)) {
pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev));
error = pci_legacy_resume_early(dev);
return error;
}
static int pci_pm_thaw(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error = 0;
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
pci_pm_reenable_device(pci_dev);
if (drv && drv->pm && drv->pm->thaw)
error = drv->pm->thaw(dev);
return error;
}
@ -597,16 +694,16 @@ static int pci_pm_poweroff(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
pci_fixup_device(pci_fixup_suspend, pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_HIBERNATE);
if (drv && drv->pm) {
if (drv->pm->poweroff) {
if (drv && drv->pm && drv->pm->poweroff) {
error = drv->pm->poweroff(dev);
suspend_report_result(drv->pm->poweroff, error);
}
} else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_suspend(dev, PMSG_HIBERNATE);
}
if (!error)
pci_pm_default_suspend(pci_dev);
return error;
}
@ -616,33 +713,13 @@ static int pci_pm_poweroff_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->poweroff_noirq) {
if (pci_has_legacy_pm_support(to_pci_dev(dev)))
return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
if (drv && drv->pm && drv->pm->poweroff_noirq) {
error = drv->pm->poweroff_noirq(dev);
suspend_report_result(drv->pm->poweroff_noirq, error);
}
} else if (pci_has_legacy_pm_support(to_pci_dev(dev))) {
error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
}
return error;
}
static int pci_pm_restore(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->restore)
error = drv->pm->restore(dev);
} else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_resume(dev);
} else {
error = pci_default_pm_resume_late(pci_dev);
}
pci_fixup_device(pci_fixup_resume, pci_dev);
return error;
}
@ -653,17 +730,30 @@ static int pci_pm_restore_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
pci_fixup_device(pci_fixup_resume, pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
if (drv && drv->pm) {
if (drv->pm->restore_noirq)
pci_pm_default_resume_noirq(pci_dev);
if (drv && drv->pm && drv->pm->restore_noirq)
error = drv->pm->restore_noirq(dev);
} else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_resume_early(dev);
} else {
pci_default_pm_resume_early(pci_dev);
return error;
}
pci_fixup_device(pci_fixup_resume_early, pci_dev);
static int pci_pm_restore(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error = 0;
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
error = pci_pm_default_resume(pci_dev);
if (!error && drv && drv->pm && drv->pm->restore)
error = drv->pm->restore(dev);
return error;
}

drivers/pci/pci-stub.c (new file, 47 lines)
View file

@ -0,0 +1,47 @@
/* pci-stub - simple stub driver to reserve a pci device
*
* Copyright (C) 2008 Red Hat, Inc.
* Author:
* Chris Wright
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Usage is simple: allocate a new id to the stub driver and bind the
* device to it. For example:
*
* # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id
* # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
* # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind
* # ls -l /sys/bus/pci/devices/0000:00:19.0/driver
* .../0000:00:19.0/driver -> ../../../bus/pci/drivers/pci-stub
*/
#include <linux/module.h>
#include <linux/pci.h>
static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
return 0;
}
static struct pci_driver stub_driver = {
.name = "pci-stub",
.id_table = NULL, /* only dynamic id's */
.probe = pci_stub_probe,
};
static int __init pci_stub_init(void)
{
return pci_register_driver(&stub_driver);
}
static void __exit pci_stub_exit(void)
{
pci_unregister_driver(&stub_driver);
}
module_init(pci_stub_init);
module_exit(pci_stub_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chris Wright <chrisw@sous-sol.org>");

View file

@ -58,13 +58,14 @@ static ssize_t broken_parity_status_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
ssize_t consumed = -EINVAL;
unsigned long val;
if ((count > 0) && (*buf == '0' || *buf == '1')) {
pdev->broken_parity_status = *buf == '1' ? 1 : 0;
consumed = count;
}
return consumed;
if (strict_strtoul(buf, 0, &val) < 0)
return -EINVAL;
pdev->broken_parity_status = !!val;
return count;
}
static ssize_t local_cpus_show(struct device *dev,
@ -101,11 +102,13 @@ resource_show(struct device * dev, struct device_attribute *attr, char * buf)
struct pci_dev * pci_dev = to_pci_dev(dev);
char * str = buf;
int i;
int max = 7;
int max;
resource_size_t start, end;
if (pci_dev->subordinate)
max = DEVICE_COUNT_RESOURCE;
else
max = PCI_BRIDGE_RESOURCES;
for (i = 0; i < max; i++) {
struct resource *res = &pci_dev->resource[i];
@ -133,19 +136,23 @@ static ssize_t is_enabled_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
ssize_t result = -EINVAL;
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
ssize_t result = strict_strtoul(buf, 0, &val);
if (result < 0)
return result;
/* this can crash the machine when done on the "wrong" device */
if (!capable(CAP_SYS_ADMIN))
return count;
return -EPERM;
if (*buf == '0') {
if (!val) {
if (atomic_read(&pdev->enable_cnt) != 0)
pci_disable_device(pdev);
else
result = -EIO;
} else if (*buf == '1')
} else
result = pci_enable_device(pdev);
return result < 0 ? result : count;
@ -185,25 +192,28 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
if (strict_strtoul(buf, 0, &val) < 0)
return -EINVAL;
/* bad things may happen if the no_msi flag is changed
* while some drivers are loaded */
if (!capable(CAP_SYS_ADMIN))
return count;
return -EPERM;
/* Maybe pci devices without subordinate busses shouldn't even have this
* attribute in the first place? */
if (!pdev->subordinate)
return count;
if (*buf == '0') {
pdev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
dev_warn(&pdev->dev, "forced subordinate bus to not support MSI,"
" bad things could happen.\n");
}
/* Is the flag going to change, or keep the value it already had? */
if (!(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI) ^
!!val) {
pdev->subordinate->bus_flags ^= PCI_BUS_FLAGS_NO_MSI;
if (*buf == '1') {
pdev->subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
dev_warn(&pdev->dev, "forced subordinate bus to support MSI,"
" bad things could happen.\n");
dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI,"
" bad things could happen\n", val ? "" : " not");
}
return count;
@ -361,55 +371,33 @@ pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
pci_read_vpd(struct kobject *kobj, struct bin_attribute *bin_attr,
read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev =
to_pci_dev(container_of(kobj, struct device, kobj));
int end;
int ret;
if (off > bin_attr->size)
count = 0;
else if (count > bin_attr->size - off)
count = bin_attr->size - off;
end = off + count;
while (off < end) {
ret = dev->vpd->ops->read(dev, off, end - off, buf);
if (ret < 0)
return ret;
buf += ret;
off += ret;
}
return count;
return pci_read_vpd(dev, off, count, buf);
}
static ssize_t
pci_write_vpd(struct kobject *kobj, struct bin_attribute *bin_attr,
write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev =
to_pci_dev(container_of(kobj, struct device, kobj));
int end;
int ret;
if (off > bin_attr->size)
count = 0;
else if (count > bin_attr->size - off)
count = bin_attr->size - off;
end = off + count;
while (off < end) {
ret = dev->vpd->ops->write(dev, off, end - off, buf);
if (ret < 0)
return ret;
buf += ret;
off += ret;
}
return count;
return pci_write_vpd(dev, off, count, buf);
}
#ifdef HAVE_PCI_LEGACY
@ -569,7 +557,7 @@ void pci_remove_legacy_files(struct pci_bus *b)
#ifdef HAVE_PCI_MMAP
static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
{
unsigned long nr, start, size;
@ -620,6 +608,9 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
vma->vm_pgoff += start >> PAGE_SHIFT;
mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start))
return -EINVAL;
return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
}
@ -832,8 +823,8 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
attr->size = dev->vpd->len;
attr->attr.name = "vpd";
attr->attr.mode = S_IRUSR | S_IWUSR;
attr->read = pci_read_vpd;
attr->write = pci_write_vpd;
attr->read = read_vpd_attr;
attr->write = write_vpd_attr;
retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
if (retval) {
kfree(dev->vpd->attr);

View file

@ -56,6 +56,22 @@ unsigned char pci_bus_max_busnr(struct pci_bus* bus)
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
/*
* Make sure the BAR is actually a memory resource, not an IO resource
*/
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
WARN_ON(1);
return NULL;
}
return ioremap_nocache(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
#if 0
/**
* pci_max_busnr - returns maximum PCI bus number
@ -360,25 +376,10 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
static void
pci_restore_bars(struct pci_dev *dev)
{
int i, numres;
int i;
switch (dev->hdr_type) {
case PCI_HEADER_TYPE_NORMAL:
numres = 6;
break;
case PCI_HEADER_TYPE_BRIDGE:
numres = 2;
break;
case PCI_HEADER_TYPE_CARDBUS:
numres = 1;
break;
default:
/* Should never get here, but just in case... */
return;
}
for (i = 0; i < numres; i ++)
pci_update_resource(dev, &dev->resource[i], i);
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
static struct pci_platform_pm_ops *pci_platform_pm;
@ -524,14 +525,17 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
* pci_update_current_state - Read PCI power state of given device from its
* PCI PM registers and cache it
* @dev: PCI device to handle.
* @state: State to cache in case the device doesn't have the PM capability
*/
static void pci_update_current_state(struct pci_dev *dev)
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
if (dev->pm_cap) {
u16 pmcsr;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
} else {
dev->current_state = state;
}
}
@ -574,7 +578,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
*/
int ret = platform_pci_set_power_state(dev, PCI_D0);
if (!ret)
pci_update_current_state(dev);
pci_update_current_state(dev, PCI_D0);
}
/* This device is quirked not to be put into D3, so
don't put it in D3 */
@ -587,7 +591,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
/* Allow the platform to finalize the transition */
int ret = platform_pci_set_power_state(dev, state);
if (!ret) {
pci_update_current_state(dev);
pci_update_current_state(dev, state);
error = 0;
}
}
@ -640,19 +644,14 @@ static int pci_save_pcie_state(struct pci_dev *dev)
int pos, i = 0;
struct pci_cap_saved_state *save_state;
u16 *cap;
int found = 0;
pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
if (pos <= 0)
return 0;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
if (!save_state)
save_state = kzalloc(sizeof(*save_state) + sizeof(u16) * 4, GFP_KERNEL);
else
found = 1;
if (!save_state) {
dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n");
dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__);
return -ENOMEM;
}
cap = (u16 *)&save_state->data[0];
@ -661,9 +660,7 @@ static int pci_save_pcie_state(struct pci_dev *dev)
pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
save_state->cap_nr = PCI_CAP_ID_EXP;
if (!found)
pci_add_saved_cap(dev, save_state);
return 0;
}
@ -688,30 +685,21 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
static int pci_save_pcix_state(struct pci_dev *dev)
{
int pos, i = 0;
int pos;
struct pci_cap_saved_state *save_state;
u16 *cap;
int found = 0;
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (pos <= 0)
return 0;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
if (!save_state)
save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL);
else
found = 1;
if (!save_state) {
dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n");
dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__);
return -ENOMEM;
}
cap = (u16 *)&save_state->data[0];
pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]);
save_state->cap_nr = PCI_CAP_ID_PCIX;
if (!found)
pci_add_saved_cap(dev, save_state);
pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);
return 0;
}
@ -982,6 +970,32 @@ void pcim_pin_device(struct pci_dev *pdev)
*/
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
static void do_pci_disable_device(struct pci_dev *dev)
{
u16 pci_command;
pci_read_config_word(dev, PCI_COMMAND, &pci_command);
if (pci_command & PCI_COMMAND_MASTER) {
pci_command &= ~PCI_COMMAND_MASTER;
pci_write_config_word(dev, PCI_COMMAND, pci_command);
}
pcibios_disable_device(dev);
}
/**
* pci_disable_enabled_device - Disable device without updating enable_cnt
* @dev: PCI device to disable
*
* NOTE: This function is a backend of PCI power management routines and is
* not supposed to be called by drivers.
*/
void pci_disable_enabled_device(struct pci_dev *dev)
{
if (atomic_read(&dev->enable_cnt))
do_pci_disable_device(dev);
}
/**
* pci_disable_device - Disable PCI device after use
* @dev: PCI device to be disabled
@ -996,7 +1010,6 @@ void
pci_disable_device(struct pci_dev *dev)
{
struct pci_devres *dr;
u16 pci_command;
dr = find_pci_dr(dev);
if (dr)
@ -1005,14 +1018,9 @@ pci_disable_device(struct pci_dev *dev)
if (atomic_sub_return(1, &dev->enable_cnt) != 0)
return;
pci_read_config_word(dev, PCI_COMMAND, &pci_command);
if (pci_command & PCI_COMMAND_MASTER) {
pci_command &= ~PCI_COMMAND_MASTER;
pci_write_config_word(dev, PCI_COMMAND, pci_command);
}
dev->is_busmaster = 0;
do_pci_disable_device(dev);
pcibios_disable_device(dev);
dev->is_busmaster = 0;
}
/**
@ -1107,7 +1115,7 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
int error = 0;
bool pme_done = false;
if (!device_may_wakeup(&dev->dev))
if (enable && !device_may_wakeup(&dev->dev))
return -EINVAL;
/*
@ -1252,14 +1260,15 @@ void pci_pm_init(struct pci_dev *dev)
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
if (!pm)
return;
goto Exit;
/* Check device's ability to generate PME# */
pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
pmc & PCI_PM_CAP_VER_MASK);
return;
goto Exit;
}
dev->pm_cap = pm;
@ -1298,6 +1307,74 @@ void pci_pm_init(struct pci_dev *dev)
} else {
dev->pme_support = 0;
}
Exit:
pci_update_current_state(dev, PCI_D0);
}
/**
* platform_pci_wakeup_init - init platform wakeup if present
* @dev: PCI device
*
* Some devices don't have PCI PM caps but can still generate wakeup
* events through platform methods (like ACPI events). If @dev supports
* platform wakeup events, set the device flag to indicate as much. This
* may be redundant if the device also supports PCI PM caps, but double
* initialization should be safe in that case.
*/
void platform_pci_wakeup_init(struct pci_dev *dev)
{
if (!platform_pci_can_wakeup(dev))
return;
device_set_wakeup_capable(&dev->dev, true);
device_set_wakeup_enable(&dev->dev, false);
platform_pci_sleep_wake(dev, false);
}
/**
* pci_add_cap_save_buffer - allocate buffer for saving given capability registers
* @dev: the PCI device
* @cap: the capability to allocate the buffer for
* @size: requested size of the buffer
*/
static int pci_add_cap_save_buffer(
struct pci_dev *dev, char cap, unsigned int size)
{
int pos;
struct pci_cap_saved_state *save_state;
pos = pci_find_capability(dev, cap);
if (pos <= 0)
return 0;
save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
if (!save_state)
return -ENOMEM;
save_state->cap_nr = cap;
pci_add_saved_cap(dev, save_state);
return 0;
}
/**
* pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
* @dev: the PCI device
*/
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
int error;
error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16));
if (error)
dev_err(&dev->dev,
"unable to preallocate PCI Express save buffer\n");
error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
if (error)
dev_err(&dev->dev,
"unable to preallocate PCI-X save buffer\n");
}
/**
@ -1337,6 +1414,20 @@ void pci_enable_ari(struct pci_dev *dev)
bridge->ari_enabled = 1;
}
/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
* @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
*
* Perform INTx swizzling for a device behind one level of bridge. This is
* required by section 9.1 of the PCI-to-PCI bridge specification for devices
* behind bridges on add-in cards.
*/
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
}
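As a worked example of the formula (numbers chosen only to make the modulo arithmetic explicit): an INTB line (pin 2) from a device in slot 3 behind a bridge comes out of the bridge as (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA on the primary side.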
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
@ -1345,15 +1436,35 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
pin = dev->pin;
if (!pin)
return -1;
pin--;
while (dev->bus->self) {
pin = (pin + PCI_SLOT(dev->devfn)) % 4;
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
*bridge = dev;
return pin;
}
/**
* pci_common_swizzle - swizzle INTx all the way to root bridge
* @dev: the PCI device
* @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
*
* Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
* bridges all the way up to a PCI root bus.
*/
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
u8 pin = *pinp;
while (dev->bus->self) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
*pinp = pin;
return PCI_SLOT(dev->devfn);
}
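Architectures typically combine this with a per-slot routing table; a hedged sketch of how a host-bridge setup routine might use it (the table and its indexing are hypothetical):

/* Sketch: map a device's interrupt to a host IRQ by swizzling its INTx pin
 * up to the root bus and indexing a hypothetical per-slot routing table. */
static int example_map_irq(struct pci_dev *dev, const int irq_table[][4])
{
	u8 pin = dev->pin;
	u8 slot;

	if (!pin)
		return -1;		/* device does not use INTx */

	slot = pci_common_swizzle(dev, &pin);
	return irq_table[slot][pin - 1];
}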
/**
* pci_release_region - Release a PCI bar
* @pdev: PCI device whose resources were previously reserved by pci_request_region
@ -1395,7 +1506,8 @@ void pci_release_region(struct pci_dev *pdev, int bar)
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
int exclusive)
{
struct pci_devres *dr;
@ -1408,8 +1520,9 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
goto err_out;
}
else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
if (!request_mem_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar), res_name))
if (!__request_mem_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar), res_name,
exclusive))
goto err_out;
}
@ -1427,6 +1540,47 @@ err_out:
return -EBUSY;
}
/**
* pci_request_region - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
* @res_name: Name to be associated with resource.
*
* Mark the PCI region associated with PCI device @pdev BAR @bar as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
* successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
return __pci_request_region(pdev, bar, res_name, 0);
}
/**
* pci_request_region_exclusive - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
* @res_name: Name to be associated with resource.
*
* Mark the PCI region associated with PCI device @pdev BAR @bar as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
* successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*
* The key difference that _exclusive makes is that userspace is
* explicitly not allowed to map the resource via /dev/mem or
* sysfs.
*/
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
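A brief usage sketch of the exclusive variant; the BAR number and owner string are hypothetical:

/* Sketch: claim BAR 0 exclusively so userspace cannot map it behind the
 * driver's back, then release it on teardown. */
static int example_claim_bar(struct pci_dev *pdev)
{
	int err = pci_request_region_exclusive(pdev, 0, "example-driver");

	if (err)
		return err;
	/* ... program the device ... */
	pci_release_region(pdev, 0);
	return 0;
}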
/**
* pci_release_selected_regions - Release selected PCI I/O and memory resources
* @pdev: PCI device whose resources were previously reserved
@ -1444,20 +1598,14 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
pci_release_region(pdev, i);
}
/**
* pci_request_selected_regions - Reserve selected PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
* @bars: Bitmask of BARs to be requested
* @res_name: Name to be associated with resource
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *res_name)
int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *res_name, int excl)
{
int i;
for (i = 0; i < 6; i++)
if (bars & (1 << i))
if(pci_request_region(pdev, i, res_name))
if (__pci_request_region(pdev, i, res_name, excl))
goto err_out;
return 0;
@ -1469,6 +1617,26 @@ err_out:
return -EBUSY;
}
/**
* pci_request_selected_regions - Reserve selected PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
* @bars: Bitmask of BARs to be requested
* @res_name: Name to be associated with resource
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *res_name)
{
return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
int bars, const char *res_name)
{
return __pci_request_selected_regions(pdev, bars, res_name,
IORESOURCE_EXCLUSIVE);
}
/**
* pci_release_regions - Release reserved PCI I/O and memory resources
* @pdev: PCI device whose resources were previously reserved by pci_request_regions
@ -1501,6 +1669,45 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name)
return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
/**
* pci_request_regions_exclusive - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
* @res_name: Name to be associated with resource.
*
* Mark all PCI regions associated with PCI device @pdev as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
* successfully.
*
* pci_request_regions_exclusive() will mark the region so that
* /dev/mem and the sysfs MMIO access will not be allowed.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
return pci_request_selected_regions_exclusive(pdev,
((1 << 6) - 1), res_name);
}
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
if (enable)
cmd = old_cmd | PCI_COMMAND_MASTER;
else
cmd = old_cmd & ~PCI_COMMAND_MASTER;
if (cmd != old_cmd) {
dev_dbg(&dev->dev, "%s bus mastering\n",
enable ? "enabling" : "disabling");
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
dev->is_busmaster = enable;
}
/**
* pci_set_master - enables bus-mastering for device dev
* @dev: the PCI device to enable
@ -1508,21 +1715,21 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name)
* Enables bus-mastering on the device and calls pcibios_set_master()
* to do the needed arch specific settings.
*/
void
pci_set_master(struct pci_dev *dev)
void pci_set_master(struct pci_dev *dev)
{
u16 cmd;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (! (cmd & PCI_COMMAND_MASTER)) {
dev_dbg(&dev->dev, "enabling bus mastering\n");
cmd |= PCI_COMMAND_MASTER;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
dev->is_busmaster = 1;
__pci_set_master(dev, true);
pcibios_set_master(dev);
}
/**
* pci_clear_master - disables bus-mastering for device dev
* @dev: the PCI device to disable
*/
void pci_clear_master(struct pci_dev *dev)
{
__pci_set_master(dev, false);
}
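The new pci_clear_master() pairs naturally with pci_set_master() in a DMA-capable driver; a minimal sketch with hypothetical names and error handling trimmed:

static int example_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;
	pci_set_master(pdev);		/* allow the device to DMA */
	return 0;
}

static void example_dma_remove(struct pci_dev *pdev)
{
	pci_clear_master(pdev);		/* stop bus mastering before teardown */
	pci_disable_device(pdev);
}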
#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
@ -1751,24 +1958,7 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif
/**
* pci_execute_reset_function() - Reset a PCI device function
* @dev: Device function to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
* to PCI config space in order to use this function.
*
* The device function is presumed to be unused when this function is called.
* Resetting the device will make the contents of PCI configuration space
* random, so any caller of this must be prepared to reinitialise the
* device including MSI, bus mastering, BARs, decoding IO and memory spaces,
* etc.
*
* Returns 0 if the device function was successfully reset or -ENOTTY if the
* device doesn't support resetting a single function.
*/
int pci_execute_reset_function(struct pci_dev *dev)
static int __pcie_flr(struct pci_dev *dev, int probe)
{
u16 status;
u32 cap;
@ -1780,6 +1970,9 @@ int pci_execute_reset_function(struct pci_dev *dev)
if (!(cap & PCI_EXP_DEVCAP_FLR))
return -ENOTTY;
if (probe)
return 0;
pci_block_user_cfg_access(dev);
/* Wait for Transaction Pending bit to clear */
@ -1802,6 +1995,80 @@ int pci_execute_reset_function(struct pci_dev *dev)
pci_unblock_user_cfg_access(dev);
return 0;
}
static int __pci_af_flr(struct pci_dev *dev, int probe)
{
int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
u8 status;
u8 cap;
if (!cappos)
return -ENOTTY;
pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap);
if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
return -ENOTTY;
if (probe)
return 0;
pci_block_user_cfg_access(dev);
/* Wait for Transaction Pending bit to clear */
msleep(100);
pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
if (status & PCI_AF_STATUS_TP) {
dev_info(&dev->dev, "Busy after 100ms while trying to"
" reset; sleeping for 1 second\n");
ssleep(1);
pci_read_config_byte(dev,
cappos + PCI_AF_STATUS, &status);
if (status & PCI_AF_STATUS_TP)
dev_info(&dev->dev, "Still busy after 1s; "
"proceeding with reset anyway\n");
}
pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
mdelay(100);
pci_unblock_user_cfg_access(dev);
return 0;
}
static int __pci_reset_function(struct pci_dev *pdev, int probe)
{
int res;
res = __pcie_flr(pdev, probe);
if (res != -ENOTTY)
return res;
res = __pci_af_flr(pdev, probe);
if (res != -ENOTTY)
return res;
return res;
}
/**
* pci_execute_reset_function() - Reset a PCI device function
* @dev: Device function to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
* to PCI config space in order to use this function.
*
* The device function is presumed to be unused when this function is called.
* Resetting the device will make the contents of PCI configuration space
* random, so any caller of this must be prepared to reinitialise the
* device including MSI, bus mastering, BARs, decoding IO and memory spaces,
* etc.
*
* Returns 0 if the device function was successfully reset or -ENOTTY if the
* device doesn't support resetting a single function.
*/
int pci_execute_reset_function(struct pci_dev *dev)
{
return __pci_reset_function(dev, 0);
}
EXPORT_SYMBOL_GPL(pci_execute_reset_function);
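/*
 * Editorial usage sketch: the caller is expected to save and restore config
 * space around the reset itself.  The wrapper name is made up and error
 * handling is minimal.
 */
#include <linux/pci.h>

static int my_driver_flr(struct pci_dev *pdev)
{
	int rc;

	pci_save_state(pdev);			/* config space is lost by the FLR */
	rc = pci_execute_reset_function(pdev);	/* -ENOTTY if no FLR support */
	if (rc)
		return rc;
	pci_restore_state(pdev);		/* bring back BARs, MSI, command bits */
	return 0;
}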
/**
@ -1822,15 +2089,10 @@ EXPORT_SYMBOL_GPL(pci_execute_reset_function);
*/
int pci_reset_function(struct pci_dev *dev)
{
u32 cap;
int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
int r;
int r = __pci_reset_function(dev, 1);
if (!exppos)
return -ENOTTY;
pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
if (!(cap & PCI_EXP_DEVCAP_FLR))
return -ENOTTY;
if (r < 0)
return r;
if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
disable_irq(dev->irq);
@ -2022,6 +2284,28 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
return bars;
}
/**
* pci_resource_bar - get position of the BAR associated with a resource
* @dev: the PCI device
* @resno: the resource number
* @type: the BAR type to be filled in
*
* Returns BAR position in config space, or 0 if the BAR is invalid.
*/
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
if (resno < PCI_ROM_RESOURCE) {
*type = pci_bar_unknown;
return PCI_BASE_ADDRESS_0 + 4 * resno;
} else if (resno == PCI_ROM_RESOURCE) {
*type = pci_bar_mem32;
return dev->rom_base_reg;
}
dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
return 0;
}
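/*
 * Simplified sketch of the intended caller (compare the pci_update_resource()
 * change later in this series): translate a resource index into its BAR
 * offset before writing a new address.
 */
static void example_write_bar(struct pci_dev *dev, int resno, u32 new)
{
	enum pci_bar_type type;
	int reg = pci_resource_bar(dev, resno, &type);

	if (!reg)
		return;		/* bridge windows etc. have no BAR to write */
	if (type != pci_bar_unknown)
		new |= PCI_ROM_ADDRESS_ENABLE;	/* the ROM BAR encodes an enable bit */
	pci_write_config_dword(dev, reg, new);
}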
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
@ -2029,6 +2313,19 @@ static void __devinit pci_no_domains(void)
#endif
}
/**
* pci_ext_cfg_avail - can we access extended PCI config space?
* @dev: The PCI device of the root bridge.
*
* Returns 1 if we can access PCI extended config space (offsets
* greater than 0xff). This is the default implementation. Architecture
* implementations can override this.
*/
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
return 1;
}
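/*
 * Since the symbol is weak, an architecture can replace it with its own
 * policy; generic code simply tests the result.  A tiny, made-up caller:
 */
static inline bool example_may_touch_ext_cfg(struct pci_dev *root_bridge_dev)
{
	return pci_ext_cfg_avail(root_bridge_dev) != 0;	/* offsets > 0xff allowed? */
}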
static int __devinit pci_init(void)
{
struct pci_dev *dev = NULL;
@ -2037,8 +2334,6 @@ static int __devinit pci_init(void)
pci_fixup_device(pci_fixup_final, dev);
}
msi_init();
return 0;
}
@ -2083,11 +2378,15 @@ EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
@ -10,6 +10,10 @@ extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env);
extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
extern void pci_cleanup_rom(struct pci_dev *dev);
#ifdef HAVE_PCI_MMAP
extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
struct vm_area_struct *vma);
#endif
/**
* Firmware PM callbacks
@ -40,7 +44,11 @@ struct pci_platform_pm_ops {
};
extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
extern void pci_disable_enabled_device(struct pci_dev *dev);
extern void pci_pm_init(struct pci_dev *dev);
extern void platform_pci_wakeup_init(struct pci_dev *dev);
extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
@ -50,14 +58,14 @@ extern int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
extern int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
struct pci_vpd_ops {
int (*read)(struct pci_dev *dev, int pos, int size, char *buf);
int (*write)(struct pci_dev *dev, int pos, int size, const char *buf);
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
void (*release)(struct pci_dev *dev);
};
struct pci_vpd {
unsigned int len;
struct pci_vpd_ops *ops;
const struct pci_vpd_ops *ops;
struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
};
@ -98,11 +106,9 @@ extern unsigned int pci_pm_d3_delay;
#ifdef CONFIG_PCI_MSI
void pci_no_msi(void);
extern void pci_msi_init_pci_dev(struct pci_dev *dev);
extern void __devinit msi_init(void);
#else
static inline void pci_no_msi(void) { }
static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
static inline void msi_init(void) { }
#endif
#ifdef CONFIG_PCIEAER
@ -159,16 +165,28 @@ struct pci_slot_attribute {
};
#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr)
enum pci_bar_type {
pci_bar_unknown, /* Standard PCI BAR probe */
pci_bar_io, /* An io port BAR */
pci_bar_mem32, /* A 32-bit memory BAR */
pci_bar_mem64, /* A 64-bit memory BAR */
};
extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
extern int pci_resource_bar(struct pci_dev *dev, int resno,
enum pci_bar_type *type);
extern int pci_bus_add_child(struct pci_bus *bus);
extern void pci_enable_ari(struct pci_dev *dev);
/**
* pci_ari_enabled - query ARI forwarding status
* @dev: the PCI device
* @bus: the PCI bus
*
* Returns 1 if ARI forwarding is enabled, or 0 if not enabled.
*/
static inline int pci_ari_enabled(struct pci_dev *dev)
static inline int pci_ari_enabled(struct pci_bus *bus)
{
return dev->ari_enabled;
return bus->self && bus->self->ari_enabled;
}
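/*
 * Small illustration (not from the patch): ARI widens the function number
 * space behind a bridge from 8 to 256, which is what callers typically
 * check this helper for.
 */
static inline unsigned int example_fn_limit(struct pci_dev *dev)
{
	return pci_ari_enabled(dev->bus) ? 256 : 8;
}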
#endif /* DRIVERS_PCI_H */
@ -38,7 +38,6 @@ int aer_osc_setup(struct pcie_device *pciedev)
handle = acpi_find_root_bridge_handle(pdev);
if (handle) {
pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT);
status = pci_osc_control_set(handle,
OSC_PCI_EXPRESS_AER_CONTROL |
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
@ -233,7 +233,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
if (info->flags & AER_TLP_HEADER_VALID_FLAG) {
unsigned char *tlp = (unsigned char *) &info->tlp;
printk("%sTLB Header:\n", loglevel);
printk("%sTLP Header:\n", loglevel);
printk("%s%02x%02x%02x%02x %02x%02x%02x%02x"
" %02x%02x%02x%02x %02x%02x%02x%02x\n",
loglevel,
@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/pci-aspm.h>
#include "../pci.h"
@ -33,6 +34,11 @@ struct endpoint_state {
struct pcie_link_state {
struct list_head sibiling;
struct pci_dev *pdev;
bool downstream_has_switch;
struct pcie_link_state *parent;
struct list_head children;
struct list_head link;
/* ASPM state */
unsigned int support_state;
@ -70,6 +76,8 @@ static const char *policy_str[] = {
[POLICY_POWERSAVE] = "powersave"
};
#define LINK_RETRAIN_TIMEOUT HZ
static int policy_to_aspm_state(struct pci_dev *pdev)
{
struct pcie_link_state *link_state = pdev->link_state;
@ -125,7 +133,7 @@ static void pcie_set_clock_pm(struct pci_dev *pdev, int enable)
link_state->clk_pm_enabled = !!enable;
}
static void pcie_check_clock_pm(struct pci_dev *pdev)
static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist)
{
int pos;
u32 reg32;
@ -149,10 +157,26 @@ static void pcie_check_clock_pm(struct pci_dev *pdev)
if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
enabled = 0;
}
link_state->clk_pm_capable = capable;
link_state->clk_pm_enabled = enabled;
link_state->bios_clk_state = enabled;
if (!blacklist) {
link_state->clk_pm_capable = capable;
pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
} else {
link_state->clk_pm_capable = 0;
pcie_set_clock_pm(pdev, 0);
}
}
static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
{
struct pci_dev *child_dev;
list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM)
return true;
}
return false;
}
/*
@ -217,16 +241,18 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
/* Wait for link training end */
/* break out after waiting for 1 second */
/* break out after waiting for timeout */
start_jiffies = jiffies;
while ((jiffies - start_jiffies) < HZ) {
for (;;) {
pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_LT))
break;
cpu_relax();
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
break;
msleep(1);
}
/* training failed -> recover */
if ((jiffies - start_jiffies) >= HZ) {
if (reg16 & PCI_EXP_LNKSTA_LT) {
dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure"
" common clock\n");
i = 0;
@ -419,9 +445,9 @@ static unsigned int pcie_aspm_check_state(struct pci_dev *pdev,
{
struct pci_dev *child_dev;
/* If no child, disable the link */
/* If no child, ignore the link */
if (list_empty(&pdev->subordinate->devices))
return 0;
return state;
list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
/*
@ -462,6 +488,9 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
int valid = 1;
struct pcie_link_state *link_state = pdev->link_state;
/* If no child, disable the link */
if (list_empty(&pdev->subordinate->devices))
state = 0;
/*
* if the downstream component has pci bridge function, don't do ASPM
* now
@ -493,20 +522,52 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
link_state->enabled_state = state;
}
static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
{
struct pcie_link_state *root_port_link = link;
while (root_port_link->parent)
root_port_link = root_port_link->parent;
return root_port_link;
}
/* check the whole hierarchy, and configure each link in the hierarchy */
static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
unsigned int state)
{
struct pcie_link_state *link_state = pdev->link_state;
struct pcie_link_state *root_port_link = get_root_port_link(link_state);
struct pcie_link_state *leaf;
if (link_state->support_state == 0)
return;
state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
/* state 0 means disabling aspm */
state = pcie_aspm_check_state(pdev, state);
/* check all links who have specific root port link */
list_for_each_entry(leaf, &link_list, sibiling) {
if (!list_empty(&leaf->children) ||
get_root_port_link(leaf) != root_port_link)
continue;
state = pcie_aspm_check_state(leaf->pdev, state);
}
/* check root port link too in case it hasn't children */
state = pcie_aspm_check_state(root_port_link->pdev, state);
if (link_state->enabled_state == state)
return;
__pcie_aspm_config_link(pdev, state);
/*
* We must change the hierarchy. See comments in
* __pcie_aspm_config_link for the order
**/
if (state & PCIE_LINK_STATE_L1) {
list_for_each_entry(leaf, &link_list, sibiling) {
if (get_root_port_link(leaf) == root_port_link)
__pcie_aspm_config_link(leaf->pdev, state);
}
} else {
list_for_each_entry_reverse(leaf, &link_list, sibiling) {
if (get_root_port_link(leaf) == root_port_link)
__pcie_aspm_config_link(leaf->pdev, state);
}
}
}
/*
@ -570,6 +631,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
unsigned int state;
struct pcie_link_state *link_state;
int error = 0;
int blacklist;
if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
return;
@ -580,29 +642,58 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
if (list_empty(&pdev->subordinate->devices))
goto out;
if (pcie_aspm_sanity_check(pdev))
goto out;
blacklist = !!pcie_aspm_sanity_check(pdev);
mutex_lock(&aspm_lock);
link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
if (!link_state)
goto unlock_out;
link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
INIT_LIST_HEAD(&link_state->children);
INIT_LIST_HEAD(&link_state->link);
if (pdev->bus->self) {/* this is a switch */
struct pcie_link_state *parent_link_state;
parent_link_state = pdev->bus->parent->self->link_state;
if (!parent_link_state) {
kfree(link_state);
goto unlock_out;
}
list_add(&link_state->link, &parent_link_state->children);
link_state->parent = parent_link_state;
}
pdev->link_state = link_state;
if (!blacklist) {
pcie_aspm_configure_common_clock(pdev);
pcie_aspm_cap_init(pdev);
/* config link state to avoid BIOS error */
state = pcie_aspm_check_state(pdev, policy_to_aspm_state(pdev));
__pcie_aspm_config_link(pdev, state);
pcie_check_clock_pm(pdev);
} else {
link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
link_state->bios_aspm_state = 0;
/* Set support state to 0, so we will disable ASPM later */
link_state->support_state = 0;
}
link_state->pdev = pdev;
list_add(&link_state->sibiling, &link_list);
if (link_state->downstream_has_switch) {
/*
* If the link has a switch, delay the link configuration. The leaf link
* initialization will configure the whole hierarchy, but we must
* make sure the BIOS doesn't set an unsupported link state.
**/
state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
__pcie_aspm_config_link(pdev, state);
} else
__pcie_aspm_configure_link_state(pdev,
policy_to_aspm_state(pdev));
pcie_check_clock_pm(pdev, blacklist);
unlock_out:
if (error)
free_link_state(pdev);
@ -635,6 +726,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
/* All functions are removed, so just disable ASPM for the link */
__pcie_aspm_config_one_dev(parent, 0);
list_del(&link_state->sibiling);
list_del(&link_state->link);
/* Clock PM is for endpoint device */
free_link_state(parent);
@ -857,24 +949,15 @@ void pcie_no_aspm(void)
aspm_disabled = 1;
}
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#include <linux/pci-acpi.h>
static void pcie_aspm_platform_init(void)
/**
* pcie_aspm_enabled - is PCIe ASPM enabled?
*
* Returns true if ASPM has not been disabled by the command-line option
* pcie_aspm=off.
**/
int pcie_aspm_enabled(void)
{
pcie_osc_support_set(OSC_ACTIVE_STATE_PWR_SUPPORT|
OSC_CLOCK_PWR_CAPABILITY_SUPPORT);
return !aspm_disabled;
}
#else
static inline void pcie_aspm_platform_init(void) { }
#endif
EXPORT_SYMBOL(pcie_aspm_enabled);
static int __init pcie_aspm_init(void)
{
if (aspm_disabled)
return 0;
pcie_aspm_platform_init();
return 0;
}
fs_initcall(pcie_aspm_init);
@ -16,14 +16,10 @@
#include "portdrv.h"
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
static int pcie_port_bus_suspend(struct device *dev, pm_message_t state);
static int pcie_port_bus_resume(struct device *dev);
struct bus_type pcie_port_bus_type = {
.name = "pci_express",
.match = pcie_port_bus_match,
.suspend = pcie_port_bus_suspend,
.resume = pcie_port_bus_resume,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
@ -49,32 +45,12 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
return 1;
}
static int pcie_port_bus_suspend(struct device *dev, pm_message_t state)
int pcie_port_bus_register(void)
{
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
if (!dev || !dev->driver)
return 0;
pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver);
if (driver && driver->suspend)
driver->suspend(pciedev, state);
return 0;
return bus_register(&pcie_port_bus_type);
}
static int pcie_port_bus_resume(struct device *dev)
void pcie_port_bus_unregister(void)
{
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
if (!dev || !dev->driver)
return 0;
pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver);
if (driver && driver->resume)
driver->resume(pciedev);
return 0;
bus_unregister(&pcie_port_bus_type);
}
@ -19,91 +19,15 @@
extern int pcie_mch_quirk; /* MSI-quirk Indicator */
static int pcie_port_probe_service(struct device *dev)
{
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
int status;
if (!dev || !dev->driver)
return -ENODEV;
driver = to_service_driver(dev->driver);
if (!driver || !driver->probe)
return -ENODEV;
pciedev = to_pcie_device(dev);
status = driver->probe(pciedev, driver->id_table);
if (!status) {
dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
driver->name);
get_device(dev);
}
return status;
}
static int pcie_port_remove_service(struct device *dev)
{
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
if (!dev || !dev->driver)
return 0;
pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver);
if (driver && driver->remove) {
dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
driver->name);
driver->remove(pciedev);
put_device(dev);
}
return 0;
}
static void pcie_port_shutdown_service(struct device *dev) {}
static int pcie_port_suspend_service(struct device *dev, pm_message_t state)
{
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
if (!dev || !dev->driver)
return 0;
pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver);
if (driver && driver->suspend)
driver->suspend(pciedev, state);
return 0;
}
static int pcie_port_resume_service(struct device *dev)
{
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
if (!dev || !dev->driver)
return 0;
pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver);
if (driver && driver->resume)
driver->resume(pciedev);
return 0;
}
/*
* release_pcie_device
/**
* release_pcie_device - free PCI Express port service device structure
* @dev: Port service device to release
*
* Being invoked automatically when device is being removed
* in response to device_unregister(dev) call.
* Release all resources being claimed.
* Invoked automatically when device is being removed in response to
* device_unregister(dev). Release all resources being claimed.
*/
static void release_pcie_device(struct device *dev)
{
dev_printk(KERN_DEBUG, dev, "free port service\n");
kfree(to_pcie_device(dev));
}
@ -129,6 +53,15 @@ static int is_msi_quirked(struct pci_dev *dev)
return quirk;
}
/**
* assign_interrupt_mode - choose interrupt mode for PCI Express port services
* (INTx, MSI-X, MSI) and set up vectors
* @dev: PCI Express port to handle
* @vectors: Array of interrupt vectors to populate
* @mask: Bitmask of port capabilities returned by get_port_device_capability()
*
* Return value: Interrupt mode associated with the port
*/
static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
{
int i, pos, nvec, status = -EINVAL;
@ -150,7 +83,6 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
if (pos) {
struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] =
{{0, 0}, {0, 1}, {0, 2}, {0, 3}};
dev_info(&dev->dev, "found MSI-X capability\n");
status = pci_enable_msix(dev, msix_entries, nvec);
if (!status) {
int j = 0;
@ -165,7 +97,6 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
if (status) {
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
if (pos) {
dev_info(&dev->dev, "found MSI capability\n");
status = pci_enable_msi(dev);
if (!status) {
interrupt_mode = PCIE_PORT_MSI_MODE;
@ -177,6 +108,16 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
return interrupt_mode;
}
/**
* get_port_device_capability - discover capabilities of a PCI Express port
* @dev: PCI Express port to examine
*
* The capabilities are read from the port's PCI Express configuration registers
* as described in PCI Express Base Specification 1.0a sections 7.8.2, 7.8.9 and
* 7.9 - 7.11.
*
* Return value: Bitmask of discovered port capabilities
*/
static int get_port_device_capability(struct pci_dev *dev)
{
int services = 0, pos;
@ -204,6 +145,15 @@ static int get_port_device_capability(struct pci_dev *dev)
return services;
}
/**
* pcie_device_init - initialize PCI Express port service device
* @dev: Port service device to initialize
* @parent: PCI Express port to associate the service device with
* @port_type: Type of the port
* @service_type: Type of service to associate with the service device
* @irq: Interrupt vector to associate with the service device
* @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
*/
static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
int port_type, int service_type, int irq, int irq_mode)
{
@ -224,11 +174,19 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
device->driver = NULL;
device->driver_data = NULL;
device->release = release_pcie_device; /* callback to free pcie dev */
snprintf(device->bus_id, sizeof(device->bus_id), "%s:pcie%02x",
dev_set_name(device, "%s:pcie%02x",
pci_name(parent), get_descriptor_id(port_type, service_type));
device->parent = &parent->dev;
}
/**
* alloc_pcie_device - allocate PCI Express port service device structure
* @parent: PCI Express port to associate the service device with
* @port_type: Type of the port
* @service_type: Type of service to associate with the service device
* @irq: Interrupt vector to associate with the service device
* @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
*/
static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
int port_type, int service_type, int irq, int irq_mode)
{
@ -239,10 +197,13 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
return NULL;
pcie_device_init(parent, device, port_type, service_type, irq,irq_mode);
dev_printk(KERN_DEBUG, &device->device, "allocate port service\n");
return device;
}
/**
* pcie_port_device_probe - check if device is a PCI Express port
* @dev: Device to check
*/
int pcie_port_device_probe(struct pci_dev *dev)
{
int pos, type;
@ -260,6 +221,13 @@ int pcie_port_device_probe(struct pci_dev *dev)
return -ENODEV;
}
/**
* pcie_port_device_register - register PCI Express port
* @dev: PCI Express port to register
*
* Allocate the port extension structure and register services associated with
* the port.
*/
int pcie_port_device_register(struct pci_dev *dev)
{
struct pcie_port_device_ext *p_ext;
@ -323,6 +291,11 @@ static int suspend_iter(struct device *dev, void *data)
return 0;
}
/**
* pcie_port_device_suspend - suspend port services associated with a PCIe port
* @dev: PCI Express port to handle
* @state: Representation of system power management transition in progress
*/
int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state)
{
return device_for_each_child(&dev->dev, &state, suspend_iter);
@ -341,6 +314,10 @@ static int resume_iter(struct device *dev, void *data)
return 0;
}
/**
* pcie_port_device_resume - resume port services associated with a PCIe port
* @dev: PCI Express port to handle
*/
int pcie_port_device_resume(struct pci_dev *dev)
{
return device_for_each_child(&dev->dev, NULL, resume_iter);
@ -363,6 +340,13 @@ static int remove_iter(struct device *dev, void *data)
return 0;
}
/**
* pcie_port_device_remove - unregister PCI Express port service devices
* @dev: PCI Express port the service devices to unregister are associated with
*
* Remove PCI Express port service devices associated with given port and
* disable MSI-X or MSI for the port.
*/
void pcie_port_device_remove(struct pci_dev *dev)
{
struct device *device;
@ -386,16 +370,80 @@ void pcie_port_device_remove(struct pci_dev *dev)
pci_disable_msi(dev);
}
int pcie_port_bus_register(void)
/**
* pcie_port_probe_service - probe driver for given PCI Express port service
* @dev: PCI Express port service device to probe against
*
* If a PCI Express port service driver is registered with
* pcie_port_service_register(), this function will be called by the driver core
* whenever a match is found between the driver and a port service device.
*/
static int pcie_port_probe_service(struct device *dev)
{
return bus_register(&pcie_port_bus_type);
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
int status;
if (!dev || !dev->driver)
return -ENODEV;
driver = to_service_driver(dev->driver);
if (!driver || !driver->probe)
return -ENODEV;
pciedev = to_pcie_device(dev);
status = driver->probe(pciedev, driver->id_table);
if (!status) {
dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
driver->name);
get_device(dev);
}
return status;
}
void pcie_port_bus_unregister(void)
/**
* pcie_port_remove_service - detach driver from given PCI Express port service
* @dev: PCI Express port service device to handle
*
* If a PCI Express port service driver is registered with
* pcie_port_service_register(), this function will be called by the driver core
* when device_unregister() is called for the port service device associated
* with the driver.
*/
static int pcie_port_remove_service(struct device *dev)
{
bus_unregister(&pcie_port_bus_type);
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
if (!dev || !dev->driver)
return 0;
pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver);
if (driver && driver->remove) {
dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
driver->name);
driver->remove(pciedev);
put_device(dev);
}
return 0;
}
/**
* pcie_port_shutdown_service - shut down given PCI Express port service
* @dev: PCI Express port service device to handle
*
* If a PCI Express port service driver is registered with
* pcie_port_service_register(), this function will be called by the driver core
* when device_shutdown() is called for the port service device associated
* with the driver.
*/
static void pcie_port_shutdown_service(struct device *dev) {}
/**
* pcie_port_service_register - register PCI Express port service driver
* @new: PCI Express port service driver to register
*/
int pcie_port_service_register(struct pcie_port_service_driver *new)
{
new->driver.name = (char *)new->name;
@ -403,15 +451,17 @@ int pcie_port_service_register(struct pcie_port_service_driver *new)
new->driver.probe = pcie_port_probe_service;
new->driver.remove = pcie_port_remove_service;
new->driver.shutdown = pcie_port_shutdown_service;
new->driver.suspend = pcie_port_suspend_service;
new->driver.resume = pcie_port_resume_service;
return driver_register(&new->driver);
}
void pcie_port_service_unregister(struct pcie_port_service_driver *new)
/**
* pcie_port_service_unregister - unregister PCI Express port service driver
* @drv: PCI Express port service driver to unregister
*/
void pcie_port_service_unregister(struct pcie_port_service_driver *drv)
{
driver_unregister(&new->driver);
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(pcie_port_service_register);
@ -41,7 +41,6 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
{
int retval;
pci_restore_state(dev);
retval = pci_enable_device(dev);
if (retval)
return retval;
@ -52,11 +51,18 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
#ifdef CONFIG_PM
static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state)
{
int ret = pcie_port_device_suspend(dev, state);
return pcie_port_device_suspend(dev, state);
if (!ret)
ret = pcie_portdrv_save_config(dev);
return ret;
}
static int pcie_portdrv_suspend_late(struct pci_dev *dev, pm_message_t state)
{
return pci_save_state(dev);
}
static int pcie_portdrv_resume_early(struct pci_dev *dev)
{
return pci_restore_state(dev);
}
static int pcie_portdrv_resume(struct pci_dev *dev)
@ -66,6 +72,8 @@ static int pcie_portdrv_resume(struct pci_dev *dev)
}
#else
#define pcie_portdrv_suspend NULL
#define pcie_portdrv_suspend_late NULL
#define pcie_portdrv_resume_early NULL
#define pcie_portdrv_resume NULL
#endif
@ -221,6 +229,7 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
/* If fatal, restore cfg space for possible link reset at upstream */
if (dev->error_state == pci_channel_io_frozen) {
pci_restore_state(dev);
pcie_portdrv_restore_config(dev);
pci_enable_pcie_error_reporting(dev);
}
@ -283,6 +292,8 @@ static struct pci_driver pcie_portdriver = {
.remove = pcie_portdrv_remove,
.suspend = pcie_portdrv_suspend,
.suspend_late = pcie_portdrv_suspend_late,
.resume_early = pcie_portdrv_resume_early,
.resume = pcie_portdrv_resume,
.err_handler = &pcie_portdrv_err_handler,
@ -135,13 +135,6 @@ static u64 pci_size(u64 base, u64 maxbase, u64 mask)
return size;
}
enum pci_bar_type {
pci_bar_unknown, /* Standard PCI BAR probe */
pci_bar_io, /* An io port BAR */
pci_bar_mem32, /* A 32-bit memory BAR */
pci_bar_mem64, /* A 64-bit memory BAR */
};
static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
{
if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
@ -156,11 +149,16 @@ static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
return pci_bar_mem32;
}
/*
* If the type is not unknown, we assume that the lowest bit is 'enable'.
* Returns 1 if the BAR was 64-bit and 0 if it was 32-bit.
/**
* pci_read_base - read a PCI BAR
* @dev: the PCI device
* @type: type of the BAR
* @res: resource buffer to be filled in
* @pos: BAR position in the config space
*
* Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
*/
static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int pos)
{
u32 l, sz, mask;
@ -400,19 +398,17 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
if (!child)
return NULL;
child->self = bridge;
child->parent = parent;
child->ops = parent->ops;
child->sysdata = parent->sysdata;
child->bus_flags = parent->bus_flags;
child->bridge = get_device(&bridge->dev);
/* initialize some portions of the bus device, but don't register it
* now as the parent is not properly set up yet. This device will get
* registered later in pci_bus_add_devices()
*/
child->dev.class = &pcibus_class;
sprintf(child->dev.bus_id, "%04x:%02x", pci_domain_nr(child), busnr);
dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
/*
* Set up the primary, secondary and subordinate
@ -422,8 +418,14 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
child->primary = parent->secondary;
child->subordinate = 0xff;
if (!bridge)
return child;
child->self = bridge;
child->bridge = get_device(&bridge->dev);
/* Set up default resource pointers and names.. */
for (i = 0; i < 4; i++) {
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
child->resource[i]->name = child->name;
}
@ -958,8 +960,12 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* MSI/MSI-X list */
pci_msi_init_pci_dev(dev);
/* Buffers for saving PCIe and PCI-X capabilities */
pci_allocate_cap_save_buffers(dev);
/* Power Management */
pci_pm_init(dev);
platform_pci_wakeup_init(dev);
/* Vital Product Data */
pci_vpd_pci22_init(dev);
@ -1130,7 +1136,7 @@ struct pci_bus * pci_create_bus(struct device *parent,
memset(dev, 0, sizeof(*dev));
dev->parent = parent;
dev->release = pci_release_bus_bridge_dev;
sprintf(dev->bus_id, "pci%04x:%02x", pci_domain_nr(b), bus);
dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
error = device_register(dev);
if (error)
goto dev_reg_err;
@ -1141,7 +1147,7 @@ struct pci_bus * pci_create_bus(struct device *parent,
b->dev.class = &pcibus_class;
b->dev.parent = b->bridge;
sprintf(b->dev.bus_id, "%04x:%02x", pci_domain_nr(b), bus);
dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
error = device_register(&b->dev);
if (error)
goto class_dev_reg_err;
@ -252,11 +252,20 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
const struct proc_dir_entry *dp = PDE(inode);
struct pci_dev *dev = dp->data;
struct pci_filp_private *fpriv = file->private_data;
int ret;
int i, ret;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
/* Make sure the caller is mapping a real resource for this device */
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
if (pci_mmap_fits(dev, i, vma))
break;
}
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
ret = pci_mmap_page_range(dev, vma,
fpriv->mmap_state,
fpriv->write_combine);
@ -352,15 +361,16 @@ static int show_device(struct seq_file *m, void *v)
dev->vendor,
dev->device,
dev->irq);
/* Here should be 7 and not PCI_NUM_RESOURCES as we need to preserve compatibility */
for (i=0; i<7; i++) {
/* only print standard and ROM resources to preserve compatibility */
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
resource_size_t start, end;
pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
seq_printf(m, "\t%16llx",
(unsigned long long)(start |
(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
}
for (i=0; i<7; i++) {
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
resource_size_t start, end;
pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
seq_printf(m, "\t%16llx",
@ -56,7 +56,7 @@ static void quirk_passive_release(struct pci_dev *dev)
while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
pci_read_config_byte(d, 0x82, &dlc);
if (!(dlc & 1<<1)) {
dev_err(&d->dev, "PIIX3: Enabling Passive Release\n");
dev_info(&d->dev, "PIIX3: Enabling Passive Release\n");
dlc |= 1<<1;
pci_write_config_byte(d, 0x82, dlc);
}
@ -449,7 +449,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev)
static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
u32 region;
@ -459,20 +459,95 @@ static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev)
pci_read_config_dword(dev, 0x48, &region);
quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich6_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich6_lpc_acpi);
static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
{
u32 val;
u32 size, base;
pci_read_config_dword(dev, reg, &val);
/* Enabled? */
if (!(val & 1))
return;
base = val & 0xfffc;
if (dynsize) {
/*
* This is not correct. It is 16, 32 or 64 bytes depending on
* register D31:F0:ADh bits 5:4.
*
* But this gets us at least _part_ of it.
*/
size = 16;
} else {
size = 128;
}
base &= ~(size-1);
/* Just print it out for now. We should reserve it after more debugging */
dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
static void __devinit quirk_ich6_lpc(struct pci_dev *dev)
{
/* Shared ACPI/GPIO decode with all ICH6+ */
ich6_lpc_acpi_gpio(dev);
/* ICH6-specific generic IO decode */
ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
static void __devinit ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
{
u32 val;
u32 mask, base;
pci_read_config_dword(dev, reg, &val);
/* Enabled? */
if (!(val & 1))
return;
/*
* IO base in bits 15:2, mask in bits 23:18, both
* are dword-based
*/
base = val & 0xfffc;
mask = (val >> 16) & 0xfc;
mask |= 3;
/* Just print it out for now. We should reserve it after more debugging */
dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
}
/* ICH7-10 has the same common LPC generic IO decode registers */
static void __devinit quirk_ich7_lpc(struct pci_dev *dev)
{
/* We share the common ACPI/GPIO decode with ICH6 */
ich6_lpc_acpi_gpio(dev);
/* And have 4 ICH7+ generic decodes */
ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
/*
* VIA ACPI: One IO region pointed to by longword at
@ -2074,7 +2149,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
#endif /* CONFIG_PCI_MSI */
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
while (f < end) {
if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
@ -536,9 +536,8 @@ static void pci_bus_dump_res(struct pci_bus *bus)
if (!res)
continue;
printk(KERN_INFO "bus: %02x index %x %s: %pR\n",
bus->number, i,
(res->flags & IORESOURCE_IO) ? "io port" : "mmio", res);
dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i,
(res->flags & IORESOURCE_IO) ? "io: " : "mem:", res);
}
}
@ -26,11 +26,13 @@
#include "pci.h"
void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
void pci_update_resource(struct pci_dev *dev, int resno)
{
struct pci_bus_region region;
u32 new, check, mask;
int reg;
enum pci_bar_type type;
struct resource *res = dev->resource + resno;
/*
* Ignore resources for unimplemented BARs and unused resource slots
@ -61,17 +63,13 @@ void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
else
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
if (resno < 6) {
reg = PCI_BASE_ADDRESS_0 + 4 * resno;
} else if (resno == PCI_ROM_RESOURCE) {
reg = pci_resource_bar(dev, resno, &type);
if (!reg)
return;
if (type != pci_bar_unknown) {
if (!(res->flags & IORESOURCE_ROM_ENABLE))
return;
new |= PCI_ROM_ADDRESS_ENABLE;
reg = dev->rom_base_reg;
} else {
/* Hmm, non-standard resource. */
return; /* kill uninitialised var warning */
}
pci_write_config_dword(dev, reg, new);
@ -134,7 +132,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
align = resource_alignment(res);
if (!align) {
dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus "
dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
"alignment) %pR flags %#lx\n",
resno, res, res->flags);
return -EINVAL;
@ -157,12 +155,12 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
}
if (ret) {
dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
} else {
res->flags &= ~IORESOURCE_STARTALIGN;
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, res, resno);
pci_update_resource(dev, resno);
}
return ret;
@ -197,7 +195,7 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
} else if (resno < PCI_BRIDGE_RESOURCES) {
pci_update_resource(dev, res, resno);
pci_update_resource(dev, resno);
}
return ret;
@ -49,6 +49,7 @@ struct resource_list {
#define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */
#define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000
#define IORESOURCE_AUTO 0x40000000
@ -133,13 +134,16 @@ static inline unsigned long resource_type(struct resource *res)
}
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
#define request_mem_region_exclusive(start,n,name) \
__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
extern struct resource * __request_region(struct resource *,
resource_size_t start,
resource_size_t n, const char *name);
resource_size_t n, const char *name, int relaxed);
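/*
 * Editorial example, addresses made up: request_mem_region_exclusive() lets
 * a driver keep userspace from mapping its MMIO window via /dev/mem.
 */
static inline int example_claim_regs(void)
{
	if (!request_mem_region_exclusive(0xfe000000, 0x1000, "example-regs"))
		return -EBUSY;
	return 0;
}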
/* Compatibility cruft */
#define release_region(start,n) __release_region(&ioport_resource, (start), (n))
@ -175,6 +179,7 @@ extern struct resource * __devm_request_region(struct device *dev,
extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern int iomem_is_exclusive(u64 addr);
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
@ -8,6 +8,8 @@
#ifndef _PCI_ACPI_H_
#define _PCI_ACPI_H_
#include <linux/acpi.h>
#define OSC_QUERY_TYPE 0
#define OSC_SUPPORT_TYPE 1
#define OSC_CONTROL_TYPE 2
@ -48,15 +50,7 @@
#ifdef CONFIG_ACPI
extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
extern acpi_status __pci_osc_support_set(u32 flags, const char *hid);
static inline acpi_status pci_osc_support_set(u32 flags)
{
return __pci_osc_support_set(flags, PCI_ROOT_HID_STRING);
}
static inline acpi_status pcie_osc_support_set(u32 flags)
{
return __pci_osc_support_set(flags, PCI_EXPRESS_ROOT_HID_STRING);
}
int pci_acpi_osc_support(acpi_handle handle, u32 flags);
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{
/* Find root host bridge */
@ -66,6 +60,15 @@ static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus),
pdev->bus->number);
}
static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
{
int seg = pci_domain_nr(pbus), busnr = pbus->number;
struct pci_dev *bridge = pbus->self;
if (bridge)
return DEVICE_ACPI_HANDLE(&(bridge->dev));
return acpi_get_pci_rootbridge_handle(seg, busnr);
}
#else
#if !defined(AE_ERROR)
typedef u32 acpi_status;
@ -73,8 +76,6 @@ typedef u32 acpi_status;
#endif
static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
{return AE_ERROR;}
static inline acpi_status pci_osc_support_set(u32 flags) {return AE_ERROR;}
static inline acpi_status pcie_osc_support_set(u32 flags) {return AE_ERROR;}
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{ return NULL; }
#endif
@ -82,7 +82,30 @@ enum pci_mmap_state {
#define PCI_DMA_FROMDEVICE 2
#define PCI_DMA_NONE 3
#define DEVICE_COUNT_RESOURCE 12
/*
* For PCI devices, the region numbers are assigned this way:
*/
enum {
/* #0-5: standard PCI resources */
PCI_STD_RESOURCES,
PCI_STD_RESOURCE_END = 5,
/* #6: expansion ROM resource */
PCI_ROM_RESOURCE,
/* resources assigned to buses behind the bridge */
#define PCI_BRIDGE_RESOURCE_NUM 4
PCI_BRIDGE_RESOURCES,
PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
PCI_BRIDGE_RESOURCE_NUM - 1,
/* total resources associated with a PCI device */
PCI_NUM_RESOURCES,
/* preserve this for compatibility */
DEVICE_COUNT_RESOURCE
};
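/*
 * Quick sketch using the new names (not part of the patch): walk only the
 * standard BARs of a device.
 */
static inline void example_show_std_bars(struct pci_dev *dev)
{
	int i;

	for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++)
		dev_info(&dev->dev, "BAR %d: %pR\n", i, &dev->resource[i]);
}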
typedef int __bitwise pci_power_t;
@ -274,18 +297,6 @@ static inline void pci_add_saved_cap(struct pci_dev *pci_dev,
hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}
/*
* For PCI devices, the region numbers are assigned this way:
*
* 0-5 standard PCI regions
* 6 expansion ROM
* 7-10 bridges: address space assigned to buses behind the bridge
*/
#define PCI_ROM_RESOURCE 6
#define PCI_BRIDGE_RESOURCES 7
#define PCI_NUM_RESOURCES 11
#ifndef PCI_BUS_NUM_RESOURCES
#define PCI_BUS_NUM_RESOURCES 16
#endif
@ -325,6 +336,15 @@ struct pci_bus {
#define pci_bus_b(n) list_entry(n, struct pci_bus, node)
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
return pci_dev->msi_enabled || pci_dev->msix_enabled;
}
#else
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif
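/*
 * Illustration only: code that should leave the INTx line alone whenever
 * MSI or MSI-X is active can now use the helper above.
 */
static inline void example_mask_intx(struct pci_dev *dev)
{
	u16 cmd;

	if (pci_dev_msi_enabled(dev))
		return;		/* legacy interrupt is not in use */

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_INTX_DISABLE);
}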
/*
* Error values that may be returned by PCI functions.
*/
@ -532,7 +552,9 @@ int __must_check pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
struct resource *res);
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
extern void pci_dev_put(struct pci_dev *dev);
extern void pci_remove_bus(struct pci_bus *b);
@ -629,6 +651,7 @@ static inline int pci_is_managed(struct pci_dev *pdev)
void pci_disable_device(struct pci_dev *dev);
void pci_set_master(struct pci_dev *dev);
void pci_clear_master(struct pci_dev *dev);
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
#define HAVE_PCI_SET_MWI
int __must_check pci_set_mwi(struct pci_dev *dev);
@ -647,7 +670,7 @@ int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
int pci_reset_function(struct pci_dev *dev);
int pci_execute_reset_function(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
@ -674,6 +697,11 @@ int pci_back_from_sleep(struct pci_dev *dev);
/* Functions for PCI Hotplug drivers to use */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
/* Vital product data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
int pci_vpd_truncate(struct pci_dev *dev, size_t size);
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
void pci_bus_assign_resources(struct pci_bus *bus);
void pci_bus_size_bridges(struct pci_bus *bus);
@ -686,10 +714,13 @@ void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
int (*)(struct pci_dev *, u8, u8));
#define HAVE_PCI_REQ_REGIONS 2
int __must_check pci_request_regions(struct pci_dev *, const char *);
int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
void pci_release_regions(struct pci_dev *);
int __must_check pci_request_region(struct pci_dev *, int, const char *);
int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
void pci_release_region(struct pci_dev *, int);
int pci_request_selected_regions(struct pci_dev *, int, const char *);
int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
void pci_release_selected_regions(struct pci_dev *, int);
/* drivers/pci/bus.c */
@ -779,6 +810,10 @@ static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev)
static inline void pci_restore_msi_state(struct pci_dev *dev)
{ }
static inline int pci_msi_enabled(void)
{
return 0;
}
#else
extern int pci_enable_msi(struct pci_dev *dev);
extern void pci_msi_shutdown(struct pci_dev *dev);
@ -789,6 +824,16 @@ extern void pci_msix_shutdown(struct pci_dev *dev);
extern void pci_disable_msix(struct pci_dev *dev);
extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
extern void pci_restore_msi_state(struct pci_dev *dev);
extern int pci_msi_enabled(void);
#endif
#ifndef CONFIG_PCIEASPM
static inline int pcie_aspm_enabled(void)
{
return 0;
}
#else
extern int pcie_aspm_enabled(void);
#endif
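/*
 * Made-up caller: a driver applying its own link power tricks only when the
 * core is allowed to manage ASPM (i.e. not booted with pcie_aspm=off).
 */
static inline bool example_may_use_aspm(void)
{
	return pcie_aspm_enabled() != 0;
}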
#ifdef CONFIG_HT_IRQ
@ -1140,20 +1185,9 @@ static inline void pci_mmcfg_early_init(void) { }
static inline void pci_mmcfg_late_init(void) { }
#endif
#ifdef CONFIG_HAS_IOMEM
static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
/*
* Make sure the BAR is actually a memory resource, not an IO resource
*/
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
WARN_ON(1);
return NULL;
}
return ioremap_nocache(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
}
#endif
int pci_ext_cfg_avail(struct pci_dev *dev);
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
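/*
 * Usage sketch (BAR index chosen arbitrarily): map a memory BAR with the
 * now out-of-line helper; it returns NULL for non-memory BARs.
 */
static inline void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	return pci_ioremap_bar(pdev, 0);
}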
#endif /* __KERNEL__ */
#endif /* LINUX_PCI_H */
@ -228,6 +228,8 @@ extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
struct hotplug_params *hpp);
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
int acpi_root_bridge(acpi_handle handle);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(struct pci_bus *pbus);
#endif
#endif
@ -210,6 +210,7 @@
#define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
#define PCI_CAP_SIZEOF 4
@ -316,6 +317,17 @@
#define PCI_CHSWP_EXT 0x40 /* ENUM# status - extraction */
#define PCI_CHSWP_INS 0x80 /* ENUM# status - insertion */
/* PCI Advanced Feature registers */
#define PCI_AF_LENGTH 2
#define PCI_AF_CAP 3
#define PCI_AF_CAP_TP 0x01
#define PCI_AF_CAP_FLR 0x02
#define PCI_AF_CTRL 4
#define PCI_AF_CTRL_FLR 0x01
#define PCI_AF_STATUS 5
#define PCI_AF_STATUS_TP 0x01
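/*
 * Editorial sketch (helper name invented; assumes <linux/pci.h>) of how the
 * new Advanced Features bits are consumed: check whether a device advertises
 * a function level reset through the AF capability.
 */
static inline int example_has_af_flr(struct pci_dev *dev)
{
	int pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	u8 cap;

	if (!pos)
		return 0;
	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	return (cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR);
}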
/* PCI-X registers */
#define PCI_X_CMD 2 /* Modes & Features */
@ -399,20 +411,70 @@
#define PCI_EXP_DEVSTA_AUXPD 0x10 /* AUX Power Detected */
#define PCI_EXP_DEVSTA_TRPND 0x20 /* Transactions Pending */
#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
#define PCI_EXP_LNKCAP_ASPMS 0xc00 /* ASPM Support */
#define PCI_EXP_LNKCAP_L0SEL 0x7000 /* L0s Exit Latency */
#define PCI_EXP_LNKCAP_L1EL 0x38000 /* L1 Exit Latency */
#define PCI_EXP_LNKCAP_CLKPM 0x40000 /* L1 Clock Power Management */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
#define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */
#define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* L1 Clock Power Management */
#define PCI_EXP_LNKCAP_SDERC 0x00080000 /* Surprise Down Error Reporting Capable */
#define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */
#define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */
#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */
#define PCI_EXP_LNKCTL 16 /* Link Control */
#define PCI_EXP_LNKCTL_RL 0x20 /* Retrain Link */
#define PCI_EXP_LNKCTL_CCC 0x40 /* Common Clock COnfiguration */
#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
#define PCI_EXP_LNKCTL_RCB 0x0008 /* Read Completion Boundary */
#define PCI_EXP_LNKCTL_LD 0x0010 /* Link Disable */
#define PCI_EXP_LNKCTL_RL 0x0020 /* Retrain Link */
#define PCI_EXP_LNKCTL_CCC 0x0040 /* Common Clock Configuration */
#define PCI_EXP_LNKCTL_ES 0x0080 /* Extended Synch */
#define PCI_EXP_LNKCTL_CLKREQ_EN 0x100 /* Enable clkreq */
#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */
#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */
#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Link Autonomous Bandwidth Interrupt Enable */
#define PCI_EXP_LNKSTA 18 /* Link Status */
#define PCI_EXP_LNKSTA_LT 0x800 /* Link Training */
#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
#define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */
#define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */
#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
#define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */
#define PCI_EXP_SLTCAP_AIP 0x00000008 /* Attention Indicator Present */
#define PCI_EXP_SLTCAP_PIP 0x00000010 /* Power Indicator Present */
#define PCI_EXP_SLTCAP_HPS 0x00000020 /* Hot-Plug Surprise */
#define PCI_EXP_SLTCAP_HPC 0x00000040 /* Hot-Plug Capable */
#define PCI_EXP_SLTCAP_SPLV 0x00007f80 /* Slot Power Limit Value */
#define PCI_EXP_SLTCAP_SPLS 0x00018000 /* Slot Power Limit Scale */
#define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */
#define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */
#define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */
#define PCI_EXP_SLTCTL 24 /* Slot Control */
#define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */
#define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */
#define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */
#define PCI_EXP_SLTCTL_PDCE 0x0008 /* Presence Detect Changed Enable */
#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */
#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */
#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */
#define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */
#define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */
#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */
#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */
#define PCI_EXP_SLTSTA 26 /* Slot Status */
#define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */
#define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */
#define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */
#define PCI_EXP_SLTSTA_PDC 0x0008 /* Presence Detect Changed */
#define PCI_EXP_SLTSTA_CC 0x0010 /* Command Completed */
#define PCI_EXP_SLTSTA_MRLSS 0x0020 /* MRL Sensor State */
#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
#define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */
#define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */
#define PCI_EXP_RTCTL 28 /* Root Control */
#define PCI_EXP_RTCTL_SECEE 0x01 /* System Error on Correctable Error */
#define PCI_EXP_RTCTL_SENFEE 0x02 /* System Error on Non-Fatal Error */
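With the widened Link Capabilities and Link Status masks, a driver can report negotiated link parameters without hand-rolled constants. A rough sketch, assuming the device exposes a PCI Express capability; the shift of 4 is implied by the PCI_EXP_LNKSTA_NLW mask above and my_print_link_width() is illustrative only.

#include <linux/pci.h>

/* Illustrative only: log the negotiated link width of a PCI Express device */
static void my_print_link_width(struct pci_dev *dev)
{
	int pos;
	u16 lnksta;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_LNKSTA, &lnksta);

	/* PCI_EXP_LNKSTA_NLW occupies bits 9:4 of the Link Status register */
	dev_info(&dev->dev, "negotiated link width: x%u\n",
		 (lnksta & PCI_EXP_LNKSTA_NLW) >> 4);
}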

View file

@ -623,7 +623,7 @@ resource_size_t resource_alignment(struct resource *res)
*/
struct resource * __request_region(struct resource *parent,
resource_size_t start, resource_size_t n,
const char *name)
const char *name, int flags)
{
struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
@ -634,6 +634,7 @@ struct resource * __request_region(struct resource *parent,
res->start = start;
res->end = start + n - 1;
res->flags = IORESOURCE_BUSY;
res->flags |= flags;
write_lock(&resource_lock);
@ -679,7 +680,7 @@ int __check_region(struct resource *parent, resource_size_t start,
{
struct resource * res;
res = __request_region(parent, start, n, "check-region");
res = __request_region(parent, start, n, "check-region", 0);
if (!res)
return -EBUSY;
@ -776,7 +777,7 @@ struct resource * __devm_request_region(struct device *dev,
dr->start = start;
dr->n = n;
res = __request_region(parent, start, n, name);
res = __request_region(parent, start, n, name, 0);
if (res)
devres_add(dev, dr);
else
@ -876,3 +877,57 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
return err;
}
#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif
/*
* check if an address is reserved in the iomem resource tree
* returns 1 if reserved, 0 if not reserved.
*/
int iomem_is_exclusive(u64 addr)
{
struct resource *p = &iomem_resource;
int err = 0;
loff_t l;
int size = PAGE_SIZE;
if (!strict_iomem_checks)
return 0;
addr = addr & PAGE_MASK;
read_lock(&resource_lock);
for (p = p->child; p ; p = r_next(NULL, p, &l)) {
/*
* We can probably skip the resources without
* IORESOURCE_IO attribute?
*/
if (p->start >= addr + size)
break;
if (p->end < addr)
continue;
if (p->flags & IORESOURCE_BUSY &&
p->flags & IORESOURCE_EXCLUSIVE) {
err = 1;
break;
}
}
read_unlock(&resource_lock);
return err;
}
static int __init strict_iomem(char *str)
{
if (strstr(str, "relaxed"))
strict_iomem_checks = 0;
if (strstr(str, "strict"))
strict_iomem_checks = 1;
return 1;
}
__setup("iomem=", strict_iomem);
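Taken together, the new flags argument to __request_region() and iomem_is_exclusive() let a driver mark its MMIO window as off limits to userspace /dev/mem mappings when strict checking is enabled. A hedged sketch of the intended usage; request_mem_region_exclusive() is assumed to be the wrapper this series adds around __request_region() with IORESOURCE_EXCLUSIVE, and the driver name and BAR index are illustrative only.

#include <linux/ioport.h>
#include <linux/pci.h>

/* Illustrative: claim BAR 0 exclusively so that, with iomem=strict,
 * iomem_is_exclusive() reports the range as reserved to the kernel. */
static int my_claim_bar0(struct pci_dev *pdev)
{
	resource_size_t start = pci_resource_start(pdev, 0);
	resource_size_t len = pci_resource_len(pdev, 0);

	if (!request_mem_region_exclusive(start, len, "my-driver"))
		return -EBUSY;

	return 0;
}

Booting with iomem=relaxed, as documented in the kernel-parameters change, drops strict_iomem_checks back to 0 and disables the exclusivity enforcement without a rebuild.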