Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6

* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (48 commits)
  x86/PCI: Prevent mmconfig memory corruption
  ACPI: Use GPE reference counting to support shared GPEs
  x86/PCI: use host bridge _CRS info by default on 2008 and newer machines
  PCI: augment bus resource table with a list
  PCI: add pci_bus_for_each_resource(), remove direct bus->resource[] refs
  PCI: read bridge windows before filling in subtractive decode resources
  PCI: split up pci_read_bridge_bases()
  PCIe PME: use pci_pcie_cap()
  PCI PM: Run-time callbacks for PCI bus type
  PCIe PME: use pci_is_pcie()
  PCI / ACPI / PM: Platform support for PCI PME wake-up
  ACPI / ACPICA: Multiple system notify handlers per device
  ACPI / PM: Add more run-time wake-up fields
  ACPI: Use GPE reference counting to support shared GPEs
  PCI PM: Make it possible to force using INTx for PCIe PME signaling
  PCI PM: PCIe PME root port service driver
  PCI PM: Add function for checking PME status of devices
  PCI: mark is_pcie obsolete
  PCI: set PCI_PREF_RANGE_TYPE_64 in pci_bridge_check_ranges
  PCI: pciehp: second try to get big range for pcie devices
  ...
Linus Torvalds committed 2010-02-26 10:35:27 -08:00
Parents: a4a47bc03f bb8d41330c
Commit: 68c6b85984
113 changed files with 3039 additions and 1392 deletions


@ -1948,8 +1948,12 @@ and is between 256 and 4096 characters. It is defined in the file
IRQ routing is enabled.
noacpi [X86] Do not use ACPI for IRQ routing
or for PCI scanning.
use_crs [X86] Use _CRS for PCI resource
allocation.
use_crs [X86] Use PCI host bridge window information
from ACPI. On BIOSes from 2008 or later, this
is enabled by default. If you need to use this,
please report a bug.
nocrs [X86] Ignore PCI host bridge windows from ACPI.
If you need to use this, please report a bug.
routeirq Do IRQ routing for all PCI devices.
This is normally done in pci_enable_device(),
so this option is a temporary workaround
@ -1998,6 +2002,14 @@ and is between 256 and 4096 characters. It is defined in the file
force Enable ASPM even on devices that claim not to support it.
WARNING: Forcing ASPM on may cause system lockups.
pcie_pme= [PCIE,PM] Native PCIe PME signaling options:
off Do not use native PCIe PME signaling.
force Use native PCIe PME signaling even if the BIOS refuses
to allow the kernel to control the relevant PCIe config
registers.
nomsi Do not use MSI for native PCIe PME signaling (this makes
all PCIe root ports use INTx for everything).
pcmv= [HW,PCMCIA] BadgePAD 4
pd. [PARIDE]
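
For illustration only (not part of this commit), a boot command line exercising the new options might contain:

    pci=nocrs pcie_pme=nomsi

Here pci=nocrs falls back to ignoring the ACPI host bridge windows even on a 2008-or-later BIOS, and pcie_pme=nomsi keeps native PME signaling but forces the root ports to use INTx for it.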


@ -126,8 +126,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
#define MB (1024*KB)
#define GB (1024*MB)
void
pcibios_align_resource(void *data, struct resource *res,
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
@ -184,7 +184,7 @@ pcibios_align_resource(void *data, struct resource *res,
}
}
res->start = start;
return start;
}
#undef KB
#undef MB


@ -616,15 +616,17 @@ char * __init pcibios_setup(char *str)
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might be mirrored at 0x0100-0x03ff..
*/
void pcibios_align_resource(void *data, struct resource *res,
resource_size_t size, resource_size_t align)
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO && start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
res->start = (start + align - 1) & ~(align - 1);
start = (start + align - 1) & ~(align - 1);
return start;
}
/**

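All of the pcibios_align_resource() hunks in this merge follow the same interface change: the hook now takes a const struct resource and returns a suggested start value instead of writing res->start itself. A minimal sketch of the new contract, reusing the ISA-alias rule shown above (illustrative only; not any one architecture's final code):

    #include <linux/types.h>
    #include <linux/ioport.h>
    #include <linux/pci.h>

    resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                           resource_size_t size, resource_size_t align)
    {
            resource_size_t start = res->start;

            /* Keep small I/O BARs clear of the mirrored 0x100-0x3ff range. */
            if ((res->flags & IORESOURCE_IO) && (start & 0x300))
                    start = (start + 0x3ff) & ~0x3ff;

            /* Only suggest a start; the caller's allocator applies it. */
            return start;
    }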

@ -41,18 +41,16 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return 0;
}
void
pcibios_align_resource(void *data, struct resource *res,
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
if (res->flags & IORESOURCE_IO) {
resource_size_t start = res->start;
resource_size_t start = res->start;
if (start & 0x300) {
start = (start + 0x3ff) & ~0x3ff;
res->start = start;
}
}
if ((res->flags & IORESOURCE_IO) && (start & 0x300))
start = (start + 0x3ff) & ~0x3ff;
return start;
}
int pcibios_enable_resources(struct pci_dev *dev, int mask)


@ -32,18 +32,16 @@
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might have be mirrored at 0x0100-0x03ff..
*/
void
pcibios_align_resource(void *data, struct resource *res,
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
if (res->flags & IORESOURCE_IO) {
resource_size_t start = res->start;
resource_size_t start = res->start;
if (start & 0x300) {
start = (start + 0x3ff) & ~0x3ff;
res->start = start;
}
}
if ((res->flags & IORESOURCE_IO) && (start & 0x300))
start = (start + 0x3ff) & ~0x3ff;
return start;
}


@ -98,6 +98,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }
static inline void pci_acpi_crs_quirks(void) { }
const char *acpi_get_sysname (void);
int acpi_request_vector (u32 int_type);


@ -320,9 +320,9 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
static void __devinit
pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
{
int i, j;
int i;
j = 0;
pci_bus_remove_resources(bus);
for (i = 0; i < ctrl->windows; i++) {
struct resource *res = &ctrl->window[i].resource;
/* HP's firmware has a hack to work around a Windows bug.
@ -330,13 +330,7 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
if ((res->flags & IORESOURCE_MEM) &&
(res->end - res->start < 16))
continue;
if (j >= PCI_BUS_NUM_RESOURCES) {
dev_warn(&bus->dev,
"ignoring host bridge window %pR (no space)\n",
res);
continue;
}
bus->resource[j++] = res;
pci_bus_add_resource(bus, res, 0);
}
}
@ -452,13 +446,12 @@ EXPORT_SYMBOL(pcibios_bus_to_resource);
static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
{
unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
struct resource *devr = &dev->resource[idx];
struct resource *devr = &dev->resource[idx], *busr;
if (!dev->bus)
return 0;
for (i=0; i<PCI_BUS_NUM_RESOURCES; i++) {
struct resource *busr = dev->bus->resource[i];
pci_bus_for_each_resource(dev->bus, busr, i) {
if (!busr || ((busr->flags ^ devr->flags) & type_mask))
continue;
if ((devr->start) && (devr->start >= busr->start) &&
@ -547,10 +540,11 @@ pcibios_disable_device (struct pci_dev *dev)
acpi_pci_irq_disable(dev);
}
void
pcibios_align_resource (void *data, struct resource *res,
resource_size_t
pcibios_align_resource (void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
return res->start;
}
/*

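The ia64 hunk above is one of several in this merge that replace direct bus->resource[i] indexing with the new pci_bus_for_each_resource() iterator, which also covers windows kept on the bus's new resource list. A minimal sketch of the pattern (illustrative helper, not code from the commit):

    #include <linux/pci.h>
    #include <linux/ioport.h>

    /* Find a bus window of the same type (I/O or memory) that contains @r. */
    static struct resource *find_parent_window(struct pci_bus *bus, struct resource *r)
    {
            unsigned int type_mask = IORESOURCE_IO | IORESOURCE_MEM;
            struct resource *busr;
            int i;

            pci_bus_for_each_resource(bus, busr, i) {
                    if (!busr || ((busr->flags ^ r->flags) & type_mask))
                            continue;       /* empty slot or wrong type */
                    if (r->start >= busr->start && r->end <= busr->end)
                            return busr;    /* window contains the resource */
            }
            return NULL;
    }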

@ -49,8 +49,8 @@ static int pci_initialized;
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might have be mirrored at 0x0100-0x03ff..
*/
void
pcibios_align_resource(void *data, struct resource *res,
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
@ -73,7 +73,7 @@ pcibios_align_resource(void *data, struct resource *res,
start = PCIBIOS_MIN_MEM + hose->mem_resource->start;
}
res->start = start;
return start;
}
static void __devinit pcibios_scanbus(struct pci_controller *hose)


@ -345,14 +345,13 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return pcibios_enable_resources(dev);
}
void pcibios_align_resource(void *data, struct resource *res,
resource_size_t size, resource_size_t align)
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
resource_size_t start = res->start;
/* We need to avoid collisions with `mirrored' VGA ports
and other strange ISA hardware, so we always want the
addresses kilobyte aligned. */
@ -363,8 +362,9 @@ void pcibios_align_resource(void *data, struct resource *res,
}
start = (start + 1024 - 1) & ~(1024 - 1);
res->start = start;
}
return start;
}
struct pci_ops titan_pci_ops = {


@ -31,9 +31,11 @@
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might have be mirrored at 0x0100-0x03ff..
*/
void pcibios_align_resource(void *data, struct resource *res,
resource_size_t size, resource_size_t align)
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
resource_size_t start = res->start;
#if 0
struct pci_dev *dev = data;
@ -47,14 +49,10 @@ void pcibios_align_resource(void *data, struct resource *res,
);
#endif
if (res->flags & IORESOURCE_IO) {
unsigned long start = res->start;
if ((res->flags & IORESOURCE_IO) && (start & 0x300))
start = (start + 0x3ff) & ~0x3ff;
if (start & 0x300) {
start = (start + 0x3ff) & ~0x3ff;
res->start = start;
}
}
return start;
}


@ -331,12 +331,10 @@ static int __init pci_check_direct(void)
static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
{
unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
struct resource *devr = &dev->resource[idx];
struct resource *devr = &dev->resource[idx], *busr;
if (dev->bus) {
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
struct resource *busr = dev->bus->resource[i];
pci_bus_for_each_resource(dev->bus, busr, i) {
if (!busr || (busr->flags ^ devr->flags) & type_mask)
continue;


@ -257,10 +257,10 @@ EXPORT_SYMBOL(pcibios_bus_to_resource);
* Since we are just checking candidates, don't use any fields other
* than res->start.
*/
void pcibios_align_resource(void *data, struct resource *res,
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t alignment)
{
resource_size_t mask, align;
resource_size_t mask, align, start = res->start;
DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n",
pci_name(((struct pci_dev *) data)),
@ -272,10 +272,10 @@ void pcibios_align_resource(void *data, struct resource *res,
/* Align to largest of MIN or input size */
mask = max(alignment, align) - 1;
res->start += mask;
res->start &= ~mask;
start += mask;
start &= ~mask;
/* The caller updates the end field, we don't. */
return start;
}


@ -1047,10 +1047,8 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
struct pci_dev *dev = bus->self;
for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
if ((res = bus->resource[i]) == NULL)
continue;
if (!res->flags)
pci_bus_for_each_resource(bus, res, i) {
if (!res || !res->flags)
continue;
if (i >= 3 && bus->self->transparent)
continue;
@ -1181,21 +1179,20 @@ static int skip_isa_ioresource_align(struct pci_dev *dev)
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might have be mirrored at 0x0100-0x03ff..
*/
void pcibios_align_resource(void *data, struct resource *res,
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
resource_size_t start = res->start;
if (skip_isa_ioresource_align(dev))
return;
if (start & 0x300) {
return start;
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
res->start = start;
}
}
return start;
}
EXPORT_SYMBOL(pcibios_align_resource);
@ -1278,9 +1275,8 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
pci_domain_nr(bus), bus->number);
for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
if ((res = bus->resource[i]) == NULL || !res->flags
|| res->start > res->end || res->parent)
pci_bus_for_each_resource(bus, res, i) {
if (!res || !res->flags || res->start > res->end || res->parent)
continue;
if (bus->parent == NULL)
pr = (res->flags & IORESOURCE_IO) ?


@ -222,6 +222,7 @@ static void __devinit quirk_final_uli5249(struct pci_dev *dev)
int i;
u8 *dummy;
struct pci_bus *bus = dev->bus;
struct resource *res;
resource_size_t end = 0;
for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES+3; i++) {
@ -230,13 +231,12 @@ static void __devinit quirk_final_uli5249(struct pci_dev *dev)
end = pci_resource_end(dev, i);
}
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
if ((bus->resource[i]) &&
(bus->resource[i]->flags & IORESOURCE_MEM)) {
if (bus->resource[i]->end == end)
dummy = ioremap(bus->resource[i]->start, 0x4);
pci_bus_for_each_resource(bus, res, i) {
if (res && res->flags & IORESOURCE_MEM) {
if (res->end == end)
dummy = ioremap(res->start, 0x4);
else
dummy = ioremap(bus->resource[i]->end - 3, 0x4);
dummy = ioremap(res->end - 3, 0x4);
if (dummy) {
in_8(dummy);
iounmap(dummy);


@ -148,8 +148,8 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*/
void pcibios_align_resource(void *data, struct resource *res,
resource_size_t size, resource_size_t align)
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
struct pci_channel *chan = dev->sysdata;
@ -171,7 +171,7 @@ void pcibios_align_resource(void *data, struct resource *res,
start = PCIBIOS_MIN_MEM + chan->mem_resource->start;
}
res->start = start;
return start;
}
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,


@ -722,9 +722,10 @@ void pcibios_update_irq(struct pci_dev *pdev, int irq)
{
}
void pcibios_align_resource(void *data, struct resource *res,
resource_size_t size, resource_size_t align)
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
return res->start;
}
int pcibios_enable_device(struct pci_dev *dev, int mask)


@ -768,9 +768,10 @@ char * __devinit pcibios_setup(char *str)
return str;
}
void pcibios_align_resource(void *data, struct resource *res,
resource_size_t size, resource_size_t align)
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
return res->start;
}
int pcibios_enable_device(struct pci_dev *pdev, int mask)


@ -29,6 +29,7 @@
#define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000
#define PCI_HAS_IO_ECS 0x40000
#define PCI_NOASSIGN_ROMS 0x80000
#define PCI_ROOT_NO_CRS 0x100000
extern unsigned int pci_probe;
extern unsigned long pirq_table_addr;


@ -15,6 +15,51 @@ struct pci_root_info {
int busnum;
};
static bool pci_use_crs = true;
static int __init set_use_crs(const struct dmi_system_id *id)
{
pci_use_crs = true;
return 0;
}
static const struct dmi_system_id pci_use_crs_table[] __initconst = {
/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
{
.callback = set_use_crs,
.ident = "IBM System x3800",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
},
},
{}
};
void __init pci_acpi_crs_quirks(void)
{
int year;
if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
pci_use_crs = false;
dmi_check_system(pci_use_crs_table);
/*
* If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
* takes precedence over anything we figured out above.
*/
if (pci_probe & PCI_ROOT_NO_CRS)
pci_use_crs = false;
else if (pci_probe & PCI_USE__CRS)
pci_use_crs = true;
printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
"if necessary, use \"pci=%s\" and report a bug\n",
pci_use_crs ? "Using" : "Ignoring",
pci_use_crs ? "nocrs" : "use_crs");
}
static acpi_status
resource_to_addr(struct acpi_resource *resource,
struct acpi_resource_address64 *addr)
@ -45,20 +90,6 @@ count_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;
}
static int
bus_has_transparent_bridge(struct pci_bus *bus)
{
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
u16 class = dev->class >> 8;
if (class == PCI_CLASS_BRIDGE_PCI && dev->transparent)
return true;
}
return false;
}
static void
align_resource(struct acpi_device *bridge, struct resource *res)
{
@ -92,12 +123,8 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
acpi_status status;
unsigned long flags;
struct resource *root;
int max_root_bus_resources = PCI_BUS_NUM_RESOURCES;
u64 start, end;
if (bus_has_transparent_bridge(info->bus))
max_root_bus_resources -= 3;
status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
return AE_OK;
@ -115,15 +142,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
start = addr.minimum + addr.translation_offset;
end = start + addr.address_length - 1;
if (info->res_num >= max_root_bus_resources) {
if (pci_probe & PCI_USE__CRS)
printk(KERN_WARNING "PCI: Failed to allocate "
"0x%lx-0x%lx from %s for %s due to _CRS "
"returning more than %d resource descriptors\n",
(unsigned long) start, (unsigned long) end,
root->name, info->name, max_root_bus_resources);
return AE_OK;
}
res = &info->res[info->res_num];
res->name = info->name;
@ -133,7 +151,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
res->child = NULL;
align_resource(info->bridge, res);
if (!(pci_probe & PCI_USE__CRS)) {
if (!pci_use_crs) {
dev_printk(KERN_DEBUG, &info->bridge->dev,
"host bridge window %pR (ignored)\n", res);
return AE_OK;
@ -143,7 +161,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
dev_err(&info->bridge->dev,
"can't allocate host bridge window %pR\n", res);
} else {
info->bus->resource[info->res_num] = res;
pci_bus_add_resource(info->bus, res, 0);
info->res_num++;
if (addr.translation_offset)
dev_info(&info->bridge->dev, "host bridge window %pR "
@ -164,10 +182,8 @@ get_current_resources(struct acpi_device *device, int busnum,
struct pci_root_info info;
size_t size;
if (!(pci_probe & PCI_USE__CRS))
dev_info(&device->dev,
"ignoring host bridge windows from ACPI; "
"boot with \"pci=use_crs\" to use them\n");
if (pci_use_crs)
pci_bus_remove_resources(bus);
info.bridge = device;
info.bus = bus;

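The setup path above leans on the new list-backed bus resource API: pci_bus_remove_resources() drops the default windows and pci_bus_add_resource() appends each accepted _CRS window, with no PCI_BUS_NUM_RESOURCES limit. A condensed sketch of that pairing (hypothetical helper and window array, for illustration):

    #include <linux/pci.h>
    #include <linux/ioport.h>

    /* Replace a root bus's default windows with firmware-provided ones. */
    static void set_root_windows(struct pci_bus *bus, struct resource *win, int nr)
    {
            int i;

            pci_bus_remove_resources(bus);                  /* forget the defaults */
            for (i = 0; i < nr; i++)
                    pci_bus_add_resource(bus, &win[i], 0);  /* flags = 0 */
    }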

@ -36,13 +36,14 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
b->number);
pci_bus_remove_resources(b);
info = &pci_root_info[i];
for (j = 0; j < info->res_num; j++) {
struct resource *res;
struct resource *root;
res = &info->res[j];
b->resource[j] = res;
pci_bus_add_resource(b, res, 0);
if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
else


@ -2,8 +2,7 @@
/*
* sub bus (transparent) will use entres from 3 to store extra from
* root, so need to make sure we have enough slot there, Should we
* increase PCI_BUS_NUM_RESOURCES?
* root, so need to make sure we have enough slot there.
*/
#define RES_NUM 16
struct pci_root_info {


@ -520,6 +520,9 @@ char * __devinit pcibios_setup(char *str)
} else if (!strcmp(str, "use_crs")) {
pci_probe |= PCI_USE__CRS;
return NULL;
} else if (!strcmp(str, "nocrs")) {
pci_probe |= PCI_ROOT_NO_CRS;
return NULL;
} else if (!strcmp(str, "earlydump")) {
pci_early_dump_regs = 1;
return NULL;


@ -60,22 +60,20 @@ skip_isa_ioresource_align(struct pci_dev *dev) {
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might have be mirrored at 0x0100-0x03ff..
*/
void
pcibios_align_resource(void *data, struct resource *res,
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
resource_size_t start = res->start;
if (skip_isa_ioresource_align(dev))
return;
if (start & 0x300) {
return start;
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
res->start = start;
}
}
return start;
}
EXPORT_SYMBOL(pcibios_align_resource);


@ -590,6 +590,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
case PCI_DEVICE_ID_INTEL_ICH10_1:
case PCI_DEVICE_ID_INTEL_ICH10_2:
case PCI_DEVICE_ID_INTEL_ICH10_3:
case PCI_DEVICE_ID_INTEL_CPT_LPC1:
case PCI_DEVICE_ID_INTEL_CPT_LPC2:
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;


@ -303,22 +303,17 @@ static void __init pci_mmcfg_check_end_bus_number(void)
{
struct pci_mmcfg_region *cfg, *cfgx;
/* last one*/
cfg = list_entry(pci_mmcfg_list.prev, typeof(*cfg), list);
if (cfg)
if (cfg->end_bus < cfg->start_bus)
cfg->end_bus = 255;
if (list_is_singular(&pci_mmcfg_list))
return;
/* don't overlap please */
/* Fixup overlaps */
list_for_each_entry(cfg, &pci_mmcfg_list, list) {
if (cfg->end_bus < cfg->start_bus)
cfg->end_bus = 255;
/* Don't access the list head ! */
if (cfg->list.next == &pci_mmcfg_list)
break;
cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
if (cfg != cfgx && cfg->end_bus >= cfgx->start_bus)
if (cfg->end_bus >= cfgx->start_bus)
cfg->end_bus = cfgx->start_bus - 1;
}
}

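The rewritten loop above trims every MMCONFIG segment against the one that follows it. A self-contained toy example of the same fixup rule (plain C with made-up bus numbers, not kernel code):

    #include <stdio.h>

    struct seg { int start_bus, end_bus; };

    int main(void)
    {
            /* Sorted MMCONFIG segments; the second one starts at bus 0x40. */
            struct seg s[] = { { 0x00, 0xff }, { 0x40, 0xff } };
            int i, n = 2;

            for (i = 0; i < n; i++) {
                    if (s[i].end_bus < s[i].start_bus)
                            s[i].end_bus = 255;                     /* broken end bus */
                    if (i + 1 < n && s[i].end_bus >= s[i + 1].start_bus)
                            s[i].end_bus = s[i + 1].start_bus - 1;  /* trim overlap */
            }
            printf("seg0: %02x-%02x, seg1: %02x-%02x\n",
                   s[0].start_bus, s[0].end_bus, s[1].start_bus, s[1].end_bus);
            return 0;       /* prints seg0: 00-3f, seg1: 40-ff */
    }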

@ -69,26 +69,25 @@ static int pci_bus_count;
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might have be mirrored at 0x0100-0x03ff..
*/
void
pcibios_align_resource(void *data, struct resource *res, resource_size_t size,
resource_size_t align)
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
resource_size_t start = res->start;
if (size > 0x100) {
printk(KERN_ERR "PCI: I/O Region %s/%d too large"
" (%ld bytes)\n", pci_name(dev),
dev->resource - res, size);
}
if (start & 0x300) {
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
res->start = start;
}
}
return start;
}
int


@ -76,12 +76,9 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
* evgpe - GPE handling and dispatch
*/
acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
u8 type);
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
acpi_status
acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
u8 write_to_hardware);
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
@ -121,9 +118,6 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
acpi_status
acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type);
acpi_status
acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info);


@ -426,6 +426,8 @@ struct acpi_gpe_event_info {
struct acpi_gpe_register_info *register_info; /* Backpointer to register info */
u8 flags; /* Misc info about this GPE */
u8 gpe_number; /* This GPE */
u8 runtime_count;
u8 wakeup_count;
};
/* Information about a GPE register pair, one per each status/enable pair in an array */

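These two counters carry the new reference-counting scheme: the GPE is programmed into hardware on the 0->1 transition of runtime_count and cleared on the 1->0 transition, as the evxfevnt.c changes later in this merge show. A stripped-down sketch of the idiom (hypothetical names, illustration only):

    /* Hypothetical illustration of the runtime_count transitions. */
    struct gpe {
            unsigned char runtime_count;
    };

    static void hw_enable(struct gpe *g)  { /* program the enable register */ }
    static void hw_disable(struct gpe *g) { /* clear the enable register   */ }

    static void gpe_get(struct gpe *g)
    {
            if (++g->runtime_count == 1)    /* first user: enable in hardware */
                    hw_enable(g);
    }

    static void gpe_put(struct gpe *g)
    {
            if (g->runtime_count && --g->runtime_count == 0)
                    hw_disable(g);          /* last user gone: disable */
    }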

@ -287,8 +287,10 @@ struct acpi_object_buffer_field {
struct acpi_object_notify_handler {
ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */
u32 handler_type;
acpi_notify_handler handler;
void *context;
struct acpi_object_notify_handler *next;
};
struct acpi_object_addr_handler {


@ -52,56 +52,11 @@ ACPI_MODULE_NAME("evgpe")
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
/*******************************************************************************
*
* FUNCTION: acpi_ev_set_gpe_type
*
* PARAMETERS: gpe_event_info - GPE to set
* Type - New type
*
* RETURN: Status
*
* DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run)
*
******************************************************************************/
acpi_status
acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_set_gpe_type);
/* Validate type and update register enable masks */
switch (type) {
case ACPI_GPE_TYPE_WAKE:
case ACPI_GPE_TYPE_RUNTIME:
case ACPI_GPE_TYPE_WAKE_RUN:
break;
default:
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Disable the GPE if currently enabled */
status = acpi_ev_disable_gpe(gpe_event_info);
/* Clear the type bits and insert the new Type */
gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK;
gpe_event_info->flags |= type;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_update_gpe_enable_masks
*
* PARAMETERS: gpe_event_info - GPE to update
* Type - What to do: ACPI_GPE_DISABLE or
* ACPI_GPE_ENABLE
*
* RETURN: Status
*
@ -110,8 +65,7 @@ acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
******************************************************************************/
acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
u8 type)
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
{
struct acpi_gpe_register_info *gpe_register_info;
u8 register_bit;
@ -127,37 +81,14 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
(1 <<
(gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
/* 1) Disable case. Simply clear all enable bits */
ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
if (type == ACPI_GPE_DISABLE) {
ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
register_bit);
ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
return_ACPI_STATUS(AE_OK);
}
/* 2) Enable case. Set/Clear the appropriate enable bits */
switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
case ACPI_GPE_TYPE_WAKE:
ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
break;
case ACPI_GPE_TYPE_RUNTIME:
ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
register_bit);
if (gpe_event_info->runtime_count)
ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
break;
case ACPI_GPE_TYPE_WAKE_RUN:
if (gpe_event_info->wakeup_count)
ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
break;
default:
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
return_ACPI_STATUS(AE_OK);
}
@ -167,8 +98,6 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
* FUNCTION: acpi_ev_enable_gpe
*
* PARAMETERS: gpe_event_info - GPE to enable
* write_to_hardware - Enable now, or just mark data structs
* (WAKE GPEs should be deferred)
*
* RETURN: Status
*
@ -176,9 +105,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
*
******************************************************************************/
acpi_status
acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
u8 write_to_hardware)
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
@ -186,47 +113,20 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
/* Make sure HW enable masks are updated */
status =
acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE);
if (ACPI_FAILURE(status)) {
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
if (ACPI_FAILURE(status))
return_ACPI_STATUS(status);
}
/* Mark wake-enabled or HW enable, or both */
switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
case ACPI_GPE_TYPE_WAKE:
if (gpe_event_info->runtime_count) {
/* Clear the GPE (of stale events), then enable it */
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status))
return_ACPI_STATUS(status);
ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
break;
case ACPI_GPE_TYPE_WAKE_RUN:
ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
/*lint -fallthrough */
case ACPI_GPE_TYPE_RUNTIME:
ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
if (write_to_hardware) {
/* Clear the GPE (of stale events), then enable it */
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Enable the requested runtime GPE */
status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
}
break;
default:
return_ACPI_STATUS(AE_BAD_PARAMETER);
/* Enable the requested runtime GPE */
status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
}
return_ACPI_STATUS(AE_OK);
@ -252,34 +152,9 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
/* Make sure HW enable masks are updated */
status =
acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE);
if (ACPI_FAILURE(status)) {
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
if (ACPI_FAILURE(status))
return_ACPI_STATUS(status);
}
/* Clear the appropriate enabled flags for this GPE */
switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
case ACPI_GPE_TYPE_WAKE:
ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
break;
case ACPI_GPE_TYPE_WAKE_RUN:
ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
/* fallthrough */
case ACPI_GPE_TYPE_RUNTIME:
/* Disable the requested runtime GPE */
ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
break;
default:
break;
}
/*
* Even if we don't know the GPE type, make sure that we always
@ -521,7 +396,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
/* Set the GPE flags for return to enabled state */
(void)acpi_ev_enable_gpe(gpe_event_info, FALSE);
(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
/*
* Take a snapshot of the GPE info for this level - we copy the info to


@ -258,7 +258,6 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
u32 gpe_number;
char name[ACPI_NAME_SIZE + 1];
u8 type;
acpi_status status;
ACPI_FUNCTION_TRACE(ev_save_method_info);
@ -325,26 +324,20 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
/*
* Now we can add this information to the gpe_event_info block for use
* during dispatch of this GPE. Default type is RUNTIME, although this may
* change when the _PRW methods are executed later.
* during dispatch of this GPE.
*/
gpe_event_info =
&gpe_block->event_info[gpe_number - gpe_block->block_base_number];
gpe_event_info->flags = (u8)
(type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);
gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD);
gpe_event_info->dispatch.method_node =
(struct acpi_namespace_node *)obj_handle;
/* Update enable mask, but don't enable the HW GPE as of yet */
status = acpi_ev_enable_gpe(gpe_event_info, FALSE);
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"Registered GPE method %s as GPE number 0x%.2X\n",
name, gpe_number));
return_ACPI_STATUS(status);
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
@ -454,20 +447,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
gpe_block->
block_base_number];
/* Mark GPE for WAKE-ONLY but WAKE_DISABLED */
gpe_event_info->flags &=
~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);
status =
acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
status =
acpi_ev_update_gpe_enable_masks(gpe_event_info,
ACPI_GPE_DISABLE);
gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
}
cleanup:
@ -989,7 +969,6 @@ acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_gpe_block_info *gpe_block)
{
acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_gpe_walk_info gpe_info;
u32 wake_gpe_count;
@ -1019,42 +998,50 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
gpe_info.gpe_block = gpe_block;
gpe_info.gpe_device = gpe_device;
status =
acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
acpi_ev_match_prw_and_gpe, NULL,
&gpe_info, NULL);
}
/*
* Enable all GPEs in this block that have these attributes:
* 1) are "runtime" or "run/wake" GPEs, and
* 2) have a corresponding _Lxx or _Exx method
*
* Any other GPEs within this block must be enabled via the
* acpi_enable_gpe() external interface.
* Enable all GPEs that have a corresponding method and aren't
* capable of generating wakeups. Any other GPEs within this block
* must be enabled via the acpi_enable_gpe() interface.
*/
wake_gpe_count = 0;
gpe_enabled_count = 0;
if (gpe_device == acpi_gbl_fadt_gpe_device)
gpe_device = NULL;
for (i = 0; i < gpe_block->register_count; i++) {
for (j = 0; j < 8; j++) {
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
acpi_status status;
acpi_size gpe_index;
int gpe_number;
/* Get the info block for this particular GPE */
gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
gpe_event_info = &gpe_block->event_info[((acpi_size) i *
ACPI_GPE_REGISTER_WIDTH)
+ j];
if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_METHOD) &&
(gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
gpe_enabled_count++;
}
if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
wake_gpe_count++;
if (acpi_gbl_leave_wake_gpes_disabled)
continue;
}
if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD))
continue;
gpe_number = gpe_index + gpe_block->block_base_number;
status = acpi_enable_gpe(gpe_device, gpe_number,
ACPI_GPE_TYPE_RUNTIME);
if (ACPI_FAILURE(status))
ACPI_ERROR((AE_INFO,
"Failed to enable GPE %02X\n",
gpe_number));
else
gpe_enabled_count++;
}
}
@ -1062,15 +1049,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
"Found %u Wake, Enabled %u Runtime GPEs in this block\n",
wake_gpe_count, gpe_enabled_count));
/* Enable all valid runtime GPEs found above */
status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block, NULL);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
gpe_block));
}
return_ACPI_STATUS(status);
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************


@ -259,9 +259,15 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
handler_obj = notify_info->notify.handler_obj;
if (handler_obj) {
handler_obj->notify.handler(notify_info->notify.node,
notify_info->notify.value,
handler_obj->notify.context);
struct acpi_object_notify_handler *notifier;
notifier = &handler_obj->notify;
while (notifier) {
notifier->handler(notify_info->notify.node,
notify_info->notify.value,
notifier->context);
notifier = notifier->next;
}
}
/* All done with the info object */


@ -216,6 +216,72 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
/*******************************************************************************
*
* FUNCTION: acpi_populate_handler_object
*
* PARAMETERS: handler_obj - Handler object to populate
* handler_type - The type of handler:
* ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
* ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
* ACPI_ALL_NOTIFY: both system and device
* handler - Address of the handler
* context - Value passed to the handler on each GPE
* next - Address of a handler object to link to
*
* RETURN: None
*
* DESCRIPTION: Populate a handler object.
*
******************************************************************************/
static void
acpi_populate_handler_object(struct acpi_object_notify_handler *handler_obj,
u32 handler_type,
acpi_notify_handler handler, void *context,
struct acpi_object_notify_handler *next)
{
handler_obj->handler_type = handler_type;
handler_obj->handler = handler;
handler_obj->context = context;
handler_obj->next = next;
}
/*******************************************************************************
*
* FUNCTION: acpi_add_handler_object
*
* PARAMETERS: parent_obj - Parent of the new object
* handler - Address of the handler
* context - Value passed to the handler on each GPE
*
* RETURN: Status
*
* DESCRIPTION: Create a new handler object and populate it.
*
******************************************************************************/
static acpi_status
acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj,
acpi_notify_handler handler, void *context)
{
struct acpi_object_notify_handler *handler_obj;
/* The parent must not be a device notify handler object. */
if (parent_obj->handler_type & ACPI_DEVICE_NOTIFY)
return AE_BAD_PARAMETER;
handler_obj = ACPI_ALLOCATE_ZEROED(sizeof(*handler_obj));
if (!handler_obj)
return AE_NO_MEMORY;
acpi_populate_handler_object(handler_obj,
ACPI_SYSTEM_NOTIFY,
handler, context,
parent_obj->next);
parent_obj->next = handler_obj;
return AE_OK;
}
/*******************************************************************************
*
* FUNCTION: acpi_install_notify_handler
@ -316,15 +382,32 @@ acpi_install_notify_handler(acpi_handle device,
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
/* Object exists - make sure there's no handler */
/* Object exists. */
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
obj_desc->common_notify.system_notify) ||
((handler_type & ACPI_DEVICE_NOTIFY) &&
obj_desc->common_notify.device_notify)) {
/* For a device notify, make sure there's no handler. */
if ((handler_type & ACPI_DEVICE_NOTIFY) &&
obj_desc->common_notify.device_notify) {
status = AE_ALREADY_EXISTS;
goto unlock_and_exit;
}
/* System notifies may have more handlers installed. */
notify_obj = obj_desc->common_notify.system_notify;
if ((handler_type & ACPI_SYSTEM_NOTIFY) && notify_obj) {
struct acpi_object_notify_handler *parent_obj;
if (handler_type & ACPI_DEVICE_NOTIFY) {
status = AE_ALREADY_EXISTS;
goto unlock_and_exit;
}
parent_obj = &notify_obj->notify;
status = acpi_add_handler_object(parent_obj,
handler,
context);
goto unlock_and_exit;
}
} else {
/* Create a new object */
@ -356,9 +439,10 @@ acpi_install_notify_handler(acpi_handle device,
goto unlock_and_exit;
}
notify_obj->notify.node = node;
notify_obj->notify.handler = handler;
notify_obj->notify.context = context;
acpi_populate_handler_object(&notify_obj->notify,
handler_type,
handler, context,
NULL);
if (handler_type & ACPI_SYSTEM_NOTIFY) {
obj_desc->common_notify.system_notify = notify_obj;
@ -418,6 +502,10 @@ acpi_remove_notify_handler(acpi_handle device,
goto exit;
}
/* Make sure all deferred tasks are completed */
acpi_os_wait_events_complete(NULL);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto exit;
@ -445,15 +533,6 @@ acpi_remove_notify_handler(acpi_handle device,
goto unlock_and_exit;
}
/* Make sure all deferred tasks are completed */
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
acpi_os_wait_events_complete(NULL);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto exit;
}
if (handler_type & ACPI_SYSTEM_NOTIFY) {
acpi_gbl_system_notify.node = NULL;
acpi_gbl_system_notify.handler = NULL;
@ -488,28 +567,60 @@ acpi_remove_notify_handler(acpi_handle device,
/* Object exists - make sure there's an existing handler */
if (handler_type & ACPI_SYSTEM_NOTIFY) {
struct acpi_object_notify_handler *handler_obj;
struct acpi_object_notify_handler *parent_obj;
notify_obj = obj_desc->common_notify.system_notify;
if (!notify_obj) {
status = AE_NOT_EXIST;
goto unlock_and_exit;
}
if (notify_obj->notify.handler != handler) {
handler_obj = &notify_obj->notify;
parent_obj = NULL;
while (handler_obj->handler != handler) {
if (handler_obj->next) {
parent_obj = handler_obj;
handler_obj = handler_obj->next;
} else {
break;
}
}
if (handler_obj->handler != handler) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
/* Make sure all deferred tasks are completed */
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
acpi_os_wait_events_complete(NULL);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto exit;
/*
* Remove the handler. There are three possible cases.
* First, we may need to remove a non-embedded object.
* Second, we may need to remove the embedded object's
* handler data, while non-embedded objects exist.
* Finally, we may need to remove the embedded object
* entirely along with its container.
*/
if (parent_obj) {
/* Non-embedded object is being removed. */
parent_obj->next = handler_obj->next;
ACPI_FREE(handler_obj);
} else if (notify_obj->notify.next) {
/*
* The handler matches the embedded object, but
* there are more handler objects in the list.
* Replace the embedded object's data with the
* first next object's data and remove that
* object.
*/
parent_obj = &notify_obj->notify;
handler_obj = notify_obj->notify.next;
*parent_obj = *handler_obj;
ACPI_FREE(handler_obj);
} else {
/* No more handler objects in the list. */
obj_desc->common_notify.system_notify = NULL;
acpi_ut_remove_reference(notify_obj);
}
/* Remove the handler */
obj_desc->common_notify.system_notify = NULL;
acpi_ut_remove_reference(notify_obj);
}
if (handler_type & ACPI_DEVICE_NOTIFY) {
@ -523,14 +634,6 @@ acpi_remove_notify_handler(acpi_handle device,
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
/* Make sure all deferred tasks are completed */
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
acpi_os_wait_events_complete(NULL);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto exit;
}
/* Remove the handler */
obj_desc->common_notify.device_notify = NULL;
@ -617,13 +720,6 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
/* Disable the GPE before installing the handler */
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
/* Install the handler */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@ -707,13 +803,6 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
goto unlock_and_exit;
}
/* Disable the GPE before removing the handler */
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
/* Make sure all deferred tasks are completed */
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);

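With the handler list above in place, more than one ACPI_SYSTEM_NOTIFY handler can be attached to a single device. A minimal usage sketch of the existing install/remove interface (placeholder handler bodies, for illustration):

    #include <acpi/acpi.h>   /* acpi_install_notify_handler() and friends */

    static void first_notify(acpi_handle handle, u32 event, void *context)
    {
            /* react to the system notify value in 'event' */
    }

    static void second_notify(acpi_handle handle, u32 event, void *context)
    {
            /* a second listener on the same device */
    }

    static void install_both(acpi_handle dev)
    {
            /* Both calls now succeed; the second handler is chained to the first. */
            acpi_install_notify_handler(dev, ACPI_SYSTEM_NOTIFY, first_notify, NULL);
            acpi_install_notify_handler(dev, ACPI_SYSTEM_NOTIFY, second_notify, NULL);

            /* Removal matches on the handler pointer. */
            acpi_remove_notify_handler(dev, ACPI_SYSTEM_NOTIFY, second_notify);
            acpi_remove_notify_handler(dev, ACPI_SYSTEM_NOTIFY, first_notify);
    }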

@ -201,67 +201,25 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
/*******************************************************************************
*
* FUNCTION: acpi_set_gpe_type
* FUNCTION: acpi_set_gpe
*
* PARAMETERS: gpe_device - Parent GPE Device
* gpe_number - GPE level within the GPE block
* Type - New GPE type
*
* RETURN: Status
*
* DESCRIPTION: Set the type of an individual GPE
*
******************************************************************************/
acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
ACPI_FUNCTION_TRACE(acpi_set_gpe_type);
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
if (!gpe_event_info) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) {
return_ACPI_STATUS(AE_OK);
}
/* Set the new type (will disable GPE if currently enabled) */
status = acpi_ev_set_gpe_type(gpe_event_info, type);
unlock_and_exit:
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_set_gpe_type)
/*******************************************************************************
*
* FUNCTION: acpi_enable_gpe
*
* PARAMETERS: gpe_device - Parent GPE Device
* gpe_number - GPE level within the GPE block
* Flags - Just enable, or also wake enable?
* action - Enable or disable
* Called from ISR or not
*
* RETURN: Status
*
* DESCRIPTION: Enable an ACPI event (general purpose)
* DESCRIPTION: Enable or disable an ACPI event (general purpose)
*
******************************************************************************/
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
acpi_status status = AE_OK;
acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
ACPI_FUNCTION_TRACE(acpi_set_gpe);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@ -273,15 +231,90 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
goto unlock_and_exit;
}
/* Perform the enable */
/* Perform the action */
status = acpi_ev_enable_gpe(gpe_event_info, TRUE);
switch (action) {
case ACPI_GPE_ENABLE:
status = acpi_ev_enable_gpe(gpe_event_info);
break;
case ACPI_GPE_DISABLE:
status = acpi_ev_disable_gpe(gpe_event_info);
break;
default:
ACPI_ERROR((AE_INFO, "Invalid action\n"));
status = AE_BAD_PARAMETER;
break;
}
unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_set_gpe)
/*******************************************************************************
*
* FUNCTION: acpi_enable_gpe
*
* PARAMETERS: gpe_device - Parent GPE Device
* gpe_number - GPE level within the GPE block
* type - Purpose the GPE will be used for
*
* RETURN: Status
*
* DESCRIPTION: Take a reference to a GPE and enable it if necessary
*
******************************************************************************/
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
{
acpi_status status = AE_OK;
acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
return_ACPI_STATUS(AE_BAD_PARAMETER);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
if (!gpe_event_info) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
if (type & ACPI_GPE_TYPE_RUNTIME) {
if (++gpe_event_info->runtime_count == 1) {
status = acpi_ev_enable_gpe(gpe_event_info);
if (ACPI_FAILURE(status))
gpe_event_info->runtime_count--;
}
}
if (type & ACPI_GPE_TYPE_WAKE) {
if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
/*
* Wake-up GPEs are only enabled right prior to putting the
* system into a sleep state.
*/
if (++gpe_event_info->wakeup_count == 1)
acpi_ev_update_gpe_enable_masks(gpe_event_info);
}
unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
/*******************************************************************************
@ -290,15 +323,14 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
*
* PARAMETERS: gpe_device - Parent GPE Device
* gpe_number - GPE level within the GPE block
* Flags - Just disable, or also wake disable?
* Called from ISR or not
* type - Purpose the GPE won't be used for any more
*
* RETURN: Status
*
* DESCRIPTION: Disable an ACPI event (general purpose)
* DESCRIPTION: Release a reference to a GPE and disable it if necessary
*
******************************************************************************/
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
{
acpi_status status = AE_OK;
acpi_cpu_flags flags;
@ -306,6 +338,9 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
ACPI_FUNCTION_TRACE(acpi_disable_gpe);
if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
return_ACPI_STATUS(AE_BAD_PARAMETER);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@ -315,13 +350,24 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
goto unlock_and_exit;
}
status = acpi_ev_disable_gpe(gpe_event_info);
if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) {
if (--gpe_event_info->runtime_count == 0)
status = acpi_ev_disable_gpe(gpe_event_info);
}
if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) {
/*
* Wake-up GPEs are not enabled after leaving system sleep
* states, so we don't need to disable them here.
*/
if (--gpe_event_info->wakeup_count == 0)
acpi_ev_update_gpe_enable_masks(gpe_event_info);
}
unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
/*******************************************************************************

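Taken together, drivers now get a reference-counted acpi_enable_gpe()/acpi_disable_gpe() pair plus acpi_set_gpe() for temporarily masking a GPE they already hold a reference on, which is exactly how the button and EC hunks below use them. A condensed usage sketch built from the calls visible in this merge:

    #include <acpi/acpi.h>

    static void gpe_usage_sketch(acpi_handle gpe_device, u32 gpe_number)
    {
            /* Take a runtime reference; the GPE is enabled on the first one. */
            acpi_enable_gpe(gpe_device, gpe_number, ACPI_GPE_TYPE_RUNTIME);

            /* Mask it in hardware without touching the reference count... */
            acpi_set_gpe(gpe_device, gpe_number, ACPI_GPE_DISABLE);
            /* ...and unmask it again once it is safe. */
            acpi_set_gpe(gpe_device, gpe_number, ACPI_GPE_ENABLE);

            /* Drop the reference; the GPE is disabled when the count hits zero. */
            acpi_disable_gpe(gpe_device, gpe_number, ACPI_GPE_TYPE_RUNTIME);
    }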

@ -422,11 +422,10 @@ static int acpi_button_add(struct acpi_device *device)
if (device->wakeup.flags.valid) {
/* Button's GPE is run-wake GPE */
acpi_set_gpe_type(device->wakeup.gpe_device,
device->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE_RUN);
acpi_enable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number);
device->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE_RUN);
device->wakeup.run_wake_count++;
device->wakeup.state.enabled = 1;
}
@ -446,6 +445,14 @@ static int acpi_button_remove(struct acpi_device *device, int type)
{
struct acpi_button *button = acpi_driver_data(device);
if (device->wakeup.flags.valid) {
acpi_disable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE_RUN);
device->wakeup.run_wake_count--;
device->wakeup.state.enabled = 0;
}
acpi_button_remove_fs(device);
input_unregister_device(button->input);
kfree(button);


@ -307,7 +307,11 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
pr_debug(PREFIX "transaction start\n");
/* disable GPE during transaction if storm is detected */
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
acpi_disable_gpe(NULL, ec->gpe);
/*
* It has to be disabled at the hardware level regardless of the
* GPE reference counting, so that it doesn't trigger.
*/
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
}
status = acpi_ec_transaction_unlocked(ec, t);
@ -316,8 +320,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
ec_check_sci_sync(ec, acpi_ec_read_status(ec));
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
msleep(1);
/* it is safe to enable GPE outside of transaction */
acpi_enable_gpe(NULL, ec->gpe);
/*
* It is safe to enable the GPE outside of the transaction. Use
* acpi_set_gpe() for that, since we used it to disable the GPE
* above.
*/
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
pr_info(PREFIX "GPE storm detected, "
"transactions will use polling mode\n");
@ -788,8 +796,8 @@ static int ec_install_handlers(struct acpi_ec *ec)
&acpi_ec_gpe_handler, ec);
if (ACPI_FAILURE(status))
return -ENODEV;
acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
acpi_enable_gpe(NULL, ec->gpe);
acpi_enable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
status = acpi_install_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
@ -806,6 +814,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
} else {
acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler);
acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
return -ENODEV;
}
}
@ -816,6 +825,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
static void ec_remove_handlers(struct acpi_ec *ec)
{
acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
pr_err(PREFIX "failed to remove space handler\n");
@ -1057,16 +1067,16 @@ error:
static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state)
{
struct acpi_ec *ec = acpi_driver_data(device);
/* Stop using GPE */
acpi_disable_gpe(NULL, ec->gpe);
/* Stop using the GPE, but keep it reference counted. */
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
return 0;
}
static int acpi_ec_resume(struct acpi_device *device)
{
struct acpi_ec *ec = acpi_driver_data(device);
/* Enable use of GPE back */
acpi_enable_gpe(NULL, ec->gpe);
/* Enable the GPE again, but don't reference count it once more. */
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
return 0;
}


@ -36,8 +36,6 @@ static inline int acpi_debug_init(void) { return 0; }
int acpi_power_init(void);
int acpi_device_sleep_wake(struct acpi_device *dev,
int enable, int sleep_state, int dev_state);
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
int acpi_power_get_inferred_state(struct acpi_device *device);
int acpi_power_transition(struct acpi_device *device, int state);
extern int acpi_power_nocheck;


@ -26,7 +26,9 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/acpi.h>
#include <linux/pm_runtime.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@ -38,7 +40,13 @@ static int acpi_pci_unbind(struct acpi_device *device)
struct pci_dev *dev;
dev = acpi_get_pci_dev(device->handle);
if (!dev || !dev->subordinate)
if (!dev)
goto out;
device_set_run_wake(&dev->dev, false);
pci_acpi_remove_pm_notifier(device);
if (!dev->subordinate)
goto out;
acpi_pci_irq_del_prt(dev->subordinate);
@ -62,6 +70,10 @@ static int acpi_pci_bind(struct acpi_device *device)
if (!dev)
return 0;
pci_acpi_add_pm_notifier(device, dev);
if (device->wakeup.flags.run_wake)
device_set_run_wake(&dev->dev, true);
/*
* Install the 'bind' function to facilitate callbacks for
* children of the P2P bridge.


@ -30,6 +30,7 @@
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/acpi.h>
@ -528,6 +529,10 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
if (flags != base_flags)
acpi_pci_osc_support(root, flags);
pci_acpi_add_bus_pm_notifier(device, root->bus);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
return 0;
end:
@ -549,6 +554,9 @@ static int acpi_pci_root_remove(struct acpi_device *device, int type)
{
struct acpi_pci_root *root = acpi_driver_data(device);
device_set_run_wake(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
kfree(root);
return 0;
}
@ -558,6 +566,7 @@ static int __init acpi_pci_root_init(void)
if (acpi_pci_disabled)
return 0;
pci_acpi_crs_quirks();
if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
return -ENODEV;


@ -741,19 +741,40 @@ acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
return AE_OK;
}
static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
{
acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL;
int psw_error;
struct acpi_device_id button_device_ids[] = {
{"PNP0C0D", 0},
{"PNP0C0C", 0},
{"PNP0C0E", 0},
{"", 0},
};
acpi_status status;
acpi_event_status event_status;
device->wakeup.run_wake_count = 0;
device->wakeup.flags.notifier_present = 0;
/* Power button, Lid switch always enable wakeup */
if (!acpi_match_device_ids(device, button_device_ids)) {
device->wakeup.flags.run_wake = 1;
device->wakeup.flags.always_enabled = 1;
return;
}
status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number,
ACPI_NOT_ISR, &event_status);
if (status == AE_OK)
device->wakeup.flags.run_wake =
!!(event_status & ACPI_EVENT_FLAG_HANDLE);
}
static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
{
acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL;
int psw_error;
/* _PRW */
status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer);
@ -773,6 +794,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
device->wakeup.flags.valid = 1;
device->wakeup.prepare_count = 0;
acpi_bus_set_run_wake_flags(device);
/* Call _PSW/_DSW object to disable its ability to wake the sleeping
* system for the ACPI device with the _PRW object.
* The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW.
@ -784,10 +806,6 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"error in _DSW or _PSW evaluation\n"));
/* Power button, Lid switch always enable wakeup */
if (!acpi_match_device_ids(device, button_device_ids))
device->wakeup.flags.run_wake = 1;
end:
if (ACPI_FAILURE(status))
device->flags.wake_capable = 0;


@ -745,9 +745,18 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
return -ENODEV;
}
error = enable ?
acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
acpi_disable_wakeup_device_power(adev);
if (enable) {
error = acpi_enable_wakeup_device_power(adev,
acpi_target_sleep_state);
if (!error)
acpi_enable_gpe(adev->wakeup.gpe_device,
adev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE);
} else {
acpi_disable_gpe(adev->wakeup.gpe_device, adev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE);
error = acpi_disable_wakeup_device_power(adev);
}
if (!error)
dev_info(dev, "wake-up capability %s by ACPI\n",
enable ? "enabled" : "disabled");
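The rewrite above pairs the wake-power calls with acpi_enable_gpe()/acpi_disable_gpe() because GPE enabling is now reference counted, so a GPE shared by several wake-up devices stays armed while any user still needs it. A minimal sketch of that behaviour, assuming adev points at an ACPI device with valid _PRW wake-up data:

/* Two logical users of the same wake GPE: */
acpi_enable_gpe(adev->wakeup.gpe_device, adev->wakeup.gpe_number,
                ACPI_GPE_TYPE_WAKE);            /* count 1: GPE armed */
acpi_enable_gpe(adev->wakeup.gpe_device, adev->wakeup.gpe_number,
                ACPI_GPE_TYPE_WAKE);            /* count 2: hardware untouched */

acpi_disable_gpe(adev->wakeup.gpe_device, adev->wakeup.gpe_number,
                 ACPI_GPE_TYPE_WAKE);           /* count 1: still armed */
acpi_disable_gpe(adev->wakeup.gpe_device, adev->wakeup.gpe_number,
                 ACPI_GPE_TYPE_WAKE);           /* count 0: GPE disarmed */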


@ -387,10 +387,10 @@ static ssize_t counter_set(struct kobject *kobj,
if (index < num_gpes) {
if (!strcmp(buf, "disable\n") &&
(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_disable_gpe(handle, index);
result = acpi_set_gpe(handle, index, ACPI_GPE_DISABLE);
else if (!strcmp(buf, "enable\n") &&
!(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_enable_gpe(handle, index);
result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
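As used here, acpi_set_gpe() appears to be the non-reference-counted companion of acpi_enable_gpe()/acpi_disable_gpe(): it flips the enable bit directly, which is what this sysfs debug interface wants. A short illustration with the same handle and index as above:

acpi_set_gpe(handle, index, ACPI_GPE_DISABLE);  /* force the GPE off, ignoring use counts */
acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);   /* force it back on */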


@ -21,12 +21,12 @@
ACPI_MODULE_NAME("wakeup_devices")
/**
* acpi_enable_wakeup_device_prep - prepare wakeup devices
* @sleep_state: ACPI state
* Enable all wakup devices power if the devices' wakeup level
* is higher than requested sleep level
* acpi_enable_wakeup_device_prep - Prepare wake-up devices.
* @sleep_state: ACPI system sleep state.
*
* Enable all wake-up devices' power, unless the requested system sleep state is
* too deep.
*/
void acpi_enable_wakeup_device_prep(u8 sleep_state)
{
struct list_head *node, *next;
@ -36,9 +36,8 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state)
struct acpi_device,
wakeup_list);
if (!dev->wakeup.flags.valid ||
!dev->wakeup.state.enabled ||
(sleep_state > (u32) dev->wakeup.sleep_state))
if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
|| (sleep_state > (u32) dev->wakeup.sleep_state))
continue;
acpi_enable_wakeup_device_power(dev, sleep_state);
@ -46,9 +45,12 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state)
}
/**
* acpi_enable_wakeup_device - enable wakeup devices
* @sleep_state: ACPI state
* Enable all wakup devices's GPE
* acpi_enable_wakeup_device - Enable wake-up device GPEs.
* @sleep_state: ACPI system sleep state.
*
* Enable all wake-up devices' GPEs, with the assumption that
* acpi_disable_all_gpes() was executed before, so we don't need to disable any
* GPEs here.
*/
void acpi_enable_wakeup_device(u8 sleep_state)
{
@ -65,29 +67,22 @@ void acpi_enable_wakeup_device(u8 sleep_state)
if (!dev->wakeup.flags.valid)
continue;
/* If users want to disable run-wake GPE,
* we only disable it for wake and leave it for runtime
*/
if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count)
|| sleep_state > (u32) dev->wakeup.sleep_state) {
if (dev->wakeup.flags.run_wake) {
/* set_gpe_type will disable GPE, leave it like that */
acpi_set_gpe_type(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_RUNTIME);
}
|| sleep_state > (u32) dev->wakeup.sleep_state)
continue;
}
if (!dev->wakeup.flags.run_wake)
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number);
/* The wake-up power should have been enabled already. */
acpi_set_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_ENABLE);
}
}
/**
* acpi_disable_wakeup_device - disable devices' wakeup capability
* @sleep_state: ACPI state
* Disable all wakup devices's GPE and wakeup capability
* acpi_disable_wakeup_device - Disable devices' wakeup capability.
* @sleep_state: ACPI system sleep state.
*
* This function only affects devices with wakeup.state.enabled set, which means
* that it reverses the changes made by acpi_enable_wakeup_device_prep().
*/
void acpi_disable_wakeup_device(u8 sleep_state)
{
@ -97,30 +92,11 @@ void acpi_disable_wakeup_device(u8 sleep_state)
struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list);
if (!dev->wakeup.flags.valid)
if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
|| (sleep_state > (u32) dev->wakeup.sleep_state))
continue;
if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count)
|| sleep_state > (u32) dev->wakeup.sleep_state) {
if (dev->wakeup.flags.run_wake) {
acpi_set_gpe_type(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE_RUN);
/* Re-enable it, since set_gpe_type will disable it */
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number);
}
continue;
}
acpi_disable_wakeup_device_power(dev);
/* Never disable run-wake GPE */
if (!dev->wakeup.flags.run_wake) {
acpi_disable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number);
acpi_clear_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number, ACPI_NOT_ISR);
}
}
}
@ -134,13 +110,11 @@ int __init acpi_wakeup_device_init(void)
struct acpi_device,
wakeup_list);
/* In case user doesn't load button driver */
if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled)
if (!dev->wakeup.flags.always_enabled ||
dev->wakeup.state.enabled)
continue;
acpi_set_gpe_type(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE_RUN);
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number);
acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE);
dev->wakeup.state.enabled = 1;
}
mutex_unlock(&acpi_device_lock);


@ -109,7 +109,7 @@ config HISAX_16_3
config HISAX_TELESPCI
bool "Teles PCI"
depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
help
This enables HiSax support for the Teles PCI.
See <file:Documentation/isdn/README.HiSax> on how to configure it.
@ -237,7 +237,7 @@ config HISAX_MIC
config HISAX_NETJET
bool "NETjet card"
depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
help
This enables HiSax support for the NetJet from Traverse
Technologies.
@ -248,7 +248,7 @@ config HISAX_NETJET
config HISAX_NETJET_U
bool "NETspider U card"
depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
help
This enables HiSax support for the Netspider U interface ISDN card
from Traverse Technologies.
@ -287,7 +287,7 @@ config HISAX_HSTSAPHIR
config HISAX_BKM_A4T
bool "Telekom A4T card"
depends on PCI && PCI_LEGACY
depends on PCI
help
This enables HiSax support for the Telekom A4T card.
@ -297,7 +297,7 @@ config HISAX_BKM_A4T
config HISAX_SCT_QUADRO
bool "Scitel Quadro card"
depends on PCI && PCI_LEGACY
depends on PCI
help
This enables HiSax support for the Scitel Quadro card.
@ -316,7 +316,7 @@ config HISAX_GAZEL
config HISAX_HFC_PCI
bool "HFC PCI-Bus cards"
depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
help
This enables HiSax support for the HFC-S PCI 2BDS0 based cards.
@ -325,7 +325,7 @@ config HISAX_HFC_PCI
config HISAX_W6692
bool "Winbond W6692 based cards"
depends on PCI && PCI_LEGACY
depends on PCI
help
This enables HiSax support for Winbond W6692 based PCI ISDN cards.
@ -341,7 +341,7 @@ config HISAX_HFC_SX
config HISAX_ENTERNOW_PCI
bool "Formula-n enter:now PCI card"
depends on HISAX_NETJET && PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
depends on HISAX_NETJET && PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
help
This enables HiSax support for the Formula-n enter:now PCI
ISDN card.
@ -412,7 +412,7 @@ config HISAX_HFC4S8S
config HISAX_FRITZ_PCIPNP
tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)"
depends on PCI && PCI_LEGACY && EXPERIMENTAL
depends on PCI && EXPERIMENTAL
help
This enables the driver for the AVM Fritz!Card PCI,
Fritz!Card PCI v2 and Fritz!Card PnP.


@ -822,7 +822,7 @@ static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
#endif /* __ISAPNP__ */
#ifndef CONFIG_PCI_LEGACY
#ifndef CONFIG_PCI
static int __devinit avm_pci_setup(struct IsdnCardState *cs)
{
@ -835,7 +835,7 @@ static struct pci_dev *dev_avm __devinitdata = NULL;
static int __devinit avm_pci_setup(struct IsdnCardState *cs)
{
if ((dev_avm = pci_find_device(PCI_VENDOR_ID_AVM,
if ((dev_avm = hisax_find_pci_device(PCI_VENDOR_ID_AVM,
PCI_DEVICE_ID_AVM_A1, dev_avm))) {
if (pci_enable_device(dev_avm))
@ -864,7 +864,7 @@ static int __devinit avm_pci_setup(struct IsdnCardState *cs)
return (1);
}
#endif /* CONFIG_PCI_LEGACY */
#endif /* CONFIG_PCI */
int __devinit
setup_avm_pcipnp(struct IsdnCard *card)


@ -340,7 +340,7 @@ setup_bkm_a4t(struct IsdnCard *card)
} else
return (0);
while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
while ((dev_a4t = hisax_find_pci_device(PCI_VENDOR_ID_ZORAN,
PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr);
if (!ret)


@ -301,7 +301,7 @@ setup_sct_quadro(struct IsdnCard *card)
(sub_vendor_id != PCI_VENDOR_ID_BERKOM)))
return (0);
if (cs->subtyp == SCT_1) {
while ((dev_a8 = pci_find_device(PCI_VENDOR_ID_PLX,
while ((dev_a8 = hisax_find_pci_device(PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_9050, dev_a8))) {
sub_vendor_id = dev_a8->subsystem_vendor;


@ -1148,7 +1148,7 @@ static int __devinit setup_diva_isapnp(struct IsdnCard *card)
#endif /* ISAPNP */
#ifdef CONFIG_PCI_LEGACY
#ifdef CONFIG_PCI
static struct pci_dev *dev_diva __devinitdata = NULL;
static struct pci_dev *dev_diva_u __devinitdata = NULL;
static struct pci_dev *dev_diva201 __devinitdata = NULL;
@ -1159,21 +1159,21 @@ static int __devinit setup_diva_pci(struct IsdnCard *card)
struct IsdnCardState *cs = card->cs;
cs->subtyp = 0;
if ((dev_diva = pci_find_device(PCI_VENDOR_ID_EICON,
if ((dev_diva = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) {
if (pci_enable_device(dev_diva))
return(0);
cs->subtyp = DIVA_PCI;
cs->irq = dev_diva->irq;
cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2);
} else if ((dev_diva_u = pci_find_device(PCI_VENDOR_ID_EICON,
} else if ((dev_diva_u = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) {
if (pci_enable_device(dev_diva_u))
return(0);
cs->subtyp = DIVA_PCI;
cs->irq = dev_diva_u->irq;
cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2);
} else if ((dev_diva201 = pci_find_device(PCI_VENDOR_ID_EICON,
} else if ((dev_diva201 = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) {
if (pci_enable_device(dev_diva201))
return(0);
@ -1183,7 +1183,7 @@ static int __devinit setup_diva_pci(struct IsdnCard *card)
(ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096);
cs->hw.diva.cfg_reg =
(ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096);
} else if ((dev_diva202 = pci_find_device(PCI_VENDOR_ID_EICON,
} else if ((dev_diva202 = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) {
if (pci_enable_device(dev_diva202))
return(0);
@ -1229,14 +1229,14 @@ static int __devinit setup_diva_pci(struct IsdnCard *card)
return (1); /* card found */
}
#else /* if !CONFIG_PCI_LEGACY */
#else /* if !CONFIG_PCI */
static int __devinit setup_diva_pci(struct IsdnCard *card)
{
return (-1); /* card not found; continue search */
}
#endif /* CONFIG_PCI_LEGACY */
#endif /* CONFIG_PCI */
int __devinit
setup_diva(struct IsdnCard *card)


@ -1025,7 +1025,7 @@ setup_elsa_pcmcia(struct IsdnCard *card)
cs->irq);
}
#ifdef CONFIG_PCI_LEGACY
#ifdef CONFIG_PCI
static struct pci_dev *dev_qs1000 __devinitdata = NULL;
static struct pci_dev *dev_qs3000 __devinitdata = NULL;
@ -1035,7 +1035,7 @@ setup_elsa_pci(struct IsdnCard *card)
struct IsdnCardState *cs = card->cs;
cs->subtyp = 0;
if ((dev_qs1000 = pci_find_device(PCI_VENDOR_ID_ELSA,
if ((dev_qs1000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA,
PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) {
if (pci_enable_device(dev_qs1000))
return(0);
@ -1043,7 +1043,7 @@ setup_elsa_pci(struct IsdnCard *card)
cs->irq = dev_qs1000->irq;
cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1);
cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3);
} else if ((dev_qs3000 = pci_find_device(PCI_VENDOR_ID_ELSA,
} else if ((dev_qs3000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA,
PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) {
if (pci_enable_device(dev_qs3000))
return(0);
@ -1093,7 +1093,7 @@ setup_elsa_pci(struct IsdnCard *card)
{
return (1);
}
#endif /* CONFIG_PCI_LEGACY */
#endif /* CONFIG_PCI */
static int __devinit
setup_elsa_common(struct IsdnCard *card)


@ -406,7 +406,7 @@ setup_enternow_pci(struct IsdnCard *card)
for ( ;; )
{
if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
ret = en_pci_probe(dev_netjet, cs);
if (!ret)


@ -531,7 +531,7 @@ setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs)
return (0);
}
#ifdef CONFIG_PCI_LEGACY
#ifdef CONFIG_PCI
static struct pci_dev *dev_tel __devinitdata = NULL;
static int __devinit
@ -546,7 +546,7 @@ setup_gazelpci(struct IsdnCardState *cs)
found = 0;
seekcard = PCI_DEVICE_ID_PLX_R685;
for (nbseek = 0; nbseek < 4; nbseek++) {
if ((dev_tel = pci_find_device(PCI_VENDOR_ID_PLX,
if ((dev_tel = hisax_find_pci_device(PCI_VENDOR_ID_PLX,
seekcard, dev_tel))) {
if (pci_enable_device(dev_tel))
return 1;
@ -620,7 +620,7 @@ setup_gazelpci(struct IsdnCardState *cs)
return (0);
}
#endif /* CONFIG_PCI_LEGACY */
#endif /* CONFIG_PCI */
int __devinit
setup_gazel(struct IsdnCard *card)
@ -640,7 +640,7 @@ setup_gazel(struct IsdnCard *card)
return (0);
} else {
#ifdef CONFIG_PCI_LEGACY
#ifdef CONFIG_PCI
if (setup_gazelpci(cs))
return (0);
#else


@ -1658,7 +1658,7 @@ setup_hfcpci(struct IsdnCard *card)
i = 0;
while (id_list[i].vendor_id) {
tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
tmp_hfcpci = hisax_find_pci_device(id_list[i].vendor_id,
id_list[i].device_id,
dev_hfcpci);
i++;


@ -1323,3 +1323,26 @@ void release_tei(struct IsdnCardState *cs);
char *HiSax_getrev(const char *revision);
int TeiNew(void);
void TeiFree(void);
#ifdef CONFIG_PCI
#include <linux/pci.h>
/* adaptation wrapper for old usage
* WARNING! This is unfit for use in a PCI hotplug environment,
* as the returned PCI device can disappear at any moment in time.
* Callers should be converted to use pci_get_device() instead.
*/
static inline struct pci_dev *hisax_find_pci_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from)
{
struct pci_dev *pdev;
pci_dev_get(from);
pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
pci_dev_put(pdev);
return pdev;
}
#endif
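As the warning says, hisax_find_pci_device() only mimics the removed pci_find_device() so the HiSax setup code keeps building without CONFIG_PCI_LEGACY; new code should hold a real reference instead. A hedged sketch of the hotplug-safe pattern (the AVM vendor ID is only an example):

struct pci_dev *pdev = NULL;

while ((pdev = pci_get_device(PCI_VENDOR_ID_AVM, PCI_ANY_ID, pdev))) {
        if (pci_enable_device(pdev))
                continue;
        /* pdev holds a reference here; keep it, and call pci_dev_put()
         * later, if the driver claims this card. */
}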


@ -297,12 +297,12 @@ int __devinit setup_niccy(struct IsdnCard *card)
return 0;
}
} else {
#ifdef CONFIG_PCI_LEGACY
#ifdef CONFIG_PCI
static struct pci_dev *niccy_dev __devinitdata;
u_int pci_ioaddr;
cs->subtyp = 0;
if ((niccy_dev = pci_find_device(PCI_VENDOR_ID_SATSAGEM,
if ((niccy_dev = hisax_find_pci_device(PCI_VENDOR_ID_SATSAGEM,
PCI_DEVICE_ID_SATSAGEM_NICCY,
niccy_dev))) {
if (pci_enable_device(niccy_dev))
@ -354,7 +354,7 @@ int __devinit setup_niccy(struct IsdnCard *card)
printk(KERN_WARNING "Niccy: io0 0 and NO_PCI_BIOS\n");
printk(KERN_WARNING "Niccy: unable to config NICCY PCI\n");
return 0;
#endif /* CONFIG_PCI_LEGACY */
#endif /* CONFIG_PCI */
}
printk(KERN_INFO "HiSax: NICCY %s config irq:%d data:0x%X ale:0x%X\n",
(cs->subtyp == 1) ? "PnP" : "PCI",


@ -276,7 +276,7 @@ setup_netjet_s(struct IsdnCard *card)
for ( ;; )
{
if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
ret = njs_pci_probe(dev_netjet, cs);
if (!ret)


@ -240,7 +240,7 @@ setup_netjet_u(struct IsdnCard *card)
for ( ;; )
{
if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
ret = nju_pci_probe(dev_netjet, cs);
if (!ret)


@ -598,7 +598,7 @@ setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
}
#endif /* __ISAPNP__ */
#ifdef CONFIG_PCI_LEGACY
#ifdef CONFIG_PCI
static struct pci_dev *dev_sedl __devinitdata = NULL;
static int __devinit
@ -607,7 +607,7 @@ setup_sedlbauer_pci(struct IsdnCard *card)
struct IsdnCardState *cs = card->cs;
u16 sub_vendor_id, sub_id;
if ((dev_sedl = pci_find_device(PCI_VENDOR_ID_TIGERJET,
if ((dev_sedl = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) {
if (pci_enable_device(dev_sedl))
return(0);
@ -673,7 +673,7 @@ setup_sedlbauer_pci(struct IsdnCard *card)
return (1);
}
#endif /* CONFIG_PCI_LEGACY */
#endif /* CONFIG_PCI */
int __devinit
setup_sedlbauer(struct IsdnCard *card)


@ -300,7 +300,7 @@ setup_telespci(struct IsdnCard *card)
if (cs->typ != ISDN_CTYPE_TELESPCI)
return (0);
if ((dev_tel = pci_find_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) {
if ((dev_tel = hisax_find_pci_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) {
if (pci_enable_device(dev_tel))
return(0);
cs->irq = dev_tel->irq;


@ -1007,7 +1007,7 @@ setup_w6692(struct IsdnCard *card)
return (0);
while (id_list[id_idx].vendor_id) {
dev_w6692 = pci_find_device(id_list[id_idx].vendor_id,
dev_w6692 = hisax_find_pci_device(id_list[id_idx].vendor_id,
id_list[id_idx].device_id,
dev_w6692);
if (dev_w6692) {


@ -21,17 +21,6 @@ config PCI_MSI
If you don't know what to do here, say N.
config PCI_LEGACY
bool "Enable deprecated pci_find_* API"
depends on PCI
default y
help
Say Y here if you want to include support for the deprecated
pci_find_device() API. Most drivers have been converted over
to using the proper hotplug APIs, so this option serves to
include/exclude only a few drivers that are still using this
API.
config PCI_DEBUG
bool "PCI Debugging"
depends on PCI && DEBUG_KERNEL


@ -2,14 +2,13 @@
# Makefile for the PCI bus specific drivers.
#
obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
obj-y += access.o bus.o probe.o remove.o pci.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
irq.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
obj-$(CONFIG_PCI_LEGACY) += legacy.o
CFLAGS_legacy.o += -Wno-deprecated-declarations
obj-$(CONFIG_PCI_QUIRKS) += quirks.o
# Build PCI Express stuff if needed
obj-$(CONFIG_PCIEPORTBUS) += pcie/


@ -17,6 +17,52 @@
#include "pci.h"
void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
unsigned int flags)
{
struct pci_bus_resource *bus_res;
bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
if (!bus_res) {
dev_err(&bus->dev, "can't add %pR resource\n", res);
return;
}
bus_res->res = res;
bus_res->flags = flags;
list_add_tail(&bus_res->list, &bus->resources);
}
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
{
struct pci_bus_resource *bus_res;
if (n < PCI_BRIDGE_RESOURCE_NUM)
return bus->resource[n];
n -= PCI_BRIDGE_RESOURCE_NUM;
list_for_each_entry(bus_res, &bus->resources, list) {
if (n-- == 0)
return bus_res->res;
}
return NULL;
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);
void pci_bus_remove_resources(struct pci_bus *bus)
{
struct pci_bus_resource *bus_res, *tmp;
int i;
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
bus->resource[i] = 0;
list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
list_del(&bus_res->list);
kfree(bus_res);
}
}
/**
* pci_bus_alloc_resource - allocate a resource from a parent bus
* @bus: PCI bus
@ -36,11 +82,14 @@ int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
resource_size_t min, unsigned int type_mask,
void (*alignf)(void *, struct resource *, resource_size_t,
resource_size_t),
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
resource_size_t),
void *alignf_data)
{
int i, ret = -ENOMEM;
struct resource *r;
resource_size_t max = -1;
type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
@ -49,8 +98,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
if (!(res->flags & IORESOURCE_MEM_64))
max = PCIBIOS_MAX_MEM_32;
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
struct resource *r = bus->resource[i];
pci_bus_for_each_resource(bus, r, i) {
if (!r)
continue;
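For callers, pci_bus_for_each_resource() hides whether a window sits in the fixed bus->resource[] array or in the new resources list. A minimal iteration sketch; the debug print is illustrative only:

struct resource *r;
int i;

pci_bus_for_each_resource(bus, r, i) {
        if (!r)         /* unused table slots may be NULL */
                continue;
        dev_printk(KERN_DEBUG, &bus->dev, "bus resource %d %pR\n", i, r);
}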


@ -332,8 +332,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
slot->hotplug_slot->info->attention_status = 0;
slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot);
slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot);
slot->hotplug_slot->info->max_bus_speed = PCI_SPEED_UNKNOWN;
slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
acpiphp_slot->slot = slot;
snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun);


@ -162,6 +162,7 @@ static int __init cpcihp_generic_init(void)
dev = pci_get_slot(bus, PCI_DEVFN(bridge_slot, 0));
if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
err("Invalid bridge device %s", bridge);
pci_dev_put(dev);
return -EINVAL;
}
bus = dev->subordinate;


@ -310,8 +310,6 @@ struct controller {
u8 first_slot;
u8 add_support;
u8 push_flag;
enum pci_bus_speed speed;
enum pci_bus_speed speed_capability;
u8 push_button; /* 0 = no pushbutton, 1 = pushbutton present */
u8 slot_switch_type; /* 0 = no switch, 1 = switch present */
u8 defeature_PHP; /* 0 = PHP not supported, 1 = PHP supported */


@ -583,30 +583,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
{
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = ctrl->speed_capability;
return 0;
}
static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
{
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = ctrl->speed;
return 0;
}
static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
.enable_slot = process_SI,
@ -616,8 +592,6 @@ static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
.get_attention_status = get_attention_status,
.get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_status,
.get_max_bus_speed = get_max_bus_speed,
.get_cur_bus_speed = get_cur_bus_speed,
};
#define SLOT_NAME_SIZE 10
@ -629,6 +603,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
struct slot *slot;
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *hotplug_slot_info;
struct pci_bus *bus = ctrl->pci_bus;
u8 number_of_slots;
u8 slot_device;
u8 slot_number;
@ -694,7 +669,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
if (is_slot66mhz(slot))
slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
if (ctrl->speed == PCI_SPEED_66MHz)
if (bus->cur_bus_speed == PCI_SPEED_66MHz)
slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
ctrl_slot =
@ -844,6 +819,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 rc;
struct controller *ctrl;
struct pci_func *func;
struct pci_bus *bus;
int err;
err = pci_enable_device(pdev);
@ -852,6 +828,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_name(pdev), err);
return err;
}
bus = pdev->subordinate;
/* Need to read VID early b/c it's used to differentiate CPQ and INTC
* discovery
@ -929,22 +906,22 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_byte(pdev, 0x41, &bus_cap);
if (bus_cap & 0x80) {
dbg("bus max supports 133MHz PCI-X\n");
ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
bus->max_bus_speed = PCI_SPEED_133MHz_PCIX;
break;
}
if (bus_cap & 0x40) {
dbg("bus max supports 100MHz PCI-X\n");
ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
break;
}
if (bus_cap & 20) {
dbg("bus max supports 66MHz PCI-X\n");
ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
bus->max_bus_speed = PCI_SPEED_66MHz_PCIX;
break;
}
if (bus_cap & 10) {
dbg("bus max supports 66MHz PCI\n");
ctrl->speed_capability = PCI_SPEED_66MHz;
bus->max_bus_speed = PCI_SPEED_66MHz;
break;
}
@ -955,7 +932,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case PCI_SUB_HPC_ID:
/* Original 6500/7000 implementation */
ctrl->slot_switch_type = 1;
ctrl->speed_capability = PCI_SPEED_33MHz;
bus->max_bus_speed = PCI_SPEED_33MHz;
ctrl->push_button = 0;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
@ -966,7 +943,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* First Pushbutton implementation */
ctrl->push_flag = 1;
ctrl->slot_switch_type = 1;
ctrl->speed_capability = PCI_SPEED_33MHz;
bus->max_bus_speed = PCI_SPEED_33MHz;
ctrl->push_button = 1;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
@ -976,7 +953,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case PCI_SUB_HPC_ID_INTC:
/* Third party (6500/7000) */
ctrl->slot_switch_type = 1;
ctrl->speed_capability = PCI_SPEED_33MHz;
bus->max_bus_speed = PCI_SPEED_33MHz;
ctrl->push_button = 0;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
@ -987,7 +964,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* First 66 Mhz implementation */
ctrl->push_flag = 1;
ctrl->slot_switch_type = 1;
ctrl->speed_capability = PCI_SPEED_66MHz;
bus->max_bus_speed = PCI_SPEED_66MHz;
ctrl->push_button = 1;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
@ -998,7 +975,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* First PCI-X implementation, 100MHz */
ctrl->push_flag = 1;
ctrl->slot_switch_type = 1;
ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
ctrl->push_button = 1;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
@ -1015,9 +992,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case PCI_VENDOR_ID_INTEL:
/* Check for speed capability (0=33, 1=66) */
if (subsystem_deviceid & 0x0001)
ctrl->speed_capability = PCI_SPEED_66MHz;
bus->max_bus_speed = PCI_SPEED_66MHz;
else
ctrl->speed_capability = PCI_SPEED_33MHz;
bus->max_bus_speed = PCI_SPEED_33MHz;
/* Check for push button */
if (subsystem_deviceid & 0x0002)
@ -1079,7 +1056,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->bus->number);
dbg("Hotplug controller capabilities:\n");
dbg(" speed_capability %d\n", ctrl->speed_capability);
dbg(" speed_capability %d\n", bus->max_bus_speed);
dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ?
"switch present" : "no switch");
dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ?
@ -1142,7 +1119,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Check for 66Mhz operation */
ctrl->speed = get_controller_speed(ctrl);
bus->cur_bus_speed = get_controller_speed(ctrl);
/********************************************************


@ -1130,12 +1130,13 @@ static int is_bridge(struct pci_func * func)
static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot)
{
struct slot *slot;
struct pci_bus *bus = ctrl->pci_bus;
u8 reg;
u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
u16 reg16;
u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
if (ctrl->speed == adapter_speed)
if (bus->cur_bus_speed == adapter_speed)
return 0;
/* We don't allow freq/mode changes if we find another adapter running
@ -1152,7 +1153,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
* lower speed/mode, we allow the new adapter to function at
* this rate if supported
*/
if (ctrl->speed < adapter_speed)
if (bus->cur_bus_speed < adapter_speed)
return 0;
return 1;
@ -1161,20 +1162,20 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
/* If the controller doesn't support freq/mode changes and the
* controller is running at a higher mode, we bail
*/
if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability))
return 1;
/* But we allow the adapter to run at a lower rate if possible */
if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability))
return 0;
/* We try to set the max speed supported by both the adapter and
* controller
*/
if (ctrl->speed_capability < adapter_speed) {
if (ctrl->speed == ctrl->speed_capability)
if (bus->max_bus_speed < adapter_speed) {
if (bus->cur_bus_speed == bus->max_bus_speed)
return 0;
adapter_speed = ctrl->speed_capability;
adapter_speed = bus->max_bus_speed;
}
writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
@ -1229,8 +1230,8 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
/* Only if mode change...*/
if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
@ -1243,7 +1244,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
ctrl->speed = adapter_speed;
bus->cur_bus_speed = adapter_speed;
slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
info("Successfully changed frequency/mode for adapter in slot %d\n",
@ -1269,6 +1270,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
*/
static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
{
struct pci_bus *bus = ctrl->pci_bus;
u8 hp_slot;
u8 temp_byte;
u8 adapter_speed;
@ -1309,7 +1311,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
wait_for_ctrl_irq (ctrl);
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (bus->cur_bus_speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
rc = WRONG_BUS_FREQUENCY;
@ -1426,6 +1428,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
u32 temp_register = 0xFFFFFFFF;
u32 rc = 0;
struct pci_func *new_slot = NULL;
struct pci_bus *bus = ctrl->pci_bus;
struct slot *p_slot;
struct resource_lists res_lists;
@ -1456,7 +1459,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
wait_for_ctrl_irq (ctrl);
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (bus->cur_bus_speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
rc = WRONG_BUS_FREQUENCY;


@ -395,89 +395,40 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 * value)
return rc;
}
static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
static int get_max_bus_speed(struct slot *slot)
{
int rc = -ENODEV;
struct slot *pslot;
int rc;
u8 mode = 0;
enum pci_bus_speed speed;
struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__,
hotplug_slot, value);
debug("%s - Entry slot[%p]\n", __func__, slot);
ibmphp_lock_operations();
mode = slot->supported_bus_mode;
speed = slot->supported_speed;
ibmphp_unlock_operations();
if (hotplug_slot) {
pslot = hotplug_slot->private;
if (pslot) {
rc = 0;
mode = pslot->supported_bus_mode;
*value = pslot->supported_speed;
switch (*value) {
case BUS_SPEED_33:
break;
case BUS_SPEED_66:
if (mode == BUS_MODE_PCIX)
*value += 0x01;
break;
case BUS_SPEED_100:
case BUS_SPEED_133:
*value = pslot->supported_speed + 0x01;
break;
default:
/* Note (will need to change): there would be soon 256, 512 also */
rc = -ENODEV;
}
}
switch (speed) {
case BUS_SPEED_33:
break;
case BUS_SPEED_66:
if (mode == BUS_MODE_PCIX)
speed += 0x01;
break;
case BUS_SPEED_100:
case BUS_SPEED_133:
speed += 0x01;
break;
default:
/* Note (will need to change): there would be soon 256, 512 also */
rc = -ENODEV;
}
ibmphp_unlock_operations();
debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value);
return rc;
}
if (!rc)
bus->max_bus_speed = speed;
static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
{
int rc = -ENODEV;
struct slot *pslot;
u8 mode = 0;
debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__,
hotplug_slot, value);
ibmphp_lock_operations();
if (hotplug_slot) {
pslot = hotplug_slot->private;
if (pslot) {
rc = get_cur_bus_info(&pslot);
if (!rc) {
mode = pslot->bus_on->current_bus_mode;
*value = pslot->bus_on->current_speed;
switch (*value) {
case BUS_SPEED_33:
break;
case BUS_SPEED_66:
if (mode == BUS_MODE_PCIX)
*value += 0x01;
else if (mode == BUS_MODE_PCI)
;
else
*value = PCI_SPEED_UNKNOWN;
break;
case BUS_SPEED_100:
case BUS_SPEED_133:
*value += 0x01;
break;
default:
/* Note of change: there would also be 256, 512 soon */
rc = -ENODEV;
}
}
}
}
ibmphp_unlock_operations();
debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value);
debug("%s - Exit rc[%d] speed[%x]\n", __func__, rc, speed);
return rc;
}
@ -572,6 +523,7 @@ static int __init init_ops(void)
if (slot_cur->bus_on->current_speed == 0xFF)
if (get_cur_bus_info(&slot_cur))
return -1;
get_max_bus_speed(slot_cur);
if (slot_cur->ctrl->options == 0xFF)
if (get_hpc_options(slot_cur, &slot_cur->ctrl->options))
@ -655,6 +607,7 @@ static int validate(struct slot *slot_cur, int opn)
int ibmphp_update_slot_info(struct slot *slot_cur)
{
struct hotplug_slot_info *info;
struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus;
int rc;
u8 bus_speed;
u8 mode;
@ -700,8 +653,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
bus_speed = PCI_SPEED_UNKNOWN;
}
info->cur_bus_speed = bus_speed;
info->max_bus_speed = slot_cur->hotplug_slot->info->max_bus_speed;
bus->cur_bus_speed = bus_speed;
// To do: bus_names
rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info);
@ -1326,8 +1278,6 @@ struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
.get_attention_status = get_attention_status,
.get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_present,
.get_max_bus_speed = get_max_bus_speed,
.get_cur_bus_speed = get_cur_bus_speed,
/* .get_max_adapter_speed = get_max_adapter_speed,
.get_bus_name_status = get_bus_name,
*/


@ -245,7 +245,7 @@ static void __init print_ebda_hpc (void)
int __init ibmphp_access_ebda (void)
{
u8 format, num_ctlrs, rio_complete, hs_complete;
u8 format, num_ctlrs, rio_complete, hs_complete, ebda_sz;
u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base;
int rc = 0;
@ -260,7 +260,16 @@ int __init ibmphp_access_ebda (void)
iounmap (io_mem);
debug ("returned ebda segment: %x\n", ebda_seg);
io_mem = ioremap(ebda_seg<<4, 1024);
io_mem = ioremap(ebda_seg<<4, 1);
if (!io_mem)
return -ENOMEM;
ebda_sz = readb(io_mem);
iounmap(io_mem);
debug("ebda size: %d(KiB)\n", ebda_sz);
if (ebda_sz == 0)
return -ENOMEM;
io_mem = ioremap(ebda_seg<<4, (ebda_sz * 1024));
if (!io_mem )
return -ENOMEM;
next_offset = 0x180;


@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/kthread.h>
#include "ibmphp.h"


@ -64,32 +64,6 @@ static int debug;
static LIST_HEAD(pci_hotplug_slot_list);
static DEFINE_MUTEX(pci_hp_mutex);
/* these strings match up with the values in pci_bus_speed */
static char *pci_bus_speed_strings[] = {
"33 MHz PCI", /* 0x00 */
"66 MHz PCI", /* 0x01 */
"66 MHz PCI-X", /* 0x02 */
"100 MHz PCI-X", /* 0x03 */
"133 MHz PCI-X", /* 0x04 */
NULL, /* 0x05 */
NULL, /* 0x06 */
NULL, /* 0x07 */
NULL, /* 0x08 */
"66 MHz PCI-X 266", /* 0x09 */
"100 MHz PCI-X 266", /* 0x0a */
"133 MHz PCI-X 266", /* 0x0b */
NULL, /* 0x0c */
NULL, /* 0x0d */
NULL, /* 0x0e */
NULL, /* 0x0f */
NULL, /* 0x10 */
"66 MHz PCI-X 533", /* 0x11 */
"100 MHz PCI-X 533", /* 0x12 */
"133 MHz PCI-X 533", /* 0x13 */
"2.5 GT/s PCIe", /* 0x14 */
"5.0 GT/s PCIe", /* 0x15 */
};
#ifdef CONFIG_HOTPLUG_PCI_CPCI
extern int cpci_hotplug_init(int debug);
extern void cpci_hotplug_exit(void);
@ -118,8 +92,6 @@ GET_STATUS(power_status, u8)
GET_STATUS(attention_status, u8)
GET_STATUS(latch_status, u8)
GET_STATUS(adapter_status, u8)
GET_STATUS(max_bus_speed, enum pci_bus_speed)
GET_STATUS(cur_bus_speed, enum pci_bus_speed)
static ssize_t power_read_file(struct pci_slot *slot, char *buf)
{
@ -263,60 +235,6 @@ static struct pci_slot_attribute hotplug_slot_attr_presence = {
.show = presence_read_file,
};
static char *unknown_speed = "Unknown bus speed";
static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf)
{
char *speed_string;
int retval;
enum pci_bus_speed value;
retval = get_max_bus_speed(slot->hotplug, &value);
if (retval)
goto exit;
if (value == PCI_SPEED_UNKNOWN)
speed_string = unknown_speed;
else
speed_string = pci_bus_speed_strings[value];
retval = sprintf (buf, "%s\n", speed_string);
exit:
return retval;
}
static struct pci_slot_attribute hotplug_slot_attr_max_bus_speed = {
.attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO},
.show = max_bus_speed_read_file,
};
static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf)
{
char *speed_string;
int retval;
enum pci_bus_speed value;
retval = get_cur_bus_speed(slot->hotplug, &value);
if (retval)
goto exit;
if (value == PCI_SPEED_UNKNOWN)
speed_string = unknown_speed;
else
speed_string = pci_bus_speed_strings[value];
retval = sprintf (buf, "%s\n", speed_string);
exit:
return retval;
}
static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = {
.attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO},
.show = cur_bus_speed_read_file,
};
static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
size_t count)
{
@ -391,26 +309,6 @@ static bool has_adapter_file(struct pci_slot *pci_slot)
return false;
}
static bool has_max_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
return false;
if (slot->ops->get_max_bus_speed)
return true;
return false;
}
static bool has_cur_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
return false;
if (slot->ops->get_cur_bus_speed)
return true;
return false;
}
static bool has_test_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
@ -456,20 +354,6 @@ static int fs_add_slot(struct pci_slot *slot)
goto exit_adapter;
}
if (has_max_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_max_bus_speed.attr);
if (retval)
goto exit_max_speed;
}
if (has_cur_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_cur_bus_speed.attr);
if (retval)
goto exit_cur_speed;
}
if (has_test_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_test.attr);
@ -480,14 +364,6 @@ static int fs_add_slot(struct pci_slot *slot)
goto exit;
exit_test:
if (has_cur_bus_speed_file(slot))
sysfs_remove_file(&slot->kobj,
&hotplug_slot_attr_cur_bus_speed.attr);
exit_cur_speed:
if (has_max_bus_speed_file(slot))
sysfs_remove_file(&slot->kobj,
&hotplug_slot_attr_max_bus_speed.attr);
exit_max_speed:
if (has_adapter_file(slot))
sysfs_remove_file(&slot->kobj,
&hotplug_slot_attr_presence.attr);
@ -523,14 +399,6 @@ static void fs_remove_slot(struct pci_slot *slot)
sysfs_remove_file(&slot->kobj,
&hotplug_slot_attr_presence.attr);
if (has_max_bus_speed_file(slot))
sysfs_remove_file(&slot->kobj,
&hotplug_slot_attr_max_bus_speed.attr);
if (has_cur_bus_speed_file(slot))
sysfs_remove_file(&slot->kobj,
&hotplug_slot_attr_cur_bus_speed.attr);
if (has_test_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);


@ -69,8 +69,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
static int get_attention_status (struct hotplug_slot *slot, u8 *value);
static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
/**
* release_slot - free up the memory used by a slot
@ -113,8 +111,6 @@ static int init_slot(struct controller *ctrl)
ops->disable_slot = disable_slot;
ops->get_power_status = get_power_status;
ops->get_adapter_status = get_adapter_status;
ops->get_max_bus_speed = get_max_bus_speed;
ops->get_cur_bus_speed = get_cur_bus_speed;
if (MRL_SENS(ctrl))
ops->get_latch_status = get_latch_status;
if (ATTN_LED(ctrl)) {
@ -227,27 +223,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return pciehp_get_adapter_status(slot, value);
}
static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
enum pci_bus_speed *value)
{
struct slot *slot = hotplug_slot->private;
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
return pciehp_get_max_link_speed(slot, value);
}
static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
{
struct slot *slot = hotplug_slot->private;
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
return pciehp_get_cur_link_speed(slot, value);
}
static int pciehp_probe(struct pcie_device *dev)
{
int rc;


@ -341,6 +341,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
p_slot->state = POWERON_STATE;
break;
default:
kfree(info);
goto out;
}
queue_work(pciehp_wq, &info->work);


@ -492,6 +492,7 @@ int pciehp_power_on_slot(struct slot * slot)
u16 slot_cmd;
u16 cmd_mask;
u16 slot_status;
u16 lnk_status;
int retval = 0;
/* Clear sticky power-fault bit from previous power failures */
@ -523,6 +524,14 @@ int pciehp_power_on_slot(struct slot * slot)
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n",
__func__);
return retval;
}
pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
return retval;
}
@ -610,37 +619,6 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value)
{
struct controller *ctrl = slot->ctrl;
enum pcie_link_speed lnk_speed;
u32 lnk_cap;
int retval = 0;
retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
return retval;
}
switch (lnk_cap & 0x000F) {
case 1:
lnk_speed = PCIE_2_5GB;
break;
case 2:
lnk_speed = PCIE_5_0GB;
break;
default:
lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
break;
}
*value = lnk_speed;
ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed);
return retval;
}
int pciehp_get_max_lnk_width(struct slot *slot,
enum pcie_link_width *value)
{
@ -691,38 +669,6 @@ int pciehp_get_max_lnk_width(struct slot *slot,
return retval;
}
int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value)
{
struct controller *ctrl = slot->ctrl;
enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN;
int retval = 0;
u16 lnk_status;
retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
__func__);
return retval;
}
switch (lnk_status & PCI_EXP_LNKSTA_CLS) {
case 1:
lnk_speed = PCIE_2_5GB;
break;
case 2:
lnk_speed = PCIE_5_0GB;
break;
default:
lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
break;
}
*value = lnk_speed;
ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed);
return retval;
}
int pciehp_get_cur_lnk_width(struct slot *slot,
enum pcie_link_width *value)
{


@ -53,17 +53,15 @@ static int __ref pciehp_add_bridge(struct pci_dev *dev)
busnr = pci_scan_bridge(parent, dev, busnr, pass);
if (!dev->subordinate)
return -1;
pci_bus_size_bridges(dev->subordinate);
pci_bus_assign_resources(parent);
pci_enable_bridges(parent);
pci_bus_add_devices(parent);
return 0;
}
int pciehp_configure_device(struct slot *p_slot)
{
struct pci_dev *dev;
struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
struct pci_dev *bridge = p_slot->ctrl->pcie->port;
struct pci_bus *parent = bridge->subordinate;
int num, fn;
struct controller *ctrl = p_slot->ctrl;
@ -96,12 +94,25 @@ int pciehp_configure_device(struct slot *p_slot)
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
pciehp_add_bridge(dev);
}
pci_dev_put(dev);
}
pci_assign_unassigned_bridge_resources(bridge);
for (fn = 0; fn < 8; fn++) {
dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
if (!dev)
continue;
if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
pci_dev_put(dev);
continue;
}
pci_configure_slot(dev);
pci_dev_put(dev);
}
pci_bus_assign_resources(parent);
pci_bus_add_devices(parent);
return 0;
}


@ -130,10 +130,9 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 * value)
return 0;
}
static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
static enum pci_bus_speed get_max_bus_speed(struct slot *slot)
{
struct slot *slot = (struct slot *)hotplug_slot->private;
enum pci_bus_speed speed;
switch (slot->type) {
case 1:
case 2:
@ -141,30 +140,30 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
case 4:
case 5:
case 6:
*value = PCI_SPEED_33MHz; /* speed for case 1-6 */
speed = PCI_SPEED_33MHz; /* speed for case 1-6 */
break;
case 7:
case 8:
*value = PCI_SPEED_66MHz;
speed = PCI_SPEED_66MHz;
break;
case 11:
case 14:
*value = PCI_SPEED_66MHz_PCIX;
speed = PCI_SPEED_66MHz_PCIX;
break;
case 12:
case 15:
*value = PCI_SPEED_100MHz_PCIX;
speed = PCI_SPEED_100MHz_PCIX;
break;
case 13:
case 16:
*value = PCI_SPEED_133MHz_PCIX;
speed = PCI_SPEED_133MHz_PCIX;
break;
default:
*value = PCI_SPEED_UNKNOWN;
speed = PCI_SPEED_UNKNOWN;
break;
}
return 0;
return speed;
}
static int get_children_props(struct device_node *dn, const int **drc_indexes,
@ -408,6 +407,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
slot->state = NOT_VALID;
return -EINVAL;
}
slot->bus->max_bus_speed = get_max_bus_speed(slot);
return 0;
}
@ -429,7 +430,6 @@ struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
.get_power_status = get_power_status,
.get_attention_status = get_attention_status,
.get_adapter_status = get_adapter_status,
.get_max_bus_speed = get_max_bus_speed,
};
module_init(rpaphp_init);


@ -333,8 +333,6 @@ struct hpc_ops {
int (*set_attention_status)(struct slot *slot, u8 status);
int (*get_latch_status)(struct slot *slot, u8 *status);
int (*get_adapter_status)(struct slot *slot, u8 *status);
int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode);
int (*get_prog_int)(struct slot *slot, u8 *prog_int);


@ -65,8 +65,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
static int get_attention_status (struct hotplug_slot *slot, u8 *value);
static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
@ -76,8 +74,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.get_attention_status = get_attention_status,
.get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_status,
.get_max_bus_speed = get_max_bus_speed,
.get_cur_bus_speed = get_cur_bus_speed,
};
/**
@ -279,37 +275,6 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
enum pci_bus_speed *value)
{
struct slot *slot = get_slot(hotplug_slot);
int retval;
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
retval = slot->hpc_ops->get_max_bus_speed(slot, value);
if (retval < 0)
*value = PCI_SPEED_UNKNOWN;
return 0;
}
static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
{
struct slot *slot = get_slot(hotplug_slot);
int retval;
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
if (retval < 0)
*value = PCI_SPEED_UNKNOWN;
return 0;
}
static int is_shpc_capable(struct pci_dev *dev)
{
if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==


@ -285,17 +285,8 @@ static int board_added(struct slot *p_slot)
return WRONG_BUS_FREQUENCY;
}
rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp);
if (rc) {
ctrl_err(ctrl, "Can't get bus operation speed\n");
return WRONG_BUS_FREQUENCY;
}
rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp);
if (rc) {
ctrl_err(ctrl, "Can't get max bus operation speed\n");
msp = bsp;
}
bsp = ctrl->pci_dev->bus->cur_bus_speed;
msp = ctrl->pci_dev->bus->max_bus_speed;
/* Check if there are other slots or devices on the same bus */
if (!list_empty(&ctrl->pci_dev->subordinate->devices))
@ -462,6 +453,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
p_slot->state = POWERON_STATE;
break;
default:
kfree(info);
goto out;
}
queue_work(shpchp_wq, &info->work);
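With the get_max_bus_speed/get_cur_bus_speed slot callbacks gone, both speeds are cached on struct pci_bus and read directly, as the board_added() hunk above does. A small sketch, assuming bus points at the struct pci_bus of interest:

if (bus->cur_bus_speed != PCI_SPEED_UNKNOWN)
        dev_dbg(&bus->dev, "bus speed: current %d, max %d\n",
                bus->cur_bus_speed, bus->max_bus_speed);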


@ -660,6 +660,75 @@ static int hpc_slot_disable(struct slot * slot)
return retval;
}
static int shpc_get_cur_bus_speed(struct controller *ctrl)
{
int retval = 0;
struct pci_bus *bus = ctrl->pci_dev->subordinate;
enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG);
u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7);
if ((pi == 1) && (speed_mode > 4)) {
retval = -ENODEV;
goto out;
}
switch (speed_mode) {
case 0x0:
bus_speed = PCI_SPEED_33MHz;
break;
case 0x1:
bus_speed = PCI_SPEED_66MHz;
break;
case 0x2:
bus_speed = PCI_SPEED_66MHz_PCIX;
break;
case 0x3:
bus_speed = PCI_SPEED_100MHz_PCIX;
break;
case 0x4:
bus_speed = PCI_SPEED_133MHz_PCIX;
break;
case 0x5:
bus_speed = PCI_SPEED_66MHz_PCIX_ECC;
break;
case 0x6:
bus_speed = PCI_SPEED_100MHz_PCIX_ECC;
break;
case 0x7:
bus_speed = PCI_SPEED_133MHz_PCIX_ECC;
break;
case 0x8:
bus_speed = PCI_SPEED_66MHz_PCIX_266;
break;
case 0x9:
bus_speed = PCI_SPEED_100MHz_PCIX_266;
break;
case 0xa:
bus_speed = PCI_SPEED_133MHz_PCIX_266;
break;
case 0xb:
bus_speed = PCI_SPEED_66MHz_PCIX_533;
break;
case 0xc:
bus_speed = PCI_SPEED_100MHz_PCIX_533;
break;
case 0xd:
bus_speed = PCI_SPEED_133MHz_PCIX_533;
break;
default:
retval = -ENODEV;
break;
}
out:
bus->cur_bus_speed = bus_speed;
dbg("Current bus speed = %d\n", bus_speed);
return retval;
}
static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
{
int retval;
@ -720,6 +789,8 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
retval = shpc_write_cmd(slot, 0, cmd);
if (retval)
ctrl_err(ctrl, "%s: Write command failed!\n", __func__);
else
shpc_get_cur_bus_speed(ctrl);
return retval;
}
@ -803,10 +874,10 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
static int shpc_get_max_bus_speed(struct controller *ctrl)
{
int retval = 0;
struct controller *ctrl = slot->ctrl;
struct pci_bus *bus = ctrl->pci_dev->subordinate;
enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1);
@ -842,79 +913,12 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
retval = -ENODEV;
}
*value = bus_speed;
bus->max_bus_speed = bus_speed;
ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed);
return retval;
}
static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value)
{
int retval = 0;
struct controller *ctrl = slot->ctrl;
enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG);
u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7);
if ((pi == 1) && (speed_mode > 4)) {
*value = PCI_SPEED_UNKNOWN;
return -ENODEV;
}
switch (speed_mode) {
case 0x0:
*value = PCI_SPEED_33MHz;
break;
case 0x1:
*value = PCI_SPEED_66MHz;
break;
case 0x2:
*value = PCI_SPEED_66MHz_PCIX;
break;
case 0x3:
*value = PCI_SPEED_100MHz_PCIX;
break;
case 0x4:
*value = PCI_SPEED_133MHz_PCIX;
break;
case 0x5:
*value = PCI_SPEED_66MHz_PCIX_ECC;
break;
case 0x6:
*value = PCI_SPEED_100MHz_PCIX_ECC;
break;
case 0x7:
*value = PCI_SPEED_133MHz_PCIX_ECC;
break;
case 0x8:
*value = PCI_SPEED_66MHz_PCIX_266;
break;
case 0x9:
*value = PCI_SPEED_100MHz_PCIX_266;
break;
case 0xa:
*value = PCI_SPEED_133MHz_PCIX_266;
break;
case 0xb:
*value = PCI_SPEED_66MHz_PCIX_533;
break;
case 0xc:
*value = PCI_SPEED_100MHz_PCIX_533;
break;
case 0xd:
*value = PCI_SPEED_133MHz_PCIX_533;
break;
default:
*value = PCI_SPEED_UNKNOWN;
retval = -ENODEV;
break;
}
ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed);
return retval;
}
static struct hpc_ops shpchp_hpc_ops = {
.power_on_slot = hpc_power_on_slot,
.slot_enable = hpc_slot_enable,
@ -926,8 +930,6 @@ static struct hpc_ops shpchp_hpc_ops = {
.get_latch_status = hpc_get_latch_status,
.get_adapter_status = hpc_get_adapter_status,
.get_max_bus_speed = hpc_get_max_bus_speed,
.get_cur_bus_speed = hpc_get_cur_bus_speed,
.get_adapter_speed = hpc_get_adapter_speed,
.get_mode1_ECC_cap = hpc_get_mode1_ECC_cap,
.get_prog_int = hpc_get_prog_int,
@ -1086,6 +1088,9 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
}
ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq);
shpc_get_max_bus_speed(ctrl);
shpc_get_cur_bus_speed(ctrl);
/*
* If this is the first controller to be initialized,
* initialize the shpchpd work queue


@ -47,8 +47,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha
bus = pdev->subordinate;
out += sprintf(buf, "Free resources: memory\n");
for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) {
res = bus->resource[index];
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_MEM) &&
!(res->flags & IORESOURCE_PREFETCH)) {
out += sprintf(out, "start = %8.8llx, "
@ -58,8 +57,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha
}
}
out += sprintf(out, "Free resources: prefetchable memory\n");
for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) {
res = bus->resource[index];
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_MEM) &&
(res->flags & IORESOURCE_PREFETCH)) {
out += sprintf(out, "start = %8.8llx, "
@ -69,8 +67,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha
}
}
out += sprintf(out, "Free resources: IO\n");
for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) {
res = bus->resource[index];
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_IO)) {
out += sprintf(out, "start = %8.8llx, "
"length = %8.8llx\n",


@ -1,34 +0,0 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include "pci.h"
/**
* pci_find_device - begin or continue searching for a PCI device by vendor/device id
* @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
* @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
* @from: Previous PCI device found in search, or %NULL for new search.
*
* Iterates through the list of known PCI devices. If a PCI device is found
* with a matching @vendor and @device, a pointer to its device structure is
* returned. Otherwise, %NULL is returned.
* A new search is initiated by passing %NULL as the @from argument.
* Otherwise if @from is not %NULL, searches continue from next device
* on the global list.
*
* NOTE: Do not use this function any more; use pci_get_device() instead, as
* the PCI device returned by this function can disappear at any moment in
* time.
*/
struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
struct pci_dev *from)
{
struct pci_dev *pdev;
pci_dev_get(from);
pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
pci_dev_put(pdev);
return pdev;
}
EXPORT_SYMBOL(pci_find_device);
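The removed helper's own documentation points callers at pci_get_device(); a minimal sketch of that reference-counted replacement, with the vendor ID chosen purely as an example:

/* Illustrative only: iterate over all Intel devices with proper
 * reference counting; pci_get_device() drops the reference on the
 * device passed in as 'from' and takes one on the device it returns. */
struct pci_dev *pdev = NULL;

while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pdev))) {
	/* use pdev here; its reference is released by the next call */
}

Unlike pci_find_device(), the device cannot disappear from under the caller while the reference is held.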

View file

@ -16,8 +16,144 @@
#include <acpi/acpi_bus.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include "pci.h"
static DEFINE_MUTEX(pci_acpi_pm_notify_mtx);
/**
* pci_acpi_wake_bus - Wake-up notification handler for root buses.
* @handle: ACPI handle of a device the notification is for.
* @event: Type of the signaled event.
* @context: PCI root bus to wake up devices on.
*/
static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context)
{
struct pci_bus *pci_bus = context;
if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus)
pci_pme_wakeup_bus(pci_bus);
}
/**
* pci_acpi_wake_dev - Wake-up notification handler for PCI devices.
* @handle: ACPI handle of a device the notification is for.
* @event: Type of the signaled event.
* @context: PCI device object to wake up.
*/
static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
{
struct pci_dev *pci_dev = context;
if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
pci_check_pme_status(pci_dev);
pm_runtime_resume(&pci_dev->dev);
if (pci_dev->subordinate)
pci_pme_wakeup_bus(pci_dev->subordinate);
}
}
/**
* add_pm_notifier - Register PM notifier for given ACPI device.
* @dev: ACPI device to add the notifier for.
* @context: PCI device or bus to check for PME status if an event is signaled.
*
* NOTE: @dev need not be a run-wake or wake-up device to be a valid source of
* PM wake-up events. For example, wake-up events may be generated for bridges
* if one of the devices below the bridge is signaling PME, even if the bridge
* itself doesn't have a wake-up GPE associated with it.
*/
static acpi_status add_pm_notifier(struct acpi_device *dev,
acpi_notify_handler handler,
void *context)
{
acpi_status status = AE_ALREADY_EXISTS;
mutex_lock(&pci_acpi_pm_notify_mtx);
if (dev->wakeup.flags.notifier_present)
goto out;
status = acpi_install_notify_handler(dev->handle,
ACPI_SYSTEM_NOTIFY,
handler, context);
if (ACPI_FAILURE(status))
goto out;
dev->wakeup.flags.notifier_present = true;
out:
mutex_unlock(&pci_acpi_pm_notify_mtx);
return status;
}
/**
* remove_pm_notifier - Unregister PM notifier from given ACPI device.
* @dev: ACPI device to remove the notifier from.
*/
static acpi_status remove_pm_notifier(struct acpi_device *dev,
acpi_notify_handler handler)
{
acpi_status status = AE_BAD_PARAMETER;
mutex_lock(&pci_acpi_pm_notify_mtx);
if (!dev->wakeup.flags.notifier_present)
goto out;
status = acpi_remove_notify_handler(dev->handle,
ACPI_SYSTEM_NOTIFY,
handler);
if (ACPI_FAILURE(status))
goto out;
dev->wakeup.flags.notifier_present = false;
out:
mutex_unlock(&pci_acpi_pm_notify_mtx);
return status;
}
/**
* pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus.
* @dev: ACPI device to add the notifier for.
* @pci_bus: PCI bus to walk checking for PME status if an event is signaled.
*/
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
struct pci_bus *pci_bus)
{
return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
}
/**
* pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier.
* @dev: ACPI device to remove the notifier from.
*/
acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
{
return remove_pm_notifier(dev, pci_acpi_wake_bus);
}
/**
* pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
* @dev: ACPI device to add the notifier for.
* @pci_dev: PCI device to check for the PME status if an event is signaled.
*/
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
struct pci_dev *pci_dev)
{
return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
}
/**
* pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier.
* @dev: ACPI device to remove the notifier from.
*/
acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
{
return remove_pm_notifier(dev, pci_acpi_wake_dev);
}
/*
* _SxD returns the D-state with the highest power
* (lowest D-state number) supported in the S-state "x".
@ -131,12 +267,87 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
return 0;
}
/**
* acpi_dev_run_wake - Enable/disable wake-up for given device.
* @phys_dev: Device for which platform wake-up is to be enabled or disabled.
* @enable: Whether to enable or disable the wake-up functionality.
*
* Find the ACPI device object corresponding to @phys_dev and try to
* enable/disable the GPE associated with it.
*/
static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
{
struct acpi_device *dev;
acpi_handle handle;
int error = -ENODEV;
if (!device_run_wake(phys_dev))
return -EINVAL;
handle = DEVICE_ACPI_HANDLE(phys_dev);
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
__func__);
return -ENODEV;
}
if (enable) {
if (!dev->wakeup.run_wake_count++) {
acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_RUNTIME);
}
} else if (dev->wakeup.run_wake_count > 0) {
if (!--dev->wakeup.run_wake_count) {
acpi_disable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_RUNTIME);
acpi_disable_wakeup_device_power(dev);
}
} else {
error = -EALREADY;
}
return error;
}
static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
{
while (bus->parent) {
struct pci_dev *bridge = bus->self;
if (bridge->pme_interrupt)
return;
if (!acpi_dev_run_wake(&bridge->dev, enable))
return;
bus = bus->parent;
}
/* We have reached the root bus. */
if (bus->bridge)
acpi_dev_run_wake(bus->bridge, enable);
}
static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
{
if (dev->pme_interrupt)
return 0;
if (!acpi_dev_run_wake(&dev->dev, enable))
return 0;
acpi_pci_propagate_run_wake(dev->bus, enable);
return 0;
}
static struct pci_platform_pm_ops acpi_pci_platform_pm = {
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
.choose_state = acpi_pci_choose_state,
.can_wakeup = acpi_pci_can_wakeup,
.sleep_wake = acpi_pci_sleep_wake,
.run_wake = acpi_pci_run_wake,
};
/* ACPI bus type */

View file

@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
#include "pci.h"
struct pci_dynid {
@ -404,6 +405,35 @@ static void pci_device_shutdown(struct device *dev)
pci_msix_shutdown(pci_dev);
}
#ifdef CONFIG_PM_OPS
/* Auxiliary functions used for system resume and run-time resume. */
/**
* pci_restore_standard_config - restore standard config registers of PCI device
* @pci_dev: PCI device to handle
*/
static int pci_restore_standard_config(struct pci_dev *pci_dev)
{
pci_update_current_state(pci_dev, PCI_UNKNOWN);
if (pci_dev->current_state != PCI_D0) {
int error = pci_set_power_state(pci_dev, PCI_D0);
if (error)
return error;
}
return pci_restore_state(pci_dev);
}
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
pci_restore_standard_config(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
}
#endif
#ifdef CONFIG_PM_SLEEP
/*
@ -520,29 +550,6 @@ static int pci_legacy_resume(struct device *dev)
/* Auxiliary functions used by the new power management framework */
/**
* pci_restore_standard_config - restore standard config registers of PCI device
* @pci_dev: PCI device to handle
*/
static int pci_restore_standard_config(struct pci_dev *pci_dev)
{
pci_update_current_state(pci_dev, PCI_UNKNOWN);
if (pci_dev->current_state != PCI_D0) {
int error = pci_set_power_state(pci_dev, PCI_D0);
if (error)
return error;
}
return pci_restore_state(pci_dev);
}
static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
{
pci_restore_standard_config(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
}
static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
pci_fixup_device(pci_fixup_resume, pci_dev);
@ -581,6 +588,17 @@ static int pci_pm_prepare(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
/*
* PCI devices suspended at run time need to be resumed at this
* point, because in general it is necessary to reconfigure them for
* system suspend. Namely, if the device is supposed to wake up the
* system from the sleep state, we may need to reconfigure it for this
* purpose. In turn, if the device is not supposed to wake up the
* system from the sleep state, we'll have to prevent it from signaling
* wake-up.
*/
pm_runtime_resume(dev);
if (drv && drv->pm && drv->pm->prepare)
error = drv->pm->prepare(dev);
@ -595,6 +613,13 @@ static void pci_pm_complete(struct device *dev)
drv->pm->complete(dev);
}
#else /* !CONFIG_PM_SLEEP */
#define pci_pm_prepare NULL
#define pci_pm_complete NULL
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
static int pci_pm_suspend(struct device *dev)
@ -681,7 +706,7 @@ static int pci_pm_resume_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
pci_pm_default_resume_noirq(pci_dev);
pci_pm_default_resume_early(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
@ -879,7 +904,7 @@ static int pci_pm_restore_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
pci_pm_default_resume_noirq(pci_dev);
pci_pm_default_resume_early(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
@ -931,6 +956,84 @@ static int pci_pm_restore(struct device *dev)
#endif /* !CONFIG_HIBERNATION */
#ifdef CONFIG_PM_RUNTIME
static int pci_pm_runtime_suspend(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
pci_power_t prev = pci_dev->current_state;
int error;
if (!pm || !pm->runtime_suspend)
return -ENOSYS;
error = pm->runtime_suspend(dev);
suspend_report_result(pm->runtime_suspend, error);
if (error)
return error;
pci_fixup_device(pci_fixup_suspend, pci_dev);
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
WARN_ONCE(pci_dev->current_state != prev,
"PCI PM: State of device not saved by %pF\n",
pm->runtime_suspend);
return 0;
}
if (!pci_dev->state_saved)
pci_save_state(pci_dev);
pci_finish_runtime_suspend(pci_dev);
return 0;
}
static int pci_pm_runtime_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (!pm || !pm->runtime_resume)
return -ENOSYS;
pci_pm_default_resume_early(pci_dev);
__pci_enable_wake(pci_dev, PCI_D0, true, false);
pci_fixup_device(pci_fixup_resume, pci_dev);
return pm->runtime_resume(dev);
}
static int pci_pm_runtime_idle(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (!pm)
return -ENOSYS;
if (pm->runtime_idle) {
int ret = pm->runtime_idle(dev);
if (ret)
return ret;
}
pm_runtime_suspend(dev);
return 0;
}
#else /* !CONFIG_PM_RUNTIME */
#define pci_pm_runtime_suspend NULL
#define pci_pm_runtime_resume NULL
#define pci_pm_runtime_idle NULL
#endif /* !CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_OPS
const struct dev_pm_ops pci_dev_pm_ops = {
.prepare = pci_pm_prepare,
.complete = pci_pm_complete,
@ -946,15 +1049,18 @@ const struct dev_pm_ops pci_dev_pm_ops = {
.thaw_noirq = pci_pm_thaw_noirq,
.poweroff_noirq = pci_pm_poweroff_noirq,
.restore_noirq = pci_pm_restore_noirq,
.runtime_suspend = pci_pm_runtime_suspend,
.runtime_resume = pci_pm_runtime_resume,
.runtime_idle = pci_pm_runtime_idle,
};
#define PCI_PM_OPS_PTR (&pci_dev_pm_ops)
#else /* !CONFIG_PM_SLEEP */
#else /* !CONFIG_PM_OPS */
#define PCI_PM_OPS_PTR NULL
#endif /* !CONFIG_PM_SLEEP */
#endif /* !CONFIG_PM_OPS */
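The runtime callbacks added above do only the PCI-generic half of the job (config-space save/restore, wake-up arming and power-state selection); they are invoked on behalf of a driver that supplies its own dev_pm_ops runtime hooks. A minimal sketch under that assumption; the foo_* names and device-specific helpers are hypothetical, not taken from this patch:

/* Illustrative only: the driver-side counterpart of the bus-type
 * runtime callbacks above. */
static int foo_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* Quiesce the hardware; afterwards the PCI core saves config
	 * space, arms wake-up and picks a low-power state via
	 * pci_finish_runtime_suspend(). */
	foo_stop_dma(pdev);			/* hypothetical helper */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Config space has already been restored and run-time wake-up
	 * disarmed by pci_pm_runtime_resume() before this runs. */
	return foo_restart(to_pci_dev(dev));	/* hypothetical helper */
}

static const struct dev_pm_ops foo_pm_ops = {
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume  = foo_runtime_resume,
	/* .runtime_idle left NULL: pci_pm_runtime_idle() then falls
	 * through to pm_runtime_suspend(). */
};

The table is hooked up through the embedded .driver.pm pointer of struct pci_driver, the same way the port driver elsewhere in this diff installs PCIE_PORTDRV_PM_OPS.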
/**
* __pci_register_driver - register a new pci driver

View file

@ -19,8 +19,8 @@
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm/setup.h>
#include "pci.h"
@ -29,6 +29,12 @@ const char *pci_power_names[] = {
};
EXPORT_SYMBOL_GPL(pci_power_names);
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);
unsigned int pci_pm_d3_delay;
static void pci_dev_d3_sleep(struct pci_dev *dev)
@ -380,10 +386,9 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
const struct pci_bus *bus = dev->bus;
int i;
struct resource *best = NULL;
struct resource *best = NULL, *r;
for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
struct resource *r = bus->resource[i];
pci_bus_for_each_resource(bus, r, i) {
if (!r)
continue;
if (res->start && !(res->start >= r->start && res->end <= r->end))
@ -457,6 +462,12 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}
static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
return pci_platform_pm ?
pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}
/**
* pci_raw_set_power_state - Use PCI PM registers to set the power state of
* given PCI device
@ -1189,6 +1200,66 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
return pcibios_set_pcie_reset_state(dev, state);
}
/**
* pci_check_pme_status - Check if given device has generated PME.
* @dev: Device to check.
*
* Check the PME status of the device and if set, clear it and clear PME enable
* (if set). Return 'true' if PME status and PME enable were both set or
* 'false' otherwise.
*/
bool pci_check_pme_status(struct pci_dev *dev)
{
int pmcsr_pos;
u16 pmcsr;
bool ret = false;
if (!dev->pm_cap)
return false;
pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
pci_read_config_word(dev, pmcsr_pos, &pmcsr);
if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
return false;
/* Clear PME status. */
pmcsr |= PCI_PM_CTRL_PME_STATUS;
if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
/* Disable PME to avoid interrupt flood. */
pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
ret = true;
}
pci_write_config_word(dev, pmcsr_pos, pmcsr);
return ret;
}
/**
* pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
* @dev: Device to handle.
* @ign: Ignored.
*
* Check if @dev has generated PME and queue a resume request for it in that
* case.
*/
static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
{
if (pci_check_pme_status(dev))
pm_request_resume(&dev->dev);
return 0;
}
/**
* pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
* @bus: Top bus of the subtree to walk.
*/
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
if (bus)
pci_walk_bus(bus, pci_pme_wakeup, NULL);
}
/**
* pci_pme_capable - check the capability of PCI device to generate PME#
* @dev: PCI device to handle.
@ -1230,9 +1301,10 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
}
/**
* pci_enable_wake - enable PCI device as wakeup event source
* __pci_enable_wake - enable PCI device as wakeup event source
* @dev: PCI device affected
* @state: PCI state from which device will issue wakeup events
* @runtime: True if the events are to be generated at run time
* @enable: True to enable event generation; false to disable
*
* This enables the device as a wakeup event source, or disables it.
@ -1248,11 +1320,12 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
bool runtime, bool enable)
{
int ret = 0;
if (enable && !device_may_wakeup(&dev->dev))
if (enable && !runtime && !device_may_wakeup(&dev->dev))
return -EINVAL;
/* Don't do the same thing twice in a row for one device. */
@ -1272,19 +1345,24 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
pci_pme_active(dev, true);
else
ret = 1;
error = platform_pci_sleep_wake(dev, true);
error = runtime ? platform_pci_run_wake(dev, true) :
platform_pci_sleep_wake(dev, true);
if (ret)
ret = error;
if (!ret)
dev->wakeup_prepared = true;
} else {
platform_pci_sleep_wake(dev, false);
if (runtime)
platform_pci_run_wake(dev, false);
else
platform_pci_sleep_wake(dev, false);
pci_pme_active(dev, false);
dev->wakeup_prepared = false;
}
return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
/**
* pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
@ -1393,6 +1471,66 @@ int pci_back_from_sleep(struct pci_dev *dev)
return pci_set_power_state(dev, PCI_D0);
}
/**
* pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
* @dev: PCI device being suspended.
*
* Prepare @dev to generate wake-up events at run time and put it into a low
* power state.
*/
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
pci_power_t target_state = pci_target_state(dev);
int error;
if (target_state == PCI_POWER_ERROR)
return -EIO;
__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
error = pci_set_power_state(dev, target_state);
if (error)
__pci_enable_wake(dev, target_state, true, false);
return error;
}
/**
* pci_dev_run_wake - Check if device can generate run-time wake-up events.
* @dev: Device to check.
*
* Return true if the device itself is capable of generating wake-up events
* (through the platform or using the native PCIe PME) or if the device supports
* PME and one of its upstream bridges can generate wake-up events.
*/
bool pci_dev_run_wake(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
if (device_run_wake(&dev->dev))
return true;
if (!dev->pme_support)
return false;
while (bus->parent) {
struct pci_dev *bridge = bus->self;
if (device_run_wake(&bridge->dev))
return true;
bus = bus->parent;
}
/* We have reached the root bus. */
if (bus->bridge)
return device_run_wake(bus->bridge);
return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
@ -2871,7 +3009,6 @@ EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_enable_wake);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
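To make the split between system-sleep and run-time wake-up introduced above concrete, here is a purely illustrative sketch of the two flavours of __pci_enable_wake(); real drivers would normally use the higher-level wrappers, and foo_arm_wakeup() is a hypothetical helper:

static void foo_arm_wakeup(struct pci_dev *pdev, bool runtime)
{
	if (!runtime) {
		/* System sleep: honour user-space policy. */
		__pci_enable_wake(pdev, PCI_D3hot, false,
				  device_may_wakeup(&pdev->dev));
	} else if (pci_dev_run_wake(pdev)) {
		/* Run-time suspend: arm wake-up only if something can
		 * actually deliver the event, the same check that
		 * pci_finish_runtime_suspend() above performs. */
		__pci_enable_wake(pdev, pci_target_state(pdev), true, true);
	}
}

The key behavioural difference is visible in __pci_enable_wake() itself: the device_may_wakeup() policy check only applies when the runtime argument is false.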

View file

@ -35,6 +35,10 @@ int pci_probe_reset_function(struct pci_dev *dev);
*
* @sleep_wake: enables/disables the system wake up capability of given device
*
* @run_wake: enables/disables the platform's ability to generate run-time
* wake-up events for the given device (the device's wake-up capability has
* to be enabled by @sleep_wake for this feature to work)
*
* If given platform is generally capable of power managing PCI devices, all of
* these callbacks are mandatory.
*/
@ -44,11 +48,16 @@ struct pci_platform_pm_ops {
pci_power_t (*choose_state)(struct pci_dev *dev);
bool (*can_wakeup)(struct pci_dev *dev);
int (*sleep_wake)(struct pci_dev *dev, bool enable);
int (*run_wake)(struct pci_dev *dev, bool enable);
};
extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
extern void pci_disable_enabled_device(struct pci_dev *dev);
extern bool pci_check_pme_status(struct pci_dev *dev);
extern int pci_finish_runtime_suspend(struct pci_dev *dev);
extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
extern void pci_pme_wakeup_bus(struct pci_bus *bus);
extern void pci_pm_init(struct pci_dev *dev);
extern void platform_pci_wakeup_init(struct pci_dev *dev);
extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
@ -319,6 +328,13 @@ struct pci_dev_reset_methods {
int (*reset)(struct pci_dev *dev, int probe);
};
#ifdef CONFIG_PCI_QUIRKS
extern int pci_dev_specific_reset(struct pci_dev *dev, int probe);
#else
static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
{
return -ENOTTY;
}
#endif
#endif /* DRIVERS_PCI_H */

View file

@ -46,3 +46,7 @@ config PCIEASPM_DEBUG
help
This enables PCI Express ASPM debug support. It will add per-device
interface to control ASPM.
config PCIE_PME
def_bool y
depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI

View file

@ -11,3 +11,5 @@ obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
# Build PCI Express AER if needed
obj-$(CONFIG_PCIEAER) += aer/
obj-$(CONFIG_PCIE_PME) += pme/

View file

@ -0,0 +1,8 @@
#
# Makefile for PCI-Express Root Port PME signaling driver
#
obj-$(CONFIG_PCIE_PME) += pmedriver.o
pmedriver-objs := pcie_pme.o
pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o

View file

@ -0,0 +1,505 @@
/*
* PCIe Native PME support
*
* Copyright (C) 2007 - 2009 Intel Corp
* Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com>
* Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License V2. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pcieport_if.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include "../../pci.h"
#include "pcie_pme.h"
#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
/*
* If set, this switch will prevent the PCIe root port PME service driver from
* being registered. Consequently, the interrupt-based PCIe PME signaling will
* not be used by any PCIe root ports in that case.
*/
static bool pcie_pme_disabled;
/*
* The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
* "In order to maintain compatibility with non-PCI Express-aware system
* software, system power management logic must be configured by firmware to use
* the legacy mechanism of signaling PME by default. PCI Express-aware system
* software must notify the firmware prior to enabling native, interrupt-based
* PME signaling." However, if the platform doesn't provide us with a suitable
* notification mechanism or the notification fails, it is not clear whether or
* not we are supposed to use the interrupt-based PCIe PME signaling. The
* switch below can be used to indicate the desired behaviour. When set, it
* will make the kernel use the interrupt-based PCIe PME signaling regardless of
* the platform notification status, although the kernel will attempt to notify
* the platform anyway. When unset, it will prevent the kernel from using the
* interrupt-based PCIe PME signaling if the platform notification fails,
* which is the default.
*/
static bool pcie_pme_force_enable;
/*
* If this switch is set, MSI will not be used for PCIe PME signaling. This
* causes the PCIe port driver to use INTx interrupts only, but it turns out
* that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
* wake-up from system sleep states.
*/
bool pcie_pme_msi_disabled;
static int __init pcie_pme_setup(char *str)
{
if (!strcmp(str, "off"))
pcie_pme_disabled = true;
else if (!strcmp(str, "force"))
pcie_pme_force_enable = true;
else if (!strcmp(str, "nomsi"))
pcie_pme_msi_disabled = true;
return 1;
}
__setup("pcie_pme=", pcie_pme_setup);
/**
* pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME.
* @srv: PCIe PME root port service to use for carrying out the check.
*
* Notify the platform that the native PCIe PME is going to be used and return
* 'true' if the control of the PCIe PME registers has been acquired from the
* platform.
*/
static bool pcie_pme_platform_setup(struct pcie_device *srv)
{
if (!pcie_pme_platform_notify(srv))
return true;
return pcie_pme_force_enable;
}
struct pcie_pme_service_data {
spinlock_t lock;
struct pcie_device *srv;
struct work_struct work;
bool noirq; /* Don't enable the PME interrupt used by this service. */
};
/**
* pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation.
* @dev: PCIe root port or event collector.
* @enable: Enable or disable the interrupt.
*/
static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
{
int rtctl_pos;
u16 rtctl;
rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL;
pci_read_config_word(dev, rtctl_pos, &rtctl);
if (enable)
rtctl |= PCI_EXP_RTCTL_PMEIE;
else
rtctl &= ~PCI_EXP_RTCTL_PMEIE;
pci_write_config_word(dev, rtctl_pos, rtctl);
}
/**
* pcie_pme_clear_status - Clear root port PME interrupt status.
* @dev: PCIe root port or event collector.
*/
static void pcie_pme_clear_status(struct pci_dev *dev)
{
int rtsta_pos;
u32 rtsta;
rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
pci_read_config_dword(dev, rtsta_pos, &rtsta);
rtsta |= PCI_EXP_RTSTA_PME;
pci_write_config_dword(dev, rtsta_pos, rtsta);
}
/**
* pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
* @bus: PCI bus to scan.
*
* Scan given PCI bus and all buses under it for devices asserting PME#.
*/
static bool pcie_pme_walk_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
bool ret = false;
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip PCIe devices in case we started from a root port. */
if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
pm_request_resume(&dev->dev);
ret = true;
}
if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate))
ret = true;
}
return ret;
}
/**
* pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME.
* @bus: Secondary bus of the bridge.
* @devfn: Device/function number to check.
*
* PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band
* PCIe PME message. In that case the bridge should use the Requester ID
* of device/function number 0 on its secondary bus.
*/
static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
{
struct pci_dev *dev;
bool found = false;
if (devfn)
return false;
dev = pci_dev_get(bus->self);
if (!dev)
return false;
if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
down_read(&pci_bus_sem);
if (pcie_pme_walk_bus(bus))
found = true;
up_read(&pci_bus_sem);
}
pci_dev_put(dev);
return found;
}
/**
* pcie_pme_handle_request - Find device that generated PME and handle it.
* @port: Root port or event collector that generated the PME interrupt.
* @req_id: PCIe Requester ID of the device that generated the PME.
*/
static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
{
u8 busnr = req_id >> 8, devfn = req_id & 0xff;
struct pci_bus *bus;
struct pci_dev *dev;
bool found = false;
/* First, check if the PME is from the root port itself. */
if (port->devfn == devfn && port->bus->number == busnr) {
if (pci_check_pme_status(port)) {
pm_request_resume(&port->dev);
found = true;
} else {
/*
* Apparently, the root port generated the PME on behalf
* of a non-PCIe device downstream. If this is done by
* a root port, the Requester ID field in its status
* register may contain either the root port's, or the
* source device's information (PCI Express Base
* Specification, Rev. 2.0, Section 6.1.9).
*/
down_read(&pci_bus_sem);
found = pcie_pme_walk_bus(port->subordinate);
up_read(&pci_bus_sem);
}
goto out;
}
/* Second, find the bus the source device is on. */
bus = pci_find_bus(pci_domain_nr(port->bus), busnr);
if (!bus)
goto out;
/* Next, check if the PME is from a PCIe-PCI bridge. */
found = pcie_pme_from_pci_bridge(bus, devfn);
if (found)
goto out;
/* Finally, try to find the PME source on the bus. */
down_read(&pci_bus_sem);
list_for_each_entry(dev, &bus->devices, bus_list) {
pci_dev_get(dev);
if (dev->devfn == devfn) {
found = true;
break;
}
pci_dev_put(dev);
}
up_read(&pci_bus_sem);
if (found) {
/* The device is there, but we have to check its PME status. */
found = pci_check_pme_status(dev);
if (found)
pm_request_resume(&dev->dev);
pci_dev_put(dev);
} else if (devfn) {
/*
* The device is not there, but we can still try to recover by
* assuming that the PME was reported by a PCIe-PCI bridge that
* used devfn different from zero.
*/
dev_dbg(&port->dev, "PME interrupt generated for "
"non-existent device %02x:%02x.%d\n",
busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
found = pcie_pme_from_pci_bridge(bus, 0);
}
out:
if (!found)
dev_dbg(&port->dev, "Spurious native PME interrupt!\n");
}
/**
* pcie_pme_work_fn - Work handler for PCIe PME interrupt.
* @work: Work structure giving access to service data.
*/
static void pcie_pme_work_fn(struct work_struct *work)
{
struct pcie_pme_service_data *data =
container_of(work, struct pcie_pme_service_data, work);
struct pci_dev *port = data->srv->port;
int rtsta_pos;
u32 rtsta;
rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA;
spin_lock_irq(&data->lock);
for (;;) {
if (data->noirq)
break;
pci_read_config_dword(port, rtsta_pos, &rtsta);
if (rtsta & PCI_EXP_RTSTA_PME) {
/*
* Clear PME status of the port. If there are other
* pending PMEs, the status will be set again.
*/
pcie_pme_clear_status(port);
spin_unlock_irq(&data->lock);
pcie_pme_handle_request(port, rtsta & 0xffff);
spin_lock_irq(&data->lock);
continue;
}
/* No need to loop if there are no more PMEs pending. */
if (!(rtsta & PCI_EXP_RTSTA_PENDING))
break;
spin_unlock_irq(&data->lock);
cpu_relax();
spin_lock_irq(&data->lock);
}
if (!data->noirq)
pcie_pme_interrupt_enable(port, true);
spin_unlock_irq(&data->lock);
}
/**
* pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt.
* @irq: Interrupt vector.
* @context: Interrupt context pointer.
*/
static irqreturn_t pcie_pme_irq(int irq, void *context)
{
struct pci_dev *port;
struct pcie_pme_service_data *data;
int rtsta_pos;
u32 rtsta;
unsigned long flags;
port = ((struct pcie_device *)context)->port;
data = get_service_data((struct pcie_device *)context);
rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA;
spin_lock_irqsave(&data->lock, flags);
pci_read_config_dword(port, rtsta_pos, &rtsta);
if (!(rtsta & PCI_EXP_RTSTA_PME)) {
spin_unlock_irqrestore(&data->lock, flags);
return IRQ_NONE;
}
pcie_pme_interrupt_enable(port, false);
spin_unlock_irqrestore(&data->lock, flags);
/* We don't use pm_wq, because it's freezable. */
schedule_work(&data->work);
return IRQ_HANDLED;
}
/**
* pcie_pme_set_native - Set the PME interrupt flag for given device.
* @dev: PCI device to handle.
* @ign: Ignored.
*/
static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
{
dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n");
device_set_run_wake(&dev->dev, true);
dev->pme_interrupt = true;
return 0;
}
/**
* pcie_pme_mark_devices - Set the PME interrupt flag for devices below a port.
* @port: PCIe root port or event collector to handle.
*
* For each device below the given root port, including the port itself (or for each
* root complex integrated endpoint if @port is a root complex event collector)
* set the flag indicating that it can signal run-time wake-up events via PCIe
* PME interrupts.
*/
static void pcie_pme_mark_devices(struct pci_dev *port)
{
pcie_pme_set_native(port, NULL);
if (port->subordinate) {
pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL);
} else {
struct pci_bus *bus = port->bus;
struct pci_dev *dev;
/* Check if this is a root complex event collector. */
if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus)
return;
down_read(&pci_bus_sem);
list_for_each_entry(dev, &bus->devices, bus_list)
if (pci_is_pcie(dev)
&& dev->pcie_type == PCI_EXP_TYPE_RC_END)
pcie_pme_set_native(dev, NULL);
up_read(&pci_bus_sem);
}
}
/**
* pcie_pme_probe - Initialize PCIe PME service for given root port.
* @srv: PCIe service to initialize.
*/
static int pcie_pme_probe(struct pcie_device *srv)
{
struct pci_dev *port;
struct pcie_pme_service_data *data;
int ret;
if (!pcie_pme_platform_setup(srv))
return -EACCES;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock_init(&data->lock);
INIT_WORK(&data->work, pcie_pme_work_fn);
data->srv = srv;
set_service_data(srv, data);
port = srv->port;
pcie_pme_interrupt_enable(port, false);
pcie_pme_clear_status(port);
ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
if (ret) {
kfree(data);
} else {
pcie_pme_mark_devices(port);
pcie_pme_interrupt_enable(port, true);
}
return ret;
}
/**
* pcie_pme_suspend - Suspend PCIe PME service device.
* @srv: PCIe service device to suspend.
*/
static int pcie_pme_suspend(struct pcie_device *srv)
{
struct pcie_pme_service_data *data = get_service_data(srv);
struct pci_dev *port = srv->port;
spin_lock_irq(&data->lock);
pcie_pme_interrupt_enable(port, false);
pcie_pme_clear_status(port);
data->noirq = true;
spin_unlock_irq(&data->lock);
synchronize_irq(srv->irq);
return 0;
}
/**
* pcie_pme_resume - Resume PCIe PME service device.
* @srv: PCIe service device to resume.
*/
static int pcie_pme_resume(struct pcie_device *srv)
{
struct pcie_pme_service_data *data = get_service_data(srv);
struct pci_dev *port = srv->port;
spin_lock_irq(&data->lock);
data->noirq = false;
pcie_pme_clear_status(port);
pcie_pme_interrupt_enable(port, true);
spin_unlock_irq(&data->lock);
return 0;
}
/**
* pcie_pme_remove - Prepare PCIe PME service device for removal.
* @srv: PCIe service device to handle.
*/
static void pcie_pme_remove(struct pcie_device *srv)
{
pcie_pme_suspend(srv);
free_irq(srv->irq, srv);
kfree(get_service_data(srv));
}
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
.service = PCIE_PORT_SERVICE_PME,
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
.resume = pcie_pme_resume,
.remove = pcie_pme_remove,
};
/**
* pcie_pme_service_init - Register the PCIe PME service driver.
*/
static int __init pcie_pme_service_init(void)
{
return pcie_pme_disabled ?
-ENODEV : pcie_port_service_register(&pcie_pme_driver);
}
module_init(pcie_pme_service_init);

View file

@ -0,0 +1,28 @@
/*
* drivers/pci/pcie/pme/pcie_pme.h
*
* PCI Express Root Port PME signaling support
*
* Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*/
#ifndef _PCIE_PME_H_
#define _PCIE_PME_H_
struct pcie_device;
#ifdef CONFIG_ACPI
extern int pcie_pme_acpi_setup(struct pcie_device *srv);
static inline int pcie_pme_platform_notify(struct pcie_device *srv)
{
return pcie_pme_acpi_setup(srv);
}
#else /* !CONFIG_ACPI */
static inline int pcie_pme_platform_notify(struct pcie_device *srv)
{
return 0;
}
#endif /* !CONFIG_ACPI */
#endif

View file

@ -0,0 +1,54 @@
/*
* PCIe Native PME support, ACPI-related part
*
* Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License V2. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <linux/pcieport_if.h>
/**
* pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME.
* @srv: PCIe PME service for a root port or event collector.
*
* Invoked when the PCIe bus type loads the PCIe PME service driver. To avoid
* conflicts with the BIOS, native PCIe PME support requires the BIOS to yield
* control of the PCIe PME registers to the kernel.
*/
int pcie_pme_acpi_setup(struct pcie_device *srv)
{
acpi_status status = AE_NOT_FOUND;
struct pci_dev *port = srv->port;
acpi_handle handle;
int error = 0;
if (acpi_pci_disabled)
return -ENOSYS;
dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n");
handle = acpi_find_root_bridge_handle(port);
if (!handle)
return -EINVAL;
status = acpi_pci_osc_control_set(handle,
OSC_PCI_EXPRESS_PME_CONTROL |
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
if (ACPI_FAILURE(status)) {
dev_info(&port->dev,
"Failed to receive control of PCIe PME service: %s\n",
(status == AE_SUPPORT || status == AE_NOT_FOUND) ?
"no _OSC support" : "ACPI _OSC failed");
error = -ENODEV;
}
return error;
}

View file

@ -30,4 +30,21 @@ extern void pcie_port_device_remove(struct pci_dev *dev);
extern int __must_check pcie_port_bus_register(void);
extern void pcie_port_bus_unregister(void);
#ifdef CONFIG_PCIE_PME
extern bool pcie_pme_msi_disabled;
static inline void pcie_pme_disable_msi(void)
{
pcie_pme_msi_disabled = true;
}
static inline bool pcie_pme_no_msi(void)
{
return pcie_pme_msi_disabled;
}
#else /* !CONFIG_PCIE_PME */
static inline void pcie_pme_disable_msi(void) {}
static inline bool pcie_pme_no_msi(void) { return false; }
#endif /* !CONFIG_PCIE_PME */
#endif /* _PORTDRV_H_ */

View file

@ -186,16 +186,24 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
*/
static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
int i, irq;
int i, irq = -1;
/* We have to use INTx if MSI cannot be used for PCIe PME. */
if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) {
if (dev->pin)
irq = dev->irq;
goto no_msi;
}
/* Try to use MSI-X if supported */
if (!pcie_port_enable_msix(dev, irqs, mask))
return 0;
/* We're not going to use MSI-X, so try MSI and fall back to INTx */
irq = -1;
if (!pci_enable_msi(dev) || dev->pin)
irq = dev->irq;
no_msi:
for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
irqs[i] = irq;
irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;

View file

@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/pcieport_if.h>
#include <linux/aer.h>
#include <linux/dmi.h>
#include "portdrv.h"
#include "aer/aerdrv.h"
@ -273,10 +274,36 @@ static struct pci_driver pcie_portdriver = {
.driver.pm = PCIE_PORTDRV_PM_OPS,
};
static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
{
pr_notice("%s detected: will not use MSI for PCIe PME signaling\n",
d->ident);
pcie_pme_disable_msi();
return 0;
}
static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
/*
* Boxes that should not use MSI for PCIe PME signaling.
*/
{
.callback = dmi_pcie_pme_disable_msi,
.ident = "MSI Wind U-100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "U-100"),
},
},
{}
};
static int __init pcie_portdrv_init(void)
{
int retval;
dmi_check_system(pcie_portdrv_dmi_table);
retval = pcie_port_bus_register();
if (retval) {
printk(KERN_WARNING "PCIE: bus_register error: %d\n", retval);

View file

@ -89,6 +89,7 @@ static void release_pcibus_dev(struct device *dev)
if (pci_bus->bridge)
put_device(pci_bus->bridge);
pci_bus_remove_resources(pci_bus);
kfree(pci_bus);
}
@ -281,26 +282,12 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
}
}
void __devinit pci_read_bridge_bases(struct pci_bus *child)
static void __devinit pci_read_bridge_io(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
u8 io_base_lo, io_limit_lo;
u16 mem_base_lo, mem_limit_lo;
unsigned long base, limit;
struct resource *res;
int i;
if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
return;
dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
child->secondary, child->subordinate,
dev->transparent ? " (subtractive decode)": "");
if (dev->transparent) {
for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
child->resource[i] = child->parent->resource[i - 3];
}
res = child->resource[0];
pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
@ -316,26 +303,50 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
limit |= (io_limit_hi << 16);
}
if (base <= limit) {
if (base && base <= limit) {
res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
if (!res->start)
res->start = base;
if (!res->end)
res->end = limit + 0xfff;
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
} else {
dev_printk(KERN_DEBUG, &dev->dev,
" bridge window [io %04lx - %04lx] reg reading\n",
base, limit);
}
}
static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
u16 mem_base_lo, mem_limit_lo;
unsigned long base, limit;
struct resource *res;
res = child->resource[1];
pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
if (base <= limit) {
if (base && base <= limit) {
res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
res->start = base;
res->end = limit + 0xfffff;
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
} else {
dev_printk(KERN_DEBUG, &dev->dev,
" bridge window [mem 0x%08lx - 0x%08lx] reg reading\n",
base, limit + 0xfffff);
}
}
static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
u16 mem_base_lo, mem_limit_lo;
unsigned long base, limit;
struct resource *res;
res = child->resource[2];
pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
@ -366,7 +377,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
#endif
}
}
if (base <= limit) {
if (base && base <= limit) {
res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
IORESOURCE_MEM | IORESOURCE_PREFETCH;
if (res->flags & PCI_PREF_RANGE_TYPE_64)
@ -374,6 +385,44 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
res->start = base;
res->end = limit + 0xfffff;
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
} else {
dev_printk(KERN_DEBUG, &dev->dev,
" bridge window [mem 0x%08lx - %08lx pref] reg reading\n",
base, limit + 0xfffff);
}
}
void __devinit pci_read_bridge_bases(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
struct resource *res;
int i;
if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
return;
dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
child->secondary, child->subordinate,
dev->transparent ? " (subtractive decode)" : "");
pci_bus_remove_resources(child);
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
pci_read_bridge_io(child);
pci_read_bridge_mmio(child);
pci_read_bridge_mmio_pref(child);
if (dev->transparent) {
pci_bus_for_each_resource(child->parent, res, i) {
if (res) {
pci_bus_add_resource(child, res,
PCI_SUBTRACTIVE_DECODE);
dev_printk(KERN_DEBUG, &dev->dev,
" bridge window %pR (subtractive decode)\n",
res);
}
}
}
}
@ -387,10 +436,147 @@ static struct pci_bus * pci_alloc_bus(void)
INIT_LIST_HEAD(&b->children);
INIT_LIST_HEAD(&b->devices);
INIT_LIST_HEAD(&b->slots);
INIT_LIST_HEAD(&b->resources);
b->max_bus_speed = PCI_SPEED_UNKNOWN;
b->cur_bus_speed = PCI_SPEED_UNKNOWN;
}
return b;
}
static unsigned char pcix_bus_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCI_SPEED_66MHz_PCIX, /* 1 */
PCI_SPEED_100MHz_PCIX, /* 2 */
PCI_SPEED_133MHz_PCIX, /* 3 */
PCI_SPEED_UNKNOWN, /* 4 */
PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
PCI_SPEED_UNKNOWN, /* 8 */
PCI_SPEED_66MHz_PCIX_266, /* 9 */
PCI_SPEED_100MHz_PCIX_266, /* A */
PCI_SPEED_133MHz_PCIX_266, /* B */
PCI_SPEED_UNKNOWN, /* C */
PCI_SPEED_66MHz_PCIX_533, /* D */
PCI_SPEED_100MHz_PCIX_533, /* E */
PCI_SPEED_133MHz_PCIX_533 /* F */
};
static unsigned char pcie_link_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCIE_SPEED_2_5GT, /* 1 */
PCIE_SPEED_5_0GT, /* 2 */
PCIE_SPEED_8_0GT, /* 3 */
PCI_SPEED_UNKNOWN, /* 4 */
PCI_SPEED_UNKNOWN, /* 5 */
PCI_SPEED_UNKNOWN, /* 6 */
PCI_SPEED_UNKNOWN, /* 7 */
PCI_SPEED_UNKNOWN, /* 8 */
PCI_SPEED_UNKNOWN, /* 9 */
PCI_SPEED_UNKNOWN, /* A */
PCI_SPEED_UNKNOWN, /* B */
PCI_SPEED_UNKNOWN, /* C */
PCI_SPEED_UNKNOWN, /* D */
PCI_SPEED_UNKNOWN, /* E */
PCI_SPEED_UNKNOWN /* F */
};
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
static unsigned char agp_speeds[] = {
AGP_UNKNOWN,
AGP_1X,
AGP_2X,
AGP_4X,
AGP_8X
};
static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
int index = 0;
if (agpstat & 4)
index = 3;
else if (agpstat & 2)
index = 2;
else if (agpstat & 1)
index = 1;
else
goto out;
if (agp3) {
index += 2;
if (index == 5)
index = 0;
}
out:
return agp_speeds[index];
}
static void pci_set_bus_speed(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
int pos;
pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
if (!pos)
pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
if (pos) {
u32 agpstat, agpcmd;
pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
}
pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
if (pos) {
u16 status;
enum pci_bus_speed max;
pci_read_config_word(bridge, pos + 2, &status);
if (status & 0x8000) {
max = PCI_SPEED_133MHz_PCIX_533;
} else if (status & 0x4000) {
max = PCI_SPEED_133MHz_PCIX_266;
} else if (status & 0x0002) {
if (((status >> 12) & 0x3) == 2) {
max = PCI_SPEED_133MHz_PCIX_ECC;
} else {
max = PCI_SPEED_133MHz_PCIX;
}
} else {
max = PCI_SPEED_66MHz_PCIX;
}
bus->max_bus_speed = max;
bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];
return;
}
pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
if (pos) {
u32 linkcap;
u16 linksta;
pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap);
bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];
pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta);
pcie_update_link_speed(bus, linksta);
}
}
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@ -430,6 +616,8 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
child->self = bridge;
child->bridge = get_device(&bridge->dev);
pci_set_bus_speed(child);
/* Set up default resource pointers and names.. */
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
@ -1081,6 +1269,45 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
}
EXPORT_SYMBOL(pci_scan_single_device);
static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
{
u16 cap;
unsigned pos, next_fn;
if (!dev)
return 0;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
if (!pos)
return 0;
pci_read_config_word(dev, pos + 4, &cap);
next_fn = cap >> 8;
if (next_fn <= fn)
return 0;
return next_fn;
}
static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
{
return (fn + 1) % 8;
}
static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
{
return 0;
}
static int only_one_child(struct pci_bus *bus)
{
struct pci_dev *parent = bus->self;
if (!parent || !pci_is_pcie(parent))
return 0;
if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
return 1;
return 0;
}
/**
* pci_scan_slot - scan a PCI slot on a bus for devices.
* @bus: PCI bus to scan
@ -1094,21 +1321,30 @@ EXPORT_SYMBOL(pci_scan_single_device);
*/
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
int fn, nr = 0;
unsigned fn, nr = 0;
struct pci_dev *dev;
unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;
if (only_one_child(bus) && (devfn > 0))
return 0; /* Already scanned the entire slot */
dev = pci_scan_single_device(bus, devfn);
if (dev && !dev->is_added) /* new device? */
if (!dev)
return 0;
if (!dev->is_added)
nr++;
if (dev && dev->multifunction) {
for (fn = 1; fn < 8; fn++) {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
if (!dev->is_added)
nr++;
dev->multifunction = 1;
}
if (pci_ari_enabled(bus))
next_fn = next_ari_fn;
else if (dev->multifunction)
next_fn = next_trad_fn;
for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
if (!dev->is_added)
nr++;
dev->multifunction = 1;
}
}
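With max_bus_speed and cur_bus_speed now kept in struct pci_bus (initialized in pci_alloc_bus() and filled in by pci_set_bus_speed() above), consumers can read the values directly instead of going through per-driver get_*_bus_speed hooks, which is presumably why the shpchp hunks at the top of this diff drop those operations. A minimal, purely illustrative reader (report_bus_speed() is a hypothetical helper):

static void report_bus_speed(struct pci_bus *bus)
{
	enum pci_bus_speed cur = bus->cur_bus_speed;
	enum pci_bus_speed max = bus->max_bus_speed;

	if (cur == PCI_SPEED_UNKNOWN)
		dev_info(&bus->dev, "current bus speed unknown (max %d)\n", max);
	else
		dev_info(&bus->dev, "bus speed %d (max %d)\n", cur, max);
}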

View file

@ -25,14 +25,9 @@
#include <linux/dmi.h>
#include <linux/pci-aspm.h>
#include <linux/ioport.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);
#ifdef CONFIG_PCI_QUIRKS
/*
* This quirk function disables memory decoding and releases memory resources
* of the device specified by kernel's boot parameter 'pci=resource_alignment='.
@ -2612,6 +2607,7 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
}
pci_do_fixups(dev, start, end);
}
EXPORT_SYMBOL(pci_fixup_device);
static int __init pci_apply_final_quirks(void)
{
@ -2723,9 +2719,3 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
return -ENOTTY;
}
#else
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; }
#endif
EXPORT_SYMBOL(pci_fixup_device);

View file

@ -27,37 +27,83 @@
#include <linux/slab.h>
#include "pci.h"
static void pbus_assign_resources_sorted(const struct pci_bus *bus)
{
struct pci_dev *dev;
struct resource_list_x {
struct resource_list_x *next;
struct resource *res;
struct resource_list head, *list, *tmp;
int idx;
struct pci_dev *dev;
resource_size_t start;
resource_size_t end;
unsigned long flags;
};
head.next = NULL;
list_for_each_entry(dev, &bus->devices, bus_list) {
u16 class = dev->class >> 8;
static void add_to_failed_list(struct resource_list_x *head,
struct pci_dev *dev, struct resource *res)
{
struct resource_list_x *list = head;
struct resource_list_x *ln = list->next;
struct resource_list_x *tmp;
/* Don't touch classless devices or host bridges or ioapics. */
if (class == PCI_CLASS_NOT_DEFINED ||
class == PCI_CLASS_BRIDGE_HOST)
continue;
/* Don't touch ioapic devices already enabled by firmware */
if (class == PCI_CLASS_SYSTEM_PIC) {
u16 command;
pci_read_config_word(dev, PCI_COMMAND, &command);
if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
continue;
}
pdev_sort_resources(dev, &head);
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp) {
pr_warning("add_to_failed_list: kmalloc() failed!\n");
return;
}
for (list = head.next; list;) {
tmp->next = ln;
tmp->res = res;
tmp->dev = dev;
tmp->start = res->start;
tmp->end = res->end;
tmp->flags = res->flags;
list->next = tmp;
}
static void free_failed_list(struct resource_list_x *head)
{
struct resource_list_x *list, *tmp;
for (list = head->next; list;) {
tmp = list;
list = list->next;
kfree(tmp);
}
head->next = NULL;
}
static void __dev_sort_resources(struct pci_dev *dev,
struct resource_list *head)
{
u16 class = dev->class >> 8;
/* Don't touch classless devices or host bridges or ioapics. */
if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
return;
/* Don't touch ioapic devices already enabled by firmware */
if (class == PCI_CLASS_SYSTEM_PIC) {
u16 command;
pci_read_config_word(dev, PCI_COMMAND, &command);
if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
return;
}
pdev_sort_resources(dev, head);
}
static void __assign_resources_sorted(struct resource_list *head,
struct resource_list_x *fail_head)
{
struct resource *res;
struct resource_list *list, *tmp;
int idx;
for (list = head->next; list;) {
res = list->res;
idx = res - &list->dev->resource[0];
if (pci_assign_resource(list->dev, idx)) {
if (fail_head && !pci_is_root_bus(list->dev->bus))
add_to_failed_list(fail_head, list->dev, res);
res->start = 0;
res->end = 0;
res->flags = 0;
@ -68,6 +114,30 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus)
}
}
static void pdev_assign_resources_sorted(struct pci_dev *dev,
struct resource_list_x *fail_head)
{
struct resource_list head;
head.next = NULL;
__dev_sort_resources(dev, &head);
__assign_resources_sorted(&head, fail_head);
}
static void pbus_assign_resources_sorted(const struct pci_bus *bus,
struct resource_list_x *fail_head)
{
struct pci_dev *dev;
struct resource_list head;
head.next = NULL;
list_for_each_entry(dev, &bus->devices, bus_list)
__dev_sort_resources(dev, &head);
__assign_resources_sorted(&head, fail_head);
}
void pci_setup_cardbus(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
@ -134,18 +204,12 @@ EXPORT_SYMBOL(pci_setup_cardbus);
config space writes, so it's quite possible that an I/O window of
the bridge will have some undesirable address (e.g. 0) after the
first write. Ditto 64-bit prefetchable MMIO. */
static void pci_setup_bridge(struct pci_bus *bus)
static void pci_setup_bridge_io(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
u32 l, bu, lu, io_upper16;
if (pci_is_enabled(bridge))
return;
dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
bus->secondary, bus->subordinate);
u32 l, io_upper16;
/* Set up the top and bottom of the PCI I/O segment for this bus. */
res = bus->resource[0];
@ -158,8 +222,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
/* Set up upper 16 bits of I/O base/limit. */
io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
dev_info(&bridge->dev, " bridge window %pR\n", res);
}
else {
} else {
/* Clear upper 16 bits of I/O base/limit. */
io_upper16 = 0;
l = 0x00f0;
@ -171,21 +234,35 @@ static void pci_setup_bridge(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_IO_BASE, l);
/* Update upper 16 bits of I/O base/limit. */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
/* Set up the top and bottom of the PCI Memory segment
for this bus. */
static void pci_setup_bridge_mmio(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
u32 l;
/* Set up the top and bottom of the PCI Memory segment for this bus. */
res = bus->resource[1];
pcibios_resource_to_bus(bridge, &region, res);
if (res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
dev_info(&bridge->dev, " bridge window %pR\n", res);
}
else {
} else {
l = 0x0000fff0;
dev_info(&bridge->dev, " bridge window [mem disabled]\n");
}
pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
u32 l, bu, lu;
/* Clear out the upper 32 bits of PREF limit.
If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
@ -204,8 +281,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
lu = upper_32_bits(region.end);
}
dev_info(&bridge->dev, " bridge window %pR\n", res);
}
else {
} else {
l = 0x0000fff0;
dev_info(&bridge->dev, " bridge window [mem pref disabled]\n");
}
@ -214,10 +290,35 @@ static void pci_setup_bridge(struct pci_bus *bus)
/* Set the upper 32 bits of PREF base & limit. */
pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}
static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
struct pci_dev *bridge = bus->self;
dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
bus->secondary, bus->subordinate);
if (type & IORESOURCE_IO)
pci_setup_bridge_io(bus);
if (type & IORESOURCE_MEM)
pci_setup_bridge_mmio(bus);
if (type & IORESOURCE_PREFETCH)
pci_setup_bridge_mmio_pref(bus);
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
static void pci_setup_bridge(struct pci_bus *bus)
{
unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
__pci_setup_bridge(bus, type);
}
/* Check whether the bridge supports optional I/O and
prefetchable memory ranges. If not, the respective
base/limit registers must be read-only and read as 0. */
@ -253,8 +354,11 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
}
if (pmem) {
b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
PCI_PREF_RANGE_TYPE_64) {
b_res[2].flags |= IORESOURCE_MEM_64;
b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
}
}
/* double check if bridge does support 64 bit pref */
@ -283,8 +387,7 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
r = bus->resource[i];
pci_bus_for_each_resource(bus, r, i) {
if (r == &ioport_resource || r == &iomem_resource)
continue;
if (r && (r->flags & type_mask) == type && !r->parent)
@ -301,7 +404,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
{
struct pci_dev *dev;
struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
unsigned long size = 0, size1 = 0;
unsigned long size = 0, size1 = 0, old_size;
if (!b_res)
return;
@ -326,12 +429,17 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
}
if (size < min_size)
size = min_size;
old_size = resource_size(b_res);
if (old_size == 1)
old_size = 0;
/* To be fixed in 2.5: we should have some sort of HAVE_ISA
flag in struct pci_bus. */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
size = ALIGN(size + size1, 4096);
if (size < old_size)
size = old_size;
if (!size) {
if (b_res->start || b_res->end)
dev_info(&bus->self->dev, "disabling bridge window "
@ -352,7 +460,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
unsigned long type, resource_size_t min_size)
{
struct pci_dev *dev;
resource_size_t min_align, align, size, old_size;
resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
int order, max_order;
struct resource *b_res = find_free_bus_resource(bus, type);
@ -402,6 +510,11 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
}
if (size < min_size)
size = min_size;
old_size = resource_size(b_res);
if (old_size == 1)
old_size = 0;
if (size < old_size)
size = old_size;
align = 0;
min_align = 0;
@ -538,23 +651,25 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_size_bridges);
static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
struct resource_list_x *fail_head)
{
struct pci_bus *b;
struct pci_dev *dev;
pbus_assign_resources_sorted(bus, fail_head);
list_for_each_entry(dev, &bus->devices, bus_list) {
b = dev->subordinate;
if (!b)
continue;
__pci_bus_assign_resources(b, fail_head);
switch (dev->class >> 8) {
case PCI_CLASS_BRIDGE_PCI:
if (!pci_is_enabled(dev))
	pci_setup_bridge(b);
break;
case PCI_CLASS_BRIDGE_CARDBUS:
@ -568,15 +683,130 @@ void __ref pci_bus_assign_resources(const struct pci_bus *bus)
}
}
}
void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
__pci_bus_assign_resources(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);
static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
struct resource_list_x *fail_head)
{
struct pci_bus *b;
pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);
b = bridge->subordinate;
if (!b)
return;
__pci_bus_assign_resources(b, fail_head);
switch (bridge->class >> 8) {
case PCI_CLASS_BRIDGE_PCI:
pci_setup_bridge(b);
break;
case PCI_CLASS_BRIDGE_CARDBUS:
pci_setup_cardbus(b);
break;
default:
dev_info(&bridge->dev, "not setting up bridge for bus "
"%04x:%02x\n", pci_domain_nr(b), b->number);
break;
}
}
static void pci_bridge_release_resources(struct pci_bus *bus,
unsigned long type)
{
int idx;
bool changed = false;
struct pci_dev *dev;
struct resource *r;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
dev = bus->self;
for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
idx++) {
r = &dev->resource[idx];
if ((r->flags & type_mask) != type)
continue;
if (!r->parent)
continue;
/*
* if there are children under that, we should release them
* all
*/
release_child_resources(r);
if (!release_resource(r)) {
dev_printk(KERN_DEBUG, &dev->dev,
"resource %d %pR released\n", idx, r);
/* keep the old size */
r->end = resource_size(r) - 1;
r->start = 0;
r->flags = 0;
changed = true;
}
}
if (changed) {
/* if the PREF window was released, reprogram only it; avoid touching the others */
if (type & IORESOURCE_PREFETCH)
type = IORESOURCE_PREFETCH;
__pci_setup_bridge(bus, type);
}
}
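One detail worth spelling out: when a window is released, its struct resource is rewritten as [0, size-1] with the flags cleared, so the next sizing pass still sees how large the window used to be. A standalone illustration of that rewrite with made-up numbers:

/* Shows the "keep the old size" rewrite applied to a released 1 MB window. */
#include <stdio.h>

int main(void)
{
	unsigned long start = 0xd0000000UL, end = 0xd00fffffUL;
	unsigned long size = end - start + 1;	/* what resource_size() returns */

	end = size - 1;		/* keep the old size */
	start = 0;
	printf("released window kept as [0x%lx-0x%lx], size 0x%lx\n",
	       start, end, size);
	return 0;
}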
enum release_type {
leaf_only,
whole_subtree,
};
/*
 * Try to release PCI bridge resources starting from leaf bridges,
 * so that a bigger window can be allocated later.
 */
static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
unsigned long type,
enum release_type rel_type)
{
struct pci_dev *dev;
bool is_leaf_bridge = true;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
if (!b)
continue;
is_leaf_bridge = false;
if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
continue;
if (rel_type == whole_subtree)
pci_bus_release_bridge_resources(b, type,
whole_subtree);
}
if (pci_is_root_bus(bus))
return;
if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return;
if ((rel_type == whole_subtree) || is_leaf_bridge)
pci_bridge_release_resources(bus, type);
}
static void pci_bus_dump_res(struct pci_bus *bus)
{
struct resource *res;
int i;

pci_bus_for_each_resource(bus, res, i) {
if (!res || !res->end || !res->flags)
continue;
dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
@ -600,11 +830,65 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
}
}
static int __init pci_bus_get_depth(struct pci_bus *bus)
{
int depth = 0;
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
int ret;
struct pci_bus *b = dev->subordinate;
if (!b)
continue;
ret = pci_bus_get_depth(b);
if (ret + 1 > depth)
depth = ret + 1;
}
return depth;
}
static int __init pci_get_max_depth(void)
{
int depth = 0;
struct pci_bus *bus;
list_for_each_entry(bus, &pci_root_buses, node) {
int ret;
ret = pci_bus_get_depth(bus);
if (ret > depth)
depth = ret;
}
return depth;
}
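The depth helpers walk the bus tree recursively, and the number of assignment retries is then max_depth + 1. A self-contained toy version of the same recursion; the struct below is a simplified stand-in, not the kernel's struct pci_bus:

/* Toy model of the recursive depth calculation above. A root bus with one
 * bridge and one leaf bus underneath yields a depth of 2. */
#include <stdio.h>
#include <stddef.h>

struct toy_bus {
	struct toy_bus *children;	/* first child bus */
	struct toy_bus *next;		/* next sibling */
};

static int toy_bus_depth(const struct toy_bus *bus)
{
	int depth = 0;
	const struct toy_bus *child;

	for (child = bus->children; child; child = child->next) {
		int d = toy_bus_depth(child) + 1;

		if (d > depth)
			depth = d;
	}
	return depth;
}

int main(void)
{
	struct toy_bus leaf = { NULL, NULL };
	struct toy_bus bridge = { &leaf, NULL };
	struct toy_bus root = { &bridge, NULL };

	printf("max depth: %d\n", toy_bus_depth(&root));	/* prints 2 */
	return 0;
}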
/*
 * The first try will not touch PCI bridge resources.
 * The second and later tries will release small leaf bridge resources.
 * Stop at the maximum depth if no good assignment can be found.
 */
void __init
pci_assign_unassigned_resources(void)
{
struct pci_bus *bus;
int tried_times = 0;
enum release_type rel_type = leaf_only;
struct resource_list_x head, *list;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
unsigned long failed_type;
int max_depth = pci_get_max_depth();
int pci_try_num;
head.next = NULL;
pci_try_num = max_depth + 1;
printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
max_depth, pci_try_num);
again:
/* Depth first, calculate sizes and alignments of all
subordinate buses. */
list_for_each_entry(bus, &pci_root_buses, node) {
@ -612,12 +896,130 @@ pci_assign_unassigned_resources(void)
}
/* Depth last, allocate resources and update the hardware. */
list_for_each_entry(bus, &pci_root_buses, node) {
__pci_bus_assign_resources(bus, &head);
}
tried_times++;
/* did any device fail to get its resources? */
if (!head.next)
goto enable_and_dump;
failed_type = 0;
for (list = head.next; list;) {
failed_type |= list->flags;
list = list->next;
}
/*
 * I/O ports are tight, so don't retry for them alone,
 * and stop once the retry limit has been reached.
 */
failed_type &= type_mask;
if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
free_failed_list(&head);
goto enable_and_dump;
}
printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
tried_times + 1);
/* from the third try on, don't restrict the release to leaf bridges */
if ((tried_times + 1) > 2)
rel_type = whole_subtree;
/*
 * Try to release leaf bridge resources that are too small for the
 * child devices under that bridge.
 */
for (list = head.next; list;) {
bus = list->dev->bus;
pci_bus_release_bridge_resources(bus, list->flags & type_mask,
rel_type);
list = list->next;
}
/* restore size and flags */
for (list = head.next; list;) {
struct resource *res = list->res;
res->start = list->start;
res->end = list->end;
res->flags = list->flags;
if (list->dev->subordinate)
res->flags = 0;
list = list->next;
}
free_failed_list(&head);
goto again;
enable_and_dump:
/* Depth last, update the hardware. */
list_for_each_entry(bus, &pci_root_buses, node)
pci_enable_bridges(bus);
/* dump the resources on all buses */
list_for_each_entry(bus, &pci_root_buses, node) {
pci_bus_dump_resources(bus);
}
}
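The subtlest part of the retry loop is the bail-out rule: the flags of every failed resource are OR-ed together, masked down to the three window types, and a retry is only worthwhile if something other than pure I/O space ran out. A userspace sketch of just that rule; the IORESOURCE_* values mirror include/linux/ioport.h, the rest is illustrative:

/* Standalone model of the "I/O-only failures don't get a retry" check. */
#include <stdio.h>

#define IORESOURCE_IO		0x00000100UL
#define IORESOURCE_MEM		0x00000200UL
#define IORESOURCE_PREFETCH	0x00002000UL

static int worth_retrying(const unsigned long *failed_flags, int n)
{
	unsigned long failed_type = 0;
	int i;

	for (i = 0; i < n; i++)
		failed_type |= failed_flags[i];
	failed_type &= IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH;

	return failed_type != IORESOURCE_IO;	/* I/O-only failures: give up */
}

int main(void)
{
	unsigned long io_only[] = { IORESOURCE_IO, IORESOURCE_IO };
	unsigned long mixed[]   = { IORESOURCE_IO, IORESOURCE_MEM | IORESOURCE_PREFETCH };

	printf("io_only -> retry? %d\n", worth_retrying(io_only, 2));	/* 0 */
	printf("mixed   -> retry? %d\n", worth_retrying(mixed, 2));	/* 1 */
	return 0;
}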
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
struct pci_bus *parent = bridge->subordinate;
int tried_times = 0;
struct resource_list_x head, *list;
int retval;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
head.next = NULL;
again:
pci_bus_size_bridges(parent);
__pci_bridge_assign_resources(bridge, &head);
retval = pci_reenable_device(bridge);
pci_set_master(bridge);
pci_enable_bridges(parent);
tried_times++;
if (!head.next)
return;
if (tried_times >= 2) {
/* still failing; no point in trying again */
free_failed_list(&head);
return;
}
printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
tried_times + 1);
/*
 * Try to release leaf bridge resources that are too small for the
 * child devices under that bridge.
 */
for (list = head.next; list;) {
struct pci_bus *bus = list->dev->bus;
unsigned long flags = list->flags;
pci_bus_release_bridge_resources(bus, flags & type_mask,
whole_subtree);
list = list->next;
}
/* restore size and flags */
for (list = head.next; list;) {
struct resource *res = list->res;
res->start = list->start;
res->end = list->end;
res->flags = list->flags;
if (list->dev->subordinate)
res->flags = 0;
list = list->next;
}
free_failed_list(&head);
goto again;
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
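The new export is aimed at hotplug paths that discover devices behind a bridge at run time. The fragment below is only a rough sketch of such a caller, not the actual pciehp code; error handling, locking and the surrounding driver structure are omitted, and the function name is hypothetical:

/* Hypothetical hotplug-style caller; shown only to illustrate the intended
 * call order: scan the slot, assign resources behind the bridge, add devices. */
#include <linux/pci.h>
#include <linux/errno.h>

static int example_configure_hotplugged_bridge(struct pci_dev *bridge)
{
	struct pci_bus *bus = bridge->subordinate;

	if (!bus)
		return -EINVAL;
	if (pci_scan_slot(bus, PCI_DEVFN(0, 0)) == 0)
		return -ENODEV;		/* nothing behind the bridge */

	pci_assign_unassigned_bridge_resources(bridge);
	pci_bus_add_devices(bus);
	return 0;
}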

View file

@ -47,6 +47,55 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
slot->number);
}
/* these strings match up with the values in pci_bus_speed */
static char *pci_bus_speed_strings[] = {
"33 MHz PCI", /* 0x00 */
"66 MHz PCI", /* 0x01 */
"66 MHz PCI-X", /* 0x02 */
"100 MHz PCI-X", /* 0x03 */
"133 MHz PCI-X", /* 0x04 */
NULL, /* 0x05 */
NULL, /* 0x06 */
NULL, /* 0x07 */
NULL, /* 0x08 */
"66 MHz PCI-X 266", /* 0x09 */
"100 MHz PCI-X 266", /* 0x0a */
"133 MHz PCI-X 266", /* 0x0b */
"Unknown AGP", /* 0x0c */
"1x AGP", /* 0x0d */
"2x AGP", /* 0x0e */
"4x AGP", /* 0x0f */
"8x AGP", /* 0x10 */
"66 MHz PCI-X 533", /* 0x11 */
"100 MHz PCI-X 533", /* 0x12 */
"133 MHz PCI-X 533", /* 0x13 */
"2.5 GT/s PCIe", /* 0x14 */
"5.0 GT/s PCIe", /* 0x15 */
"8.0 GT/s PCIe", /* 0x16 */
};
static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf)
{
const char *speed_string;
if (speed < ARRAY_SIZE(pci_bus_speed_strings))
speed_string = pci_bus_speed_strings[speed];
else
speed_string = "Unknown";
return sprintf(buf, "%s\n", speed_string);
}
static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf)
{
return bus_speed_read(slot->bus->max_bus_speed, buf);
}
static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf)
{
return bus_speed_read(slot->bus->cur_bus_speed, buf);
}
static void pci_slot_release(struct kobject *kobj)
{
struct pci_dev *dev;
@ -66,9 +115,15 @@ static void pci_slot_release(struct kobject *kobj)
static struct pci_slot_attribute pci_slot_attr_address =
__ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL);
static struct pci_slot_attribute pci_slot_attr_max_speed =
__ATTR(max_bus_speed, (S_IFREG | S_IRUGO), max_speed_read_file, NULL);
static struct pci_slot_attribute pci_slot_attr_cur_speed =
__ATTR(cur_bus_speed, (S_IFREG | S_IRUGO), cur_speed_read_file, NULL);
static struct attribute *pci_slot_default_attrs[] = {
&pci_slot_attr_address.attr,
&pci_slot_attr_max_speed.attr,
&pci_slot_attr_cur_speed.attr,
NULL,
};
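The two new attributes expose per-slot link speed under /sys/bus/pci/slots/<name>/. A small userspace reader, assuming a slot named "5" purely as an example (actual slot names depend on the platform firmware):

/* Userspace example: print the maximum and current bus speed of one slot.
 * The slot directory name is hypothetical; list /sys/bus/pci/slots to see
 * what exists on a given machine. */
#include <stdio.h>

static void print_attr(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	print_attr("/sys/bus/pci/slots/5/max_bus_speed");
	print_attr("/sys/bus/pci/slots/5/cur_bus_speed");
	return 0;
}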

View file

@ -114,22 +114,21 @@ struct pcmcia_align_data {
unsigned long offset;
};
static resource_size_t pcmcia_align(void *align_data,
const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pcmcia_align_data *data = align_data;
resource_size_t start;
start = (res->start & ~data->mask) + data->offset;
if (start < res->start)
start += data->mask + 1;
#ifdef CONFIG_X86
if (res->flags & IORESOURCE_IO) {
if (start & 0x300)
	start = (start + 0x3ff) & ~0x3ff;
}
#endif
@ -137,9 +136,11 @@ static void pcmcia_align(void *align_data, struct resource *res,
#ifdef CONFIG_M68K
if (res->flags & IORESOURCE_IO) {
if ((res->start + size - 1) >= 1024)
start = res->end;
}
#endif
return start;
}
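To make the arithmetic concrete, here is a standalone walk-through of the alignment callback with invented numbers: mask and offset describe where usable addresses repeat within the socket's window, and on x86 candidates that land in the legacy ISA alias range (any of bits 0x300 set) are rounded up to the next 0x400 boundary:

/* Illustration of the pcmcia_align arithmetic with hypothetical values. */
#include <stdio.h>

int main(void)
{
	unsigned long res_start = 0x1000;	/* candidate from the allocator */
	unsigned long mask = 0x0fff;		/* window repeats every 0x1000 bytes */
	unsigned long offset = 0x0300;		/* usable region starts 0x300 into it */
	unsigned long start;

	start = (res_start & ~mask) + offset;
	if (start < res_start)
		start += mask + 1;

	/* x86-only ISA-alias avoidance from the code above */
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ff;

	printf("aligned start: 0x%lx\n", start);	/* prints 0x1400 */
	return 0;
}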

Some files were not shown because too many files changed in this diff.