Merge branch 'genirq-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
This merges branches irq/genirq, irq/sparseirq-v4, timers/hpet-percpu and x86/uv.

The sparseirq branch is just preliminary groundwork: no sparse IRQs are
actually implemented by this tree anymore - just the new APIs are added
while keeping the old way intact as well (the new APIs map 1:1 to
irq_desc[]). The 'real' sparse IRQ support will then be a relatively
small patch on top of this - with a v2.6.29 merge target.

* 'genirq-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (178 commits)
  genirq: improve include files
  intr_remapping: fix typo
  io_apic: make irq_mis_count available on 64-bit too
  genirq: fix name space collisions of nr_irqs in arch/*
  genirq: fix name space collision of nr_irqs in autoprobe.c
  genirq: use iterators for irq_desc loops
  proc: fixup irq iterator
  genirq: add reverse iterator for irq_desc
  x86: move ack_bad_irq() to irq.c
  x86: unify show_interrupts() and proc helpers
  x86: cleanup show_interrupts
  genirq: cleanup the sparseirq modifications
  genirq: remove artifacts from sparseirq removal
  genirq: revert dynarray
  genirq: remove irq_to_desc_alloc
  genirq: remove sparse irq code
  genirq: use inline function for irq_to_desc
  genirq: consolidate nr_irqs and for_each_irq_desc()
  x86: remove sparse irq from Kconfig
  genirq: define nr_irqs for architectures with GENERIC_HARDIRQS=n
  ...
This commit is contained in:
Commit 9301975ec2
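The "new APIs map 1:1 to irq_desc[]" note above is worth making concrete. Below is a minimal sketch of that transitional shape, assuming the accessor names that appear in the shortlog (irq_to_desc, nr_irqs, for_each_irq_desc); it illustrates the idea and is not necessarily the exact code merged here:

	#include <linux/irq.h>	/* struct irq_desc, irq_desc[], nr_irqs */

	/*
	 * Without sparse IRQs, looking up a descriptor is just a
	 * bounds-checked index into the static irq_desc[] array.
	 */
	static inline struct irq_desc *irq_to_desc(unsigned int irq)
	{
		return (irq < nr_irqs) ? irq_desc + irq : NULL;
	}

	/*
	 * Iteration goes through the same accessors instead of
	 * open-coding "for (i = 0; i < NR_IRQS; i++)", so a later
	 * sparse implementation only has to swap the lookup.
	 */
	#define for_each_irq_desc(irq, desc)				\
		for (irq = 0, desc = irq_desc; irq < nr_irqs;	\
		     irq++, desc++)

Once real sparse IRQ support lands (the v2.6.29 target mentioned above), only these accessors need to change; callers that already use them keep compiling unmodified.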
@@ -47,7 +47,7 @@ typedef struct irq_swizzle_struct
 static irq_swizzle_t *sable_lynx_irq_swizzle;

-static void sable_lynx_init_irq(int nr_irqs);
+static void sable_lynx_init_irq(int nr_of_irqs);

 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE)

@@ -530,11 +530,11 @@ sable_lynx_srm_device_interrupt(unsigned long vector)
 }

 static void __init
-sable_lynx_init_irq(int nr_irqs)
+sable_lynx_init_irq(int nr_of_irqs)
 {
 	long i;

-	for (i = 0; i < nr_irqs; ++i) {
+	for (i = 0; i < nr_of_irqs; ++i) {
 		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
 		irq_desc[i].chip = &sable_lynx_irq_type;
 	}
@@ -143,7 +143,7 @@ static struct irq_chip ixdp2x00_cpld_irq_chip = {
 	.unmask = ixdp2x00_irq_unmask
 };

-void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_irqs)
+void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_of_irqs)
 {
 	unsigned int irq;

@@ -154,7 +154,7 @@ void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigne
 	board_irq_stat = stat_reg;
 	board_irq_mask = mask_reg;
-	board_irq_count = nr_irqs;
+	board_irq_count = nr_of_irqs;

 	*board_irq_mask = 0xffffffff;
@@ -119,7 +119,7 @@ static void __init omap_irq_bank_init_one(struct omap_irq_bank *bank)

 void __init omap_init_irq(void)
 {
-	unsigned long nr_irqs = 0;
+	unsigned long nr_of_irqs = 0;
 	unsigned int nr_banks = 0;
 	int i;

@@ -133,14 +133,14 @@ void __init omap_init_irq(void)

 		omap_irq_bank_init_one(bank);

-		nr_irqs += bank->nr_irqs;
+		nr_of_irqs += bank->nr_irqs;
 		nr_banks++;
 	}

 	printk(KERN_INFO "Total of %ld interrupts on %d active controller%s\n",
-	       nr_irqs, nr_banks, nr_banks > 1 ? "s" : "");
+	       nr_of_irqs, nr_banks, nr_banks > 1 ? "s" : "");

-	for (i = 0; i < nr_irqs; i++) {
+	for (i = 0; i < nr_of_irqs; i++) {
 		set_irq_chip(i, &omap_irq_chip);
 		set_irq_handler(i, handle_level_irq);
 		set_irq_flags(i, IRQF_VALID);
@@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev)
 	struct eic *eic;
 	struct resource *regs;
 	unsigned int i;
-	unsigned int nr_irqs;
+	unsigned int nr_of_irqs;
 	unsigned int int_irq;
 	int ret;
 	u32 pattern;

@@ -224,7 +224,7 @@ static int __init eic_probe(struct platform_device *pdev)
 	eic_writel(eic, IDR, ~0UL);
 	eic_writel(eic, MODE, ~0UL);
 	pattern = eic_readl(eic, MODE);
-	nr_irqs = fls(pattern);
+	nr_of_irqs = fls(pattern);

 	/* Trigger on low level unless overridden by driver */
 	eic_writel(eic, EDGE, 0UL);

@@ -232,7 +232,7 @@ static int __init eic_probe(struct platform_device *pdev)

 	eic->chip = &eic_chip;

-	for (i = 0; i < nr_irqs; i++) {
+	for (i = 0; i < nr_of_irqs; i++) {
 		set_irq_chip_and_handler(eic->first_irq + i, &eic_chip,
 					 handle_level_irq);
 		set_irq_chip_data(eic->first_irq + i, eic);

@@ -256,7 +256,7 @@ static int __init eic_probe(struct platform_device *pdev)
 		 eic->regs, int_irq);
 	dev_info(&pdev->dev,
 		 "Handling %u external IRQs, starting with IRQ %u\n",
-		 nr_irqs, eic->first_irq);
+		 nr_of_irqs, eic->first_irq);

 	return 0;
@@ -1242,14 +1242,6 @@ config EFI
 	  resultant kernel should continue to boot on existing non-EFI
 	  platforms.

-config IRQBALANCE
-	def_bool y
-	prompt "Enable kernel irq balancing"
-	depends on X86_32 && SMP && X86_IO_APIC
-	help
-	  The default yes will allow the kernel to do irq load balancing.
-	  Saying no will keep the kernel from doing irq load balancing.
-
 config SECCOMP
 	def_bool y
 	prompt "Enable seccomp to safely compute untrusted bytecode"
@@ -287,7 +287,6 @@ CONFIG_MTRR=y
 # CONFIG_MTRR_SANITIZER is not set
 CONFIG_X86_PAT=y
 CONFIG_EFI=y
-# CONFIG_IRQBALANCE is not set
 CONFIG_SECCOMP=y
 # CONFIG_HZ_100 is not set
 # CONFIG_HZ_250 is not set
@@ -23,7 +23,7 @@ CFLAGS_hpet.o := $(nostackp)
 CFLAGS_tsc.o := $(nostackp)

 obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
-obj-y += traps.o irq_$(BITS).o dumpstack_$(BITS).o
+obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y += time_$(BITS).o ioport.o ldt.o
 obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
 obj-$(CONFIG_X86_VISWS) += visws_quirks.o

@@ -60,8 +60,8 @@ obj-$(CONFIG_X86_32_SMP) += smpcommon.o
 obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE) += mpparse.o
-obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi.o
-obj-$(CONFIG_X86_IO_APIC) += io_apic_$(BITS).o
+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
+obj-$(CONFIG_X86_IO_APIC) += io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o

@@ -108,7 +108,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
 	obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
-	obj-y += bios_uv.o
+	obj-y += bios_uv.o uv_irq.o uv_sysfs.o
 	obj-y += genx2apic_cluster.o
 	obj-y += genx2apic_phys.o
 	obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
@@ -1256,7 +1256,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)

 	count =
 	    acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
-				  NR_IRQ_VECTORS);
+				  nr_irqs);
 	if (count < 0) {
 		printk(KERN_ERR PREFIX
 		       "Error parsing interrupt source overrides entry\n");

@@ -1276,7 +1276,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)

 	count =
 	    acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
-				  NR_IRQ_VECTORS);
+				  nr_irqs);
 	if (count < 0) {
 		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
 		/* TBD: Cleanup to allow fallback to MPS */
@@ -23,11 +23,13 @@
 #include <linux/mc146818rtc.h>
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
+#include <linux/ioport.h>
 #include <linux/cpu.h>
 #include <linux/clockchips.h>
 #include <linux/acpi_pmtmr.h>
 #include <linux/module.h>
 #include <linux/dmi.h>
+#include <linux/dmar.h>

 #include <asm/atomic.h>
 #include <asm/smp.h>

@@ -36,8 +38,14 @@
 #include <asm/desc.h>
 #include <asm/arch_hooks.h>
 #include <asm/hpet.h>
+#include <asm/pgalloc.h>
 #include <asm/i8253.h>
 #include <asm/nmi.h>
+#include <asm/idle.h>
+#include <asm/proto.h>
+#include <asm/timex.h>
+#include <asm/apic.h>
+#include <asm/i8259.h>

 #include <mach_apic.h>
 #include <mach_apicdef.h>

@@ -50,16 +58,58 @@
 # error SPURIOUS_APIC_VECTOR definition error
 #endif

-unsigned long mp_lapic_addr;
-int disable_apic;
-
+#ifdef CONFIG_X86_32
+/*
+ * Knob to control our willingness to enable the local APIC.
+ *
+ * +1=force-enable
+ */
+static int force_enable_local_apic;
 /*
  * APIC command line parameters
  */
+static int __init parse_lapic(char *arg)
+{
+	force_enable_local_apic = 1;
+	return 0;
+}
+early_param("lapic", parse_lapic);
+/* Local APIC was disabled by the BIOS and enabled by the kernel */
+static int enabled_via_apicbase;
+
+#endif
+
+#ifdef CONFIG_X86_64
+static int apic_calibrate_pmtmr __initdata;
+static __init int setup_apicpmtimer(char *s)
+{
+	apic_calibrate_pmtmr = 1;
+	notsc_setup(NULL);
+	return 0;
+}
+__setup("apicpmtimer", setup_apicpmtimer);
+#endif
+
+#ifdef CONFIG_X86_64
+#define HAVE_X2APIC
+#endif
+
+#ifdef HAVE_X2APIC
+int x2apic;
+/* x2apic enabled before OS handover */
+int x2apic_preenabled;
+int disable_x2apic;
+static __init int setup_nox2apic(char *str)
+{
+	disable_x2apic = 1;
+	setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+	return 0;
+}
+early_param("nox2apic", setup_nox2apic);
+#endif
+
+unsigned long mp_lapic_addr;
+int disable_apic;
 /* Disable local APIC timer from the kernel commandline or via dmi quirk */
 static int disable_apic_timer __cpuinitdata;
 /* Local APIC timer works in C2 */

@@ -110,9 +160,6 @@ static struct clock_event_device lapic_clockevent = {
 };
 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);

-/* Local APIC was disabled by the BIOS and enabled by the kernel */
-static int enabled_via_apicbase;
-
 static unsigned long apic_phys;

 /*

@@ -202,6 +249,42 @@ static struct apic_ops xapic_ops = {
 struct apic_ops __read_mostly *apic_ops = &xapic_ops;
 EXPORT_SYMBOL_GPL(apic_ops);

+#ifdef HAVE_X2APIC
+static void x2apic_wait_icr_idle(void)
+{
+	/* no need to wait for icr idle in x2apic */
+	return;
+}
+
+static u32 safe_x2apic_wait_icr_idle(void)
+{
+	/* no need to wait for icr idle in x2apic */
+	return 0;
+}
+
+void x2apic_icr_write(u32 low, u32 id)
+{
+	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
+}
+
+u64 x2apic_icr_read(void)
+{
+	unsigned long val;
+
+	rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
+	return val;
+}
+
+static struct apic_ops x2apic_ops = {
+	.read = native_apic_msr_read,
+	.write = native_apic_msr_write,
+	.icr_read = x2apic_icr_read,
+	.icr_write = x2apic_icr_write,
+	.wait_icr_idle = x2apic_wait_icr_idle,
+	.safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
+};
+#endif
+
 /**
  * enable_NMI_through_LVT0 - enable NMI through local vector table 0
  */

@@ -219,6 +302,7 @@ void __cpuinit enable_NMI_through_LVT0(void)
 	apic_write(APIC_LVT0, v);
 }

+#ifdef CONFIG_X86_32
 /**
  * get_physical_broadcast - Get number of physical broadcast IDs
  */

@@ -226,6 +310,7 @@ int get_physical_broadcast(void)
 {
 	return modern_apic() ? 0xff : 0xf;
 }
+#endif

 /**
  * lapic_get_maxlvt - get the maximum number of local vector table entries

@@ -247,11 +332,7 @@ int lapic_get_maxlvt(void)
  */

 /* Clock divisor */
-#ifdef CONFG_X86_64
-#define APIC_DIVISOR 1
-#else
 #define APIC_DIVISOR 16
-#endif

 /*
  * This function sets up the local APIC timer, with a timeout of

@@ -383,7 +464,7 @@ static void lapic_timer_broadcast(cpumask_t mask)
  * Setup the local APIC timer for this CPU. Copy the initilized values
  * of the boot CPU and register the clock event in the framework.
  */
-static void __devinit setup_APIC_timer(void)
+static void __cpuinit setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);

@@ -453,14 +534,51 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
 	}
 }

+static int __init calibrate_by_pmtimer(long deltapm, long *delta)
+{
+	const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
+	const long pm_thresh = pm_100ms / 100;
+	unsigned long mult;
+	u64 res;
+
+#ifndef CONFIG_X86_PM_TIMER
+	return -1;
+#endif
+
+	apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
+
+	/* Check, if the PM timer is available */
+	if (!deltapm)
+		return -1;
+
+	mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
+
+	if (deltapm > (pm_100ms - pm_thresh) &&
+	    deltapm < (pm_100ms + pm_thresh)) {
+		apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
+	} else {
+		res = (((u64)deltapm) * mult) >> 22;
+		do_div(res, 1000000);
+		printk(KERN_WARNING "APIC calibration not consistent "
+			"with PM Timer: %ldms instead of 100ms\n",
+			(long)res);
+		/* Correct the lapic counter value */
+		res = (((u64)(*delta)) * pm_100ms);
+		do_div(res, deltapm);
+		printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
+			"%lu (%ld)\n", (unsigned long)res, *delta);
+		*delta = (long)res;
+	}
+
+	return 0;
+}
+
 static int __init calibrate_APIC_clock(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
-	const long pm_100ms = PMTMR_TICKS_PER_SEC/10;
-	const long pm_thresh = pm_100ms/100;
 	void (*real_handler)(struct clock_event_device *dev);
 	unsigned long deltaj;
-	long delta, deltapm;
+	long delta;
 	int pm_referenced = 0;

 	local_irq_disable();

@@ -470,10 +588,10 @@ static int __init calibrate_APIC_clock(void)
 	global_clock_event->event_handler = lapic_cal_handler;

 	/*
-	 * Setup the APIC counter to 1e9. There is no way the lapic
+	 * Setup the APIC counter to maximum. There is no way the lapic
 	 * can underflow in the 100ms detection time frame
 	 */
-	__setup_APIC_LVTT(1000000000, 0, 0);
+	__setup_APIC_LVTT(0xffffffff, 0, 0);

 	/* Let the interrupts run */
 	local_irq_enable();

@@ -490,34 +608,9 @@ static int __init calibrate_APIC_clock(void)
 	delta = lapic_cal_t1 - lapic_cal_t2;
 	apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);

-	/* Check, if the PM timer is available */
-	deltapm = lapic_cal_pm2 - lapic_cal_pm1;
-	apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
-
-	if (deltapm) {
-		unsigned long mult;
-		u64 res;
-
-		mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
-
-		if (deltapm > (pm_100ms - pm_thresh) &&
-		    deltapm < (pm_100ms + pm_thresh)) {
-			apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
-		} else {
-			res = (((u64) deltapm) * mult) >> 22;
-			do_div(res, 1000000);
-			printk(KERN_WARNING "APIC calibration not consistent "
-				"with PM Timer: %ldms instead of 100ms\n",
-				(long)res);
-			/* Correct the lapic counter value */
-			res = (((u64) delta) * pm_100ms);
-			do_div(res, deltapm);
-			printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
-				"%lu (%ld)\n", (unsigned long) res, delta);
-			delta = (long) res;
-		}
-		pm_referenced = 1;
-	}
+	/* we trust the PM based calibration if possible */
+	pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
+					&delta);

 	/* Calculate the scaled math multiplication factor */
 	lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,

@@ -559,7 +652,10 @@ static int __init calibrate_APIC_clock(void)

 	levt->features &= ~CLOCK_EVT_FEAT_DUMMY;

-	/* We trust the pm timer based calibration */
+	/*
+	 * PM timer calibration failed or not turned on
+	 * so lets try APIC timer based calibration
+	 */
 	if (!pm_referenced) {
 		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");

@@ -652,7 +748,7 @@ void __init setup_boot_APIC_clock(void)
 	setup_APIC_timer();
 }

-void __devinit setup_secondary_APIC_clock(void)
+void __cpuinit setup_secondary_APIC_clock(void)
 {
 	setup_APIC_timer();
 }

@@ -718,6 +814,9 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
 	 * Besides, if we don't timer interrupts ignore the global
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
+#ifdef CONFIG_X86_64
+	exit_idle();
+#endif
 	irq_enter();
 	local_apic_timer_interrupt();
 	irq_exit();

@@ -991,40 +1090,43 @@ void __init init_bsp_APIC(void)

 static void __cpuinit lapic_setup_esr(void)
 {
-	unsigned long oldvalue, value, maxlvt;
-	if (lapic_is_integrated() && !esr_disable) {
-		/* !82489DX */
-		maxlvt = lapic_get_maxlvt();
-		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
-			apic_write(APIC_ESR, 0);
-		oldvalue = apic_read(APIC_ESR);
+	unsigned int oldvalue, value, maxlvt;
+
+	if (!lapic_is_integrated()) {
+		printk(KERN_INFO "No ESR for 82489DX.\n");
+		return;
+	}

-		/* enables sending errors */
-		value = ERROR_APIC_VECTOR;
-		apic_write(APIC_LVTERR, value);
-		/*
-		 * spec says clear errors after enabling vector.
-		 */
-		if (maxlvt > 3)
-			apic_write(APIC_ESR, 0);
-		value = apic_read(APIC_ESR);
-		if (value != oldvalue)
-			apic_printk(APIC_VERBOSE, "ESR value before enabling "
-				"vector: 0x%08lx  after: 0x%08lx\n",
-				oldvalue, value);
-	} else {
-		if (esr_disable) {
-			/*
-			 * Something untraceable is creating bad interrupts on
-			 * secondary quads ... for the moment, just leave the
-			 * ESR disabled - we can't do anything useful with the
-			 * errors anyway - mbligh
-			 */
-			printk(KERN_INFO "Leaving ESR disabled.\n");
-			return;
-		}
+	if (esr_disable) {
+		/*
+		 * Something untraceable is creating bad interrupts on
+		 * secondary quads ... for the moment, just leave the
+		 * ESR disabled - we can't do anything useful with the
+		 * errors anyway - mbligh
+		 */
+		printk(KERN_INFO "Leaving ESR disabled.\n");
+		return;
+	}
+
+	maxlvt = lapic_get_maxlvt();
+	if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
+		apic_write(APIC_ESR, 0);
+	oldvalue = apic_read(APIC_ESR);
+
+	/* enables sending errors */
+	value = ERROR_APIC_VECTOR;
+	apic_write(APIC_LVTERR, value);
+
+	/*
+	 * spec says clear errors after enabling vector.
+	 */
+	if (maxlvt > 3)
+		apic_write(APIC_ESR, 0);
+	value = apic_read(APIC_ESR);
+	if (value != oldvalue)
+		apic_printk(APIC_VERBOSE, "ESR value before enabling "
+			"vector: 0x%08x  after: 0x%08x\n",
+			oldvalue, value);
 }

@@ -1033,24 +1135,27 @@ static void __cpuinit lapic_setup_esr(void)
  */
 void __cpuinit setup_local_APIC(void)
 {
-	unsigned long value, integrated;
+	unsigned int value;
 	int i, j;

+#ifdef CONFIG_X86_32
 	/* Pound the ESR really hard over the head with a big hammer - mbligh */
-	if (esr_disable) {
+	if (lapic_is_integrated() && esr_disable) {
 		apic_write(APIC_ESR, 0);
 		apic_write(APIC_ESR, 0);
 		apic_write(APIC_ESR, 0);
 		apic_write(APIC_ESR, 0);
 	}
+#endif

-	integrated = lapic_is_integrated();
+	preempt_disable();

 	/*
 	 * Double-check whether this APIC is really registered.
 	 * This is meaningless in clustered apic mode, so we skip it.
 	 */
 	if (!apic_id_registered())
-		WARN_ON_ONCE(1);
+		BUG();

 	/*
 	 * Intel recommends to set DFR, LDR and TPR before enabling

@@ -1096,6 +1201,7 @@ void __cpuinit setup_local_APIC(void)
 	 */
 	value |= APIC_SPIV_APIC_ENABLED;

+#ifdef CONFIG_X86_32
 	/*
 	 * Some unknown Intel IO/APIC (or APIC) errata is biting us with
 	 * certain networking cards. If high frequency interrupts are

@@ -1116,8 +1222,13 @@ void __cpuinit setup_local_APIC(void)
 	 * See also the comment in end_level_ioapic_irq().  --macro
 	 */

-	/* Enable focus processor (bit==0) */
+	/*
+	 * - enable focus processor (bit==0)
+	 * - 64bit mode always use processor focus
+	 *   so no need to set it
+	 */
 	value &= ~APIC_SPIV_FOCUS_DISABLED;
+#endif

 	/*
 	 * Set spurious IRQ vector

@@ -1154,9 +1265,11 @@ void __cpuinit setup_local_APIC(void)
 		value = APIC_DM_NMI;
 	else
 		value = APIC_DM_NMI | APIC_LVT_MASKED;
-	if (!integrated)		/* 82489DX */
+	if (!lapic_is_integrated())	/* 82489DX */
 		value |= APIC_LVT_LEVEL_TRIGGER;
 	apic_write(APIC_LVT1, value);
+
+	preempt_enable();
 }

 void __cpuinit end_local_APIC_setup(void)

@@ -1177,6 +1290,153 @@ void __cpuinit end_local_APIC_setup(void)
 	apic_pm_activate();
 }

+#ifdef HAVE_X2APIC
+void check_x2apic(void)
+{
+	int msr, msr2;
+
+	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+
+	if (msr & X2APIC_ENABLE) {
+		printk("x2apic enabled by BIOS, switching to x2apic ops\n");
+		x2apic_preenabled = x2apic = 1;
+		apic_ops = &x2apic_ops;
+	}
+}
+
+void enable_x2apic(void)
+{
+	int msr, msr2;
+
+	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+	if (!(msr & X2APIC_ENABLE)) {
+		printk("Enabling x2apic\n");
+		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
+	}
+}
+
+void enable_IR_x2apic(void)
+{
+#ifdef CONFIG_INTR_REMAP
+	int ret;
+	unsigned long flags;
+
+	if (!cpu_has_x2apic)
+		return;
+
+	if (!x2apic_preenabled && disable_x2apic) {
+		printk(KERN_INFO
+		       "Skipped enabling x2apic and Interrupt-remapping "
+		       "because of nox2apic\n");
+		return;
+	}
+
+	if (x2apic_preenabled && disable_x2apic)
+		panic("Bios already enabled x2apic, can't enforce nox2apic");
+
+	if (!x2apic_preenabled && skip_ioapic_setup) {
+		printk(KERN_INFO
+		       "Skipped enabling x2apic and Interrupt-remapping "
+		       "because of skipping io-apic setup\n");
+		return;
+	}
+
+	ret = dmar_table_init();
+	if (ret) {
+		printk(KERN_INFO
+		       "dmar_table_init() failed with %d:\n", ret);
+
+		if (x2apic_preenabled)
+			panic("x2apic enabled by bios. But IR enabling failed");
+		else
+			printk(KERN_INFO
+			       "Not enabling x2apic,Intr-remapping\n");
+		return;
+	}
+
+	local_irq_save(flags);
+	mask_8259A();
+
+	ret = save_mask_IO_APIC_setup();
+	if (ret) {
+		printk(KERN_INFO "Saving IO-APIC state failed: %d\n", ret);
+		goto end;
+	}
+
+	ret = enable_intr_remapping(1);
+
+	if (ret && x2apic_preenabled) {
+		local_irq_restore(flags);
+		panic("x2apic enabled by bios. But IR enabling failed");
+	}
+
+	if (ret)
+		goto end_restore;
+
+	if (!x2apic) {
+		x2apic = 1;
+		apic_ops = &x2apic_ops;
+		enable_x2apic();
+	}
+
+end_restore:
+	if (ret)
+		/*
+		 * IR enabling failed
+		 */
+		restore_IO_APIC_setup();
+	else
+		reinit_intr_remapped_IO_APIC(x2apic_preenabled);
+
+end:
+	unmask_8259A();
+	local_irq_restore(flags);
+
+	if (!ret) {
+		if (!x2apic_preenabled)
+			printk(KERN_INFO
+			       "Enabled x2apic and interrupt-remapping\n");
+		else
+			printk(KERN_INFO
+			       "Enabled Interrupt-remapping\n");
+	} else
+		printk(KERN_ERR
+		       "Failed to enable Interrupt-remapping and x2apic\n");
+#else
+	if (!cpu_has_x2apic)
+		return;
+
+	if (x2apic_preenabled)
+		panic("x2apic enabled prior OS handover,"
+		      " enable CONFIG_INTR_REMAP");
+
+	printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
+	       " and x2apic\n");
+#endif
+
+	return;
+}
+#endif /* HAVE_X2APIC */
+
+#ifdef CONFIG_X86_64
+/*
+ * Detect and enable local APICs on non-SMP boards.
+ * Original code written by Keir Fraser.
+ * On AMD64 we trust the BIOS - if it says no APIC it is likely
+ * not correctly set up (usually the APIC timer won't work etc.)
+ */
+static int __init detect_init_APIC(void)
+{
+	if (!cpu_has_apic) {
+		printk(KERN_INFO "No local APIC present\n");
+		return -1;
+	}
+
+	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+	boot_cpu_physical_apicid = 0;
+	return 0;
+}
+#else
 /*
  * Detect and initialize APIC
  */

@@ -1255,12 +1515,46 @@ no_apic:
 	printk(KERN_INFO "No local APIC present or hardware disabled\n");
 	return -1;
 }
 #endif

+#ifdef CONFIG_X86_64
+void __init early_init_lapic_mapping(void)
+{
+	unsigned long phys_addr;
+
+	/*
+	 * If no local APIC can be found then go out
+	 * : it means there is no mpatable and MADT
+	 */
+	if (!smp_found_config)
+		return;
+
+	phys_addr = mp_lapic_addr;
+
+	set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
+	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
+		    APIC_BASE, phys_addr);
+
+	/*
+	 * Fetch the APIC ID of the BSP in case we have a
+	 * default configuration (or the MP table is broken).
+	 */
+	boot_cpu_physical_apicid = read_apic_id();
+}
+#endif
+
 /**
  * init_apic_mappings - initialize APIC mappings
  */
 void __init init_apic_mappings(void)
 {
+#ifdef HAVE_X2APIC
+	if (x2apic) {
+		boot_cpu_physical_apicid = read_apic_id();
+		return;
+	}
+#endif
+
 	/*
 	 * If no local APIC can be found then set up a fake all
 	 * zeroes page to simulate the local APIC and another

@@ -1273,8 +1567,8 @@ void __init init_apic_mappings(void)
 	apic_phys = mp_lapic_addr;

 	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
-	printk(KERN_DEBUG "mapped APIC to %08lx (%08lx)\n", APIC_BASE,
-	       apic_phys);
+	apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
+		    APIC_BASE, apic_phys);

 	/*
 	 * Fetch the APIC ID of the BSP in case we have a

@@ -1282,18 +1576,27 @@ void __init init_apic_mappings(void)
 	 */
 	if (boot_cpu_physical_apicid == -1U)
 		boot_cpu_physical_apicid = read_apic_id();

 }

 /*
  * This initializes the IO-APIC and APIC hardware if this is
  * a UP kernel.
  */
+int apic_version[MAX_APICS];
+
 int __init APIC_init_uniprocessor(void)
 {
+#ifdef CONFIG_X86_64
+	if (disable_apic) {
+		printk(KERN_INFO "Apic disabled\n");
+		return -1;
+	}
+	if (!cpu_has_apic) {
+		disable_apic = 1;
+		printk(KERN_INFO "Apic disabled by BIOS\n");
+		return -1;
+	}
+#else
 	if (!smp_found_config && !cpu_has_apic)
 		return -1;

@@ -1302,39 +1605,68 @@ int __init APIC_init_uniprocessor(void)
 	 */
 	if (!cpu_has_apic &&
 	    APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
-		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
+		printk(KERN_ERR "BIOS bug, local APIC 0x%x not detected!...\n",
		       boot_cpu_physical_apicid);
 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
 		return -1;
 	}
 #endif

+#ifdef HAVE_X2APIC
+	enable_IR_x2apic();
+#endif
+#ifdef CONFIG_X86_64
+	setup_apic_routing();
+#endif
+
 	verify_local_APIC();

 	connect_bsp_APIC();

+#ifdef CONFIG_X86_64
+	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
+#else
 	/*
 	 * Hack: In case of kdump, after a crash, kernel might be booting
 	 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
 	 * might be zero if read from MP tables. Get it from LAPIC.
 	 */
-#ifdef CONFIG_CRASH_DUMP
+# ifdef CONFIG_CRASH_DUMP
 	boot_cpu_physical_apicid = read_apic_id();
+# endif
 #endif
 	physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);

 	setup_local_APIC();

+#ifdef CONFIG_X86_64
+	/*
+	 * Now enable IO-APICs, actually call clear_IO_APIC
+	 * We need clear_IO_APIC before enabling vector on BP
+	 */
+	if (!skip_ioapic_setup && nr_ioapics)
+		enable_IO_APIC();
+#endif
+
 #ifdef CONFIG_X86_IO_APIC
 	if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
 #endif
 		localise_nmi_watchdog();
 	end_local_APIC_setup();

 #ifdef CONFIG_X86_IO_APIC
-	if (smp_found_config)
-		if (!skip_ioapic_setup && nr_ioapics)
-			setup_IO_APIC();
+	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
+		setup_IO_APIC();
+# ifdef CONFIG_X86_64
+	else
+		nr_ioapics = 0;
+# endif
 #endif

+#ifdef CONFIG_X86_64
+	setup_boot_APIC_clock();
+	check_nmi_watchdog();
+#else
 	setup_boot_clock();
+#endif

 	return 0;
 }

@@ -1348,8 +1680,11 @@ int __init APIC_init_uniprocessor(void)
  */
 void smp_spurious_interrupt(struct pt_regs *regs)
 {
-	unsigned long v;
+	u32 v;

+#ifdef CONFIG_X86_64
+	exit_idle();
+#endif
 	irq_enter();
 	/*
 	 * Check if this really is a spurious interrupt and ACK it

@@ -1360,10 +1695,14 @@ void smp_spurious_interrupt(struct pt_regs *regs)
 	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
 		ack_APIC_irq();

+#ifdef CONFIG_X86_64
+	add_pda(irq_spurious_count, 1);
+#else
 	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
 	printk(KERN_INFO "spurious APIC interrupt on CPU#%d, "
	       "should never happen.\n", smp_processor_id());
 	__get_cpu_var(irq_stat).irq_spurious_count++;
+#endif
 	irq_exit();
 }

@@ -1372,8 +1711,11 @@ void smp_spurious_interrupt(struct pt_regs *regs)
  */
 void smp_error_interrupt(struct pt_regs *regs)
 {
-	unsigned long v, v1;
+	u32 v, v1;

+#ifdef CONFIG_X86_64
+	exit_idle();
+#endif
 	irq_enter();
 	/* First tickle the hardware, only then report what went on. -- REW */
 	v = apic_read(APIC_ESR);

@@ -1392,7 +1734,7 @@ void smp_error_interrupt(struct pt_regs *regs)
 	   6: Received illegal vector
 	   7: Illegal register address
 	*/
-	printk(KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
+	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
 		smp_processor_id(), v , v1);
 	irq_exit();
 }

@@ -1565,6 +1907,13 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	cpu_set(cpu, cpu_present_map);
 }

+#ifdef CONFIG_X86_64
+int hard_smp_processor_id(void)
+{
+	return read_apic_id();
+}
+#endif
+
 /*
  * Power management
  */

@@ -1640,7 +1989,7 @@ static int lapic_resume(struct sys_device *dev)

 	local_irq_save(flags);

-#ifdef CONFIG_X86_64
+#ifdef HAVE_X2APIC
 	if (x2apic)
 		enable_x2apic();
 	else

@@ -1702,7 +2051,7 @@ static struct sys_device device_lapic = {
 	.cls = &lapic_sysclass,
 };

-static void __devinit apic_pm_activate(void)
+static void __cpuinit apic_pm_activate(void)
 {
 	apic_pm_state.active = 1;
 }

@@ -1728,16 +2077,87 @@ static void apic_pm_activate(void) { }

 #endif	/* CONFIG_PM */

+#ifdef CONFIG_X86_64
+/*
+ * apic_is_clustered_box() -- Check if we can expect good TSC
+ *
+ * Thus far, the major user of this is IBM's Summit2 series:
+ *
+ * Clustered boxes may have unsynced TSC problems if they are
+ * multi-chassis. Use available data to take a good guess.
+ * If in doubt, go HPET.
+ */
+__cpuinit int apic_is_clustered_box(void)
+{
+	int i, clusters, zeros;
+	unsigned id;
+	u16 *bios_cpu_apicid;
+	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
+
+	/*
+	 * there is not this kind of box with AMD CPU yet.
+	 * Some AMD box with quadcore cpu and 8 sockets apicid
+	 * will be [4, 0x23] or [8, 0x27] could be thought to
+	 * vsmp box still need checking...
+	 */
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
+		return 0;
+
+	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
+	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
+
+	for (i = 0; i < NR_CPUS; i++) {
+		/* are we being called early in kernel startup? */
+		if (bios_cpu_apicid) {
+			id = bios_cpu_apicid[i];
+		}
+		else if (i < nr_cpu_ids) {
+			if (cpu_present(i))
+				id = per_cpu(x86_bios_cpu_apicid, i);
+			else
+				continue;
+		}
+		else
+			break;
+
+		if (id != BAD_APICID)
+			__set_bit(APIC_CLUSTERID(id), clustermap);
+	}
+
+	/* Problem:  Partially populated chassis may not have CPUs in some of
+	 * the APIC clusters they have been allocated.  Only present CPUs have
+	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
+	 * Since clusters are allocated sequentially, count zeros only if
+	 * they are bounded by ones.
+	 */
+	clusters = 0;
+	zeros = 0;
+	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
+		if (test_bit(i, clustermap)) {
+			clusters += 1 + zeros;
+			zeros = 0;
+		} else
+			++zeros;
+	}
+
+	/* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
+	 * not guaranteed to be synced between boards
+	 */
+	if (is_vsmp_box() && clusters > 1)
+		return 1;
+
+	/*
+	 * If clusters > 2, then should be multi-chassis.
+	 * May have to revisit this when multi-core + hyperthreaded CPUs come
+	 * out, but AFAIK this will work even for them.
+	 */
+	return (clusters > 2);
+}
+#endif
+
 /*
  * APIC command line parameters
  */
-static int __init parse_lapic(char *arg)
-{
-	force_enable_local_apic = 1;
-	return 0;
-}
-early_param("lapic", parse_lapic);
-
 static int __init setup_disableapic(char *arg)
 {
 	disable_apic = 1;

@@ -1779,7 +2199,6 @@ static int __init apic_set_verbosity(char *arg)
 	if (!arg)  {
 #ifdef CONFIG_X86_64
 		skip_ioapic_setup = 0;
-		ioapic_force = 1;
 		return 0;
 #endif
 		return -EINVAL;
(File diff suppressed because it is too large)
@@ -1,8 +1,6 @@
 /*
  * BIOS run time interface routines.
  *
- * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or

@@ -16,33 +14,128 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) Russ Anderson
  */

 #include <linux/efi.h>
 #include <asm/efi.h>
 #include <linux/io.h>
 #include <asm/uv/bios.h>
+#include <asm/uv/uv_hub.h>

-const char *
-x86_bios_strerror(long status)
+struct uv_systab uv_systab;
+
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
 {
-	const char *str;
-	switch (status) {
-	case  0: str = "Call completed without error"; break;
-	case -1: str = "Not implemented"; break;
-	case -2: str = "Invalid argument"; break;
-	case -3: str = "Call completed with error"; break;
-	default: str = "Unknown BIOS status code"; break;
-	}
-	return str;
+	struct uv_systab *tab = &uv_systab;
+
+	if (!tab->function)
+		/*
+		 * BIOS does not support UV systab
+		 */
+		return BIOS_STATUS_UNIMPLEMENTED;
+
+	return efi_call6((void *)__va(tab->function),
+			(u64)which, a1, a2, a3, a4, a5);
 }

-long
-x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
-		   unsigned long *drift_info)
+s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+					u64 a4, u64 a5)
+{
+	unsigned long bios_flags;
+	s64 ret;
+
+	local_irq_save(bios_flags);
+	ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+	local_irq_restore(bios_flags);
+
+	return ret;
+}
+
+s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+					u64 a4, u64 a5)
+{
+	s64 ret;
+
+	preempt_disable();
+	ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+	preempt_enable();
+
+	return ret;
+}
+
+
+long sn_partition_id;
+EXPORT_SYMBOL_GPL(sn_partition_id);
+long uv_coherency_id;
+EXPORT_SYMBOL_GPL(uv_coherency_id);
+long uv_region_size;
+EXPORT_SYMBOL_GPL(uv_region_size);
+int uv_type;
+
+
+s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
+		long *region)
+{
+	s64 ret;
+	u64 v0, v1;
+	union partition_info_u part;
+
+	ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
+				(u64)(&v0), (u64)(&v1), 0, 0);
+	if (ret != BIOS_STATUS_SUCCESS)
+		return ret;
+
+	part.val = v0;
+	if (uvtype)
+		*uvtype = part.hub_version;
+	if (partid)
+		*partid = part.partition_id;
+	if (coher)
+		*coher = part.coherence_id;
+	if (region)
+		*region = part.region_size;
+	return ret;
+}
+
+
+s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
+{
+	return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
+			   (u64)ticks_per_second, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_freq_base);
+
+
+#ifdef CONFIG_EFI
+void uv_bios_init(void)
 {
-	struct uv_bios_retval isrv;
+	struct uv_systab *tab;
+
+	if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
+	    (efi.uv_systab == (unsigned long)NULL)) {
+		printk(KERN_CRIT "No EFI UV System Table.\n");
+		uv_systab.function = (unsigned long)NULL;
+		return;
+	}

-	BIOS_CALL(isrv, BIOS_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
-	*ticks_per_second = isrv.v0;
-	*drift_info = isrv.v1;
-	return isrv.status;
+	tab = (struct uv_systab *)ioremap(efi.uv_systab,
+					sizeof(struct uv_systab));
+	if (strncmp(tab->signature, "UVST", 4) != 0)
+		printk(KERN_ERR "bad signature in UV system table!");
+
+	/*
+	 * Copy table to permanent spot for later use.
+	 */
+	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
+	iounmap(tab);
+
+	printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
 }
-EXPORT_SYMBOL_GPL(x86_bios_freq_base);
+#else	/* !CONFIG_EFI */
+
+void uv_bios_init(void) { }
+#endif
@@ -249,7 +249,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 	}
 	numa_set_node(cpu, node);

-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
@@ -174,7 +174,7 @@ static void __cpuinit srat_detect_node(void)
 		node = first_node(node_online_map);
 	numa_set_node(cpu, node);

-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
@@ -366,6 +366,10 @@ void __init efi_init(void)
 			   SMBIOS_TABLE_GUID)) {
 			efi.smbios = config_tables[i].table;
 			printk(" SMBIOS=0x%lx ", config_tables[i].table);
+		} else if (!efi_guidcmp(config_tables[i].guid,
+			   UV_SYSTEM_TABLE_GUID)) {
+			efi.uv_systab = config_tables[i].table;
+			printk(" UVsystab=0x%lx ", config_tables[i].table);
 		} else if (!efi_guidcmp(config_tables[i].guid,
 			   HCDP_TABLE_GUID)) {
 			efi.hcdp = config_tables[i].table;
@@ -629,7 +629,7 @@ ENTRY(interrupt)
 ENTRY(irq_entries_start)
 	RING0_INT_FRAME
 vector=0
-.rept NR_IRQS
+.rept NR_VECTORS
 	ALIGN
  .if vector
 	CFI_ADJUST_CFA_OFFSET -4
@@ -179,8 +179,10 @@ static int __init physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	 * is an example).
 	 */
 	if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
-		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL))
+		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
+		printk(KERN_DEBUG "system APIC only can use physical flat");
 		return 1;
+	}
 #endif

 	return 0;
@@ -341,12 +341,12 @@ static __init void map_mmioh_high(int max_pnode)

 static __init void uv_rtc_init(void)
 {
-	long status, ticks_per_sec, drift;
+	long status;
+	u64 ticks_per_sec;

-	status =
-	    x86_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
-					&drift);
-	if (status != 0 || ticks_per_sec < 100000) {
+	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
+					&ticks_per_sec);
+	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
 		printk(KERN_WARNING
 			"unable to determine platform RTC clock frequency, "
 			"guessing.\n");

@@ -356,7 +356,22 @@ static __init void uv_rtc_init(void)
 		sn_rtc_cycles_per_second = ticks_per_sec;
 }

-static bool uv_system_inited;
+/*
+ * Called on each cpu to initialize the per_cpu UV data area.
+ * 	ZZZ hotplug not supported yet
+ */
+void __cpuinit uv_cpu_init(void)
+{
+	/* CPU 0 initilization will be done via uv_system_init. */
+	if (!uv_blade_info)
+		return;
+
+	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
+
+	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
+		set_x2apic_extra_bits(uv_hub_info->pnode);
+}
+

 void __init uv_system_init(void)
 {

@@ -412,6 +427,9 @@ void __init uv_system_init(void)
 	gnode_upper = (((unsigned long)node_id.s.node_id) &
 		       ~((1 << n_val) - 1)) << m_val;

+	uv_bios_init();
+	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
+			    &uv_coherency_id, &uv_region_size);
 	uv_rtc_init();

 	for_each_present_cpu(cpu) {

@@ -433,7 +451,7 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
-		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
+		uv_cpu_hub_info(cpu)->coherency_domain_number = uv_coherency_id;
 		uv_node_to_blade[nid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
 		max_pnode = max(pnode, max_pnode);

@@ -448,21 +466,6 @@ void __init uv_system_init(void)
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
 	map_mmioh_high(max_pnode);
-	uv_system_inited = true;
+
+	uv_cpu_init();
 }
-
-/*
- * Called on each cpu to initialize the per_cpu UV data area.
- * 	ZZZ hotplug not supported yet
- */
-void __cpuinit uv_cpu_init(void)
-{
-	BUG_ON(!uv_system_inited);
-
-	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
-
-	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
-		set_x2apic_extra_bits(uv_hub_info->pnode);
-}
@ -1,29 +1,49 @@
|
|||
#include <linux/clocksource.h>
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/sysdev.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/hpet.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/sysdev.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/hpet.h>
|
||||
#include <asm/i8253.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/hpet.h>
|
||||
|
||||
#define HPET_MASK CLOCKSOURCE_MASK(32)
|
||||
#define HPET_SHIFT 22
|
||||
#define HPET_MASK CLOCKSOURCE_MASK(32)
|
||||
#define HPET_SHIFT 22
|
||||
|
||||
/* FSEC = 10^-15
|
||||
NSEC = 10^-9 */
|
||||
#define FSEC_PER_NSEC 1000000L
|
||||
#define FSEC_PER_NSEC 1000000L
|
||||
|
||||
#define HPET_DEV_USED_BIT 2
|
||||
#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT)
|
||||
#define HPET_DEV_VALID 0x8
|
||||
#define HPET_DEV_FSB_CAP 0x1000
|
||||
#define HPET_DEV_PERI_CAP 0x2000
|
||||
|
||||
#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
|
||||
|
||||
/*
|
||||
* HPET address is set in acpi/boot.c, when an ACPI entry exists
|
||||
*/
|
||||
unsigned long hpet_address;
|
||||
static void __iomem *hpet_virt_address;
|
||||
unsigned long hpet_address;
|
||||
unsigned long hpet_num_timers;
|
||||
static void __iomem *hpet_virt_address;
|
||||
|
||||
struct hpet_dev {
|
||||
struct clock_event_device evt;
|
||||
unsigned int num;
|
||||
int cpu;
|
||||
unsigned int irq;
|
||||
unsigned int flags;
|
||||
char name[10];
|
||||
};
|
||||
|
||||
unsigned long hpet_readl(unsigned long a)
|
||||
{
|
||||
|
@ -59,7 +79,7 @@ static inline void hpet_clear_mapping(void)
|
|||
static int boot_hpet_disable;
|
||||
int hpet_force_user;
|
||||
|
||||
static int __init hpet_setup(char* str)
|
||||
static int __init hpet_setup(char *str)
|
||||
{
|
||||
if (str) {
|
||||
if (!strncmp("disable", str, 7))
|
||||
|
@ -80,7 +100,7 @@ __setup("nohpet", disable_hpet);
|
|||
|
||||
static inline int is_hpet_capable(void)
|
||||
{
|
||||
return (!boot_hpet_disable && hpet_address);
|
||||
return !boot_hpet_disable && hpet_address;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -102,6 +122,9 @@ EXPORT_SYMBOL_GPL(is_hpet_enabled);
|
|||
* timer 0 and timer 1 in case of RTC emulation.
|
||||
*/
|
||||
#ifdef CONFIG_HPET
|
||||
|
||||
static void hpet_reserve_msi_timers(struct hpet_data *hd);
|
||||
|
||||
static void hpet_reserve_platform_timers(unsigned long id)
|
||||
{
|
||||
struct hpet __iomem *hpet = hpet_virt_address;
|
||||
|
@ -111,10 +134,10 @@ static void hpet_reserve_platform_timers(unsigned long id)
|
|||
|
||||
nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
|
||||
|
||||
memset(&hd, 0, sizeof (hd));
|
||||
hd.hd_phys_address = hpet_address;
|
||||
hd.hd_address = hpet;
|
||||
hd.hd_nirqs = nrtimers;
|
||||
memset(&hd, 0, sizeof(hd));
|
||||
hd.hd_phys_address = hpet_address;
|
||||
hd.hd_address = hpet;
|
||||
hd.hd_nirqs = nrtimers;
|
||||
hpet_reserve_timer(&hd, 0);
|
||||
|
||||
#ifdef CONFIG_HPET_EMULATE_RTC
|
||||
|
@ -130,10 +153,12 @@ static void hpet_reserve_platform_timers(unsigned long id)
|
|||
hd.hd_irq[1] = HPET_LEGACY_RTC;
|
||||
|
||||
for (i = 2; i < nrtimers; timer++, i++) {
|
||||
hd.hd_irq[i] = (readl(&timer->hpet_config) & Tn_INT_ROUTE_CNF_MASK) >>
|
||||
Tn_INT_ROUTE_CNF_SHIFT;
|
||||
hd.hd_irq[i] = (readl(&timer->hpet_config) &
|
||||
Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
|
||||
}
|
||||
|
||||
hpet_reserve_msi_timers(&hd);
|
||||
|
||||
hpet_alloc(&hd);
|
||||
|
||||
}
|
||||
|
@ -227,60 +252,70 @@ static void hpet_legacy_clockevent_register(void)
|
|||
printk(KERN_DEBUG "hpet clockevent registered\n");
|
||||
}
|
||||
|
||||
static void hpet_legacy_set_mode(enum clock_event_mode mode,
|
||||
struct clock_event_device *evt)
|
||||
static int hpet_setup_msi_irq(unsigned int irq);
|
||||
|
||||
static void hpet_set_mode(enum clock_event_mode mode,
|
||||
struct clock_event_device *evt, int timer)
|
||||
{
|
||||
unsigned long cfg, cmp, now;
|
||||
uint64_t delta;
|
||||
|
||||
switch(mode) {
|
||||
switch (mode) {
|
||||
case CLOCK_EVT_MODE_PERIODIC:
|
||||
delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * hpet_clockevent.mult;
|
||||
delta >>= hpet_clockevent.shift;
|
||||
delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
|
||||
delta >>= evt->shift;
|
||||
now = hpet_readl(HPET_COUNTER);
|
||||
cmp = now + (unsigned long) delta;
|
||||
cfg = hpet_readl(HPET_T0_CFG);
|
||||
cfg = hpet_readl(HPET_Tn_CFG(timer));
|
||||
cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
|
||||
HPET_TN_SETVAL | HPET_TN_32BIT;
|
||||
hpet_writel(cfg, HPET_T0_CFG);
|
||||
hpet_writel(cfg, HPET_Tn_CFG(timer));
|
||||
/*
|
||||
* The first write after writing TN_SETVAL to the
|
||||
* config register sets the counter value, the second
|
||||
* write sets the period.
|
||||
*/
|
||||
hpet_writel(cmp, HPET_T0_CMP);
|
||||
hpet_writel(cmp, HPET_Tn_CMP(timer));
|
||||
udelay(1);
|
||||
hpet_writel((unsigned long) delta, HPET_T0_CMP);
|
||||
hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
|
||||
break;
|
||||
|
||||
case CLOCK_EVT_MODE_ONESHOT:
|
||||
cfg = hpet_readl(HPET_T0_CFG);
|
||||
cfg = hpet_readl(HPET_Tn_CFG(timer));
|
||||
cfg &= ~HPET_TN_PERIODIC;
|
||||
cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
|
||||
hpet_writel(cfg, HPET_T0_CFG);
|
||||
hpet_writel(cfg, HPET_Tn_CFG(timer));
|
||||
break;
|
||||
|
||||
case CLOCK_EVT_MODE_UNUSED:
|
||||
case CLOCK_EVT_MODE_SHUTDOWN:
|
||||
cfg = hpet_readl(HPET_T0_CFG);
|
||||
cfg = hpet_readl(HPET_Tn_CFG(timer));
|
||||
cfg &= ~HPET_TN_ENABLE;
|
||||
hpet_writel(cfg, HPET_T0_CFG);
|
||||
hpet_writel(cfg, HPET_Tn_CFG(timer));
|
||||
break;
|
||||
|
||||
case CLOCK_EVT_MODE_RESUME:
|
||||
hpet_enable_legacy_int();
|
||||
if (timer == 0) {
|
||||
hpet_enable_legacy_int();
|
||||
} else {
|
||||
struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
|
||||
hpet_setup_msi_irq(hdev->irq);
|
||||
disable_irq(hdev->irq);
|
||||
irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu));
|
||||
enable_irq(hdev->irq);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int hpet_legacy_next_event(unsigned long delta,
|
||||
struct clock_event_device *evt)
|
||||
static int hpet_next_event(unsigned long delta,
|
||||
struct clock_event_device *evt, int timer)
|
||||
{
|
||||
u32 cnt;
|
||||
|
||||
cnt = hpet_readl(HPET_COUNTER);
|
||||
cnt += (u32) delta;
|
||||
hpet_writel(cnt, HPET_T0_CMP);
|
||||
hpet_writel(cnt, HPET_Tn_CMP(timer));
|
||||
|
||||
/*
|
||||
* We need to read back the CMP register to make sure that
|
||||
|
@ -292,6 +327,347 @@ static int hpet_legacy_next_event(unsigned long delta,
|
|||
return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
|
||||
}
|
||||
|
+static void hpet_legacy_set_mode(enum clock_event_mode mode,
+			struct clock_event_device *evt)
+{
+	hpet_set_mode(mode, evt, 0);
+}
+
+static int hpet_legacy_next_event(unsigned long delta,
+			struct clock_event_device *evt)
+{
+	return hpet_next_event(delta, evt, 0);
+}
+
+/*
+ * HPET MSI Support
+ */
+#ifdef CONFIG_PCI_MSI
+
+static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
+static struct hpet_dev	*hpet_devs;
+
+void hpet_msi_unmask(unsigned int irq)
+{
+	struct hpet_dev *hdev = get_irq_data(irq);
+	unsigned long cfg;
+
+	/* unmask it */
+	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
+	cfg |= HPET_TN_FSB;
+	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
+}
+
+void hpet_msi_mask(unsigned int irq)
+{
+	unsigned long cfg;
+	struct hpet_dev *hdev = get_irq_data(irq);
+
+	/* mask it */
+	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
+	cfg &= ~HPET_TN_FSB;
+	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
+}
+
+void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
+{
+	struct hpet_dev *hdev = get_irq_data(irq);
+
+	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
+	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
+}
+
+void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
+{
+	struct hpet_dev *hdev = get_irq_data(irq);
+
+	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
+	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
+	msg->address_hi = 0;
+}
+
+static void hpet_msi_set_mode(enum clock_event_mode mode,
+				struct clock_event_device *evt)
+{
+	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+	hpet_set_mode(mode, evt, hdev->num);
+}
+
+static int hpet_msi_next_event(unsigned long delta,
+				struct clock_event_device *evt)
+{
+	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+	return hpet_next_event(delta, evt, hdev->num);
+}
+
+static int hpet_setup_msi_irq(unsigned int irq)
+{
+	if (arch_setup_hpet_msi(irq)) {
+		destroy_irq(irq);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int hpet_assign_irq(struct hpet_dev *dev)
+{
+	unsigned int irq;
+
+	irq = create_irq();
+	if (!irq)
+		return -EINVAL;
+
+	set_irq_data(irq, dev);
+
+	if (hpet_setup_msi_irq(irq))
+		return -EINVAL;
+
+	dev->irq = irq;
+	return 0;
+}
+
+static irqreturn_t hpet_interrupt_handler(int irq, void *data)
+{
+	struct hpet_dev *dev = (struct hpet_dev *)data;
+	struct clock_event_device *hevt = &dev->evt;
+
+	if (!hevt->event_handler) {
+		printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
+				dev->num);
+		return IRQ_HANDLED;
+	}
+
+	hevt->event_handler(hevt);
+	return IRQ_HANDLED;
+}
+
+static int hpet_setup_irq(struct hpet_dev *dev)
+{
+
+	if (request_irq(dev->irq, hpet_interrupt_handler,
+			IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev))
+		return -1;
+
+	disable_irq(dev->irq);
+	irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu));
+	enable_irq(dev->irq);
+
+	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
+			 dev->name, dev->irq);
+
+	return 0;
+}
+
+/* This should be called in specific @cpu */
+static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
+{
+	struct clock_event_device *evt = &hdev->evt;
+	uint64_t hpet_freq;
+
+	WARN_ON(cpu != smp_processor_id());
+	if (!(hdev->flags & HPET_DEV_VALID))
+		return;
+
+	if (hpet_setup_msi_irq(hdev->irq))
+		return;
+
+	hdev->cpu = cpu;
+	per_cpu(cpu_hpet_dev, cpu) = hdev;
+	evt->name = hdev->name;
+	hpet_setup_irq(hdev);
+	evt->irq = hdev->irq;
+
+	evt->rating = 110;
+	evt->features = CLOCK_EVT_FEAT_ONESHOT;
+	if (hdev->flags & HPET_DEV_PERI_CAP)
+		evt->features |= CLOCK_EVT_FEAT_PERIODIC;
+
+	evt->set_mode = hpet_msi_set_mode;
+	evt->set_next_event = hpet_msi_next_event;
+	evt->shift = 32;
+
+	/*
+	 * The period is a femto seconds value. We need to calculate the
+	 * scaled math multiplication factor for nanosecond to hpet tick
+	 * conversion.
+	 */
+	hpet_freq = 1000000000000000ULL;
+	do_div(hpet_freq, hpet_period);
+	evt->mult = div_sc((unsigned long) hpet_freq,
+				      NSEC_PER_SEC, evt->shift);
+	/* Calculate the max delta */
+	evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
+	/* 5 usec minimum reprogramming delta. */
+	evt->min_delta_ns = 5000;
+
+	evt->cpumask = cpumask_of_cpu(hdev->cpu);
+	clockevents_register_device(evt);
+}
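The mult/shift setup in init_one_hpet_msi_clockevent() above is plain fixed-point arithmetic and can be sanity-checked outside the kernel. A minimal userspace sketch, not part of this commit; the 69841279 fs period is an assumed example value for a 14.318 MHz HPET:

#include <stdio.h>
#include <stdint.h>

/* same factor the kernel's div_sc() computes: (ticks << shift) / nsec */
static uint32_t div_sc(uint64_t ticks, uint64_t nsec, int shift)
{
	return (uint32_t)((ticks << shift) / nsec);
}

int main(void)
{
	uint64_t period_fs = 69841279ULL;	/* assumed femtoseconds per tick */
	uint64_t freq = 1000000000000000ULL / period_fs;	/* ~14318179 Hz */
	int shift = 32;
	uint32_t mult = div_sc(freq, 1000000000ULL, shift);

	/* delta_ns -> ticks, the same way clockevents programs the timer */
	uint64_t delta_ns = 5000;	/* the 5 usec minimum above */
	uint64_t ticks = (delta_ns * mult) >> shift;

	printf("mult=%u, %llu ns ~ %llu hpet ticks\n", mult,
	       (unsigned long long)delta_ns, (unsigned long long)ticks);
	return 0;
}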
+
+#ifdef CONFIG_HPET
+/* Reserve at least one timer for userspace (/dev/hpet) */
+#define RESERVE_TIMERS 1
+#else
+#define RESERVE_TIMERS 0
+#endif
+
+static void hpet_msi_capability_lookup(unsigned int start_timer)
+{
+	unsigned int id;
+	unsigned int num_timers;
+	unsigned int num_timers_used = 0;
+	int i;
+
+	id = hpet_readl(HPET_ID);
+
+	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
+	num_timers++; /* Value read out starts from 0 */
+
+	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
+	if (!hpet_devs)
+		return;
+
+	hpet_num_timers = num_timers;
+
+	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
+		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
+		unsigned long cfg = hpet_readl(HPET_Tn_CFG(i));
+
+		/* Only consider HPET timer with MSI support */
+		if (!(cfg & HPET_TN_FSB_CAP))
+			continue;
+
+		hdev->flags = 0;
+		if (cfg & HPET_TN_PERIODIC_CAP)
+			hdev->flags |= HPET_DEV_PERI_CAP;
+		hdev->num = i;
+
+		sprintf(hdev->name, "hpet%d", i);
+		if (hpet_assign_irq(hdev))
+			continue;
+
+		hdev->flags |= HPET_DEV_FSB_CAP;
+		hdev->flags |= HPET_DEV_VALID;
+		num_timers_used++;
+		if (num_timers_used == num_possible_cpus())
+			break;
+	}
+
+	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
+		num_timers, num_timers_used);
+}
+
+#ifdef CONFIG_HPET
+static void hpet_reserve_msi_timers(struct hpet_data *hd)
+{
+	int i;
+
+	if (!hpet_devs)
+		return;
+
+	for (i = 0; i < hpet_num_timers; i++) {
+		struct hpet_dev *hdev = &hpet_devs[i];
+
+		if (!(hdev->flags & HPET_DEV_VALID))
+			continue;
+
+		hd->hd_irq[hdev->num] = hdev->irq;
+		hpet_reserve_timer(hd, hdev->num);
+	}
+}
+#endif
+
+static struct hpet_dev *hpet_get_unused_timer(void)
+{
+	int i;
+
+	if (!hpet_devs)
+		return NULL;
+
+	for (i = 0; i < hpet_num_timers; i++) {
+		struct hpet_dev *hdev = &hpet_devs[i];
+
+		if (!(hdev->flags & HPET_DEV_VALID))
+			continue;
+		if (test_and_set_bit(HPET_DEV_USED_BIT,
+			(unsigned long *)&hdev->flags))
+			continue;
+		return hdev;
+	}
+	return NULL;
+}
+
+struct hpet_work_struct {
+	struct delayed_work work;
+	struct completion complete;
+};
+
+static void hpet_work(struct work_struct *w)
+{
+	struct hpet_dev *hdev;
+	int cpu = smp_processor_id();
+	struct hpet_work_struct *hpet_work;
+
+	hpet_work = container_of(w, struct hpet_work_struct, work.work);
+
+	hdev = hpet_get_unused_timer();
+	if (hdev)
+		init_one_hpet_msi_clockevent(hdev, cpu);
+
+	complete(&hpet_work->complete);
+}
+
+static int hpet_cpuhp_notify(struct notifier_block *n,
+		unsigned long action, void *hcpu)
+{
+	unsigned long cpu = (unsigned long)hcpu;
+	struct hpet_work_struct work;
+	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);
+
+	switch (action & 0xf) {
+	case CPU_ONLINE:
+		INIT_DELAYED_WORK(&work.work, hpet_work);
+		init_completion(&work.complete);
+		/* FIXME: add schedule_work_on() */
+		schedule_delayed_work_on(cpu, &work.work, 0);
+		wait_for_completion(&work.complete);
+		break;
+	case CPU_DEAD:
+		if (hdev) {
+			free_irq(hdev->irq, hdev);
+			hdev->flags &= ~HPET_DEV_USED;
+			per_cpu(cpu_hpet_dev, cpu) = NULL;
+		}
+		break;
+	}
+	return NOTIFY_OK;
+}
+#else
+
+static int hpet_setup_msi_irq(unsigned int irq)
+{
+	return 0;
+}
+static void hpet_msi_capability_lookup(unsigned int start_timer)
+{
+	return;
+}
+
+#ifdef CONFIG_HPET
+static void hpet_reserve_msi_timers(struct hpet_data *hd)
+{
+	return;
+}
+#endif
+
+static int hpet_cpuhp_notify(struct notifier_block *n,
+		unsigned long action, void *hcpu)
+{
+	return NOTIFY_OK;
+}
+
+#endif
+
 /*
  * Clock source related code
  */

@@ -427,8 +803,10 @@ int __init hpet_enable(void)
 
 	if (id & HPET_ID_LEGSUP) {
 		hpet_legacy_clockevent_register();
+		hpet_msi_capability_lookup(2);
 		return 1;
 	}
+	hpet_msi_capability_lookup(0);
 	return 0;
 
 out_nohpet:

@@ -445,6 +823,8 @@ out_nohpet:
  */
 static __init int hpet_late_init(void)
 {
+	int cpu;
+
 	if (boot_hpet_disable)
 		return -ENODEV;
 

@@ -460,6 +840,13 @@ static __init int hpet_late_init(void)
 
 	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
 
+	for_each_online_cpu(cpu) {
+		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
+	}
+
+	/* This notifier should be called after workqueue is ready */
+	hotcpu_notifier(hpet_cpuhp_notify, -20);
+
 	return 0;
 }
 fs_initcall(hpet_late_init);
The diff for this file is not shown because it is too large.
The diff for this file is not shown because it is too large.
@@ -0,0 +1,189 @@
+/*
+ * Common interrupt code for 32 and 64 bit
+ */
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/seq_file.h>
+
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/smp.h>
+
+atomic_t irq_err_count;
+
+/*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	/*
+	 * Currently unexpected vectors happen only on SMP and APIC.
+	 * We _must_ ack these because every local APIC has only N
+	 * irq slots per priority level, and a 'hanging, unacked' IRQ
+	 * holds up an irq slot - in excessive cases (when multiple
+	 * unexpected vectors occur) that might lock up the APIC
+	 * completely.
+	 * But only ack when the APIC is enabled -AK
+	 */
+	if (cpu_has_apic)
+		ack_APIC_irq();
+#endif
+}
+
+#ifdef CONFIG_X86_32
+# define irq_stats(x)	(&per_cpu(irq_stat, x))
+#else
+# define irq_stats(x)	cpu_pda(x)
+#endif
+/*
+ * /proc/interrupts printing:
+ */
+static int show_other_interrupts(struct seq_file *p)
+{
+	int j;
+
+	seq_printf(p, "NMI: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
+	seq_printf(p, "  Non-maskable interrupts\n");
+#ifdef CONFIG_X86_LOCAL_APIC
+	seq_printf(p, "LOC: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
+	seq_printf(p, "  Local timer interrupts\n");
+#endif
+#ifdef CONFIG_SMP
+	seq_printf(p, "RES: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+	seq_printf(p, "  Rescheduling interrupts\n");
+	seq_printf(p, "CAL: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
+	seq_printf(p, "  Function call interrupts\n");
+	seq_printf(p, "TLB: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+	seq_printf(p, "  TLB shootdowns\n");
+#endif
+#ifdef CONFIG_X86_MCE
+	seq_printf(p, "TRM: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
+	seq_printf(p, "  Thermal event interrupts\n");
+# ifdef CONFIG_X86_64
+	seq_printf(p, "THR: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
+	seq_printf(p, "  Threshold APIC interrupts\n");
+# endif
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+	seq_printf(p, "SPU: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
+	seq_printf(p, "  Spurious interrupts\n");
+#endif
+	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+#if defined(CONFIG_X86_IO_APIC)
+	seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
+	return 0;
+}
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+	unsigned long flags, any_count = 0;
+	int i = *(loff_t *) v, j;
+	struct irqaction *action;
+	struct irq_desc *desc;
+
+	if (i > nr_irqs)
+		return 0;
+
+	if (i == nr_irqs)
+		return show_other_interrupts(p);
+
+	/* print header */
+	if (i == 0) {
+		seq_printf(p, "           ");
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%-8d", j);
+		seq_putc(p, '\n');
+	}
+
+	desc = irq_to_desc(i);
+	spin_lock_irqsave(&desc->lock, flags);
+#ifndef CONFIG_SMP
+	any_count = kstat_irqs(i);
+#else
+	for_each_online_cpu(j)
+		any_count |= kstat_irqs_cpu(i, j);
+#endif
+	action = desc->action;
+	if (!action && !any_count)
+		goto out;
+
+	seq_printf(p, "%3d: ", i);
+#ifndef CONFIG_SMP
+	seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+#endif
+	seq_printf(p, " %8s", desc->chip->name);
+	seq_printf(p, "-%-8s", desc->name);
+
+	if (action) {
+		seq_printf(p, "  %s", action->name);
+		while ((action = action->next) != NULL)
+			seq_printf(p, ", %s", action->name);
+	}
+
+	seq_putc(p, '\n');
+out:
+	spin_unlock_irqrestore(&desc->lock, flags);
+	return 0;
+}
+
+/*
+ * /proc/stat helpers
+ */
+u64 arch_irq_stat_cpu(unsigned int cpu)
+{
+	u64 sum = irq_stats(cpu)->__nmi_count;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	sum += irq_stats(cpu)->apic_timer_irqs;
+#endif
+#ifdef CONFIG_SMP
+	sum += irq_stats(cpu)->irq_resched_count;
+	sum += irq_stats(cpu)->irq_call_count;
+	sum += irq_stats(cpu)->irq_tlb_count;
+#endif
+#ifdef CONFIG_X86_MCE
+	sum += irq_stats(cpu)->irq_thermal_count;
+# ifdef CONFIG_X86_64
+	sum += irq_stats(cpu)->irq_threshold_count;
+# endif
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+	sum += irq_stats(cpu)->irq_spurious_count;
+#endif
+	return sum;
+}
+
+u64 arch_irq_stat(void)
+{
+	u64 sum = atomic_read(&irq_err_count);
+
+#ifdef CONFIG_X86_IO_APIC
+	sum += atomic_read(&irq_mis_count);
+#endif
+	return sum;
+}
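The irq_stats() macro defined above gives the 32-bit per-cpu irq_stat array and the 64-bit PDA one common accessor spelling. A toy standalone model of that idiom, with invented types and a fixed four-CPU array standing in for real per-cpu data:

#include <stdio.h>

struct irq_cpustat { unsigned int __nmi_count; };

#ifdef MODEL_32BIT
static struct irq_cpustat irq_stat[4];	/* stands in for per_cpu(irq_stat, x) */
# define irq_stats(x)	(&irq_stat[x])
#else
static struct irq_cpustat pda[4];	/* stands in for cpu_pda(x) */
# define irq_stats(x)	(&pda[x])
#endif

int main(void)
{
	/* either build flavor compiles against the same accessor */
	irq_stats(0)->__nmi_count = 3;
	printf("NMI count on cpu0: %u\n", irq_stats(0)->__nmi_count);
	return 0;
}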
@@ -25,29 +25,6 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
 EXPORT_PER_CPU_SYMBOL(irq_regs);
 
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
-{
-	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	/*
-	 * Currently unexpected vectors happen only on SMP and APIC.
-	 * We _must_ ack these because every local APIC has only N
-	 * irq slots per priority level, and a 'hanging, unacked' IRQ
-	 * holds up an irq slot - in excessive cases (when multiple
-	 * unexpected vectors occur) that might lock up the APIC
-	 * completely.
-	 * But only ack when the APIC is enabled -AK
-	 */
-	if (cpu_has_apic)
-		ack_APIC_irq();
-#endif
-}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 /* Debugging check for stack overflow: is there less than 1KB free? */
 static int check_stack_overflow(void)

@@ -223,20 +200,25 @@ unsigned int do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
 	/* high bit used in ret_from_ code */
-	int overflow, irq = ~regs->orig_ax;
-	struct irq_desc *desc = irq_desc + irq;
+	int overflow;
+	unsigned vector = ~regs->orig_ax;
+	struct irq_desc *desc;
+	unsigned irq;
 
-	if (unlikely((unsigned)irq >= NR_IRQS)) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-					__func__, irq);
-		BUG();
-	}
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
+	irq = __get_cpu_var(vector_irq)[vector];
 
 	overflow = check_stack_overflow();
 
+	desc = irq_to_desc(irq);
+	if (unlikely(!desc)) {
+		printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n",
+					__func__, irq, vector, smp_processor_id());
+		BUG();
+	}
+
 	if (!execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();

@@ -248,146 +230,6 @@ unsigned int do_IRQ(struct pt_regs *regs)
 	return 1;
 }
 
-/*
- * Interrupt statistics:
- */
-
-atomic_t irq_err_count;
-
-/*
- * /proc/interrupts printing:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		seq_printf(p, "           ");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d",j);
-		seq_putc(p, '\n');
-	}
-
-	if (i < NR_IRQS) {
-		unsigned any_count = 0;
-
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
-#ifndef CONFIG_SMP
-		any_count = kstat_irqs(i);
-#else
-		for_each_online_cpu(j)
-			any_count |= kstat_cpu(j).irqs[i];
-#endif
-		action = irq_desc[i].action;
-		if (!action && !any_count)
-			goto skip;
-		seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
-		seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
-		seq_printf(p, " %8s", irq_desc[i].chip->name);
-		seq_printf(p, "-%-8s", irq_desc[i].name);
-
-		if (action) {
-			seq_printf(p, "  %s", action->name);
-			while ((action = action->next) != NULL)
-				seq_printf(p, ", %s", action->name);
-		}
-
-		seq_putc(p, '\n');
-skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_printf(p, "NMI: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", nmi_count(j));
-		seq_printf(p, "  Non-maskable interrupts\n");
-#ifdef CONFIG_X86_LOCAL_APIC
-		seq_printf(p, "LOC: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).apic_timer_irqs);
-		seq_printf(p, "  Local timer interrupts\n");
-#endif
-#ifdef CONFIG_SMP
-		seq_printf(p, "RES: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_resched_count);
-		seq_printf(p, "  Rescheduling interrupts\n");
-		seq_printf(p, "CAL: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_call_count);
-		seq_printf(p, "  Function call interrupts\n");
-		seq_printf(p, "TLB: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_tlb_count);
-		seq_printf(p, "  TLB shootdowns\n");
-#endif
-#ifdef CONFIG_X86_MCE
-		seq_printf(p, "TRM: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_thermal_count);
-		seq_printf(p, "  Thermal event interrupts\n");
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-		seq_printf(p, "SPU: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_spurious_count);
-		seq_printf(p, "  Spurious interrupts\n");
-#endif
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-#if defined(CONFIG_X86_IO_APIC)
-		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-#endif
-	}
-	return 0;
-}
-
-/*
- * /proc/stat helpers
- */
-u64 arch_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = nmi_count(cpu);
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
-#endif
-#ifdef CONFIG_SMP
-	sum += per_cpu(irq_stat, cpu).irq_resched_count;
-	sum += per_cpu(irq_stat, cpu).irq_call_count;
-	sum += per_cpu(irq_stat, cpu).irq_tlb_count;
-#endif
-#ifdef CONFIG_X86_MCE
-	sum += per_cpu(irq_stat, cpu).irq_thermal_count;
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-	sum += per_cpu(irq_stat, cpu).irq_spurious_count;
-#endif
-	return sum;
-}
-
-u64 arch_irq_stat(void)
-{
-	u64 sum = atomic_read(&irq_err_count);
-
-#ifdef CONFIG_X86_IO_APIC
-	sum += atomic_read(&irq_mis_count);
-#endif
-	return sum;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <mach_apic.h>
 

@@ -395,20 +237,22 @@ void fixup_irqs(cpumask_t map)
 {
 	unsigned int irq;
 	static int warned;
+	struct irq_desc *desc;
 
-	for (irq = 0; irq < NR_IRQS; irq++) {
+	for_each_irq_desc(irq, desc) {
 		cpumask_t mask;
 
 		if (irq == 2)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpus_and(mask, desc->affinity, map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
 		}
-		if (irq_desc[irq].chip->set_affinity)
-			irq_desc[irq].chip->set_affinity(irq, mask);
-		else if (irq_desc[irq].action && !(warned++))
+		if (desc->chip->set_affinity)
+			desc->chip->set_affinity(irq, mask);
+		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
 	}
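The do_IRQ() rewrite above stops treating ~regs->orig_ax as an irq number and instead treats it as a vector that is translated through a per-cpu table. A standalone toy model of that translation, not kernel code; the table size and the sample binding are invented, and in the kernel the table is per-cpu and filled by the vector allocator:

#include <stdio.h>

#define NR_VECTORS 256
static int vector_irq[NR_VECTORS];	/* one copy here; per-cpu in the kernel */

int main(void)
{
	int v;

	for (v = 0; v < NR_VECTORS; v++)
		vector_irq[v] = -1;	/* -1: no irq bound to this vector */
	vector_irq[0x31] = 1;		/* pretend vector 0x31 carries irq 1 */

	unsigned int vector = 0x31;	/* what ~regs->orig_ax would yield */
	int irq = vector_irq[vector];

	if (irq < 0)
		printf("spurious vector %#x\n", vector);
	else
		printf("vector %#x -> irq %d\n", vector, irq);
	return 0;
}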
@@ -18,28 +18,6 @@
 #include <asm/idle.h>
 #include <asm/smp.h>
 
-atomic_t irq_err_count;
-
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
-{
-	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
-	/*
-	 * Currently unexpected vectors happen only on SMP and APIC.
-	 * We _must_ ack these because every local APIC has only N
-	 * irq slots per priority level, and a 'hanging, unacked' IRQ
-	 * holds up an irq slot - in excessive cases (when multiple
-	 * unexpected vectors occur) that might lock up the APIC
-	 * completely.
-	 * But don't ack when the APIC is disabled. -AK
-	 */
-	if (!disable_apic)
-		ack_APIC_irq();
-}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 /*
  * Probabilistic stack overflow check:

@@ -64,122 +42,6 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 }
 #endif
 
-/*
- * Generic, controller-independent functions:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		seq_printf(p, "           ");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d",j);
-		seq_putc(p, '\n');
-	}
-
-	if (i < NR_IRQS) {
-		unsigned any_count = 0;
-
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
-#ifndef CONFIG_SMP
-		any_count = kstat_irqs(i);
-#else
-		for_each_online_cpu(j)
-			any_count |= kstat_cpu(j).irqs[i];
-#endif
-		action = irq_desc[i].action;
-		if (!action && !any_count)
-			goto skip;
-		seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
-		seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
-		seq_printf(p, " %8s", irq_desc[i].chip->name);
-		seq_printf(p, "-%-8s", irq_desc[i].name);
-
-		if (action) {
-			seq_printf(p, "  %s", action->name);
-			while ((action = action->next) != NULL)
-				seq_printf(p, ", %s", action->name);
-		}
-		seq_putc(p, '\n');
-skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_printf(p, "NMI: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
-		seq_printf(p, "  Non-maskable interrupts\n");
-		seq_printf(p, "LOC: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
-		seq_printf(p, "  Local timer interrupts\n");
-#ifdef CONFIG_SMP
-		seq_printf(p, "RES: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
-		seq_printf(p, "  Rescheduling interrupts\n");
-		seq_printf(p, "CAL: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
-		seq_printf(p, "  Function call interrupts\n");
-		seq_printf(p, "TLB: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
-		seq_printf(p, "  TLB shootdowns\n");
-#endif
-#ifdef CONFIG_X86_MCE
-		seq_printf(p, "TRM: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
-		seq_printf(p, "  Thermal event interrupts\n");
-		seq_printf(p, "THR: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
-		seq_printf(p, "  Threshold APIC interrupts\n");
-#endif
-		seq_printf(p, "SPU: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
-		seq_printf(p, "  Spurious interrupts\n");
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-	}
-	return 0;
-}
-
-/*
- * /proc/stat helpers
- */
-u64 arch_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = cpu_pda(cpu)->__nmi_count;
-
-	sum += cpu_pda(cpu)->apic_timer_irqs;
-#ifdef CONFIG_SMP
-	sum += cpu_pda(cpu)->irq_resched_count;
-	sum += cpu_pda(cpu)->irq_call_count;
-	sum += cpu_pda(cpu)->irq_tlb_count;
-#endif
-#ifdef CONFIG_X86_MCE
-	sum += cpu_pda(cpu)->irq_thermal_count;
-	sum += cpu_pda(cpu)->irq_threshold_count;
-#endif
-	sum += cpu_pda(cpu)->irq_spurious_count;
-	return sum;
-}
-
-u64 arch_irq_stat(void)
-{
-	return atomic_read(&irq_err_count);
-}
-
 /*
  * do_IRQ handles all normal device IRQ's (the special
  * SMP cross-CPU interrupts have their own specific

@@ -188,6 +50,7 @@ u64 arch_irq_stat(void)
 asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct irq_desc *desc;
 
 	/* high bit used in ret_from_ code */
 	unsigned vector = ~regs->orig_ax;

@@ -201,8 +64,9 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 	stack_overflow_check(regs);
 #endif
 
-	if (likely(irq < NR_IRQS))
-		generic_handle_irq(irq);
+	desc = irq_to_desc(irq);
+	if (likely(desc))
+		generic_handle_irq_desc(irq, desc);
 	else {
 		if (!disable_apic)
 			ack_APIC_irq();

@@ -223,8 +87,9 @@ void fixup_irqs(cpumask_t map)
 {
 	unsigned int irq;
 	static int warned;
+	struct irq_desc *desc;
 
-	for (irq = 0; irq < NR_IRQS; irq++) {
+	for_each_irq_desc(irq, desc) {
 		cpumask_t mask;
 		int break_affinity = 0;
 		int set_affinity = 1;

@@ -233,32 +98,32 @@ void fixup_irqs(cpumask_t map)
 			continue;
 
 		/* interrupt's are disabled at this point */
-		spin_lock(&irq_desc[irq].lock);
+		spin_lock(&desc->lock);
 
 		if (!irq_has_action(irq) ||
-		    cpus_equal(irq_desc[irq].affinity, map)) {
-			spin_unlock(&irq_desc[irq].lock);
+		    cpus_equal(desc->affinity, map)) {
+			spin_unlock(&desc->lock);
 			continue;
 		}
 
-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpus_and(mask, desc->affinity, map);
 		if (cpus_empty(mask)) {
 			break_affinity = 1;
 			mask = map;
 		}
 
-		if (irq_desc[irq].chip->mask)
-			irq_desc[irq].chip->mask(irq);
+		if (desc->chip->mask)
+			desc->chip->mask(irq);
 
-		if (irq_desc[irq].chip->set_affinity)
-			irq_desc[irq].chip->set_affinity(irq, mask);
+		if (desc->chip->set_affinity)
+			desc->chip->set_affinity(irq, mask);
 		else if (!(warned++))
 			set_affinity = 0;
 
-		if (irq_desc[irq].chip->unmask)
-			irq_desc[irq].chip->unmask(irq);
+		if (desc->chip->unmask)
+			desc->chip->unmask(irq);
 
-		spin_unlock(&irq_desc[irq].lock);
+		spin_unlock(&desc->lock);
 
 		if (break_affinity && set_affinity)
 			printk("Broke affinity for irq %i\n", irq);
@@ -69,6 +69,13 @@ void __init init_ISA_irqs (void)
 	 * 16 old-style INTA-cycle interrupts:
 	 */
 	for (i = 0; i < 16; i++) {
+		/* first time call this irq_desc */
+		struct irq_desc *desc = irq_to_desc(i);
+
+		desc->status = IRQ_DISABLED;
+		desc->action = NULL;
+		desc->depth = 1;
+
 		set_irq_chip_and_handler_name(i, &i8259A_chip,
 					      handle_level_irq, "XT");
 	}

@@ -83,6 +90,27 @@ static struct irqaction irq2 = {
 	.name = "cascade",
 };
 
+DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
+	[0 ... IRQ0_VECTOR - 1] = -1,
+	[IRQ0_VECTOR] = 0,
+	[IRQ1_VECTOR] = 1,
+	[IRQ2_VECTOR] = 2,
+	[IRQ3_VECTOR] = 3,
+	[IRQ4_VECTOR] = 4,
+	[IRQ5_VECTOR] = 5,
+	[IRQ6_VECTOR] = 6,
+	[IRQ7_VECTOR] = 7,
+	[IRQ8_VECTOR] = 8,
+	[IRQ9_VECTOR] = 9,
+	[IRQ10_VECTOR] = 10,
+	[IRQ11_VECTOR] = 11,
+	[IRQ12_VECTOR] = 12,
+	[IRQ13_VECTOR] = 13,
+	[IRQ14_VECTOR] = 14,
+	[IRQ15_VECTOR] = 15,
+	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+};
+
 /* Overridden in paravirt.c */
 void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
 

@@ -98,22 +126,14 @@ void __init native_init_IRQ(void)
 	 * us. (some of these will be overridden and become
 	 * 'special' SMP interrupts)
 	 */
-	for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
-		int vector = FIRST_EXTERNAL_VECTOR + i;
-		if (i >= NR_IRQS)
-			break;
+	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 		/* SYSCALL_VECTOR was reserved in trap_init. */
-		if (!test_bit(vector, used_vectors))
-			set_intr_gate(vector, interrupt[i]);
+		if (i != SYSCALL_VECTOR)
+			set_intr_gate(i, interrupt[i]);
 	}
 
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
-	/*
-	 * IRQ0 must be given a fixed assignment and initialized,
-	 * because it's used before the IO-APIC is set up.
-	 */
-	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
-
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
 	/*
 	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
 	 * IPI, driven by wakeup.

@@ -128,6 +148,9 @@ void __init native_init_IRQ(void)
 
 	/* IPI for single call function */
 	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt);
+
+	/* Low priority IPI to cleanup after moving an irq */
+	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -142,23 +142,19 @@ void __init init_ISA_irqs(void)
 	init_bsp_APIC();
 	init_8259A(0);
 
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
+	for (i = 0; i < 16; i++) {
+		/* first time call this irq_desc */
+		struct irq_desc *desc = irq_to_desc(i);
 
-		if (i < 16) {
-			/*
-			 * 16 old-style INTA-cycle interrupts:
-			 */
-			set_irq_chip_and_handler_name(i, &i8259A_chip,
+		desc->status = IRQ_DISABLED;
+		desc->action = NULL;
+		desc->depth = 1;
+
+		/*
+		 * 16 old-style INTA-cycle interrupts:
+		 */
+		set_irq_chip_and_handler_name(i, &i8259A_chip,
 						      handle_level_irq, "XT");
-		} else {
-			/*
-			 * 'high' PCI IRQs filled in on demand
-			 */
-			irq_desc[i].chip = &no_irq_chip;
-		}
 	}
 }
@@ -35,9 +35,6 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
 	if (!(word & (1 << 13))) {
 		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
 			"disabling irq balancing and affinity\n");
-#ifdef CONFIG_IRQBALANCE
-		irqbalance_disable("");
-#endif
 		noirqdebug_setup("");
 #ifdef CONFIG_PROC_FS
 		no_irq_affinity = 1;
@@ -1073,6 +1073,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
+	prefill_possible_map();
 
 #ifdef CONFIG_X86_64
 	init_cpu_to_node();
 #endif

@@ -1080,6 +1081,9 @@ void __init setup_arch(char **cmdline_p)
 	init_apic_mappings();
 	ioapic_init_mappings();
 
+	/* need to wait for io_apic is mapped */
+	nr_irqs = probe_nr_irqs();
+
 	kvm_guest_init();
 
 	e820_reserve_resources();
@@ -140,25 +140,30 @@ static void __init setup_cpu_pda_map(void)
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size = PERCPU_ENOUGH_ROOM;
+	ssize_t size, old_size;
 	char *ptr;
 	int cpu;
+	unsigned long align = 1;
 
 	/* Setup cpu_pda map */
 	setup_cpu_pda_map();
 
 	/* Copy section for each CPU (we discard the original) */
-	size = PERCPU_ENOUGH_ROOM;
+	old_size = PERCPU_ENOUGH_ROOM;
+	align = max_t(unsigned long, PAGE_SIZE, align);
+	size = roundup(old_size, align);
 	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
 			  size);
 
 	for_each_possible_cpu(cpu) {
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-		ptr = alloc_bootmem_pages(size);
+		ptr = __alloc_bootmem(size, align,
+					__pa(MAX_DMA_ADDRESS));
 #else
 		int node = early_cpu_to_node(cpu);
 		if (!node_online(node) || !NODE_DATA(node)) {
-			ptr = alloc_bootmem_pages(size);
+			ptr = __alloc_bootmem(size, align,
+						__pa(MAX_DMA_ADDRESS));
 			printk(KERN_INFO
 			       "cpu %d has no node %d or node-local memory\n",
 				cpu, node);

@@ -167,7 +172,8 @@ void __init setup_per_cpu_areas(void)
 			       cpu, __pa(ptr));
 		}
 		else {
-			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
+							__pa(MAX_DMA_ADDRESS));
 			if (ptr)
 				printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
 					 cpu, node, __pa(ptr));

@@ -175,7 +181,6 @@ void __init setup_per_cpu_areas(void)
 #endif
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-
 	}
 
 	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
@@ -543,10 +543,10 @@ static inline void __inquire_remote_apic(int apicid)
 	int timeout;
 	u32 status;
 
-	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
+	printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);
 
 	for (i = 0; i < ARRAY_SIZE(regs); i++) {
-		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);
+		printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);
 
 		/*
 		 * Wait for idle.

@@ -874,7 +874,7 @@ do_rest:
 	start_ip = setup_trampoline();
 
 	/* So we see what's up */
-	printk(KERN_INFO "Booting processor %d/%d ip %lx\n",
+	printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
 			  cpu, apicid, start_ip);
 
 	/*
@@ -0,0 +1,79 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV IRQ functions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/irq.h>
+
+#include <asm/apic.h>
+#include <asm/uv/uv_irq.h>
+
+static void uv_noop(unsigned int irq)
+{
+}
+
+static unsigned int uv_noop_ret(unsigned int irq)
+{
+	return 0;
+}
+
+static void uv_ack_apic(unsigned int irq)
+{
+	ack_APIC_irq();
+}
+
+struct irq_chip uv_irq_chip = {
+	.name		= "UV-CORE",
+	.startup	= uv_noop_ret,
+	.shutdown	= uv_noop,
+	.enable		= uv_noop,
+	.disable	= uv_noop,
+	.ack		= uv_noop,
+	.mask		= uv_noop,
+	.unmask		= uv_noop,
+	.eoi		= uv_ack_apic,
+	.end		= uv_noop,
+};
+
+/*
+ * Set up a mapping of an available irq and vector, and enable the specified
+ * MMR that defines the MSI that is to be sent to the specified CPU when an
+ * interrupt is raised.
+ */
+int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
+		 unsigned long mmr_offset)
+{
+	int irq;
+	int ret;
+
+	irq = create_irq();
+	if (irq <= 0)
+		return -EBUSY;
+
+	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset);
+	if (ret != irq)
+		destroy_irq(irq);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(uv_setup_irq);
+
+/*
+ * Tear down a mapping of an irq and vector, and disable the specified MMR that
+ * defined the MSI that was to be sent to the specified CPU when an interrupt
+ * was raised.
+ *
+ * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
+ */
+void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset)
+{
+	arch_disable_uv_irq(mmr_blade, mmr_offset);
+	destroy_irq(irq);
+}
+EXPORT_SYMBOL_GPL(uv_teardown_irq);
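A hypothetical caller of the two exports above could look like this sketch (kernel context, not from this commit; MY_UV_MMR_OFFSET stands in for a real MMR offset the caller would know). uv_setup_irq() hands back the irq on success, and teardown must be given the same blade/offset pair:

#include <asm/uv/uv_irq.h>

static int example_bind_uv_irq(int cpu, int mmr_blade)
{
	/* MY_UV_MMR_OFFSET is a placeholder, not a real constant */
	int irq = uv_setup_irq("uv-example", cpu, mmr_blade, MY_UV_MMR_OFFSET);

	if (irq < 0)
		return irq;	/* -EBUSY or an arch_enable_uv_irq() error */

	/* ... use the interrupt ... */

	uv_teardown_irq(irq, mmr_blade, MY_UV_MMR_OFFSET);
	return 0;
}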
@@ -0,0 +1,72 @@
+/*
+ * This file supports the /sys/firmware/sgi_uv interfaces for SGI UV.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) Russ Anderson
+ */
+
+#include <linux/sysdev.h>
+#include <asm/uv/bios.h>
+
+struct kobject *sgi_uv_kobj;
+
+static ssize_t partition_id_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%ld\n", sn_partition_id);
+}
+
+static ssize_t coherence_id_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%ld\n", partition_coherence_id());
+}
+
+static struct kobj_attribute partition_id_attr =
+	__ATTR(partition_id, S_IRUGO, partition_id_show, NULL);
+
+static struct kobj_attribute coherence_id_attr =
+	__ATTR(coherence_id, S_IRUGO, coherence_id_show, NULL);
+
+
+static int __init sgi_uv_sysfs_init(void)
+{
+	unsigned long ret;
+
+	if (!sgi_uv_kobj)
+		sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
+	if (!sgi_uv_kobj) {
+		printk(KERN_WARNING "kobject_create_and_add sgi_uv failed \n");
+		return -EINVAL;
+	}
+
+	ret = sysfs_create_file(sgi_uv_kobj, &partition_id_attr.attr);
+	if (ret) {
+		printk(KERN_WARNING "sysfs_create_file partition_id failed \n");
+		return ret;
+	}
+
+	ret = sysfs_create_file(sgi_uv_kobj, &coherence_id_attr.attr);
+	if (ret) {
+		printk(KERN_WARNING "sysfs_create_file coherence_id failed \n");
+		return ret;
+	}
+
+	return 0;
+}
+
+device_initcall(sgi_uv_sysfs_init);
@@ -484,10 +484,11 @@ static void disable_cobalt_irq(unsigned int irq)
 static unsigned int startup_cobalt_irq(unsigned int irq)
 {
 	unsigned long flags;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	spin_lock_irqsave(&cobalt_lock, flags);
-	if ((irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
-		irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
+	if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
+		desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
 	enable_cobalt_irq(irq);
 	spin_unlock_irqrestore(&cobalt_lock, flags);
 	return 0;

@@ -506,9 +507,10 @@ static void ack_cobalt_irq(unsigned int irq)
 static void end_cobalt_irq(unsigned int irq)
 {
 	unsigned long flags;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	spin_lock_irqsave(&cobalt_lock, flags);
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+	if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)))
 		enable_cobalt_irq(irq);
 	spin_unlock_irqrestore(&cobalt_lock, flags);
 }

@@ -626,12 +628,12 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 
-	desc = irq_desc + realirq;
+	desc = irq_to_desc(realirq);
 
 	/*
 	 * handle this 'virtual interrupt' as a Cobalt one now.
 	 */
-	kstat_cpu(smp_processor_id()).irqs[realirq]++;
+	kstat_incr_irqs_this_cpu(realirq, desc);
 
 	if (likely(desc->action != NULL))
 		handle_IRQ_event(realirq, desc->action);

@@ -662,27 +664,29 @@ void init_VISWS_APIC_irqs(void)
 	int i;
 
 	for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 1;
+		struct irq_desc *desc = irq_to_desc(i);
+
+		desc->status = IRQ_DISABLED;
+		desc->action = 0;
+		desc->depth = 1;
 
 		if (i == 0) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 		else if (i == CO_IRQ_IDE0) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 		else if (i == CO_IRQ_IDE1) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 		else if (i == CO_IRQ_8259) {
-			irq_desc[i].chip = &piix4_master_irq_type;
+			desc->chip = &piix4_master_irq_type;
 		}
 		else if (i < CO_IRQ_APIC0) {
-			irq_desc[i].chip = &piix4_virtual_irq_type;
+			desc->chip = &piix4_virtual_irq_type;
 		}
 		else if (IS_CO_APIC(i)) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 	}
 
@@ -235,11 +235,14 @@ static void __devinit vmi_time_init_clockevent(void)
 
 void __init vmi_time_init(void)
 {
+	unsigned int cpu;
 	/* Disable PIT: BIOSes start PIT CH0 with 18.2hz peridic. */
 	outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */
 
 	vmi_time_init_clockevent();
 	setup_irq(0, &vmi_clock_action);
+	for_each_possible_cpu(cpu)
+		per_cpu(vector_irq, cpu)[vmi_get_timer_vector()] = 0;
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -582,7 +582,7 @@ static void __init lguest_init_IRQ(void)
 	for (i = 0; i < LGUEST_IRQS; i++) {
 		int vector = FIRST_EXTERNAL_VECTOR + i;
 		if (vector != SYSCALL_VECTOR) {
-			set_intr_gate(vector, interrupt[i]);
+			set_intr_gate(vector, interrupt[vector]);
 			set_irq_chip_and_handler_name(i, &lguest_irq_controller,
 						      handle_level_irq,
 						      "level");
@@ -41,6 +41,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ }
 };
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	return cpumask_of_cpu(cpu);
+}
+
 static int probe_bigsmp(void)
 {
@@ -75,4 +75,18 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 }
 #endif
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt desitination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
@@ -38,4 +38,18 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt desitination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
@@ -23,4 +23,18 @@ static int probe_summit(void)
 	return 0;
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt desitination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic apic_summit = APIC_INIT("summit", probe_summit);
@@ -1483,7 +1483,7 @@ static void disable_local_vic_irq(unsigned int irq)
  * the interrupt off to another CPU */
 static void before_handle_vic_irq(unsigned int irq)
 {
-	irq_desc_t *desc = irq_desc + irq;
+	irq_desc_t *desc = irq_to_desc(irq);
 	__u8 cpu = smp_processor_id();
 
 	_raw_spin_lock(&vic_irq_lock);

@@ -1518,7 +1518,7 @@ static void before_handle_vic_irq(unsigned int irq)
 /* Finish the VIC interrupt: basically mask */
 static void after_handle_vic_irq(unsigned int irq)
 {
-	irq_desc_t *desc = irq_desc + irq;
+	irq_desc_t *desc = irq_to_desc(irq);
 
 	_raw_spin_lock(&vic_irq_lock);
 	{
@@ -21,7 +21,6 @@ void xen_force_evtchn_callback(void)
 
 static void __init __xen_init_IRQ(void)
 {
-#ifdef CONFIG_X86_64
 	int i;
 
 	/* Create identity vector->irq map */

@@ -31,7 +30,6 @@ static void __init __xen_init_IRQ(void)
 		for_each_possible_cpu(cpu)
 			per_cpu(vector_irq, cpu)[i] = i;
 	}
-#endif /* CONFIG_X86_64 */
 
 	xen_init_IRQ();
 }
@@ -241,7 +241,7 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 
 out:
 	raw_local_irq_restore(flags);
@@ -219,7 +219,7 @@ static void hpet_timer_set_irq(struct hpet_dev *devp)
 	for (irq = find_first_bit(&v, HPET_MAX_IRQ); irq < HPET_MAX_IRQ;
 		irq = find_next_bit(&v, HPET_MAX_IRQ, 1 + irq)) {
 
-		if (irq >= NR_IRQS) {
+		if (irq >= nr_irqs) {
 			irq = HPET_MAX_IRQ;
 			break;
 		}
@@ -558,9 +558,26 @@ struct timer_rand_state {
 	unsigned dont_count_entropy:1;
 };
 
-static struct timer_rand_state input_timer_state;
 static struct timer_rand_state *irq_timer_state[NR_IRQS];
 
+static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+	if (irq >= nr_irqs)
+		return NULL;
+
+	return irq_timer_state[irq];
+}
+
+static void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+	if (irq >= nr_irqs)
+		return;
+
+	irq_timer_state[irq] = state;
+}
+
+static struct timer_rand_state input_timer_state;
+
 /*
  * This function adds entropy to the entropy "pool" by using timing
  * delays. It uses the timer_rand_state structure to make an estimate

@@ -648,11 +665,15 @@ EXPORT_SYMBOL_GPL(add_input_randomness);
 
 void add_interrupt_randomness(int irq)
 {
-	if (irq >= NR_IRQS || irq_timer_state[irq] == NULL)
+	struct timer_rand_state *state;
+
+	state = get_timer_rand_state(irq);
+
+	if (state == NULL)
 		return;
 
 	DEBUG_ENT("irq event %d\n", irq);
-	add_timer_randomness(irq_timer_state[irq], 0x100 + irq);
+	add_timer_randomness(state, 0x100 + irq);
 }
 
 #ifdef CONFIG_BLOCK

@@ -912,7 +933,12 @@ void rand_initialize_irq(int irq)
 {
 	struct timer_rand_state *state;
 
-	if (irq >= NR_IRQS || irq_timer_state[irq])
+	if (irq >= nr_irqs)
+		return;
+
+	state = get_timer_rand_state(irq);
+
+	if (state)
 		return;
 
 	/*

@@ -921,7 +947,7 @@ void rand_initialize_irq(int irq)
 	 */
 	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
 	if (state)
-		irq_timer_state[irq] = state;
+		set_timer_rand_state(irq, state);
 }
 
 #ifdef CONFIG_BLOCK
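The change above funnels every access to the per-irq table through one bounds check against the runtime nr_irqs limit instead of scattered NR_IRQS comparisons. The same pattern, restated as a self-contained sketch with invented sizes:

#include <stdio.h>

static unsigned int nr_irqs = 16;	/* runtime limit, not a compile-time one */
static void *irq_state[64];

static void *get_state(unsigned int irq)
{
	return irq < nr_irqs ? irq_state[irq] : NULL;
}

static void set_state(unsigned int irq, void *state)
{
	if (irq < nr_irqs)
		irq_state[irq] = state;
}

int main(void)
{
	static int dummy;

	set_state(3, &dummy);
	printf("irq 3: %p, irq 40 (out of range): %p\n",
	       get_state(3), get_state(40));
	return 0;
}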
@@ -641,7 +641,7 @@ static int __devinit giu_probe(struct platform_device *dev)
 	}
 
 	irq = platform_get_irq(dev, 0);
-	if (irq < 0 || irq >= NR_IRQS)
+	if (irq < 0 || irq >= nr_irqs)
 		return -EBUSY;
 
 	return cascade_irq(irq, giu_get_irq);
@@ -1143,7 +1143,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 
 		if (!is_out) {
 			int		irq = gpio_to_irq(gpio);
-			struct irq_desc	*desc = irq_desc + irq;
+			struct irq_desc	*desc = irq_to_desc(irq);
 
 			/* This races with request_irq(), set_irq_type(),
 			 * and set_irq_wake() ... but those are "rare".
@@ -123,7 +123,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
 					irqnr = asic->irq_base +
 						(ASIC3_GPIOS_PER_BANK * bank)
 						+ i;
-					desc = irq_desc + irqnr;
+					desc = irq_to_desc(irqnr);
 					desc->handle_irq(irqnr, desc);
 					if (asic->irq_bothedge[bank] & bit)
 						asic3_irq_flip_edge(asic, base,

@@ -136,7 +136,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
 		for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) {
 			/* They start at bit 4 and go up */
 			if (status & (1 << (i - ASIC3_NUM_GPIOS + 4))) {
-				desc = irq_desc + asic->irq_base + i;
+				desc = irq_to_desc(asic->irq_base + i);
 				desc->handle_irq(asic->irq_base + i,
 						 desc);
 			}
@@ -112,7 +112,7 @@ static void egpio_handler(unsigned int irq, struct irq_desc *desc)
 		/* Run irq handler */
 		pr_debug("got IRQ %d\n", irqpin);
 		irq = ei->irq_start + irqpin;
-		desc = &irq_desc[irq];
+		desc = irq_to_desc(irq);
 		desc->handle_irq(irq, desc);
 	}
 }
@@ -90,7 +90,7 @@ static int vortex_debug = 1;
 #include <linux/eisa.h>
 #include <linux/bitops.h>
 #include <linux/jiffies.h>
-#include <asm/irq.h>			/* For NR_IRQS only. */
+#include <asm/irq.h>			/* For nr_irqs only. */
 #include <asm/io.h>
 #include <asm/uaccess.h>
 

@@ -1221,7 +1221,7 @@ static int __devinit vortex_probe1(struct device *gendev,
 	if (print_info)
 		printk(", IRQ %d\n", dev->irq);
 	/* Tell them about an invalid IRQ. */
-	if (dev->irq <= 0 || dev->irq >= NR_IRQS)
+	if (dev->irq <= 0 || dev->irq >= nr_irqs)
 		printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
 			   dev->irq);
 
@@ -416,10 +416,10 @@ static int ser12_open(struct net_device *dev)
 	if (!dev || !bc)
 		return -ENXIO;
 	if (!dev->base_addr || dev->base_addr > 0xffff-SER12_EXTENT ||
-	    dev->irq < 2 || dev->irq > NR_IRQS) {
+	    dev->irq < 2 || dev->irq > nr_irqs) {
 		printk(KERN_INFO "baycom_ser_fdx: invalid portnumber (max %u) "
 				"or irq (2 <= irq <= %d)\n",
-				0xffff-SER12_EXTENT, NR_IRQS);
+				0xffff-SER12_EXTENT, nr_irqs);
 		return -ENXIO;
 	}
 	if (bc->baud < 300 || bc->baud > 4800) {
@@ -1465,7 +1465,7 @@ static void z8530_init(void)
 	printk(KERN_INFO "Init Z8530 driver: %u channels, IRQ", Nchips*2);
 
 	flag=" ";
-	for (k = 0; k < NR_IRQS; k++)
+	for (k = 0; k < nr_irqs; k++)
 		if (Ivec[k].used)
 		{
 			printk("%s%d", flag, k);

@@ -1728,7 +1728,7 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 			if (hwcfg.irq == 2) hwcfg.irq = 9;
 
-			if (hwcfg.irq < 0 || hwcfg.irq >= NR_IRQS)
+			if (hwcfg.irq < 0 || hwcfg.irq >= nr_irqs)
 				return -EINVAL;
 
 			if (!Ivec[hwcfg.irq].used && hwcfg.irq)

@@ -2148,7 +2148,7 @@ static void __exit scc_cleanup_driver(void)
 	}
 
 	/* To unload the port must be closed so no real IRQ pending */
-	for (k=0; k < NR_IRQS ; k++)
+	for (k = 0; k < nr_irqs ; k++)
 		if (Ivec[k].used) free_irq(k, NULL);
 
 	local_irq_enable();
@@ -318,7 +318,7 @@ sbni_pci_probe( struct net_device *dev )
 			continue;
 		}
 
-		if( pci_irq_line <= 0  ||  pci_irq_line >= NR_IRQS )
+		if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
 			printk( KERN_WARNING "  WARNING: The PCI BIOS assigned "
 				"this PCI card to IRQ %d, which is unlikely "
 				"to work!.\n"
@@ -298,7 +298,8 @@ struct pci_port_ops dino_port_ops = {
 
 static void dino_disable_irq(unsigned int irq)
 {
-	struct dino_device *dino_dev = irq_desc[irq].chip_data;
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct dino_device *dino_dev = desc->chip_data;
 	int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
 
 	DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);

@@ -310,7 +311,8 @@ static void dino_disable_irq(unsigned int irq)
 
 static void dino_enable_irq(unsigned int irq)
 {
-	struct dino_device *dino_dev = irq_desc[irq].chip_data;
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct dino_device *dino_dev = desc->chip_data;
 	int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
 	u32 tmp;
 
@@ -346,10 +346,10 @@ static int __init eisa_probe(struct parisc_device *dev)
 	}
 
 	/* Reserve IRQ2 */
-	irq_desc[2].action = &irq2_action;
+	irq_to_desc(2)->action = &irq2_action;
 
 	for (i = 0; i < 16; i++) {
-		irq_desc[i].chip = &eisa_interrupt_type;
+		irq_to_desc(i)->chip = &eisa_interrupt_type;
 	}
 
 	EISA_bus = 1;
@@ -108,7 +108,8 @@ int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
 
 static void gsc_asic_disable_irq(unsigned int irq)
 {
-	struct gsc_asic *irq_dev = irq_desc[irq].chip_data;
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct gsc_asic *irq_dev = desc->chip_data;
 	int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
 	u32 imr;
 

@@ -123,7 +124,8 @@ static void gsc_asic_disable_irq(unsigned int irq)
 
 static void gsc_asic_enable_irq(unsigned int irq)
 {
-	struct gsc_asic *irq_dev = irq_desc[irq].chip_data;
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct gsc_asic *irq_dev = desc->chip_data;
 	int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
 	u32 imr;
 

@@ -159,12 +161,14 @@ static struct hw_interrupt_type gsc_asic_interrupt_type = {
 int gsc_assign_irq(struct hw_interrupt_type *type, void *data)
 {
 	static int irq = GSC_IRQ_BASE;
+	struct irq_desc *desc;
 
 	if (irq > GSC_IRQ_MAX)
 		return NO_IRQ;
 
-	irq_desc[irq].chip = type;
-	irq_desc[irq].chip_data = data;
+	desc = irq_to_desc(irq);
+	desc->chip = type;
+	desc->chip_data = data;
 	return irq++;
 }
 
@@ -619,7 +619,9 @@ iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1)
 
 static struct vector_info *iosapic_get_vector(unsigned int irq)
 {
-	return irq_desc[irq].chip_data;
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return desc->chip_data;
 }
 
 static void iosapic_disable_irq(unsigned int irq)
@ -363,7 +363,9 @@ int superio_fixup_irq(struct pci_dev *pcidev)
|
|||
#endif
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
irq_desc[i].chip = &superio_interrupt_type;
|
||||
struct irq_desc *desc = irq_to_desc(i);
|
||||
|
||||
desc->chip = &superio_interrupt_type;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -193,7 +193,7 @@ dmar_parse_dev(struct dmar_drhd_unit *dmaru)
|
|||
{
|
||||
struct acpi_dmar_hardware_unit *drhd;
|
||||
static int include_all;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
|
||||
|
||||
|
@ -212,7 +212,7 @@ dmar_parse_dev(struct dmar_drhd_unit *dmaru)
|
|||
include_all = 1;
|
||||
}
|
||||
|
||||
if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) {
|
||||
if (ret) {
|
||||
list_del(&dmaru->list);
|
||||
kfree(dmaru);
|
||||
}
|
||||
|
@ -289,6 +289,24 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* dmar_table_detect - checks to see if the platform supports DMAR devices
|
||||
*/
|
||||
static int __init dmar_table_detect(void)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
|
||||
/* if we could find DMAR table, then there are DMAR devices */
|
||||
status = acpi_get_table(ACPI_SIG_DMAR, 0,
|
||||
(struct acpi_table_header **)&dmar_tbl);
|
||||
|
||||
if (ACPI_SUCCESS(status) && !dmar_tbl) {
|
||||
printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
|
||||
status = AE_NOT_FOUND;
|
||||
}
|
||||
|
||||
return (ACPI_SUCCESS(status) ? 1 : 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* parse_dmar_table - parses the DMA reporting table
|
||||
|
@ -300,6 +318,12 @@ parse_dmar_table(void)
|
|||
struct acpi_dmar_header *entry_header;
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
* Do it again, earlier dmar_tbl mapping could be mapped with
|
||||
* fixed map.
|
||||
*/
|
||||
dmar_table_detect();
|
||||
|
||||
dmar = (struct acpi_table_dmar *)dmar_tbl;
|
||||
if (!dmar)
|
||||
return -ENODEV;
|
||||
|
@ -373,10 +397,10 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
|
|||
|
||||
int __init dmar_dev_scope_init(void)
|
||||
{
|
||||
struct dmar_drhd_unit *drhd;
|
||||
struct dmar_drhd_unit *drhd, *drhd_n;
|
||||
int ret = -ENODEV;
|
||||
|
||||
for_each_drhd_unit(drhd) {
|
||||
list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
|
||||
ret = dmar_parse_dev(drhd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -384,8 +408,8 @@ int __init dmar_dev_scope_init(void)
|
|||
|
||||
#ifdef CONFIG_DMAR
|
||||
{
|
||||
struct dmar_rmrr_unit *rmrr;
|
||||
for_each_rmrr_units(rmrr) {
|
||||
struct dmar_rmrr_unit *rmrr, *rmrr_n;
|
||||
list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
|
||||
ret = rmrr_parse_dev(rmrr);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -430,30 +454,11 @@ int __init dmar_table_init(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* early_dmar_detect - checks to see if the platform supports DMAR devices
|
||||
*/
|
||||
int __init early_dmar_detect(void)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
|
||||
/* if we could find DMAR table, then there are DMAR devices */
|
||||
status = acpi_get_table(ACPI_SIG_DMAR, 0,
|
||||
(struct acpi_table_header **)&dmar_tbl);
|
||||
|
||||
if (ACPI_SUCCESS(status) && !dmar_tbl) {
|
||||
printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
|
||||
status = AE_NOT_FOUND;
|
||||
}
|
||||
|
||||
return (ACPI_SUCCESS(status) ? 1 : 0);
|
||||
}
|
||||
|
||||
void __init detect_intel_iommu(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = early_dmar_detect();
|
||||
ret = dmar_table_detect();
|
||||
|
||||
#ifdef CONFIG_DMAR
|
||||
{
|
||||
|
@ -479,14 +484,16 @@ void __init detect_intel_iommu(void)
|
|||
" x2apic support\n");
|
||||
|
||||
dmar_disabled = 1;
|
||||
return;
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (ret && !no_iommu && !iommu_detected && !swiotlb &&
|
||||
!dmar_disabled)
|
||||
iommu_detected = 1;
|
||||
}
|
||||
end:
|
||||
#endif
|
||||
dmar_tbl = NULL;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -126,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
|
|||
cfg->msg.address_hi = 0xffffffff;
|
||||
|
||||
irq = create_irq();
|
||||
if (irq < 0) {
|
||||
|
||||
if (irq <= 0) {
|
||||
kfree(cfg);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
#include <linux/interrupt.h>
|
||||
#include <linux/dmar.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/jiffies.h>
|
||||
|
@ -11,41 +12,64 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
|
|||
static int ir_ioapic_num;
|
||||
int intr_remapping_enabled;
|
||||
|
||||
static struct {
|
||||
struct irq_2_iommu {
|
||||
struct intel_iommu *iommu;
|
||||
u16 irte_index;
|
||||
u16 sub_handle;
|
||||
u8 irte_mask;
|
||||
} irq_2_iommu[NR_IRQS];
|
||||
};
|
||||
|
||||
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
|
||||
|
||||
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
|
||||
{
|
||||
return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
|
||||
}
|
||||
|
||||
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
|
||||
{
|
||||
return irq_2_iommu(irq);
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(irq_2_ir_lock);
|
||||
|
||||
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
|
||||
{
|
||||
struct irq_2_iommu *irq_iommu;
|
||||
|
||||
irq_iommu = irq_2_iommu(irq);
|
||||
|
||||
if (!irq_iommu)
|
||||
return NULL;
|
||||
|
||||
if (!irq_iommu->iommu)
|
||||
return NULL;
|
||||
|
||||
return irq_iommu;
|
||||
}
|
||||
|
||||
int irq_remapped(int irq)
|
||||
{
|
||||
if (irq > NR_IRQS)
|
||||
return 0;
|
||||
|
||||
if (!irq_2_iommu[irq].iommu)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
return valid_irq_2_iommu(irq) != NULL;
|
||||
}
|
||||
|
||||
int get_irte(int irq, struct irte *entry)
|
||||
{
|
||||
int index;
|
||||
struct irq_2_iommu *irq_iommu;
|
||||
|
||||
if (!entry || irq > NR_IRQS)
|
||||
if (!entry)
|
||||
return -1;
|
||||
|
||||
spin_lock(&irq_2_ir_lock);
|
||||
if (!irq_2_iommu[irq].iommu) {
|
||||
irq_iommu = valid_irq_2_iommu(irq);
|
||||
if (!irq_iommu) {
|
||||
spin_unlock(&irq_2_ir_lock);
|
||||
return -1;
|
||||
}
|
||||
|
||||
index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
|
||||
*entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);
|
||||
index = irq_iommu->irte_index + irq_iommu->sub_handle;
|
||||
*entry = *(irq_iommu->iommu->ir_table->base + index);
|
||||
|
||||
spin_unlock(&irq_2_ir_lock);
|
||||
return 0;
|
||||
|
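The irq_2_iommu()/irq_2_iommu_alloc() split introduced above is deliberately trivial while the backing store is still a flat NR_IRQS array: every caller now goes through an accessor, so the planned sparse-irq backend can swap in on-demand allocation without touching any call site. A minimal userspace sketch of the same indirection (names and sizes hypothetical, not from this tree):

#include <stdio.h>

#define MAX_SLOTS 16

struct slot {
	int data;
};

static struct slot slots[MAX_SLOTS];

/* Lookup accessor: the only place that knows the backing store. */
static struct slot *slot_lookup(unsigned int i)
{
	return (i < MAX_SLOTS) ? &slots[i] : NULL;
}

/*
 * Alloc accessor: today just an alias for lookup, exactly like
 * irq_2_iommu_alloc(); a sparse backend would allocate here instead.
 */
static struct slot *slot_alloc(unsigned int i)
{
	return slot_lookup(i);
}

int main(void)
{
	struct slot *s = slot_alloc(3);

	if (!s)
		return 1;
	s->data = 42;
	printf("%d\n", slot_lookup(3)->data);
	return 0;
}

Only slot_alloc() would change under a sparse backend; slot_lookup() callers keep working unmodified.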
@@ -54,6 +78,7 @@ int get_irte(int irq, struct irte *entry)
 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
 	struct ir_table *table = iommu->ir_table;
+	struct irq_2_iommu *irq_iommu;
 	u16 index, start_index;
 	unsigned int mask = 0;
 	int i;

@@ -61,6 +86,10 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	if (!count)
 		return -1;
 
+	/* protect irq_2_iommu_alloc later */
+	if (irq >= nr_irqs)
+		return -1;
+
 	/*
 	 * start the IRTE search from index 0.
 	 */

@@ -100,10 +129,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	for (i = index; i < index + count; i++)
 		table->base[i].present = 1;
 
-	irq_2_iommu[irq].iommu = iommu;
-	irq_2_iommu[irq].irte_index = index;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = mask;
+	irq_iommu = irq_2_iommu_alloc(irq);
+	irq_iommu->iommu = iommu;
+	irq_iommu->irte_index = index;
+	irq_iommu->sub_handle = 0;
+	irq_iommu->irte_mask = mask;
 
 	spin_unlock(&irq_2_ir_lock);
 

@@ -124,31 +154,33 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
 	int index;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	*sub_handle = irq_2_iommu[irq].sub_handle;
-	index = irq_2_iommu[irq].irte_index;
+	*sub_handle = irq_iommu->sub_handle;
+	index = irq_iommu->irte_index;
 	spin_unlock(&irq_2_ir_lock);
 	return index;
 }
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
-	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || irq_2_iommu[irq].iommu) {
-		spin_unlock(&irq_2_ir_lock);
-		return -1;
-	}
+	struct irq_2_iommu *irq_iommu;
 
-	irq_2_iommu[irq].iommu = iommu;
-	irq_2_iommu[irq].irte_index = index;
-	irq_2_iommu[irq].sub_handle = subhandle;
-	irq_2_iommu[irq].irte_mask = 0;
+	spin_lock(&irq_2_ir_lock);
+
+	irq_iommu = irq_2_iommu_alloc(irq);
+
+	irq_iommu->iommu = iommu;
+	irq_iommu->irte_index = index;
+	irq_iommu->sub_handle = subhandle;
+	irq_iommu->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
 

@@ -157,16 +189,19 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 
 int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 {
+	struct irq_2_iommu *irq_iommu;
+
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	irq_2_iommu[irq].iommu = NULL;
-	irq_2_iommu[irq].irte_index = 0;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = 0;
+	irq_iommu->iommu = NULL;
+	irq_iommu->irte_index = 0;
+	irq_iommu->sub_handle = 0;
+	irq_2_iommu(irq)->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
 

@@ -178,16 +213,18 @@ int modify_irte(int irq, struct irte *irte_modified)
 	int index;
 	struct irte *irte;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	irte = &iommu->ir_table->base[index];
 
 	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));

@@ -203,18 +240,20 @@ int flush_irte(int irq)
 {
 	int index;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-	qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	spin_unlock(&irq_2_ir_lock);
 
 	return 0;

@@ -246,28 +285,30 @@ int free_irte(int irq)
 	int index, i;
 	struct irte *irte;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	irte = &iommu->ir_table->base[index];
 
-	if (!irq_2_iommu[irq].sub_handle) {
-		for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
+	if (!irq_iommu->sub_handle) {
+		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
 			set_64bit((unsigned long *)irte, 0);
-		qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
-	irq_2_iommu[irq].iommu = NULL;
-	irq_2_iommu[irq].irte_index = 0;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = 0;
+	irq_iommu->iommu = NULL;
+	irq_iommu->irte_index = 0;
+	irq_iommu->sub_handle = 0;
+	irq_iommu->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
 

@@ -273,7 +273,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
 			goto fail0d;
 		cf->socket.pci_irq = board->irq_pin;
 	} else
-		cf->socket.pci_irq = NR_IRQS + 1;
+		cf->socket.pci_irq = nr_irqs + 1;
 
 	/* pcmcia layer only remaps "real" memory not iospace */
 	cf->socket.io_offset = (unsigned long)

@@ -233,15 +233,18 @@ static struct hw_interrupt_type hd64465_ss_irq_type = {
  */
 static void hs_map_irq(hs_socket_t *sp, unsigned int irq)
 {
+	struct irq_desc *desc;
+
 	DPRINTK("hs_map_irq(sock=%d irq=%d)\n", sp->number, irq);
 
 	if (irq >= HS_NUM_MAPPED_IRQS)
 		return;
 
+	desc = irq_to_desc(irq);
 	hs_mapped_irq[irq].sock = sp;
 	/* insert ourselves as the irq controller */
-	hs_mapped_irq[irq].old_handler = irq_desc[irq].chip;
-	irq_desc[irq].chip = &hd64465_ss_irq_type;
+	hs_mapped_irq[irq].old_handler = desc->chip;
+	desc->chip = &hd64465_ss_irq_type;
 }
 
 

@@ -250,13 +253,16 @@ static void hs_map_irq(hs_socket_t *sp, unsigned int irq)
  */
 static void hs_unmap_irq(hs_socket_t *sp, unsigned int irq)
 {
+	struct irq_desc *desc;
+
 	DPRINTK("hs_unmap_irq(sock=%d irq=%d)\n", sp->number, irq);
 
 	if (irq >= HS_NUM_MAPPED_IRQS)
 		return;
 
+	desc = irq_to_desc(irq);
 	/* restore the original irq controller */
-	irq_desc[irq].chip = hs_mapped_irq[irq].old_handler;
+	desc->chip = hs_mapped_irq[irq].old_handler;
 }
 
 /*============================================================*/

@@ -639,7 +639,7 @@ static int __devinit vrc4171_card_setup(char *options)
 			int irq;
 			options += 4;
 			irq = simple_strtoul(options, &options, 0);
-			if (irq >= 0 && irq < NR_IRQS)
+			if (irq >= 0 && irq < nr_irqs)
 				vrc4171_irq = irq;
 
 			if (*options != ',')

@@ -360,7 +360,7 @@ static int __devinit rtc_probe(struct platform_device *pdev)
 	spin_unlock_irq(&rtc_lock);
 
 	aie_irq = platform_get_irq(pdev, 0);
-	if (aie_irq < 0 || aie_irq >= NR_IRQS) {
+	if (aie_irq < 0 || aie_irq >= nr_irqs) {
 		retval = -EBUSY;
 		goto err_device_unregister;
 	}

@@ -371,7 +371,7 @@ static int __devinit rtc_probe(struct platform_device *pdev)
 		goto err_device_unregister;
 
 	pie_irq = platform_get_irq(pdev, 1);
-	if (pie_irq < 0 || pie_irq >= NR_IRQS)
+	if (pie_irq < 0 || pie_irq >= nr_irqs)
 		goto err_free_irq;
 
 	retval = request_irq(pie_irq, rtclong1_interrupt, IRQF_DISABLED,

@@ -337,7 +337,7 @@ CMD_INC_RESID(struct scsi_cmnd *cmd, int inc)
 #else
 #define IRQ_MIN 9
 #if defined(__PPC)
-#define IRQ_MAX (NR_IRQS-1)
+#define IRQ_MAX (nr_irqs-1)
 #else
 #define IRQ_MAX 12
 #endif

@@ -2108,7 +2108,7 @@ struct scsi_qla_host;
 
 struct qla_msix_entry {
 	int have_irq;
-	uint16_t msix_vector;
+	uint32_t msix_vector;
 	uint16_t msix_entry;
 };
 

@@ -66,7 +66,6 @@
 #endif
 
 static struct m68k_serial m68k_soft[NR_PORTS];
-struct m68k_serial *IRQ_ports[NR_IRQS];
 
 static unsigned int uart_irqs[NR_PORTS] = UART_IRQ_DEFNS;
 

@@ -375,15 +374,11 @@ clear_and_return:
  */
 irqreturn_t rs_interrupt(int irq, void *dev_id)
 {
-	struct m68k_serial * info;
+	struct m68k_serial *info = dev_id;
 	m68328_uart *uart;
 	unsigned short rx;
 	unsigned short tx;
 
-	info = IRQ_ports[irq];
-	if(!info)
-		return IRQ_NONE;
-
 	uart = &uart_addr[info->line];
 	rx = uart->urx.w;
 

@@ -1383,8 +1378,6 @@ rs68328_init(void)
 		       info->port, info->irq);
 		printk(" is a builtin MC68328 UART\n");
 
-		IRQ_ports[info->irq] = info;	/* waste of space */
-
 #ifdef CONFIG_M68VZ328
 		if (i > 0 )
 			PJSEL &= 0xCF;	/* PSW enable second port output */

@@ -1393,7 +1386,7 @@ rs68328_init(void)
 		if (request_irq(uart_irqs[i],
 				rs_interrupt,
 				IRQF_DISABLED,
-				"M68328_UART", NULL))
+				"M68328_UART", info))
 			panic("Unable to attach 68328 serial interrupt\n");
 	}
 	local_irq_restore(flags);

@@ -156,11 +156,15 @@ struct uart_8250_port {
 };
 
 struct irq_info {
-	spinlock_t		lock;
+	struct hlist_node	node;
+	int			irq;
+	spinlock_t		lock;	/* Protects list not the hash */
 	struct list_head	*head;
 };
 
-static struct irq_info irq_lists[NR_IRQS];
+#define NR_IRQ_HASH		32	/* Can be adjusted later */
+static struct hlist_head irq_lists[NR_IRQ_HASH];
+static DEFINE_MUTEX(hash_mutex);	/* Used to walk the hash */
 
 /*
  * Here we define the default xmit fifo size used for each type of UART.

@@ -1545,15 +1549,43 @@ static void serial_do_unlink(struct irq_info *i, struct uart_8250_port *up)
 		BUG_ON(i->head != &up->list);
 		i->head = NULL;
 	}
-
 	spin_unlock_irq(&i->lock);
+	/* List empty so throw away the hash node */
+	if (i->head == NULL) {
+		hlist_del(&i->node);
+		kfree(i);
+	}
 }
 
 static int serial_link_irq_chain(struct uart_8250_port *up)
 {
-	struct irq_info *i = irq_lists + up->port.irq;
+	struct hlist_head *h;
+	struct hlist_node *n;
+	struct irq_info *i;
 	int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
 
+	mutex_lock(&hash_mutex);
+
+	h = &irq_lists[up->port.irq % NR_IRQ_HASH];
+
+	hlist_for_each(n, h) {
+		i = hlist_entry(n, struct irq_info, node);
+		if (i->irq == up->port.irq)
+			break;
+	}
+
+	if (n == NULL) {
+		i = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
+		if (i == NULL) {
+			mutex_unlock(&hash_mutex);
+			return -ENOMEM;
+		}
+		spin_lock_init(&i->lock);
+		i->irq = up->port.irq;
+		hlist_add_head(&i->node, h);
+	}
+	mutex_unlock(&hash_mutex);
+
 	spin_lock_irq(&i->lock);
 
 	if (i->head) {

@@ -1577,14 +1609,28 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
 
 static void serial_unlink_irq_chain(struct uart_8250_port *up)
 {
-	struct irq_info *i = irq_lists + up->port.irq;
+	struct irq_info *i;
+	struct hlist_node *n;
+	struct hlist_head *h;
 
+	mutex_lock(&hash_mutex);
+
+	h = &irq_lists[up->port.irq % NR_IRQ_HASH];
+
+	hlist_for_each(n, h) {
+		i = hlist_entry(n, struct irq_info, node);
+		if (i->irq == up->port.irq)
+			break;
+	}
+
+	BUG_ON(n == NULL);
 	BUG_ON(i->head == NULL);
 
 	if (list_empty(i->head))
 		free_irq(up->port.irq, i);
 
 	serial_do_unlink(i, up);
+	mutex_unlock(&hash_mutex);
 }
 
 /* Base timer interval for polling */
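Hashing the IRQ number into a fixed set of buckets, as the 8250 change above does, keeps the lookup table a constant size however large nr_irqs grows; each bucket holds a short chain that is walked to find (or create) the per-IRQ node. A self-contained sketch of that find-or-create walk, using a plain singly linked list instead of the kernel's hlist API (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define NR_BUCKETS 32

struct irq_node {
	int irq;
	struct irq_node *next;
};

static struct irq_node *buckets[NR_BUCKETS];

/* Find the per-IRQ node, creating it on first use. */
static struct irq_node *irq_node_get(int irq)
{
	struct irq_node **head = &buckets[irq % NR_BUCKETS];
	struct irq_node *n;

	for (n = *head; n; n = n->next)
		if (n->irq == irq)
			return n;

	n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->irq = irq;
	n->next = *head;
	*head = n;
	return n;
}

int main(void)
{
	/* IRQs 5 and 37 share bucket 5 when NR_BUCKETS == 32. */
	printf("%p %p\n", (void *)irq_node_get(5), (void *)irq_node_get(37));
	printf("%d\n", irq_node_get(37)->irq);
	return 0;
}

Correctness depends only on the chain walk; the modulus just bounds memory use.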
@@ -2447,7 +2493,7 @@ static void serial8250_config_port(struct uart_port *port, int flags)
 static int
 serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
 {
-	if (ser->irq >= NR_IRQS || ser->irq < 0 ||
+	if (ser->irq >= nr_irqs || ser->irq < 0 ||
 	    ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
 	    ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
 	    ser->type == PORT_STARTECH)

@@ -2967,7 +3013,7 @@ EXPORT_SYMBOL(serial8250_unregister_port);
 
 static int __init serial8250_init(void)
 {
-	int ret, i;
+	int ret;
 
 	if (nr_uarts > UART_NR)
 		nr_uarts = UART_NR;

@@ -2976,9 +3022,6 @@ static int __init serial8250_init(void)
 		"%d ports, IRQ sharing %sabled\n", nr_uarts,
 		share_irqs ? "en" : "dis");
 
-	for (i = 0; i < NR_IRQS; i++)
-		spin_lock_init(&irq_lists[i].lock);
-
 #ifdef CONFIG_SPARC
 	ret = sunserial_register_minors(&serial8250_reg, UART_NR);
 #else

@@ -3006,15 +3049,15 @@ static int __init serial8250_init(void)
 	goto out;
 
 	platform_device_del(serial8250_isa_devs);
- put_dev:
+put_dev:
 	platform_device_put(serial8250_isa_devs);
- unreg_uart_drv:
+unreg_uart_drv:
 #ifdef CONFIG_SPARC
 	sunserial_unregister_minors(&serial8250_reg, UART_NR);
 #else
 	uart_unregister_driver(&serial8250_reg);
 #endif
- out:
+out:
 	return ret;
 }
 

@@ -512,7 +512,7 @@ static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
 	int ret = 0;
 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
 		ret = -EINVAL;
-	if (ser->irq < 0 || ser->irq >= NR_IRQS)
+	if (ser->irq < 0 || ser->irq >= nr_irqs)
 		ret = -EINVAL;
 	if (ser->baud_base < 9600)
 		ret = -EINVAL;

@@ -572,7 +572,7 @@ static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
 	int ret = 0;
 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
 		ret = -EINVAL;
-	if (ser->irq < 0 || ser->irq >= NR_IRQS)
+	if (ser->irq < 0 || ser->irq >= nr_irqs)
 		ret = -EINVAL;
 	if (ser->baud_base < 9600)
 		ret = -EINVAL;

@@ -623,7 +623,7 @@ static int cpm_uart_verify_port(struct uart_port *port,
 
 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM)
 		ret = -EINVAL;
-	if (ser->irq < 0 || ser->irq >= NR_IRQS)
+	if (ser->irq < 0 || ser->irq >= nr_irqs)
 		ret = -EINVAL;
 	if (ser->baud_base < 9600)
 		ret = -EINVAL;

@@ -922,7 +922,7 @@ static void m32r_sio_config_port(struct uart_port *port, int flags)
 static int
 m32r_sio_verify_port(struct uart_port *port, struct serial_struct *ser)
 {
-	if (ser->irq >= NR_IRQS || ser->irq < 0 ||
+	if (ser->irq >= nr_irqs || ser->irq < 0 ||
 	    ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
 	    ser->type >= ARRAY_SIZE(uart_config))
 		return -EINVAL;

@@ -1162,7 +1162,7 @@ static int __init m32r_sio_init(void)
 
 	printk(KERN_INFO "Serial: M32R SIO driver\n");
 
-	for (i = 0; i < NR_IRQS; i++)
+	for (i = 0; i < nr_irqs; i++)
 		spin_lock_init(&irq_lists[i].lock);
 
 	ret = uart_register_driver(&m32r_sio_reg);

@@ -741,7 +741,7 @@ static int uart_set_info(struct uart_state *state,
 	if (port->ops->verify_port)
 		retval = port->ops->verify_port(port, &new_serial);
 
-	if ((new_serial.irq >= NR_IRQS) || (new_serial.irq < 0) ||
+	if ((new_serial.irq >= nr_irqs) || (new_serial.irq < 0) ||
 	    (new_serial.baud_base < 9600))
 		retval = -EINVAL;
 

@@ -460,7 +460,7 @@ static int lh7a40xuart_verify_port (struct uart_port* port,
 
 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_LH7A40X)
 		ret = -EINVAL;
-	if (ser->irq < 0 || ser->irq >= NR_IRQS)
+	if (ser->irq < 0 || ser->irq >= nr_irqs)
 		ret = -EINVAL;
 	if (ser->baud_base < 9600) /* *** FIXME: is this true? */
 		ret = -EINVAL;

@@ -1149,7 +1149,7 @@ static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
 {
 	struct sci_port *s = &sci_ports[port->line];
 
-	if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > NR_IRQS)
+	if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
 		return -EINVAL;
 	if (ser->baud_base < 2400)
 		/* No paper tape reader for Mitch.. */

@@ -1066,7 +1066,7 @@ static int qe_uart_verify_port(struct uart_port *port,
 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM)
 		return -EINVAL;
 
-	if (ser->irq < 0 || ser->irq >= NR_IRQS)
+	if (ser->irq < 0 || ser->irq >= nr_irqs)
 		return -EINVAL;
 
 	if (ser->baud_base < 9600)

@@ -125,7 +125,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
 #endif
 
 	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);

@@ -137,10 +137,12 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 static void init_evtchn_cpu_bindings(void)
 {
 #ifdef CONFIG_SMP
+	struct irq_desc *desc;
 	int i;
+
 	/* By default all event channels notify CPU#0. */
-	for (i = 0; i < NR_IRQS; i++)
-		irq_desc[i].affinity = cpumask_of_cpu(0);
+	for_each_irq_desc(i, desc)
+		desc->affinity = cpumask_of_cpu(0);
 #endif
 
 	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));

@@ -229,12 +231,12 @@ static int find_unbound_irq(void)
 	int irq;
 
 	/* Only allocate from dynirq range */
-	for (irq = 0; irq < NR_IRQS; irq++)
+	for_each_irq_nr(irq)
 		if (irq_bindcount[irq] == 0)
 			break;
 
-	if (irq == NR_IRQS)
-		panic("No available IRQ to bind to: increase NR_IRQS!\n");
+	if (irq == nr_irqs)
+		panic("No available IRQ to bind to: increase nr_irqs!\n");
 
 	return irq;
 }

@@ -790,7 +792,7 @@ void xen_irq_resume(void)
 		mask_evtchn(evtchn);
 
 	/* No IRQ <-> event-channel mappings. */
-	for (irq = 0; irq < NR_IRQS; irq++)
+	for_each_irq_nr(irq)
 		irq_info[irq].evtchn = 0; /* zap event-channel binding */
 
 	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)

@@ -822,7 +824,7 @@ void __init xen_init_IRQ(void)
 		mask_evtchn(i);
 
 	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-	for (i = 0; i < NR_IRQS; i++)
+	for_each_irq_nr(i)
 		irq_bindcount[i] = 0;
 
 	irq_ctx_init(smp_processor_id());

@@ -30,6 +30,7 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/pagemap.h>
+#include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/swap.h>
 #include <linux/slab.h>

@@ -521,17 +522,13 @@ static const struct file_operations proc_vmalloc_operations = {
 
 static int show_stat(struct seq_file *p, void *v)
 {
-	int i;
+	int i, j;
 	unsigned long jif;
 	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
 	cputime64_t guest;
 	u64 sum = 0;
 	struct timespec boottime;
-	unsigned int *per_irq_sum;
-
-	per_irq_sum = kzalloc(sizeof(unsigned int)*NR_IRQS, GFP_KERNEL);
-	if (!per_irq_sum)
-		return -ENOMEM;
+	unsigned int per_irq_sum;
 
 	user = nice = system = idle = iowait =
 		irq = softirq = steal = cputime64_zero;

@@ -540,8 +537,6 @@ static int show_stat(struct seq_file *p, void *v)
 	jif = boottime.tv_sec;
 
 	for_each_possible_cpu(i) {
-		int j;
-
 		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
 		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
 		system = cputime64_add(system, kstat_cpu(i).cpustat.system);

@@ -551,11 +546,10 @@ static int show_stat(struct seq_file *p, void *v)
 		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
 		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
 		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-		for (j = 0; j < NR_IRQS; j++) {
-			unsigned int temp = kstat_cpu(i).irqs[j];
-			sum += temp;
-			per_irq_sum[j] += temp;
-		}
+
+		for_each_irq_nr(j)
+			sum += kstat_irqs_cpu(j, i);
+
 		sum += arch_irq_stat_cpu(i);
 	}
 	sum += arch_irq_stat();

@@ -597,8 +591,15 @@ static int show_stat(struct seq_file *p, void *v)
 	}
 	seq_printf(p, "intr %llu", (unsigned long long)sum);
 
-	for (i = 0; i < NR_IRQS; i++)
-		seq_printf(p, " %u", per_irq_sum[i]);
+	/* sum again ? it could be updated? */
+	for_each_irq_nr(j) {
+		per_irq_sum = 0;
+
+		for_each_possible_cpu(i)
+			per_irq_sum += kstat_irqs_cpu(j, i);
+
+		seq_printf(p, " %u", per_irq_sum);
+	}
 
 	seq_printf(p,
 		"\nctxt %llu\n"

@@ -612,7 +613,6 @@ static int show_stat(struct seq_file *p, void *v)
 		nr_running(),
 		nr_iowait());
 
-	kfree(per_irq_sum);
 	return 0;
 }
 
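Dropping the kzalloc'ed per_irq_sum[] array in favor of recomputing each IRQ's total inside the print loop removes the last NR_IRQS-sized allocation from /proc/stat, at the cost of a second pass over the counters. Both passes are just sums over a cpus-by-irqs counter matrix, sketched standalone below (sizes and values hypothetical):

#include <stdio.h>

#define NCPU 2
#define NIRQ 3

/* kstat[cpu][irq] stands in for kstat_irqs_cpu(irq, cpu). */
static unsigned int kstat[NCPU][NIRQ] = {
	{ 1, 0, 5 },
	{ 2, 0, 7 },
};

int main(void)
{
	unsigned long long sum = 0;
	int cpu, irq;

	/* Grand total, as accumulated in show_stat()'s per-cpu loop. */
	for (cpu = 0; cpu < NCPU; cpu++)
		for (irq = 0; irq < NIRQ; irq++)
			sum += kstat[cpu][irq];
	printf("intr %llu", sum);

	/* Per-IRQ column sums, recomputed at print time. */
	for (irq = 0; irq < NIRQ; irq++) {
		unsigned int per_irq_sum = 0;

		for (cpu = 0; cpu < NCPU; cpu++)
			per_irq_sum += kstat[cpu][irq];
		printf(" %u", per_irq_sum);
	}
	printf("\n");
	return 0;
}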
@@ -651,15 +651,14 @@ static const struct file_operations proc_stat_operations = {
  */
 static void *int_seq_start(struct seq_file *f, loff_t *pos)
 {
-	return (*pos <= NR_IRQS) ? pos : NULL;
+	return (*pos <= nr_irqs) ? pos : NULL;
 }
 
 
 static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
 {
 	(*pos)++;
-	if (*pos > NR_IRQS)
-		return NULL;
-	return pos;
+	return (*pos <= nr_irqs) ? pos : NULL;
 }
 
 static void int_seq_stop(struct seq_file *f, void *v)

@@ -667,7 +666,6 @@ static void int_seq_stop(struct seq_file *f, void *v)
 	/* Nothing to do */
 }
 
-
 static const struct seq_operations int_seq_ops = {
 	.start = int_seq_start,
 	.next  = int_seq_next,

@@ -40,8 +40,6 @@ extern void generic_apic_probe(void);
 extern unsigned int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
-extern int ioapic_force;
-
 extern int disable_apic;
 /*
  * Basic functions accessing APICs.

@@ -100,6 +98,20 @@ extern void check_x2apic(void);
 extern void enable_x2apic(void);
 extern void enable_IR_x2apic(void);
 extern void x2apic_icr_write(u32 low, u32 id);
+static inline int x2apic_enabled(void)
+{
+	int msr, msr2;
+
+	if (!cpu_has_x2apic)
+		return 0;
+
+	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+	if (msr & X2APIC_ENABLE)
+		return 1;
+	return 0;
+}
+#else
+#define x2apic_enabled()	0
 #endif
 
 struct apic_ops {

@@ -9,22 +9,17 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-/* Round robin the irqs amoung the online cpus */
 static inline cpumask_t target_cpus(void)
 {
-	static unsigned long cpu = NR_CPUS;
-	do {
-		if (cpu >= NR_CPUS)
-			cpu = first_cpu(cpu_online_map);
-		else
-			cpu = next_cpu(cpu, cpu_online_map);
-	} while (cpu >= NR_CPUS);
-	return cpumask_of_cpu(cpu);
+#ifdef CONFIG_SMP
+	return cpu_online_map;
+#else
+	return cpumask_of_cpu(0);
+#endif
 }
 
 #undef APIC_DEST_LOGICAL
 #define APIC_DEST_LOGICAL	0
 #define TARGET_CPUS		(target_cpus())
 #define APIC_DFR_VALUE		(APIC_DFR_FLAT)
 #define INT_DELIVERY_MODE	(dest_Fixed)
 #define INT_DEST_MODE		(0)    /* phys delivery to target proc */

@@ -94,4 +94,17 @@ extern void efi_reserve_early(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
 
+#ifndef CONFIG_EFI
+/*
+ * IF EFI is not configured, have the EFI calls return -ENOSYS.
+ */
+#define efi_call0(_f)					(-ENOSYS)
+#define efi_call1(_f, _a1)				(-ENOSYS)
+#define efi_call2(_f, _a1, _a2)				(-ENOSYS)
+#define efi_call3(_f, _a1, _a2, _a3)			(-ENOSYS)
+#define efi_call4(_f, _a1, _a2, _a3, _a4)		(-ENOSYS)
+#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5)		(-ENOSYS)
+#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6)	(-ENOSYS)
+#endif /* CONFIG_EFI */
+
 #endif /* ASM_X86__EFI_H */

@@ -17,7 +17,6 @@ static inline cpumask_t target_cpus(void)
 	return cpumask_of_cpu(smp_processor_id());
 #endif
 }
-#define TARGET_CPUS	(target_cpus())
 
 #if defined CONFIG_ES7000_CLUSTERED_APIC
 #define APIC_DFR_VALUE		(APIC_DFR_CLUSTER)

@@ -81,7 +80,7 @@ static inline void setup_apic_routing(void)
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
+		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)

@@ -57,6 +57,7 @@ struct genapic {
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
 	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	cpumask_t (*vector_allocation_domain)(int cpu);
 
 #ifdef CONFIG_SMP
 	/* ipi */

@@ -104,6 +105,7 @@ struct genapic {
 	APICFUNC(get_apic_id)			\
 	.apic_id_mask = APIC_ID_MASK,		\
 	APICFUNC(cpu_mask_to_apicid)		\
+	APICFUNC(vector_allocation_domain)	\
 	APICFUNC(acpi_madt_oem_check)		\
 	IPIFUNC(send_IPI_mask)			\
 	IPIFUNC(send_IPI_allbutself)		\

@@ -1,6 +1,8 @@
 #ifndef ASM_X86__HPET_H
 #define ASM_X86__HPET_H
 
+#include <linux/msi.h>
+
 #ifdef CONFIG_HPET_TIMER
 
 #define HPET_MMAP_SIZE		1024

@@ -10,6 +12,11 @@
 #define HPET_CFG	0x010
 #define HPET_STATUS	0x020
 #define HPET_COUNTER	0x0f0
+
+#define HPET_Tn_CFG(n)		(0x100 + 0x20 * n)
+#define HPET_Tn_CMP(n)		(0x108 + 0x20 * n)
+#define HPET_Tn_ROUTE(n)	(0x110 + 0x20 * n)
+
 #define HPET_T0_CFG	0x100
 #define HPET_T0_CMP	0x108
 #define HPET_T0_ROUTE	0x110

@@ -65,6 +72,20 @@ extern void hpet_disable(void);
 extern unsigned long hpet_readl(unsigned long a);
 extern void force_hpet_resume(void);
 
+extern void hpet_msi_unmask(unsigned int irq);
+extern void hpet_msi_mask(unsigned int irq);
+extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg);
+extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg);
+
+#ifdef CONFIG_PCI_MSI
+extern int arch_setup_hpet_msi(unsigned int irq);
+#else
+static inline int arch_setup_hpet_msi(unsigned int irq)
+{
+	return -EINVAL;
+}
+#endif
+
 #ifdef CONFIG_HPET_EMULATE_RTC
 
 #include <linux/interrupt.h>

@@ -96,13 +96,8 @@ extern asmlinkage void qic_call_function_interrupt(void);
 
 /* SMP */
 extern void smp_apic_timer_interrupt(struct pt_regs *);
-#ifdef CONFIG_X86_32
 extern void smp_spurious_interrupt(struct pt_regs *);
 extern void smp_error_interrupt(struct pt_regs *);
-#else
-extern asmlinkage void smp_spurious_interrupt(void);
-extern asmlinkage void smp_error_interrupt(void);
-#endif
 #ifdef CONFIG_X86_SMP
 extern void smp_reschedule_interrupt(struct pt_regs *);
 extern void smp_call_function_interrupt(struct pt_regs *);

@@ -115,13 +110,13 @@ extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
 #endif
 
 #ifdef CONFIG_X86_32
-extern void (*const interrupt[NR_IRQS])(void);
-#else
-typedef int vector_irq_t[NR_VECTORS];
-DECLARE_PER_CPU(vector_irq_t, vector_irq);
+extern void (*const interrupt[NR_VECTORS])(void);
 #endif
 
-#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64)
+typedef int vector_irq_t[NR_VECTORS];
+DECLARE_PER_CPU(vector_irq_t, vector_irq);
+
+#ifdef CONFIG_X86_IO_APIC
 extern void lock_vector_lock(void);
 extern void unlock_vector_lock(void);
 extern void __setup_vector_irq(int cpu);

@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
+#include <asm/irq_vectors.h>
 
 /*
  * Intel IO-APIC support for SMP and UP systems.

@@ -87,24 +88,8 @@ struct IO_APIC_route_entry {
 		mask		:  1,	/* 0: enabled, 1: disabled */
 		__reserved_2	: 15;
 
-#ifdef CONFIG_X86_32
-	union {
-		struct {
-			__u32	__reserved_1	: 24,
-				physical_dest	:  4,
-				__reserved_2	:  4;
-		} physical;
-
-		struct {
-			__u32	__reserved_1	: 24,
-				logical_dest	:  8;
-		} logical;
-	} dest;
-#else
 	__u32	__reserved_3	: 24,
 		dest		:  8;
-#endif
 
 } __attribute__ ((packed));
 
 struct IR_IO_APIC_route_entry {

@@ -203,10 +188,17 @@ extern void restore_IO_APIC_setup(void);
 extern void reinit_intr_remapped_IO_APIC(int);
 #endif
 
+extern int probe_nr_irqs(void);
+
 #else  /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
 static const int timer_through_8259 = 0;
 static inline void ioapic_init_mappings(void)	{ }
+
+static inline int probe_nr_irqs(void)
+{
+	return NR_IRQS;
+}
 #endif
 
 #endif /* ASM_X86__IO_APIC_H */

@@ -19,19 +19,14 @@
 
 /*
  * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
- * cleanup after irq migration on 64 bit.
+ * cleanup after irq migration.
  */
 #define IRQ_MOVE_CLEANUP_VECTOR	FIRST_EXTERNAL_VECTOR
 
 /*
- * Vectors 0x20-0x2f are used for ISA interrupts on 32 bit.
- * Vectors 0x30-0x3f are used for ISA interrupts on 64 bit.
+ * Vectors 0x30-0x3f are used for ISA interrupts.
  */
-#ifdef CONFIG_X86_32
-#define IRQ0_VECTOR		(FIRST_EXTERNAL_VECTOR)
-#else
 #define IRQ0_VECTOR		(FIRST_EXTERNAL_VECTOR + 0x10)
-#endif
 #define IRQ1_VECTOR		(IRQ0_VECTOR + 1)
 #define IRQ2_VECTOR		(IRQ0_VECTOR + 2)
 #define IRQ3_VECTOR		(IRQ0_VECTOR + 3)

@@ -96,11 +91,7 @@
  * start at 0x31(0x41) to spread out vectors evenly between priority
  * levels. (0x80 is the syscall vector)
  */
-#ifdef CONFIG_X86_32
-# define FIRST_DEVICE_VECTOR	0x31
-#else
-# define FIRST_DEVICE_VECTOR	(IRQ15_VECTOR + 2)
-#endif
+#define FIRST_DEVICE_VECTOR	(IRQ15_VECTOR + 2)
 
 #define NR_VECTORS		256
 

@@ -116,7 +107,6 @@
 # else
 #  define NR_IRQS		(NR_VECTORS + (32 * MAX_IO_APICS))
 # endif
-# define NR_IRQ_VECTORS NR_IRQS
 
 #elif !defined(CONFIG_X86_VOYAGER)
 

@@ -124,23 +114,15 @@
 
 # define NR_IRQS		224
 
-# if (224 >= 32 * NR_CPUS)
-#  define NR_IRQ_VECTORS	NR_IRQS
-# else
-#  define NR_IRQ_VECTORS	(32 * NR_CPUS)
-# endif
-
 # else /* IO_APIC || PARAVIRT */
 
 #  define NR_IRQS		16
-#  define NR_IRQ_VECTORS	NR_IRQS
 
 # endif
 
 #else /* !VISWS && !VOYAGER */
 
 # define NR_IRQS		224
-# define NR_IRQ_VECTORS		NR_IRQS
 
 #endif /* VISWS */
 

@@ -14,6 +14,7 @@ BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
+BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 #endif
 
 /*

@@ -85,6 +85,20 @@ static inline int apicid_to_node(int logical_apicid)
 	return 0;
 #endif
 }
+
+static inline cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt desitination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
 #endif
 
 static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)

@@ -138,6 +152,5 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 static inline void enable_apic_mode(void)
 {
 }
-
 #endif /* CONFIG_X86_LOCAL_APIC */
 #endif /* ASM_X86__MACH_DEFAULT__MACH_APIC_H */

@@ -1,14 +0,0 @@
-#ifndef ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H
-#define ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H
-
-/*
- * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
- * even with uni-proc kernels, so use a big array.
- *
- * This value should be the same in both the generic and summit subarches.
- * Change one, change 'em both.
- */
-#define NR_IRQS	224
-#define NR_IRQ_VECTORS	1024
-
-#endif /* ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H */

@@ -24,6 +24,7 @@
 #define check_phys_apicid_present (genapic->check_phys_apicid_present)
 #define check_apicid_used (genapic->check_apicid_used)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
 

@@ -12,8 +12,6 @@ static inline cpumask_t target_cpus(void)
 	return CPU_MASK_ALL;
 }
 
-#define TARGET_CPUS (target_cpus())
-
 #define NO_BALANCE_IRQ (1)
 #define esr_disable (1)
 

@@ -22,7 +22,6 @@ static inline cpumask_t target_cpus(void)
 	 */
 	return cpumask_of_cpu(0);
 }
-#define TARGET_CPUS	(target_cpus())
 
 #define INT_DELIVERY_MODE (dest_LowestPrio)
 #define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */

@@ -1,14 +0,0 @@
-#ifndef _ASM_IRQ_VECTORS_LIMITS_H
-#define _ASM_IRQ_VECTORS_LIMITS_H
-
-/*
- * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
- * even with uni-proc kernels, so use a big array.
- *
- * This value should be the same in both the generic and summit subarches.
- * Change one, change 'em both.
- */
-#define NR_IRQS	224
-#define NR_IRQ_VECTORS	1024
-
-#endif /* _ASM_IRQ_VECTORS_LIMITS_H */

@@ -2,9 +2,7 @@
 #define ASM_X86__UV__BIOS_H
 
 /*
- * BIOS layer definitions.
- *
- * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+ * UV BIOS layer definitions.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by

@@ -19,11 +17,43 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) Russ Anderson
  */
 
 #include <linux/rtc.h>
 
-#define BIOS_FREQ_BASE 0x01000001
+/*
+ * Values for the BIOS calls.  It is passed as the first * argument in the
+ * BIOS call.  Passing any other value in the first argument will result
+ * in a BIOS_STATUS_UNIMPLEMENTED return status.
+ */
+enum uv_bios_cmd {
+	UV_BIOS_COMMON,
+	UV_BIOS_GET_SN_INFO,
+	UV_BIOS_FREQ_BASE
+};
+
+/*
+ * Status values returned from a BIOS call.
+ */
+enum {
+	BIOS_STATUS_SUCCESS		=  0,
+	BIOS_STATUS_UNIMPLEMENTED	= -ENOSYS,
+	BIOS_STATUS_EINVAL		= -EINVAL,
+	BIOS_STATUS_UNAVAIL		= -EBUSY
+};
+
+/*
+ * The UV system table describes specific firmware
+ * capabilities available to the Linux kernel at runtime.
+ */
+struct uv_systab {
+	char signature[4];	/* must be "UVST" */
+	u32 revision;		/* distinguish different firmware revs */
+	u64 function;		/* BIOS runtime callback function ptr */
+};
+
 enum {
 	BIOS_FREQ_BASE_PLATFORM = 0,

@@ -31,38 +61,34 @@ enum {
 	BIOS_FREQ_BASE_REALTIME_CLOCK = 2
 };
 
-# define BIOS_CALL(result, a0, a1, a2, a3, a4, a5, a6, a7)	\
-	do {							\
-		/* XXX - the real call goes here */		\
-		result.status = BIOS_STATUS_UNIMPLEMENTED;	\
-		isrv.v0 = 0;					\
-		isrv.v1 = 0;					\
-	} while (0)
-
-enum {
-	BIOS_STATUS_SUCCESS		=  0,
-	BIOS_STATUS_UNIMPLEMENTED	= -1,
-	BIOS_STATUS_EINVAL		= -2,
-	BIOS_STATUS_ERROR		= -3
+union partition_info_u {
+	u64	val;
+	struct {
+		u64	hub_version	:  8,
+			partition_id	: 16,
+			coherence_id	: 16,
+			region_size	: 24;
+	};
 };
 
-struct uv_bios_retval {
-	/*
-	 * A zero status value indicates call completed without error.
-	 * A negative status value indicates reason of call failure.
-	 * A positive status value indicates success but an
-	 * informational value should be printed (e.g., "reboot for
-	 * change to take effect").
-	 */
-	s64 status;
-	u64 v0;
-	u64 v1;
-	u64 v2;
-};
+/*
+ * bios calls have 6 parameters
+ */
+extern s64 uv_bios_call(enum uv_bios_cmd, u64, u64, u64, u64, u64);
+extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64);
+extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
 
-extern long
-x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
-		   unsigned long *drift_info);
-extern const char *x86_bios_strerror(long status);
+extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
+extern s64 uv_bios_freq_base(u64, u64 *);
+
+extern void uv_bios_init(void);
+
+extern int uv_type;
+extern long sn_partition_id;
+extern long uv_coherency_id;
+extern long uv_region_size;
+#define partition_coherence_id()	(uv_coherency_id)
+
+extern struct kobject *sgi_uv_kobj;	/* /sys/firmware/sgi_uv */
 
 #endif /* ASM_X86__UV__BIOS_H */

@@ -0,0 +1,36 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV IRQ definitions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef ASM_X86__UV__UV_IRQ_H
+#define ASM_X86__UV__UV_IRQ_H
+
+/* If a generic version of this structure gets defined, eliminate this one. */
+struct uv_IO_APIC_route_entry {
+	__u64	vector		:  8,
+		delivery_mode	:  3,
+		dest_mode	:  1,
+		delivery_status	:  1,
+		polarity	:  1,
+		__reserved_1	:  1,
+		trigger		:  1,
+		mask		:  1,
+		__reserved_2	: 15,
+		dest		: 32;
+};
+
+extern struct irq_chip uv_irq_chip;
+
+extern int arch_enable_uv_irq(char *, unsigned int, int, int, unsigned long);
+extern void arch_disable_uv_irq(int, unsigned long);
+
+extern int uv_setup_irq(char *, int, int, unsigned long);
+extern void uv_teardown_irq(unsigned int, int, unsigned long);
+
+#endif /* ASM_X86__UV__UV_IRQ_H */

@@ -45,7 +45,6 @@ extern struct list_head dmar_drhd_units;
 	list_for_each_entry(drhd, &dmar_drhd_units, list)
 
 extern int dmar_table_init(void);
-extern int early_dmar_detect(void);
 extern int dmar_dev_scope_init(void);
 
 /* Intel IOMMU detection */

@@ -208,6 +208,9 @@ typedef efi_status_t efi_set_virtual_address_map_t (unsigned long memory_map_size,
 #define EFI_GLOBAL_VARIABLE_GUID \
     EFI_GUID(  0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c )
 
+#define UV_SYSTEM_TABLE_GUID \
+    EFI_GUID(  0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93 )
+
 typedef struct {
 	efi_guid_t guid;
 	unsigned long table;

@@ -255,6 +258,7 @@ extern struct efi {
 	unsigned long boot_info;	/* boot info table */
 	unsigned long hcdp;		/* HCDP table */
 	unsigned long uga;		/* UGA table */
+	unsigned long uv_systab;	/* UV system table */
 	efi_get_time_t *get_time;
 	efi_set_time_t *set_time;
 	efi_get_wakeup_time_t *get_wakeup_time;

@@ -8,6 +8,7 @@
 #include <linux/preempt.h>
 #include <linux/cpumask.h>
 #include <linux/irqreturn.h>
+#include <linux/irqnr.h>
 #include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/irqflags.h>

@@ -18,6 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/cpumask.h>
 #include <linux/irqreturn.h>
+#include <linux/irqnr.h>
 #include <linux/errno.h>
 
 #include <asm/irq.h>

@@ -152,6 +153,7 @@ struct irq_chip {
  * @name:		flow handler name for /proc/interrupts output
  */
 struct irq_desc {
+	unsigned int		irq;
 	irq_flow_handler_t	handle_irq;
 	struct irq_chip		*chip;
 	struct msi_desc		*msi_desc;

@@ -170,7 +172,7 @@ struct irq_desc {
 	cpumask_t		affinity;
 	unsigned int		cpu;
 #endif
-#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
+#ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_t		pending_mask;
 #endif
 #ifdef CONFIG_PROC_FS

@@ -179,8 +181,14 @@ struct irq_desc {
 	const char		*name;
 } ____cacheline_internodealigned_in_smp;
 
-
 extern struct irq_desc irq_desc[NR_IRQS];
 
+static inline struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return (irq < nr_irqs) ? irq_desc + irq : NULL;
+}
+
 /*
  * Migration helpers for obsolete names, they will go away:
  */

@@ -198,19 +206,15 @@ extern int setup_irq(unsigned int irq, struct irqaction *new);
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
-#ifndef handle_dynamic_tick
-# define handle_dynamic_tick(a)	do { } while (0)
-#endif
-
 #ifdef CONFIG_SMP
 
-#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
+#ifdef CONFIG_GENERIC_PENDING_IRQ
 
 void set_pending_irq(unsigned int irq, cpumask_t mask);
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
 
-#else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */
+#else /* CONFIG_GENERIC_PENDING_IRQ */
 
 static inline void move_irq(int irq)
 {

@@ -237,19 +241,14 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_IRQBALANCE
-extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask);
-#else
-static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
-{
-}
-#endif
-
 extern int no_irq_affinity;
 
 static inline int irq_balancing_disabled(unsigned int irq)
 {
-	return irq_desc[irq].status & IRQ_NO_BALANCING_MASK;
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	return desc->status & IRQ_NO_BALANCING_MASK;
 }
 
 /* Handle irq action chains: */

@@ -279,10 +278,8 @@ extern unsigned int __do_IRQ(unsigned int irq);
 * irqchip-style controller then we call the ->handle_irq() handler,
 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
 */
-static inline void generic_handle_irq(unsigned int irq)
+static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_desc *desc = irq_desc + irq;
-
 #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
 	desc->handle_irq(irq, desc);
 #else

@@ -293,6 +290,11 @@ static inline void generic_handle_irq(unsigned int irq)
 #endif
 }
 
+static inline void generic_handle_irq(unsigned int irq)
+{
+	generic_handle_irq_desc(irq, irq_to_desc(irq));
+}
+
 /* Handling of unhandled and spurious interrupts: */
 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
 			   int action_ret);

@@ -325,7 +327,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 static inline void __set_irq_handler_unlocked(int irq,
 					      irq_flow_handler_t handler)
 {
-	irq_desc[irq].handle_irq = handler;
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	desc->handle_irq = handler;
 }
 
 /*

@@ -353,13 +358,14 @@ extern void set_irq_noprobe(unsigned int irq);
 extern void set_irq_probe(unsigned int irq);
 
 /* Handle dynamic irq creation and destruction */
+extern unsigned int create_irq_nr(unsigned int irq_want);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
 
 /* Test to see if a driver has successfully requested an irq */
 static inline int irq_has_action(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	return desc->action != NULL;
 }
 

@@ -374,10 +380,10 @@ extern int set_irq_chip_data(unsigned int irq, void *data);
 extern int set_irq_type(unsigned int irq, unsigned int type);
 extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
-#define get_irq_chip(irq)	(irq_desc[irq].chip)
-#define get_irq_chip_data(irq)	(irq_desc[irq].chip_data)
-#define get_irq_data(irq)	(irq_desc[irq].handler_data)
-#define get_irq_msi(irq)	(irq_desc[irq].msi_desc)
+#define get_irq_chip(irq)	(irq_to_desc(irq)->chip)
+#define get_irq_chip_data(irq)	(irq_to_desc(irq)->chip_data)
+#define get_irq_data(irq)	(irq_to_desc(irq)->handler_data)
+#define get_irq_msi(irq)	(irq_to_desc(irq)->msi_desc)
 
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
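The pattern established in the irq.h hunks above, replacing every direct irq_desc[irq].field access with irq_to_desc(irq)->field, is the whole point of this groundwork series: once all users go through the helper, its body can return dynamically allocated descriptors instead of array slots. A compilable stand-alone mock of the calling convention (not kernel code; the NULL check matters once descriptors can be absent):

#include <stdio.h>
#include <stddef.h>

#define NR_IRQS 8

struct irq_desc {
	unsigned int irq;
	const char *name;
};

static struct irq_desc irq_desc[NR_IRQS];
static int nr_irqs = NR_IRQS;

/* Mirrors the new inline: array-backed today, could be sparse tomorrow. */
static struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < (unsigned int)nr_irqs) ? irq_desc + irq : NULL;
}

int main(void)
{
	struct irq_desc *desc = irq_to_desc(3);

	/* Old style would have been: irq_desc[3].name = "demo";
	 * the accessor form survives a switch to sparse storage. */
	if (desc) {
		desc->irq = 3;
		desc->name = "demo";
	}

	/* Out-of-range lookups now fail visibly instead of overrunning. */
	printf("%s %p\n", irq_to_desc(3)->name, (void *)irq_to_desc(99));
	return 0;
}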
@@ -0,0 +1,24 @@
+#ifndef _LINUX_IRQNR_H
+#define _LINUX_IRQNR_H
+
+#ifndef CONFIG_GENERIC_HARDIRQS
+#include <asm/irq.h>
+# define nr_irqs		NR_IRQS
+
+# define for_each_irq_desc(irq, desc)		\
+	for (irq = 0; irq < nr_irqs; irq++)
+#else
+extern int nr_irqs;
+
+# define for_each_irq_desc(irq, desc)		\
+	for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
+
+# define for_each_irq_desc_reverse(irq, desc)			\
+	for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1);	\
+	     irq > 0; irq--, desc--)
+#endif
+
+#define for_each_irq_nr(irq)			\
+	for (irq = 0; irq < nr_irqs; irq++)
+
+#endif
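The new irqnr.h gives arch and driver code one spelling for "walk all interrupts" that works both with GENERIC_HARDIRQS (runtime nr_irqs bound) and without (compile-time NR_IRQS). A small compilable mock showing how the two macros expand and get used (mocked outside the kernel, sizes hypothetical):

#include <stdio.h>

#define NR_IRQS 4

struct irq_desc { int status; } irq_desc[NR_IRQS];
int nr_irqs = NR_IRQS;

/* Mirrors the GENERIC_HARDIRQS variants added in linux/irqnr.h. */
#define for_each_irq_desc(irq, desc) \
	for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
#define for_each_irq_nr(irq) \
	for (irq = 0; irq < nr_irqs; irq++)

int main(void)
{
	struct irq_desc *desc;
	int irq;

	for_each_irq_desc(irq, desc)
		desc->status = irq;

	for_each_irq_nr(irq)
		printf("irq %d status %d\n", irq, irq_desc[irq].status);
	return 0;
}

Because the bound is the variable nr_irqs rather than the constant NR_IRQS, converted loops (xen's find_unbound_irq(), /proc/stat, the serial drivers above) need no further change when the descriptor count becomes dynamic.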
Some files were not shown because too many files changed in this diff.