/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
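
/* Counts interrupt delivery errors; reported as the ERR line in /proc/interrupts. */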
atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*generic_interrupt_extension)(void) = NULL;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        if (printk_ratelimit())
                pr_err("unexpected IRQ trap at vector %02x\n", irq);

        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        ack_APIC_irq();
}

#define irq_stats(x)        (&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing:
 */
static int show_other_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_printf(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_printf(p, "  Spurious interrupts\n");
        seq_printf(p, "%*s: ", prec, "CNT");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_printf(p, "  Performance counter interrupts\n");
        seq_printf(p, "%*s: ", prec, "PND");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
        seq_printf(p, "  Performance pending work\n");
#endif
        if (generic_interrupt_extension) {
                seq_printf(p, "%*s: ", prec, "PLT");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
                seq_printf(p, "  Platform interrupts\n");
        }
#ifdef CONFIG_SMP
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_printf(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_printf(p, "  Function call interrupts\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_printf(p, "  Thermal event interrupts\n");
# ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_printf(p, "  Threshold APIC interrupts\n");
# endif
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
        seq_printf(p, "  Machine check exceptions\n");
        seq_printf(p, "%*s: ", prec, "MCP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_printf(p, "  Machine check polls\n");
#endif
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
        return 0;
}
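
/*
 * The seq_file iterator calls us with positions 0..nr_irqs; the final
 * position prints the summary lines produced by show_other_interrupts().
 */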
int show_interrupts(struct seq_file *p, void *v)
{
        unsigned long flags, any_count = 0;
        int i = *(loff_t *) v, j, prec;
        struct irqaction *action;
        struct irq_desc *desc;

        if (i > nr_irqs)
                return 0;
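
        /*
         * Field width for the irq number column: wide enough to print
         * nr_irqs in decimal, never narrower than three characters.
         */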
        for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
                j *= 10;

        if (i == nr_irqs)
                return show_other_interrupts(p, prec);

        /* print header */
        if (i == 0) {
                seq_printf(p, "%*s", prec + 8, "");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        desc = irq_to_desc(i);
        if (!desc)
                return 0;

        spin_lock_irqsave(&desc->lock, flags);
        for_each_online_cpu(j)
                any_count |= kstat_irqs_cpu(i, j);
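        /* Nothing to show for irqs that have no handler and have never fired. */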
        action = desc->action;
        if (!action && !any_count)
                goto out;

        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
        seq_printf(p, " %8s", desc->chip->name);
        seq_printf(p, "-%-8s", desc->name);

        if (action) {
                seq_printf(p, "  %s", action->name);
                while ((action = action->next) != NULL)
                        seq_printf(p, ", %s", action->name);
        }

        seq_putc(p, '\n');
out:
        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_pending_irqs;
#endif
        if (generic_interrupt_extension)
                sum += irq_stats(cpu)->generic_irqs;
#ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
        sum += irq_stats(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += irq_stats(cpu)->irq_thermal_count;
# ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
# endif
#endif
#ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
        sum += per_cpu(mce_poll_count, cpu);
#endif
        return sum;
}

u64 arch_irq_stat(void)
{
        u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
        sum += atomic_read(&irq_mis_count);
#endif
        return sum;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        /* high bit used in ret_from_ code */
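        /*
         * The entry stubs push the complemented vector number into
         * orig_ax so an interrupt frame can't be mistaken for a syscall
         * number; complement it again here to recover the vector.
         */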
        unsigned vector = ~regs->orig_ax;
        unsigned irq;

        exit_idle();
        irq_enter();
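
        /* Per-cpu vector_irq table: hardware vector number -> Linux irq number. */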
        irq = __get_cpu_var(vector_irq)[vector];

        if (!handle_irq(irq, regs)) {
                ack_APIC_irq();

                if (printk_ratelimit())
                        pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
                                __func__, smp_processor_id(), vector, irq);
        }

        run_local_timers();
        irq_exit();

        set_irq_regs(old_regs);
        return 1;
}

/*
 * Handler for GENERIC_INTERRUPT_VECTOR.
 */
void smp_generic_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        ack_APIC_irq();

        exit_idle();

        irq_enter();

        inc_irq_stat(generic_irqs);

        if (generic_interrupt_extension)
                generic_interrupt_extension();

        run_local_timers();
        irq_exit();

        set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq;
        static int warned;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
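
                /* irq 2 is the cascade to the slave 8259A and never has a real handler. */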
                if (irq == 2)
                        continue;

                /* interrupts are disabled at this point */
                spin_lock(&desc->lock);

                affinity = desc->affinity;
                if (!irq_has_action(irq) ||
                    cpumask_equal(affinity, cpu_online_mask)) {
                        spin_unlock(&desc->lock);
                        continue;
                }

                /*
                 * Complete the irq move. This cpu is going down and, in the
                 * non-intr-remapping case, we can't wait until this interrupt
                 * arrives at this cpu before completing the irq move.
                 */
                irq_force_complete_move(irq);
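
                /*
                 * If none of the irq's target cpus remains online, fall back
                 * to all cpus and remember that the requested affinity is
                 * being broken.
                 */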
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_all_mask;
                }
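
                /*
                 * Unless the irq supports affinity changes from process
                 * context (IRQ_MOVE_PCNTXT), mask it while its routing is
                 * rewritten so it cannot fire mid-update.
                 */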
                if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask)
                        desc->chip->mask(irq);

                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, affinity);
                else if (!(warned++))
                        set_affinity = 0;

                if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
                        desc->chip->unmask(irq);

                spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        printk("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        printk("Cannot set affinity for irq %i\n", irq);
        }
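
        /*
         * Briefly re-enable interrupts so that anything still targeted at
         * this cpu can be delivered and handled before it goes offline.
         */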
        /* That doesn't seem sufficient.  Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif