Merge branch 'irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (32 commits)
  x86: disable __do_IRQ support
  sparseirq, powerpc/cell: fix unused variable warning in interrupt.c
  genirq: deprecate obsolete typedefs and defines
  genirq: deprecate __do_IRQ
  genirq: add doc to struct irqaction
  genirq: use kzalloc instead of explicit zero initialization
  genirq: make irqreturn_t an enum
  genirq: remove redundant if condition
  genirq: remove unused hw_irq_controller typedef
  irq: export remove_irq() and setup_irq() symbols
  irq: match remove_irq() args with setup_irq()
  irq: add remove_irq() for freeing of setup_irq() irqs
  genirq: assert that irq handlers are indeed running in hardirq context
  irq: name 'p' variables a bit better
  irq: further clean up the free_irq() code flow
  irq: refactor and clean up the free_irq() code flow
  irq: clean up manage.c
  irq: use GFP_KERNEL for action allocation in request_irq()
  kernel/irq: fix sparse warning: make symbol static
  irq: optimize init_kstat_irqs/init_copy_kstat_irqs
  ...
@@ -440,6 +440,7 @@ desc->chip->end();
      used in the generic IRQ layer.
   </para>
 !Iinclude/linux/irq.h
+!Iinclude/linux/interrupt.h
 </chapter>

 <chapter id="pubfunctions">
@@ -346,3 +346,20 @@ Why: See commits 129f8ae9b1b5be94517da76009ea956e89104ce8 and
 	Removal is subject to fixing any remaining bugs in ACPI which may
 	cause the thermal throttling not to happen at the right time.
 Who:	Dave Jones <davej@redhat.com>, Matthew Garrett <mjg@redhat.com>
+
+-----------------------------
+
+What:	__do_IRQ all in one fits nothing interrupt handler
+When:	2.6.32
+Why:	__do_IRQ was kept for easy migration to the type flow handlers.
+	More than two years of migration time is enough.
+Who:	Thomas Gleixner <tglx@linutronix.de>
+
+-----------------------------
+
+What:	obsolete generic irq defines and typedefs
+When:	2.6.30
+Why:	The defines and typedefs (hw_interrupt_type, no_irq_type, irq_desc_t)
+	have been kept around for migration reasons. After more than two years
+	it's time to remove them finally
+Who:	Thomas Gleixner <tglx@linutronix.de>
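The entry above schedules __do_IRQ() for removal; the intended replacement is the per-IRQ flow-handler model of the generic IRQ layer. A minimal sketch of that migration, not from this patch set, assuming a hypothetical platform controller already described by a struct irq_chip (the chip, IRQ number and function name below are made up for illustration):

#include <linux/init.h>
#include <linux/irq.h>

#define MY_IRQ		42		/* made-up interrupt number */

extern struct irq_chip my_pic_chip;	/* hypothetical controller, defined elsewhere */

static void __init my_board_init_irq(void)
{
	/*
	 * Instead of funnelling everything through __do_IRQ(), tell the
	 * core which flow fits this line (level-triggered here); the arch
	 * entry code then simply calls generic_handle_irq(MY_IRQ).
	 */
	set_irq_chip_and_handler(MY_IRQ, &my_pic_chip, handle_level_irq);
}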
@@ -90,7 +90,7 @@ show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(irq));
 #else
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j));
 #endif
 		seq_printf(p, " %14s", irq_desc[irq].chip->typename);
 		seq_printf(p, " %c%s",
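The substitution above is the pattern repeated throughout the per-architecture /proc/interrupts code in this merge: the open-coded kstat_cpu(cpu).irqs[irq] lookup becomes the kstat_irqs_cpu() accessor, which also works when CONFIG_SPARSE_IRQ keeps the counters inside the irq_desc instead of struct kernel_stat. A minimal sketch of the accessor in use; the helper itself is hypothetical:

#include <linux/cpumask.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>

/* Hypothetical helper: print one row of per-CPU counts for an IRQ. */
static void show_one_irq(struct seq_file *p, unsigned int irq)
{
	int cpu;

	seq_printf(p, "%3d: ", irq);
	for_each_online_cpu(cpu)
		seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
	seq_putc(p, '\n');
}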
@@ -64,7 +64,7 @@ do_entInt(unsigned long type, unsigned long vector,
 		smp_percpu_timer_interrupt(regs);
 		cpu = smp_processor_id();
 		if (cpu != boot_cpuid) {
-			kstat_cpu(cpu).irqs[RTC_IRQ]++;
+			kstat_incr_irqs_this_cpu(RTC_IRQ, irq_to_desc(RTC_IRQ));
 		} else {
 			handle_irq(RTC_IRQ);
 		}

@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)

 		seq_printf(p, "%3d: ", i);
 		for_each_present_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
 		seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-");
 		seq_printf(p, " %s", action->name);
 		for (action = action->next; action; action = action->next)
@@ -63,7 +63,6 @@ static struct irq_chip ns9xxx_chip = {
 #else
 static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
 {
-	unsigned int cpu = smp_processor_id();
 	struct irqaction *action;
 	irqreturn_t action_ret;

@@ -72,7 +71,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
 	BUG_ON(desc->status & IRQ_INPROGRESS);

 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);

 	action = desc->action;
 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
@@ -58,7 +58,7 @@ int show_interrupts(struct seq_file *p, void *v)

 		seq_printf(p, "%3d: ", i);
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
 		seq_printf(p, " %8s", irq_desc[i].chip->name ? : "-");
 		seq_printf(p, " %s", action->name);
 		for (action = action->next; action; action = action->next)

@@ -83,7 +83,7 @@ int show_interrupts(struct seq_file *p, void *v)
 			goto skip;
 		seq_printf(p, "%3d: ", i);
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 		seq_printf(p, " %8s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);
 		for (action = action->next; action; action = action->next)
@@ -66,7 +66,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
 		seq_printf(p, " %14s", irq_desc[i].chip->typename);
 		seq_printf(p, " %s", action->name);

@@ -74,7 +74,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (action) {
 		seq_printf(p, "%3d: ", i);
 		for_each_present_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
 		seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-");
 		seq_printf(p, " %s", action->name);
 		for (action = action->next;
@@ -183,7 +183,7 @@ asmlinkage void do_IRQ(int irq)
 #if defined(CONFIG_PROC_FS)
 int show_interrupts(struct seq_file *p, void *v)
 {
-	int i = *(loff_t *) v, j;
+	int i = *(loff_t *) v;
 	struct irqaction * action;
 	unsigned long flags;

@@ -196,7 +196,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (!action)
 		goto unlock;
 	seq_printf(p, "%3d: ",i);
-	seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	seq_printf(p, "%10u ", kstat_irqs(i));
 	seq_printf(p, " %14s", irq_desc[i].chip->name);
 	seq_printf(p, "-%-8s", irq_desc[i].name);
 	seq_printf(p, " %s", action->name);
@@ -80,7 +80,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
 		for_each_online_cpu(j) {
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 		}
 #endif
 		seq_printf(p, " %14s", irq_desc[i].chip->name);

@@ -49,7 +49,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
 		seq_printf(p, " %14s", irq_desc[i].chip->typename);
 		seq_printf(p, " %s", action->name);
@@ -108,7 +108,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
 		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);

@@ -221,7 +221,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (action) {
 		seq_printf(p, "%3d: ", i);
 		for_each_present_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
 		seq_printf(p, " %14s.%u", irq_desc[i].chip->name,
 			   (GxICR(i) & GxICR_LEVEL) >>
 			   GxICR_LEVEL_SHIFT);
@@ -185,7 +185,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%3d: ", i);
 #ifdef CONFIG_SMP
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #else
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #endif

@@ -190,7 +190,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%3d: ", i);
 #ifdef CONFIG_SMP
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #else
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #endif /* CONFIG_SMP */
@@ -237,8 +237,6 @@ extern int noirqdebug;

 static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
 {
-	const unsigned int cpu = smp_processor_id();
-
 	spin_lock(&desc->lock);

 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

@@ -254,7 +252,7 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_eoi;
 	}

-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);

 	/* Mark the IRQ currently in progress.*/
 	desc->status |= IRQ_INPROGRESS;
@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		goto unlock;
 	seq_printf(p, "%3d: ",i);
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 	seq_printf(p, " %14s", irq_desc[i].chip->name);
 	seq_printf(p, "-%-8s", irq_desc[i].name);
 	seq_printf(p, " %s", action->name);

@@ -185,7 +185,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
 		seq_printf(p, " %9s", irq_desc[i].chip->typename);
 		seq_printf(p, " %s", action->name);
@@ -36,10 +36,10 @@
 #include <linux/clocksource.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/irq.h>

 #include <asm/oplib.h>
 #include <asm/timer.h>
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/starfire.h>
@@ -42,7 +42,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
 		seq_printf(p, " %14s", irq_desc[i].chip->typename);
 		seq_printf(p, " %s", action->name);
@@ -165,6 +165,9 @@ config GENERIC_HARDIRQS
 	bool
 	default y

+config GENERIC_HARDIRQS_NO__DO_IRQ
+	def_bool y
+
 config GENERIC_IRQ_PROBE
 	bool
 	default y
@@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
 		seq_printf(p, " %14s", irq_desc[i].chip->typename);
 		seq_printf(p, " %s", action->name);
@@ -241,6 +241,10 @@
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>

+#ifdef CONFIG_GENERIC_HARDIRQS
+# include <linux/irq.h>
+#endif
+
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/irq.h>

@@ -558,7 +562,7 @@ struct timer_rand_state {
 	unsigned dont_count_entropy:1;
 };

-#ifndef CONFIG_SPARSE_IRQ
+#ifndef CONFIG_GENERIC_HARDIRQS

 static struct timer_rand_state *irq_timer_state[NR_IRQS];

@@ -20,7 +20,7 @@ struct irq_2_iommu {
 	u8  irte_mask;
 };

-#ifdef CONFIG_SPARSE_IRQ
+#ifdef CONFIG_GENERIC_HARDIRQS
 static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
 {
 	struct irq_2_iommu *iommu;
@@ -61,6 +61,17 @@

 typedef irqreturn_t (*irq_handler_t)(int, void *);

+/**
+ * struct irqaction - per interrupt action descriptor
+ * @handler:	interrupt handler function
+ * @flags:	flags (see IRQF_* above)
+ * @mask:	no comment as it is useless and about to be removed
+ * @name:	name of the device
+ * @dev_id:	cookie to identify the device
+ * @next:	pointer to the next irqaction for shared interrupts
+ * @irq:	interrupt number
+ * @dir:	pointer to the proc/irq/NN/name entry
+ */
 struct irqaction {
 	irq_handler_t handler;
 	unsigned long flags;
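The new kernel-doc above describes the fields that request_irq() fills in behind the scenes; a driver normally never builds a struct irqaction by hand. A minimal sketch of the usual path, with a hypothetical device type and made-up names:

#include <linux/interrupt.h>

struct my_dev {
	void __iomem *regs;	/* hypothetical device state */
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	if (!dev)
		return IRQ_NONE;
	/* ... acknowledge the interrupt via dev->regs ... */
	return IRQ_HANDLED;
}

static int my_probe(struct my_dev *dev, unsigned int irq)
{
	/* dev doubles as the cookie handed back to my_isr() and free_irq(). */
	return request_irq(irq, my_isr, IRQF_SHARED, "my_dev", dev);
}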
@@ -462,6 +473,12 @@ static inline void init_irq_proc(void)
 }
 #endif

+#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
+extern void debug_poll_all_shared_irqs(void);
+#else
+static inline void debug_poll_all_shared_irqs(void) { }
+#endif
+
 int show_interrupts(struct seq_file *p, void *v);

 struct irq_desc;
@@ -160,12 +160,10 @@ struct irq_2_iommu;
  */
 struct irq_desc {
 	unsigned int		irq;
-#ifdef CONFIG_SPARSE_IRQ
 	struct timer_rand_state *timer_rand_state;
 	unsigned int            *kstat_irqs;
-# ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_INTR_REMAP
 	struct irq_2_iommu      *irq_2_iommu;
-# endif
 #endif
 	irq_flow_handler_t	handle_irq;
 	struct irq_chip		*chip;

@@ -202,12 +200,6 @@ extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc
 extern struct irq_desc irq_desc[NR_IRQS];
 #else /* CONFIG_SPARSE_IRQ */
 extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
-
-#define kstat_irqs_this_cpu(DESC) \
-	((DESC)->kstat_irqs[smp_processor_id()])
-#define kstat_incr_irqs_this_cpu(irqno, DESC) \
-	((DESC)->kstat_irqs[smp_processor_id()]++)
-
 #endif /* CONFIG_SPARSE_IRQ */

 extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);

@@ -226,7 +218,6 @@ irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
  * Migration helpers for obsolete names, they will go away:
  */
 #define hw_interrupt_type	irq_chip
-typedef struct irq_chip		hw_irq_controller;
 #define no_irq_type		no_irq_chip
 typedef struct irq_desc		irq_desc_t;

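The compatibility names kept above are the ones the feature-removal entry earlier schedules for deletion; in driver code the rename is mechanical. A purely illustrative before/after sketch with made-up callbacks and names:

#include <linux/irq.h>

static void my_pic_ack(unsigned int irq)    { /* ack at the hypothetical PIC */ }
static void my_pic_mask(unsigned int irq)   { /* mask the line */ }
static void my_pic_unmask(unsigned int irq) { /* unmask the line */ }

/* Was: static struct hw_interrupt_type my_pic = { ... };  (obsolete name) */
static struct irq_chip my_pic = {
	.name	= "MY-PIC",		/* made-up controller name */
	.ack	= my_pic_ack,
	.mask	= my_pic_mask,
	.unmask	= my_pic_unmask,
};

/* Was: irq_desc_t *desc;  -- now spelled with the real struct name. */
static struct irq_desc *my_desc_for(unsigned int irq)
{
	return irq_to_desc(irq);
}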
@@ -236,6 +227,7 @@ typedef struct irq_desc irq_desc_t;
 #include <asm/hw_irq.h>

 extern int setup_irq(unsigned int irq, struct irqaction *new);
+extern void remove_irq(unsigned int irq, struct irqaction *act);

 #ifdef CONFIG_GENERIC_HARDIRQS


@@ -280,7 +272,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
 }

 /* Handle irq action chains: */
-extern int handle_IRQ_event(unsigned int irq, struct irqaction *action);
+extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);

 /*
  * Built-in IRQ handlers for various IRQ types,

@@ -325,7 +317,7 @@ static inline void generic_handle_irq(unsigned int irq)

 /* Handling of unhandled and spurious interrupts: */
 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
-			   int action_ret);
+			   irqreturn_t action_ret);

 /* Resending of interrupts :*/
 void check_irq_resend(struct irq_desc *desc, unsigned int irq);
@@ -28,13 +28,17 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
 # define for_each_irq_desc(irq, desc) \
 	for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
 	     irq++, desc = irq_to_desc(irq)) \
-		if (desc)
+		if (!desc) \
+			; \
+		else


 # define for_each_irq_desc_reverse(irq, desc) \
 	for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \
 	     irq--, desc = irq_to_desc(irq)) \
-		if (desc)
+		if (!desc) \
+			; \
+		else

 #endif /* CONFIG_GENERIC_HARDIRQS */

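The reworked macros above wrap the body in "if (!desc) ; else" rather than "if (desc)", so a caller's own else cannot accidentally bind to the macro's if, while NULL descriptors are still skipped. A minimal usage sketch (debug printout only, no locking, purely illustrative):

#include <linux/irq.h>
#include <linux/irqnr.h>
#include <linux/kernel.h>

static void dump_requested_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_irq_desc(irq, desc) {
		if (desc->action)
			printk(KERN_DEBUG "irq %u is requested\n", irq);
	}
}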
@@ -1,25 +1,17 @@
 /* irqreturn.h */
 #ifndef _LINUX_IRQRETURN_H
 #define _LINUX_IRQRETURN_H

-/*
- * For 2.4.x compatibility, 2.4.x can use
- *
- *	typedef void irqreturn_t;
- *	#define IRQ_NONE
- *	#define IRQ_HANDLED
- *	#define IRQ_RETVAL(x)
- *
- * To mix old-style and new-style irq handler returns.
- *
- * IRQ_NONE means we didn't handle it.
- * IRQ_HANDLED means that we did have a valid interrupt and handled it.
- * IRQ_RETVAL(x) selects on the two depending on x being non-zero (for handled)
+/**
+ * enum irqreturn
+ * @IRQ_NONE		interrupt was not from this device
+ * @IRQ_HANDLED		interrupt was handled by this device
  */
-typedef int irqreturn_t;
+enum irqreturn {
+	IRQ_NONE,
+	IRQ_HANDLED,
+};

-#define IRQ_NONE	(0)
-#define IRQ_HANDLED	(1)
-#define IRQ_RETVAL(x)	((x) != 0)
+typedef enum irqreturn irqreturn_t;
+#define IRQ_RETVAL(x)	((x) != IRQ_NONE)

 #endif
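With irqreturn_t now an enum, handlers keep returning IRQ_HANDLED or IRQ_NONE exactly as before, and IRQ_RETVAL() still maps a "did I handle it?" value onto the two constants. A minimal sketch, assuming a hypothetical status register handed in as dev_id:

#include <linux/interrupt.h>
#include <linux/io.h>

#define MY_IRQ_PENDING	0x1	/* made-up status bit */

static irqreturn_t my_handler(int irq, void *dev_id)
{
	void __iomem *status_reg = dev_id;	/* hypothetical mapping */
	unsigned int pending = readl(status_reg) & MY_IRQ_PENDING;

	if (pending)
		writel(pending, status_reg);	/* ack the device */

	/* Nonzero pending selects IRQ_HANDLED, zero selects IRQ_NONE. */
	return IRQ_RETVAL(pending);
}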
@@ -28,7 +28,7 @@ struct cpu_usage_stat {

 struct kernel_stat {
 	struct cpu_usage_stat	cpustat;
-#ifndef CONFIG_SPARSE_IRQ
+#ifndef CONFIG_GENERIC_HARDIRQS
 	unsigned int irqs[NR_IRQS];
 #endif
 };

@@ -41,7 +41,7 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);

 extern unsigned long long nr_context_switches(void);

-#ifndef CONFIG_SPARSE_IRQ
+#ifndef CONFIG_GENERIC_HARDIRQS
 #define kstat_irqs_this_cpu(irq) \
 	(kstat_this_cpu.irqs[irq])

@@ -52,16 +52,19 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
 {
 	kstat_this_cpu.irqs[irq]++;
 }
 #endif


 #ifndef CONFIG_SPARSE_IRQ
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	return kstat_cpu(cpu).irqs[irq];
 }
 #else
 #include <linux/irq.h>
 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+#define kstat_irqs_this_cpu(DESC) \
+	((DESC)->kstat_irqs[smp_processor_id()])
+#define kstat_incr_irqs_this_cpu(irqno, DESC) \
+	((DESC)->kstat_irqs[smp_processor_id()]++)

 #endif

 /*
@@ -78,6 +78,7 @@ void dynamic_irq_cleanup(unsigned int irq)
 	desc->handle_irq = handle_bad_irq;
 	desc->chip = &no_irq_chip;
 	desc->name = NULL;
+	clear_kstat_irqs(desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }

@@ -290,7 +291,8 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
 		desc->chip->mask_ack(irq);
 	else {
 		desc->chip->mask(irq);
-		desc->chip->ack(irq);
+		if (desc->chip->ack)
+			desc->chip->ack(irq);
 	}
 }

@@ -476,7 +478,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	kstat_incr_irqs_this_cpu(irq, desc);

 	/* Start handling the irq */
-	desc->chip->ack(irq);
+	if (desc->chip->ack)
+		desc->chip->ack(irq);
 	desc = irq_remap_to_desc(irq, desc);

 	/* Mark the IRQ currently in progress.*/
@@ -83,19 +83,21 @@ static struct irq_desc irq_desc_init = {

 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
 {
-	unsigned long bytes;
-	char *ptr;
 	int node;

-	/* Compute how many bytes we need per irq and allocate them */
-	bytes = nr * sizeof(unsigned int);
+	void *ptr;

 	node = cpu_to_node(cpu);
-	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
-	printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);

-	if (ptr)
-		desc->kstat_irqs = (unsigned int *)ptr;
+	/*
+	 * don't overwite if can not get new one
+	 * init_copy_kstat_irqs() could still use old one
+	 */
+	if (ptr) {
+		printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n",
+			cpu, node);
+		desc->kstat_irqs = ptr;
+	}
 }

 static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
@@ -227,6 +229,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };

+static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
 int __init early_irq_init(void)
 {
 	struct irq_desc *desc;

@@ -238,8 +241,10 @@ int __init early_irq_init(void)
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);

-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
+		desc[i].kstat_irqs = kstat_irqs_all[i];
+	}

 	return arch_early_irq_init();
 }

@@ -255,6 +260,11 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 }
 #endif /* !CONFIG_SPARSE_IRQ */

+void clear_kstat_irqs(struct irq_desc *desc)
+{
+	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+}
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -328,6 +338,8 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;

+	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
+
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();

@@ -347,6 +359,11 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 }

 #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

+#ifdef CONFIG_ENABLE_WARN_DEPRECATED
+# warning __do_IRQ is deprecated. Please convert to proper flow handlers
+#endif
+
 /**
  * __do_IRQ - original all in one highlevel IRQ handler
  * @irq: the interrupt number

@@ -467,12 +484,10 @@ void early_init_irq_lock_class(void)
 	}
 }

-#ifdef CONFIG_SPARSE_IRQ
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	return desc ? desc->kstat_irqs[cpu] : 0;
 }
-#endif
 EXPORT_SYMBOL(kstat_irqs_cpu);

@@ -15,6 +15,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,

 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern void clear_kstat_irqs(struct irq_desc *desc);
 extern spinlock_t sparse_irq_lock;
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];

@@ -109,7 +109,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
+static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	if (!irq_can_set_affinity(irq))
 		return 0;

@@ -133,7 +133,7 @@ set_affinity:
 	return 0;
 }
 #else
-static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 {
 	return irq_select_affinity(irq);
 }

@@ -149,14 +149,14 @@ int irq_select_affinity_usr(unsigned int irq)
 	int ret;

 	spin_lock_irqsave(&desc->lock, flags);
-	ret = do_irq_select_affinity(irq, desc);
+	ret = setup_affinity(irq, desc);
 	spin_unlock_irqrestore(&desc->lock, flags);

 	return ret;
 }

 #else
-static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	return 0;
 }
@@ -389,9 +389,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
  * allocate special interrupts that are part of the architecture.
  */
 static int
-__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
+__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
-	struct irqaction *old, **p;
+	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
 	unsigned long flags;
 	int shared = 0;

@@ -423,8 +423,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 	 * The following block of code has to be executed atomically
 	 */
 	spin_lock_irqsave(&desc->lock, flags);
-	p = &desc->action;
-	old = *p;
+	old_ptr = &desc->action;
+	old = *old_ptr;
 	if (old) {
 		/*
 		 * Can't share interrupts unless both agree to and are

@@ -447,8 +447,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)

 		/* add new interrupt at end of irq queue */
 		do {
-			p = &old->next;
-			old = *p;
+			old_ptr = &old->next;
+			old = *old_ptr;
 		} while (old);
 		shared = 1;
 	}

@@ -488,7 +488,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 			desc->status |= IRQ_NO_BALANCING;

 		/* Set default affinity mask once everything is setup */
-		do_irq_select_affinity(irq, desc);
+		setup_affinity(irq, desc);

 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)

@@ -499,7 +499,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 				(int)(new->flags & IRQF_TRIGGER_MASK));
 	}

-	*p = new;
+	*old_ptr = new;

 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
@@ -549,9 +549,102 @@ int setup_irq(unsigned int irq, struct irqaction *act)

 	return __setup_irq(irq, desc, act);
 }
+EXPORT_SYMBOL_GPL(setup_irq);

+/*
+ * Internal function to unregister an irqaction - used to free
+ * regular and special interrupts that are part of the architecture.
+ */
+static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irqaction *action, **action_ptr;
+	unsigned long flags;

+	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

+	if (!desc)
+		return NULL;

+	spin_lock_irqsave(&desc->lock, flags);

+	/*
+	 * There can be multiple actions per IRQ descriptor, find the right
+	 * one based on the dev_id:
+	 */
+	action_ptr = &desc->action;
+	for (;;) {
+		action = *action_ptr;

+		if (!action) {
+			WARN(1, "Trying to free already-free IRQ %d\n", irq);
+			spin_unlock_irqrestore(&desc->lock, flags);

+			return NULL;
+		}

+		if (action->dev_id == dev_id)
+			break;
+		action_ptr = &action->next;
+	}

+	/* Found it - now remove it from the list of entries: */
+	*action_ptr = action->next;

+	/* Currently used only by UML, might disappear one day: */
+#ifdef CONFIG_IRQ_RELEASE_METHOD
+	if (desc->chip->release)
+		desc->chip->release(irq, dev_id);
+#endif

+	/* If this was the last handler, shut down the IRQ line: */
+	if (!desc->action) {
+		desc->status |= IRQ_DISABLED;
+		if (desc->chip->shutdown)
+			desc->chip->shutdown(irq);
+		else
+			desc->chip->disable(irq);
+	}
+	spin_unlock_irqrestore(&desc->lock, flags);

+	unregister_handler_proc(irq, action);

+	/* Make sure it's not being used on another CPU: */
+	synchronize_irq(irq);

+#ifdef CONFIG_DEBUG_SHIRQ
+	/*
+	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
+	 * event to happen even now it's being freed, so let's make sure that
+	 * is so by doing an extra call to the handler ....
+	 *
+	 * ( We do this after actually deregistering it, to make sure that a
+	 *   'real' IRQ doesn't run in parallel with our fake. )
+	 */
+	if (action->flags & IRQF_SHARED) {
+		local_irq_save(flags);
+		action->handler(irq, dev_id);
+		local_irq_restore(flags);
+	}
+#endif
+	return action;
+}

 /**
- *	free_irq - free an interrupt
+ *	remove_irq - free an interrupt
  *	@irq: Interrupt line to free
+ *	@act: irqaction for the interrupt
+ *
+ *	Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_irq(unsigned int irq, struct irqaction *act)
+{
+	__free_irq(irq, act->dev_id);
+}
+EXPORT_SYMBOL_GPL(remove_irq);
+
+/**
+ *	free_irq - free an interrupt allocated with request_irq
+ *	@irq: Interrupt line to free
  *	@dev_id: Device identity to free
  *
@@ -566,73 +659,7 @@ int setup_irq(unsigned int irq, struct irqaction *act)
  */
 void free_irq(unsigned int irq, void *dev_id)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irqaction **p;
-	unsigned long flags;

-	WARN_ON(in_interrupt());

-	if (!desc)
-		return;

-	spin_lock_irqsave(&desc->lock, flags);
-	p = &desc->action;
-	for (;;) {
-		struct irqaction *action = *p;

-		if (action) {
-			struct irqaction **pp = p;

-			p = &action->next;
-			if (action->dev_id != dev_id)
-				continue;

-			/* Found it - now remove it from the list of entries */
-			*pp = action->next;

-			/* Currently used only by UML, might disappear one day.*/
-#ifdef CONFIG_IRQ_RELEASE_METHOD
-			if (desc->chip->release)
-				desc->chip->release(irq, dev_id);
-#endif

-			if (!desc->action) {
-				desc->status |= IRQ_DISABLED;
-				if (desc->chip->shutdown)
-					desc->chip->shutdown(irq);
-				else
-					desc->chip->disable(irq);
-			}
-			spin_unlock_irqrestore(&desc->lock, flags);
-			unregister_handler_proc(irq, action);

-			/* Make sure it's not being used on another CPU */
-			synchronize_irq(irq);
-#ifdef CONFIG_DEBUG_SHIRQ
-			/*
-			 * It's a shared IRQ -- the driver ought to be
-			 * prepared for it to happen even now it's
-			 * being freed, so let's make sure.... We do
-			 * this after actually deregistering it, to
-			 * make sure that a 'real' IRQ doesn't run in
-			 * parallel with our fake
-			 */
-			if (action->flags & IRQF_SHARED) {
-				local_irq_save(flags);
-				action->handler(irq, dev_id);
-				local_irq_restore(flags);
-			}
-#endif
-			kfree(action);
-			return;
-		}
-		printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
-#ifdef CONFIG_DEBUG_SHIRQ
-		dump_stack();
-#endif
-		spin_unlock_irqrestore(&desc->lock, flags);
-		return;
-	}
+	kfree(__free_irq(irq, dev_id));
 }
 EXPORT_SYMBOL(free_irq);

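The CONFIG_DEBUG_SHIRQ block above deliberately fires one extra call into the handler right after it has been unhooked, to catch drivers that cannot cope with a spurious shared interrupt. A handler registered with IRQF_SHARED should therefore always check its own device before touching anything. A sketch with a hypothetical register layout:

#include <linux/interrupt.h>
#include <linux/io.h>

#define MY_INT_STATUS	0x04	/* made-up register offset */

struct my_card {
	void __iomem *mmio;
};

static irqreturn_t my_shared_isr(int irq, void *dev_id)
{
	struct my_card *card = dev_id;
	u32 status = readl(card->mmio + MY_INT_STATUS);

	/* Not ours (or the fake DEBUG_SHIRQ poke): bail out harmlessly. */
	if (!status)
		return IRQ_NONE;

	writel(status, card->mmio + MY_INT_STATUS);	/* ack the device */
	return IRQ_HANDLED;
}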
@@ -679,11 +706,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	 * the behavior is classified as "will not fix" so we need to
 	 * start nudging drivers away from using that idiom.
 	 */
-	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
-					== (IRQF_SHARED|IRQF_DISABLED))
-		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
-				"guaranteed on shared IRQs\n",
-				irq, devname);
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
+					(IRQF_SHARED|IRQF_DISABLED)) {
+		pr_warning(
+		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
+			irq, devname);
+	}

 #ifdef CONFIG_LOCKDEP
 	/*
@@ -709,15 +737,13 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	if (!handler)
 		return -EINVAL;

-	action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 	if (!action)
 		return -ENOMEM;

 	action->handler = handler;
 	action->flags = irqflags;
-	cpus_clear(action->mask);
 	action->name = devname;
-	action->next = NULL;
 	action->dev_id = dev_id;

 	retval = __setup_irq(irq, desc, action);
@@ -17,16 +17,11 @@ static void init_copy_kstat_irqs(struct irq_desc *old_desc,
 				 struct irq_desc *desc,
 				 int cpu, int nr)
 {
-	unsigned long bytes;

 	init_kstat_irqs(desc, cpu, nr);

-	if (desc->kstat_irqs != old_desc->kstat_irqs) {
-		/* Compute how many bytes we need per irq and allocate them */
-		bytes = nr * sizeof(unsigned int);

-		memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
-	}
+	if (desc->kstat_irqs != old_desc->kstat_irqs)
+		memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
+			 nr * sizeof(*desc->kstat_irqs));
 }

 static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
 	return ok;
 }

-static void poll_spurious_irqs(unsigned long dummy)
+static void poll_all_shared_irqs(void)
 {
 	struct irq_desc *desc;
 	int i;

@@ -123,11 +123,23 @@ static void poll_spurious_irqs(unsigned long dummy)

 		try_one_irq(i, desc);
 	}
+}

+static void poll_spurious_irqs(unsigned long dummy)
+{
+	poll_all_shared_irqs();

 	mod_timer(&poll_spurious_irq_timer,
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }

+#ifdef CONFIG_DEBUG_SHIRQ
+void debug_poll_all_shared_irqs(void)
+{
+	poll_all_shared_irqs();
+}
+#endif
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic