genirq: Do not copy affinity before set

While rummaging through arch code I found that there are a few
workarounds which deal with the fact that the initial affinity setting
from request_irq() copies the mask into irq_data->affinity before the
chip code is called. In the normal path we unconditionally copy the
mask when the chip code returns 0.
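
For illustration, a minimal sketch (not part of this patch) of a chip
callback whose hardware can only route interrupts to a subset of CPUs;
routable_cpus and program_hw_routing() are assumed helpers:

static int demo_chip_set_affinity(struct irq_data *data,
				  const struct cpumask *mask, bool force)
{
	struct cpumask routed;

	/* The hardware silently truncates the requested mask */
	cpumask_and(&routed, mask, &routable_cpus);
	if (cpumask_empty(&routed))
		return -EINVAL;

	program_hw_routing(&routed);	/* assumed hardware accessor */

	/*
	 * With the old ordering, irq_data->affinity already holds the
	 * full requested mask, so the truncation is invisible to the
	 * core and to userspace; arch code grew workarounds for this.
	 */
	return 0;
}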

Copy the mask after the chip code has been called, and add a return
code IRQ_SET_MASK_OK_NOCOPY for the chip functions, which prevents the
copy. That way we see the real mask when the chip function decides to
truncate it further, as some arches do. IRQ_SET_MASK_OK is 0, which
preserves the current behaviour.
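
Under the new scheme the same sketched callback (same assumed helpers
as above) can publish the truncated mask itself and suppress the
core's copy:

static int demo_chip_set_affinity(struct irq_data *data,
				  const struct cpumask *mask, bool force)
{
	struct cpumask routed;

	cpumask_and(&routed, mask, &routable_cpus);
	if (cpumask_empty(&routed))
		return -EINVAL;

	program_hw_routing(&routed);

	/* Record the mask that is actually in effect ... */
	cpumask_copy(data->affinity, &routed);
	/* ... and tell the core not to overwrite it with 'mask'. */
	return IRQ_SET_MASK_OK_NOCOPY;
}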

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Thomas Gleixner, 2011-02-07 16:02:20 +01:00
Parent: 569bda8df1
Commit: 3b8249e759
4 changed files, 47 insertions(+), 15 deletions(-)

--- a/include/linux/irq.h
+++ b/include/linux/irq.h

@@ -85,6 +85,17 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 # define IRQ_NO_BALANCING_MASK	IRQ_NO_BALANCING
 #endif
+
+/*
+ * Return value for chip->irq_set_affinity()
+ *
+ * IRQ_SET_MASK_OK	- OK, core updates irq_data.affinity
+ * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity
+ */
+enum {
+	IRQ_SET_MASK_OK = 0,
+	IRQ_SET_MASK_OK_NOCOPY,
+};
+
 struct msi_desc;
 
 /**

--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h

@@ -43,7 +43,7 @@ static inline void unregister_handler_proc(unsigned int irq,
 					   struct irqaction *action) { }
 #endif
 
-extern int irq_select_affinity_usr(unsigned int irq);
+extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);

--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c

@@ -148,9 +148,12 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 
 	if (irq_can_move_pcntxt(desc)) {
 		ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
-		if (!ret) {
+		switch (ret) {
+		case IRQ_SET_MASK_OK:
 			cpumask_copy(desc->irq_data.affinity, mask);
+		case IRQ_SET_MASK_OK_NOCOPY:
 			irq_set_thread_affinity(desc);
+			ret = 0;
 		}
 	} else {
 		desc->status |= IRQ_MOVE_PENDING;
@@ -254,9 +257,12 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
+	struct irq_chip *chip = get_irq_desc_chip(desc);
 	struct cpumask *set = irq_default_affinity;
+	int ret;
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
@@ -273,13 +279,20 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
-	cpumask_and(desc->irq_data.affinity, cpu_online_mask, set);
-	desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);
+	cpumask_and(mask, cpu_online_mask, set);
+	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		cpumask_copy(desc->irq_data.affinity, mask);
+	case IRQ_SET_MASK_OK_NOCOPY:
+		irq_set_thread_affinity(desc);
+	}
 	return 0;
 }
 
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 {
 	return irq_select_affinity(irq);
 }
@@ -288,23 +301,23 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 /*
  * Called when affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq)
+int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = setup_affinity(irq, desc);
+	ret = setup_affinity(irq, desc, mask);
 	if (!ret)
 		irq_set_thread_affinity(desc);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
 	return 0;
 }
@@ -765,8 +778,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
 	unsigned long flags;
-	int nested, shared = 0;
-	int ret;
+	int ret, nested, shared = 0;
+	cpumask_var_t mask;
 
 	if (!desc)
 		return -EINVAL;
@@ -831,6 +844,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		new->thread = t;
 	}
 
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		ret = -ENOMEM;
+		goto out_thread;
+	}
+
 	/*
 	 * The following block of code has to be executed atomically
 	 */
@@ -876,7 +894,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 					new->flags & IRQF_TRIGGER_MASK);
 
 		if (ret)
-			goto out_thread;
+			goto out_mask;
 	} else
 		compat_irq_chip_set_default_handler(desc);
 #if defined(CONFIG_IRQ_PER_CPU)
@@ -903,7 +921,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			desc->status |= IRQ_NO_BALANCING;
 
 		/* Set default affinity mask once everything is setup */
-		setup_affinity(irq, desc);
+		setup_affinity(irq, desc, mask);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
@@ -956,6 +974,9 @@ mismatch:
 #endif
 	ret = -EBUSY;
 
+out_mask:
+	free_cpumask_var(mask);
+
 out_thread:
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	if (new->thread) {

--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c

@@ -89,7 +89,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
 	if (!cpumask_intersects(new_value, cpu_online_mask)) {
 		/* Special case for empty set - allow the architecture
 		   code to set default SMP affinity. */
-		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
+		err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
 	} else {
 		irq_set_affinity(irq, new_value);
 		err = count;