MIPS: Sibyte: Fix locking in set_irq_affinity

Locking of irq_desc is now done in irq_set_affinity; don't lock it again
in the chip-specific set_affinity function.
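
For context, the generic irq_set_affinity() now takes desc->lock around
the chip callback; spinlocks are not recursive, so re-taking desc->lock
inside a chip's set_affinity hook would deadlock, and only the
driver-private IMR lock is still needed there. A simplified sketch of the
caller-side path (not verbatim kernel/irq/manage.c; error paths and
pending-move handling elided):

int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc->chip->set_affinity)
		return -EINVAL;

	/* The generic layer takes desc->lock here ... */
	spin_lock_irqsave(&desc->lock, flags);
	cpumask_copy(desc->affinity, cpumask);
	/* ... so the chip callback already runs with it held. */
	desc->chip->set_affinity(irq, cpumask);
	spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}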

Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Authored by Thomas Bogendoerfer on 2009-05-04 23:51:54 +02:00; committed by Ralf Baechle
Parent: a6d5ff04e8
Commit: 5d81b83d03
2 changed files, 4 additions and 10 deletions

arch/mips/sibyte/bcm1480/irq.c

@@ -113,7 +113,6 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	int i = 0, old_cpu, cpu, int_on, k;
 	u64 cur_ints;
-	struct irq_desc *desc = irq_desc + irq;
 	unsigned long flags;
 	unsigned int irq_dirty;
 
@@ -127,8 +126,7 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
 	cpu = cpu_logical_map(i);
 
 	/* Protect against other affinity changers and IMR manipulation */
-	spin_lock_irqsave(&desc->lock, flags);
-	spin_lock(&bcm1480_imr_lock);
+	spin_lock_irqsave(&bcm1480_imr_lock, flags);
 
 	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
 	old_cpu = bcm1480_irq_owner[irq];
@@ -153,8 +151,7 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
 			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
 		}
 	}
-	spin_unlock(&bcm1480_imr_lock);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
 }
 #endif

arch/mips/sibyte/sb1250/irq.c

@@ -107,7 +107,6 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	int i = 0, old_cpu, cpu, int_on;
 	u64 cur_ints;
-	struct irq_desc *desc = irq_desc + irq;
 	unsigned long flags;
 
 	i = cpumask_first(mask);
@@ -121,8 +120,7 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
 	cpu = cpu_logical_map(i);
 
 	/* Protect against other affinity changers and IMR manipulation */
-	spin_lock_irqsave(&desc->lock, flags);
-	spin_lock(&sb1250_imr_lock);
+	spin_lock_irqsave(&sb1250_imr_lock, flags);
 
 	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
 	old_cpu = sb1250_irq_owner[irq];
@@ -144,8 +142,7 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
 		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
 					R_IMR_INTERRUPT_MASK));
 	}
-	spin_unlock(&sb1250_imr_lock);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	spin_unlock_irqrestore(&sb1250_imr_lock, flags);
 }
 #endif