irqchip/sifive-plic: Implement irq_set_affinity() for SMP host
Currently on an SMP host, all CPUs take external interrupts routed via the
PLIC. All CPUs try to claim a given external interrupt, but only one of them
succeeds while the other CPUs simply resume whatever they were doing before.
This means that if we have N CPUs, then for every external interrupt N-1 CPUs
will always fail to claim it and waste their CPU time.

Instead, an external interrupt should be taken by only one CPU, and we should
have a provision to explicitly specify the IRQ affinity from kernel-space or
user-space.

This patch provides an irq_set_affinity() implementation for the PLIC driver.
It also updates irq_enable() such that PLIC interrupts are only enabled for
one of the CPUs specified in the IRQ affinity mask.

With this patch in place, we can change the IRQ affinity at any time from
user-space using procfs. Example:

/ # cat /proc/interrupts
           CPU0       CPU1       CPU2       CPU3
  8:         44          0          0          0  SiFive PLIC   8  virtio0
 10:         48          0          0          0  SiFive PLIC  10  ttyS0
IPI0:        55        663         58        363  Rescheduling interrupts
IPI1:         0          1          3         16  Function call interrupts
/ #
/ #
/ # echo 4 > /proc/irq/10/smp_affinity
/ #
/ # cat /proc/interrupts
           CPU0       CPU1       CPU2       CPU3
  8:         45          0          0          0  SiFive PLIC   8  virtio0
 10:        160          0         17          0  SiFive PLIC  10  ttyS0
IPI0:        68        693         77        410  Rescheduling interrupts
IPI1:         0          2          3         16  Function call interrupts

Signed-off-by: Anup Patel <anup@brainfault.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
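As the commit message notes, affinity can also be specified from kernel-space.
Below is a minimal, hypothetical sketch (not part of this patch) of a driver
pinning its interrupt to CPU2, the same CPU selected by the "echo 4" procfs
write above. The helper name example_pin_irq_to_cpu2() is made up for
illustration; irq_set_affinity() and cpumask_of() are the generic kernel APIs
that eventually reach the irq_chip's .irq_set_affinity callback.

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/*
 * Hypothetical helper: pin @irq to CPU2 (affinity mask 0x4), mirroring
 * the "echo 4 > /proc/irq/10/smp_affinity" example above. The generic
 * irq_set_affinity() call is what ends up invoking the irq_chip's
 * .irq_set_affinity method, i.e. plic_set_affinity() in the diff below.
 */
static int example_pin_irq_to_cpu2(unsigned int irq)
{
	return irq_set_affinity(irq, cpumask_of(2));
}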
Parent: 6adfe8d2f5
Commit: cc9f04f9a8
@@ -83,29 +83,59 @@ static inline void plic_toggle(struct plic_handler *handler,
 	raw_spin_unlock(&handler->enable_lock);
 }
 
-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static inline void plic_irq_toggle(const struct cpumask *mask,
+				   int hwirq, int enable)
 {
 	int cpu;
 
-	writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
-	for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+	writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+	for_each_cpu(cpu, mask) {
 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
 
 		if (handler->present)
-			plic_toggle(handler, d->hwirq, enable);
+			plic_toggle(handler, hwirq, enable);
 	}
 }
 
 static void plic_irq_enable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 1);
+	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+					   cpu_online_mask);
+	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+		return;
+	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 }
 
 static void plic_irq_disable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 0);
+	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
 }
 
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d,
+			     const struct cpumask *mask_val, bool force)
+{
+	unsigned int cpu;
+
+	if (force)
+		cpu = cpumask_first(mask_val);
+	else
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (!irqd_irq_disabled(d)) {
+		plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+		plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	}
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
 static struct irq_chip plic_chip = {
 	.name		= "SiFive PLIC",
 	/*
@@ -114,6 +144,9 @@ static struct irq_chip plic_chip = {
 	 */
 	.irq_enable	= plic_irq_enable,
 	.irq_disable	= plic_irq_disable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = plic_set_affinity,
+#endif
 };
 
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
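For context (not part of the diff), here is a rough, simplified sketch of how
an affinity change reaches the new callback: the generic IRQ core (the real
logic lives in irq_do_set_affinity() in kernel/irq/manage.c) hands the request
to the irq_chip, which for the PLIC is now plic_set_affinity(). The helper
name below is illustrative only.

#include <linux/irq.h>

/*
 * Illustrative only: a simplified version of what the IRQ core does when
 * affinity changes. The real irq_do_set_affinity() also handles masking,
 * managed-affinity reservations and effective-affinity fixups.
 */
static int example_apply_affinity(struct irq_data *d,
				  const struct cpumask *mask, bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	/* For the SiFive PLIC this invokes plic_set_affinity() above. */
	return chip->irq_set_affinity(d, mask, force);
}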