genirq: Use hlist for managing resend handlers

The current implementation uses a bitmap, allocated based on the SPARSE_IRQ/NR_IRQS
macros, to manage interrupt resend handlers. This can waste memory at runtime,
particularly when IRQ_BITMAP_BITS is large.

Address this issue by using an hlist to manage interrupt resend handlers
instead of a statically allocated bitmap. Additionally, introduce a new
function, clear_irq_resend(), and call it from irq_shutdown() to ensure a
graceful teardown of the interrupt.

Signed-off-by: Shanker Donthineni <sdonthineni@nvidia.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20230519134902.1495562-2-sdonthineni@nvidia.com
Author: Shanker Donthineni, 2023-05-19 08:49:00 -05:00
Committer: Thomas Gleixner
Parent: d15121be74
Commit: bc06a9e087
5 changed files: 37 additions and 16 deletions
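For readers less familiar with the resend machinery, the following user-space sketch illustrates the pattern the patch adopts: each descriptor embeds its own list node, one global list collects the descriptors that need a resend, and a lock guards the list. The names (fake_desc, sw_resend, clear_resend, drain_resends) are hypothetical, a pthread mutex stands in for the kernel's raw spinlock, and a hand-rolled singly linked list stands in for hlist_node/HLIST_HEAD; this is only an illustration of the idea, not the kernel code shown in the diff below.

#include <pthread.h>
#include <stdio.h>

struct fake_desc {
	int irq;
	int queued;			/* guards against double-queueing in this sketch */
	struct fake_desc *next;		/* stand-in for the embedded hlist_node */
};

static struct fake_desc *resend_list;	/* stand-in for HLIST_HEAD(irq_resend_list) */
static pthread_mutex_t resend_lock = PTHREAD_MUTEX_INITIALIZER;

/* Roughly what irq_sw_resend() does: queue the descriptor for a later resend. */
static void sw_resend(struct fake_desc *desc)
{
	pthread_mutex_lock(&resend_lock);
	if (!desc->queued) {
		desc->next = resend_list;
		resend_list = desc;
		desc->queued = 1;
	}
	pthread_mutex_unlock(&resend_lock);
}

/* Roughly what clear_irq_resend() does: unlink a descriptor on shutdown. */
static void clear_resend(struct fake_desc *desc)
{
	pthread_mutex_lock(&resend_lock);
	for (struct fake_desc **pp = &resend_list; *pp; pp = &(*pp)->next) {
		if (*pp == desc) {
			*pp = desc->next;
			desc->queued = 0;
			break;
		}
	}
	pthread_mutex_unlock(&resend_lock);
}

/* Roughly what resend_irqs() does: drain the list, calling each handler. */
static void drain_resends(void)
{
	pthread_mutex_lock(&resend_lock);
	while (resend_list) {
		struct fake_desc *desc = resend_list;

		resend_list = desc->next;
		desc->queued = 0;
		pthread_mutex_unlock(&resend_lock);	/* handler runs unlocked */
		printf("resending IRQ %d\n", desc->irq);
		pthread_mutex_lock(&resend_lock);
	}
	pthread_mutex_unlock(&resend_lock);
}

int main(void)
{
	struct fake_desc a = { .irq = 10 }, b = { .irq = 11 };

	sw_resend(&a);
	sw_resend(&b);
	clear_resend(&b);	/* shutdown removes a still-pending entry */
	drain_resends();	/* only IRQ 10 is resent */
	return 0;
}

As in the patched resend_irqs(), the drain loop drops the lock around the per-descriptor handler call and retakes it before looking at the list again.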

include/linux/irqdesc.h

@@ -102,6 +102,9 @@ struct irq_desc {
 	int			parent_irq;
 	struct module		*owner;
 	const char		*name;
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+	struct hlist_node	resend_node;
+#endif
 } ____cacheline_internodealigned_in_smp;
 
 #ifdef CONFIG_SPARSE_IRQ

kernel/irq/chip.c

@@ -306,6 +306,7 @@ static void __irq_disable(struct irq_desc *desc, bool mask);
 void irq_shutdown(struct irq_desc *desc)
 {
 	if (irqd_is_started(&desc->irq_data)) {
+		clear_irq_resend(desc);
 		desc->depth = 1;
 		if (desc->irq_data.chip->irq_shutdown) {
 			desc->irq_data.chip->irq_shutdown(&desc->irq_data);

kernel/irq/internals.h

@@ -113,6 +113,8 @@ irqreturn_t handle_irq_event(struct irq_desc *desc);
 
 /* Resending of interrupts :*/
 int check_irq_resend(struct irq_desc *desc, bool inject);
+void clear_irq_resend(struct irq_desc *desc);
+void irq_resend_init(struct irq_desc *desc);
 bool irq_wait_for_poll(struct irq_desc *desc);
 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);

kernel/irq/irqdesc.c

@@ -415,6 +415,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 	desc_set_defaults(irq, desc, node, affinity, owner);
 	irqd_set(&desc->irq_data, flags);
 	kobject_init(&desc->kobj, &irq_kobj_type);
+	irq_resend_init(desc);
 
 	return desc;
@@ -581,6 +582,7 @@ int __init early_irq_init(void)
 		mutex_init(&desc[i].request_mutex);
 		init_waitqueue_head(&desc[i].wait_for_threads);
 		desc_set_defaults(i, &desc[i], node, NULL, NULL);
+		irq_resend_init(desc);
 	}
 	return arch_early_irq_init();
 }

kernel/irq/resend.c

@@ -21,8 +21,9 @@
 
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 
-/* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
+/* hlist_head to handle software resend of interrupts: */
+static HLIST_HEAD(irq_resend_list);
+static DEFINE_RAW_SPINLOCK(irq_resend_lock);
 
 /*
  * Run software resends of IRQ's
@@ -30,18 +31,17 @@ static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
 static void resend_irqs(struct tasklet_struct *unused)
 {
 	struct irq_desc *desc;
-	int irq;
-
-	while (!bitmap_empty(irqs_resend, nr_irqs)) {
-		irq = find_first_bit(irqs_resend, nr_irqs);
-		clear_bit(irq, irqs_resend);
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-		local_irq_disable();
+
+	raw_spin_lock_irq(&irq_resend_lock);
+	while (!hlist_empty(&irq_resend_list)) {
+		desc = hlist_entry(irq_resend_list.first, struct irq_desc,
+				   resend_node);
+		hlist_del_init(&desc->resend_node);
+		raw_spin_unlock(&irq_resend_lock);
 		desc->handle_irq(desc);
-		local_irq_enable();
+		raw_spin_lock(&irq_resend_lock);
 	}
+	raw_spin_unlock_irq(&irq_resend_lock);
 }
 
 /* Tasklet to handle resend: */
@@ -49,8 +49,6 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs);
 
 static int irq_sw_resend(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
-
 	/*
 	 * Validate whether this interrupt can be safely injected from
 	 * non interrupt context
@@ -70,16 +68,31 @@ static int irq_sw_resend(struct irq_desc *desc)
 		 */
 		if (!desc->parent_irq)
 			return -EINVAL;
-		irq = desc->parent_irq;
 	}
 
-	/* Set it pending and activate the softirq: */
-	set_bit(irq, irqs_resend);
+	/* Add to resend_list and activate the softirq: */
+	raw_spin_lock(&irq_resend_lock);
+	hlist_add_head(&desc->resend_node, &irq_resend_list);
+	raw_spin_unlock(&irq_resend_lock);
 	tasklet_schedule(&resend_tasklet);
 	return 0;
 }
 
+void clear_irq_resend(struct irq_desc *desc)
+{
+	raw_spin_lock(&irq_resend_lock);
+	hlist_del_init(&desc->resend_node);
+	raw_spin_unlock(&irq_resend_lock);
+}
+
+void irq_resend_init(struct irq_desc *desc)
+{
+	INIT_HLIST_NODE(&desc->resend_node);
+}
+
 #else
+void clear_irq_resend(struct irq_desc *desc) {}
+void irq_resend_init(struct irq_desc *desc) {}
 static int irq_sw_resend(struct irq_desc *desc)
 {
 	return -EINVAL;