genirq: Use hlist for managing resend handlers
The current implementation utilizes a bitmap for managing interrupt resend
handlers, which is allocated based on the SPARSE_IRQ/NR_IRQS macros. However,
this method may not efficiently utilize memory during runtime, particularly
when IRQ_BITMAP_BITS is large.

Address this issue by using an hlist to manage interrupt resend handlers
instead of relying on a static bitmap memory allocation. Additionally, a new
function, clear_irq_resend(), is introduced and called from irq_shutdown to
ensure a graceful teardown of the interrupt.

Signed-off-by: Shanker Donthineni <sdonthineni@nvidia.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20230519134902.1495562-2-sdonthineni@nvidia.com
parent d15121be74
commit bc06a9e087

5 changed files with 37 additions and 16 deletions
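For orientation before the hunks, here is a minimal, self-contained userspace sketch of the idea described in the commit message: each descriptor embeds its own list node and is linked into a pending list only while a resend is outstanding, so the footprint is one node per descriptor rather than a bitmap sized for IRQ_BITMAP_BITS. This is illustration only, not the kernel code: a plain doubly linked list and a pthread mutex stand in for the kernel's hlist and raw spinlock, and every name in it (lnode, fake_desc, queue_resend, drain_resends, clear_resend) is hypothetical.

/*
 * Illustration only -- not part of the patch. Userspace approximation of the
 * "embedded node + pending list" pattern that replaces the static bitmap.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct lnode {                          /* minimal intrusive list node */
        struct lnode *next, *prev;
};

struct fake_desc {                      /* stand-in for struct irq_desc */
        int irq;
        void (*handle)(struct fake_desc *desc);
        struct lnode resend_node;       /* embedded, like the new hlist_node */
};

/* Pending-resend list: one node per queued descriptor, no fixed-size bitmap. */
static struct lnode pending = { &pending, &pending };
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

static int lnode_unlinked(const struct lnode *n) { return n->next == n; }

static void lnode_init(struct lnode *n) { n->next = n->prev = n; }

static void lnode_add(struct lnode *n, struct lnode *head)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void lnode_del_init(struct lnode *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        lnode_init(n);                  /* re-init so a second removal is harmless */
}

/* Queue a descriptor for a software resend (spirit of irq_sw_resend()). */
static void queue_resend(struct fake_desc *desc)
{
        pthread_mutex_lock(&pending_lock);
        if (lnode_unlinked(&desc->resend_node))  /* simple double-queue guard */
                lnode_add(&desc->resend_node, &pending);
        pthread_mutex_unlock(&pending_lock);
}

/* Drain the list (spirit of resend_irqs()): pop under the lock, call unlocked. */
static void drain_resends(void)
{
        pthread_mutex_lock(&pending_lock);
        while (!lnode_unlinked(&pending)) {
                struct lnode *n = pending.next;
                struct fake_desc *desc = (struct fake_desc *)
                        ((char *)n - offsetof(struct fake_desc, resend_node));

                lnode_del_init(n);
                pthread_mutex_unlock(&pending_lock);
                desc->handle(desc);     /* run the handler without the list lock */
                pthread_mutex_lock(&pending_lock);
        }
        pthread_mutex_unlock(&pending_lock);
}

/* Teardown helper in the spirit of clear_irq_resend(). */
static void clear_resend(struct fake_desc *desc)
{
        pthread_mutex_lock(&pending_lock);
        lnode_del_init(&desc->resend_node);
        pthread_mutex_unlock(&pending_lock);
}

static void print_resend(struct fake_desc *desc)
{
        printf("resent irq %d\n", desc->irq);
}

int main(void)
{
        struct fake_desc d = { .irq = 42, .handle = print_resend };

        lnode_init(&d.resend_node);     /* like irq_resend_init() at alloc time */
        queue_resend(&d);
        drain_resends();                /* prints: resent irq 42 */
        clear_resend(&d);               /* harmless: node already points at itself */
        return 0;
}

The drain loop releases the list lock around the handler call, mirroring how resend_irqs() in the patch drops irq_resend_lock before invoking desc->handle_irq(), and re-initializing the node on removal is what lets the clear-on-teardown helper run safely whether or not the descriptor is still queued.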
include/linux/irqdesc.h
@@ -102,6 +102,9 @@ struct irq_desc {
         int parent_irq;
         struct module *owner;
         const char *name;
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+        struct hlist_node resend_node;
+#endif
 } ____cacheline_internodealigned_in_smp;
 
 #ifdef CONFIG_SPARSE_IRQ
kernel/irq/chip.c
@@ -306,6 +306,7 @@ static void __irq_disable(struct irq_desc *desc, bool mask);
 void irq_shutdown(struct irq_desc *desc)
 {
         if (irqd_is_started(&desc->irq_data)) {
+                clear_irq_resend(desc);
                 desc->depth = 1;
                 if (desc->irq_data.chip->irq_shutdown) {
                         desc->irq_data.chip->irq_shutdown(&desc->irq_data);
kernel/irq/internals.h
@@ -113,6 +113,8 @@ irqreturn_t handle_irq_event(struct irq_desc *desc);
 
 /* Resending of interrupts :*/
 int check_irq_resend(struct irq_desc *desc, bool inject);
+void clear_irq_resend(struct irq_desc *desc);
+void irq_resend_init(struct irq_desc *desc);
 bool irq_wait_for_poll(struct irq_desc *desc);
 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
 
kernel/irq/irqdesc.c
@@ -415,6 +415,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
         desc_set_defaults(irq, desc, node, affinity, owner);
         irqd_set(&desc->irq_data, flags);
         kobject_init(&desc->kobj, &irq_kobj_type);
+        irq_resend_init(desc);
 
         return desc;
 
@@ -581,6 +582,7 @@ int __init early_irq_init(void)
                 mutex_init(&desc[i].request_mutex);
                 init_waitqueue_head(&desc[i].wait_for_threads);
                 desc_set_defaults(i, &desc[i], node, NULL, NULL);
+                irq_resend_init(desc);
         }
         return arch_early_irq_init();
 }
kernel/irq/resend.c
@@ -21,8 +21,9 @@
 
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 
-/* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
+/* hlist_head to handle software resend of interrupts: */
+static HLIST_HEAD(irq_resend_list);
+static DEFINE_RAW_SPINLOCK(irq_resend_lock);
 
 /*
  * Run software resends of IRQ's
@@ -30,18 +31,17 @@ static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
 static void resend_irqs(struct tasklet_struct *unused)
 {
         struct irq_desc *desc;
-        int irq;
 
-        while (!bitmap_empty(irqs_resend, nr_irqs)) {
-                irq = find_first_bit(irqs_resend, nr_irqs);
-                clear_bit(irq, irqs_resend);
-                desc = irq_to_desc(irq);
-                if (!desc)
-                        continue;
-                local_irq_disable();
+        raw_spin_lock_irq(&irq_resend_lock);
+        while (!hlist_empty(&irq_resend_list)) {
+                desc = hlist_entry(irq_resend_list.first, struct irq_desc,
+                                   resend_node);
+                hlist_del_init(&desc->resend_node);
+                raw_spin_unlock(&irq_resend_lock);
                 desc->handle_irq(desc);
-                local_irq_enable();
+                raw_spin_lock(&irq_resend_lock);
         }
+        raw_spin_unlock_irq(&irq_resend_lock);
 }
 
 /* Tasklet to handle resend: */
@@ -49,8 +49,6 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs);
 
 static int irq_sw_resend(struct irq_desc *desc)
 {
-        unsigned int irq = irq_desc_get_irq(desc);
-
         /*
          * Validate whether this interrupt can be safely injected from
          * non interrupt context
@@ -70,16 +68,31 @@ static int irq_sw_resend(struct irq_desc *desc)
                  */
                 if (!desc->parent_irq)
                         return -EINVAL;
-                irq = desc->parent_irq;
         }
 
-        /* Set it pending and activate the softirq: */
-        set_bit(irq, irqs_resend);
+        /* Add to resend_list and activate the softirq: */
+        raw_spin_lock(&irq_resend_lock);
+        hlist_add_head(&desc->resend_node, &irq_resend_list);
+        raw_spin_unlock(&irq_resend_lock);
         tasklet_schedule(&resend_tasklet);
         return 0;
 }
 
+void clear_irq_resend(struct irq_desc *desc)
+{
+        raw_spin_lock(&irq_resend_lock);
+        hlist_del_init(&desc->resend_node);
+        raw_spin_unlock(&irq_resend_lock);
+}
+
+void irq_resend_init(struct irq_desc *desc)
+{
+        INIT_HLIST_NODE(&desc->resend_node);
+}
 #else
+void clear_irq_resend(struct irq_desc *desc) {}
+void irq_resend_init(struct irq_desc *desc) {}
+
 static int irq_sw_resend(struct irq_desc *desc)
 {
         return -EINVAL;