x86/apic: Convert to IRQCHIP_MOVE_DEFERRED
Instead of marking individual interrupts as safe to be migrated in arbitrary contexts, mark the interrupt chips which require the interrupt to be moved in actual interrupt context with the new IRQCHIP_MOVE_DEFERRED flag. This makes more sense because this is a per-interrupt-chip property and not restricted to individual interrupts.

That flips the logic from the historical opt-out to an opt-in model. This is simpler to handle for other architectures, which default to unrestricted affinity setting. It also allows the redundant core logic to be cleaned up significantly.

All interrupt chips which belong to a top-level domain sitting directly on top of the x86 vector domain are marked accordingly, unless the related setup code marks the interrupts with IRQ_MOVE_PCNTXT, i.e. XEN.

No functional change intended.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steve Wahl <steve.wahl@hpe.com>
Acked-by: Wei Liu <wei.liu@kernel.org>
Link: https://lore.kernel.org/all/20241210103335.563277044@linutronix.de
This commit is contained in:
parent a648eb3a3f
commit 7d04319a05

11 changed files with 7 additions and 23 deletions
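For orientation, a minimal sketch (not part of this commit; the chip, domain callback, and names below are hypothetical) contrasting the two models. Under the old opt-out model, every interrupt whose affinity could safely be changed from arbitrary (process) context had to be marked individually with IRQ_MOVE_PCNTXT. Under the new opt-in model that per-interrupt marking goes away, and a chip which requires affinity changes to be deferred to actual interrupt context sets IRQCHIP_MOVE_DEFERRED itself:

/* Illustrative sketch only -- example_deferred_chip and example_domain_alloc
 * are hypothetical, not code from this commit. */
#include <linux/irq.h>
#include <linux/irqdomain.h>

/*
 * New opt-in model: the chip itself states that affinity changes must be
 * deferred until the next interrupt is handled, as the x86 vector domain
 * chips in this commit now do.
 */
static struct irq_chip example_deferred_chip = {
	.name			= "EXAMPLE-DEFERRED",
	.irq_ack		= irq_chip_ack_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MOVE_DEFERRED,
};

static int example_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	/*
	 * Old opt-out model, as removed from the x86 call sites below:
	 * interrupts that were safe to migrate from process context had to
	 * be flagged one by one:
	 *
	 *	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
	 *
	 * With the per-chip flag above, interrupts whose chip does not set
	 * IRQCHIP_MOVE_DEFERRED now default to immediate affinity changes
	 * and need no per-interrupt marking here.
	 */
	return 0;
}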
@@ -173,6 +173,7 @@ config X86
 	select GENERIC_IRQ_RESERVATION_MODE
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PENDING_IRQ if SMP
+	select GENERIC_PENDING_IRQ_CHIPFLAGS if SMP
 	select GENERIC_PTDUMP
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL

@@ -304,7 +304,7 @@ static struct irq_chip hv_pci_msi_controller = {
 	.irq_retrigger = irq_chip_retrigger_hierarchy,
 	.irq_compose_msi_msg = hv_irq_compose_msi_msg,
 	.irq_set_affinity = msi_domain_set_affinity,
-	.flags = IRQCHIP_SKIP_SET_WAKE,
+	.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MOVE_DEFERRED,
 };
 
 static struct msi_domain_ops pci_msi_domain_ops = {

@@ -1861,7 +1861,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
 	.irq_set_affinity = ioapic_set_affinity,
 	.irq_retrigger = irq_chip_retrigger_hierarchy,
 	.irq_get_irqchip_state = ioapic_irq_get_chip_state,
-	.flags = IRQCHIP_SKIP_SET_WAKE |
+	.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MOVE_DEFERRED |
 		 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 

@@ -214,6 +214,7 @@ static bool x86_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
 		if (WARN_ON_ONCE(domain != real_parent))
 			return false;
 		info->chip->irq_set_affinity = msi_set_affinity;
+		info->chip->flags |= IRQCHIP_MOVE_DEFERRED;
 		break;
 	case DOMAIN_BUS_DMAR:
 	case DOMAIN_BUS_AMDVI:

@@ -315,7 +316,7 @@ static struct irq_chip dmar_msi_controller = {
 	.irq_retrigger = irq_chip_retrigger_hierarchy,
 	.irq_compose_msi_msg = dmar_msi_compose_msg,
 	.irq_write_msi_msg = dmar_msi_write_msg,
-	.flags = IRQCHIP_SKIP_SET_WAKE |
+	.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MOVE_DEFERRED |
 		 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 

@@ -516,22 +516,14 @@ static int hpet_msi_init(struct irq_domain *domain,
 			 struct msi_domain_info *info, unsigned int virq,
 			 irq_hw_number_t hwirq, msi_alloc_info_t *arg)
 {
-	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
 	irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL,
 			    handle_edge_irq, arg->data, "edge");
 
 	return 0;
 }
 
-static void hpet_msi_free(struct irq_domain *domain,
-			  struct msi_domain_info *info, unsigned int virq)
-{
-	irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
-}
-
 static struct msi_domain_ops hpet_msi_domain_ops = {
 	.msi_init = hpet_msi_init,
-	.msi_free = hpet_msi_free,
 };
 
 static struct msi_domain_info hpet_msi_domain_info = {

@@ -92,8 +92,6 @@ static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
 	if (ret >= 0) {
 		if (info->uv.limit == UV_AFFINITY_CPU)
 			irq_set_status_flags(virq, IRQ_NO_BALANCING);
-		else
-			irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
 
 		chip_data->pnode = uv_blade_to_pnode(info->uv.blade);
 		chip_data->offset = info->uv.offset;

@@ -113,7 +111,6 @@ static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
 
 	BUG_ON(nr_irqs != 1);
 	kfree(irq_data->chip_data);
-	irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
 	irq_clear_status_flags(virq, IRQ_NO_BALANCING);
 	irq_domain_free_irqs_top(domain, virq, nr_irqs);
 }

@@ -2332,7 +2332,7 @@ static struct irq_chip intcapxt_controller = {
 	.irq_retrigger = irq_chip_retrigger_hierarchy,
 	.irq_set_affinity = intcapxt_set_affinity,
 	.irq_set_wake = intcapxt_set_wake,
-	.flags = IRQCHIP_MASK_ON_SUSPEND,
+	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_MOVE_DEFERRED,
 };
 
 static const struct irq_domain_ops intcapxt_domain_ops = {

@@ -3532,7 +3532,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 		irq_data->chip_data = data;
 		irq_data->chip = &amd_ir_chip;
 		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
-		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
 	}
 
 	return 0;

@@ -1463,7 +1463,6 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
 		else
 			irq_data->chip = &intel_ir_chip;
 		intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
-		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
 	}
 	return 0;
 

@@ -2053,6 +2053,7 @@ static struct irq_chip hv_msi_irq_chip = {
 	.irq_set_affinity = irq_chip_set_affinity_parent,
 #ifdef CONFIG_X86
 	.irq_ack = irq_chip_ack_parent,
+	.flags = IRQCHIP_MOVE_DEFERRED,
 #elif defined(CONFIG_ARM64)
 	.irq_eoi = irq_chip_eoi_parent,
 #endif

@@ -722,12 +722,6 @@ static struct irq_info *xen_irq_init(unsigned int irq)
 		INIT_RCU_WORK(&info->rwork, delayed_free_irq);
 
 		set_info_for_irq(irq, info);
-		/*
-		 * Interrupt affinity setting can be immediate. No point
-		 * in delaying it until an interrupt is handled.
-		 */
-		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-
 		INIT_LIST_HEAD(&info->eoi_list);
 		list_add_tail(&info->list, &xen_irq_list_head);
 	}