xen/events: modify internal [un]bind interfaces
Modify the internal bind- and unbind-interfaces to take a struct irq_info
parameter. When allocating a new IRQ pass the pointer from the allocating
function further up.

This will reduce the number of info_for_irq() calls and make the code
more efficient.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
parent 5dd9ad32d7
commit 3fcdaf3d76

1 changed file with 124 additions and 135 deletions
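Before the diff itself, a minimal, self-contained sketch of the pattern the commit applies may help: helpers that used to take an IRQ number and repeat the irq_info lookup now take the irq_info pointer their caller already holds, and the allocator hands that pointer back. Everything in the sketch (the toy struct, the table, setup_old()/setup_new()) is invented for illustration; only info_for_irq() and the general shape mirror the kernel code in the diff below.

#include <stdio.h>

/* Toy stand-in for the kernel's struct irq_info. */
struct irq_info {
        unsigned int irq;
        unsigned int evtchn;
};

static struct irq_info info_table[4];   /* hypothetical irq -> info mapping */

/* Stand-in for info_for_irq(): the per-call lookup the commit removes. */
static struct irq_info *info_for_irq(unsigned int irq)
{
        return &info_table[irq];
}

/* Old style: take the IRQ number, look the info up again internally. */
static int setup_old(unsigned int irq, unsigned int evtchn)
{
        struct irq_info *info = info_for_irq(irq);      /* extra lookup */

        info->evtchn = evtchn;
        return 0;
}

/* New style: the caller passes the irq_info it already holds; the IRQ
 * number stays reachable as info->irq when it is still needed. */
static int setup_new(struct irq_info *info, unsigned int evtchn)
{
        info->evtchn = evtchn;
        return 0;
}

int main(void)
{
        unsigned int i;

        for (i = 0; i < 4; i++)
                info_table[i].irq = i;

        setup_old(2, 10);               /* lookup happens inside */
        setup_new(&info_table[3], 11);  /* caller already has the pointer */

        printf("irq 2 -> evtchn %u, irq 3 -> evtchn %u\n",
               info_table[2].evtchn, info_table[3].evtchn);
        return 0;
}

In the real patch the same move happens at larger scale: xen_allocate_irq_dynamic() and xen_allocate_irq_gsi() now return the struct irq_info * directly, and the xen_irq_info_*_setup(), bind_evtchn_to_cpu(), __unbind_from_irq() and xen_free_irq() helpers take that pointer instead of re-deriving it from the IRQ number.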
@@ -327,7 +327,6 @@ static void delayed_free_irq(struct work_struct *work)
 
 /* Constructors for packed IRQ information. */
 static int xen_irq_info_common_setup(struct irq_info *info,
-                                     unsigned irq,
                                      enum xen_irq_type type,
                                      evtchn_port_t evtchn,
                                      unsigned short cpu)
@@ -342,23 +341,22 @@ static int xen_irq_info_common_setup(struct irq_info *info,
         info->mask_reason = EVT_MASK_REASON_EXPLICIT;
         raw_spin_lock_init(&info->lock);
 
-        ret = set_evtchn_to_irq(evtchn, irq);
+        ret = set_evtchn_to_irq(evtchn, info->irq);
         if (ret < 0)
                 return ret;
 
-        irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
+        irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);
 
         return xen_evtchn_port_setup(evtchn);
 }
 
-static int xen_irq_info_evtchn_setup(unsigned irq,
+static int xen_irq_info_evtchn_setup(struct irq_info *info,
                                      evtchn_port_t evtchn,
                                      struct xenbus_device *dev)
 {
-        struct irq_info *info = info_for_irq(irq);
         int ret;
 
-        ret = xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
+        ret = xen_irq_info_common_setup(info, IRQT_EVTCHN, evtchn, 0);
         info->u.interdomain = dev;
         if (dev)
                 atomic_inc(&dev->event_channels);
@@ -366,50 +364,37 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
         return ret;
 }
 
-static int xen_irq_info_ipi_setup(unsigned cpu,
-                                  unsigned irq,
-                                  evtchn_port_t evtchn,
-                                  enum ipi_vector ipi)
+static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
+                                  evtchn_port_t evtchn, enum ipi_vector ipi)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         info->u.ipi = ipi;
 
-        per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+        per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
         per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
 
-        return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
+        return xen_irq_info_common_setup(info, IRQT_IPI, evtchn, 0);
 }
 
-static int xen_irq_info_virq_setup(unsigned cpu,
-                                   unsigned irq,
-                                   evtchn_port_t evtchn,
-                                   unsigned virq)
+static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
+                                   evtchn_port_t evtchn, unsigned int virq)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         info->u.virq = virq;
 
-        per_cpu(virq_to_irq, cpu)[virq] = irq;
+        per_cpu(virq_to_irq, cpu)[virq] = info->irq;
 
-        return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
+        return xen_irq_info_common_setup(info, IRQT_VIRQ, evtchn, 0);
 }
 
-static int xen_irq_info_pirq_setup(unsigned irq,
-                                   evtchn_port_t evtchn,
-                                   unsigned pirq,
-                                   unsigned gsi,
-                                   uint16_t domid,
-                                   unsigned char flags)
+static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn,
+                                   unsigned int pirq, unsigned int gsi,
+                                   uint16_t domid, unsigned char flags)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         info->u.pirq.pirq = pirq;
         info->u.pirq.gsi = gsi;
         info->u.pirq.domid = domid;
         info->u.pirq.flags = flags;
 
-        return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
+        return xen_irq_info_common_setup(info, IRQT_PIRQ, evtchn, 0);
 }
 
 static void xen_irq_info_cleanup(struct irq_info *info)
@@ -453,20 +438,16 @@ int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
         return irq;
 }
 
-static enum ipi_vector ipi_from_irq(unsigned irq)
+static enum ipi_vector ipi_from_irq(struct irq_info *info)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         BUG_ON(info == NULL);
         BUG_ON(info->type != IRQT_IPI);
 
         return info->u.ipi;
 }
 
-static unsigned virq_from_irq(unsigned irq)
+static unsigned int virq_from_irq(struct irq_info *info)
 {
-        struct irq_info *info = info_for_irq(irq);
-
         BUG_ON(info == NULL);
         BUG_ON(info->type != IRQT_VIRQ);
 
@@ -533,13 +514,9 @@ static bool pirq_needs_eoi_flag(unsigned irq)
         return info->u.pirq.flags & PIRQ_NEEDS_EOI;
 }
 
-static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
+static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
                                bool force_affinity)
 {
-        struct irq_info *info = evtchn_to_info(evtchn);
-
-        BUG_ON(info == NULL);
-
         if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
                 struct irq_data *data = irq_get_irq_data(info->irq);
 
@@ -547,7 +524,7 @@ static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
                 irq_data_update_effective_affinity(data, cpumask_of(cpu));
         }
 
-        xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
+        xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
 
         channels_on_cpu_dec(info);
         info->cpu = cpu;
@@ -762,23 +739,24 @@ static struct irq_info *xen_irq_init(unsigned int irq)
         return info;
 }
 
-static inline int __must_check xen_allocate_irq_dynamic(void)
+static struct irq_info *xen_allocate_irq_dynamic(void)
 {
         int irq = irq_alloc_desc_from(0, -1);
+        struct irq_info *info = NULL;
 
         if (irq >= 0) {
-                if (!xen_irq_init(irq)) {
+                info = xen_irq_init(irq);
+                if (!info)
                         xen_irq_free_desc(irq);
-                        irq = -1;
-                }
         }
 
-        return irq;
+        return info;
 }
 
-static int __must_check xen_allocate_irq_gsi(unsigned gsi)
+static struct irq_info *xen_allocate_irq_gsi(unsigned int gsi)
 {
         int irq;
+        struct irq_info *info;
 
         /*
          * A PV guest has no concept of a GSI (since it has no ACPI
@@ -795,18 +773,15 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
         else
                 irq = irq_alloc_desc_at(gsi, -1);
 
-        if (!xen_irq_init(irq)) {
+        info = xen_irq_init(irq);
+        if (!info)
                 xen_irq_free_desc(irq);
-                irq = -1;
-        }
 
-        return irq;
-}
+        return info;
+}
 
-static void xen_free_irq(unsigned irq)
-{
-        struct irq_info *info = info_for_irq(irq);
-
+static void xen_free_irq(struct irq_info *info)
+{
         if (WARN_ON(!info))
                 return;
 
@@ -897,7 +872,7 @@ static unsigned int __startup_pirq(unsigned int irq)
                 goto err;
 
         info->evtchn = evtchn;
-        bind_evtchn_to_cpu(evtchn, 0, false);
+        bind_evtchn_to_cpu(info, 0, false);
 
         rc = xen_evtchn_port_setup(evtchn);
         if (rc)
@@ -963,10 +938,9 @@ int xen_irq_from_gsi(unsigned gsi)
 }
 EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
 
-static void __unbind_from_irq(unsigned int irq)
+static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
 {
-        evtchn_port_t evtchn = evtchn_from_irq(irq);
-        struct irq_info *info = info_for_irq(irq);
+        evtchn_port_t evtchn;
 
         if (!info) {
                 xen_irq_free_desc(irq);
@@ -979,6 +953,8 @@ static void __unbind_from_irq(unsigned int irq)
                 return;
         }
 
+        evtchn = info->evtchn;
+
         if (VALID_EVTCHN(evtchn)) {
                 unsigned int cpu = info->cpu;
                 struct xenbus_device *dev;
@@ -988,11 +964,11 @@ static void __unbind_from_irq(unsigned int irq)
 
                 switch (info->type) {
                 case IRQT_VIRQ:
-                        per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
+                        per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
                         break;
                 case IRQT_IPI:
-                        per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
-                        per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(irq)] = 0;
+                        per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
+                        per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
                         break;
                 case IRQT_EVTCHN:
                         dev = info->u.interdomain;
@@ -1006,7 +982,7 @@ static void __unbind_from_irq(unsigned int irq)
                 xen_irq_info_cleanup(info);
         }
 
-        xen_free_irq(irq);
+        xen_free_irq(info);
 }
 
 /*
@@ -1022,24 +998,24 @@ static void __unbind_from_irq(unsigned int irq)
 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                              unsigned pirq, int shareable, char *name)
 {
-        int irq;
+        struct irq_info *info;
         struct physdev_irq irq_op;
         int ret;
 
         mutex_lock(&irq_mapping_update_lock);
 
-        irq = xen_irq_from_gsi(gsi);
-        if (irq != -1) {
+        ret = xen_irq_from_gsi(gsi);
+        if (ret != -1) {
                 pr_info("%s: returning irq %d for gsi %u\n",
-                        __func__, irq, gsi);
+                        __func__, ret, gsi);
                 goto out;
         }
 
-        irq = xen_allocate_irq_gsi(gsi);
-        if (irq < 0)
+        info = xen_allocate_irq_gsi(gsi);
+        if (!info)
                 goto out;
 
-        irq_op.irq = irq;
+        irq_op.irq = info->irq;
         irq_op.vector = 0;
 
         /* Only the privileged domain can do this. For non-priv, the pcifront
@@ -1047,20 +1023,19 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
          * this in the priv domain. */
         if (xen_initial_domain() &&
             HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
-                xen_free_irq(irq);
-                irq = -ENOSPC;
+                xen_free_irq(info);
+                ret = -ENOSPC;
                 goto out;
         }
 
-        ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
+        ret = xen_irq_info_pirq_setup(info, 0, pirq, gsi, DOMID_SELF,
                                       shareable ? PIRQ_SHAREABLE : 0);
         if (ret < 0) {
-                __unbind_from_irq(irq);
-                irq = ret;
+                __unbind_from_irq(info, info->irq);
                 goto out;
         }
 
-        pirq_query_unmask(irq);
+        pirq_query_unmask(info->irq);
         /* We try to use the handler with the appropriate semantic for the
          * type of interrupt: if the interrupt is an edge triggered
          * interrupt we use handle_edge_irq.
@@ -1077,16 +1052,18 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
          * is the right choice either way.
          */
         if (shareable)
-                irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+                irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
                                               handle_fasteoi_irq, name);
         else
-                irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+                irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
                                               handle_edge_irq, name);
 
+        ret = info->irq;
+
 out:
         mutex_unlock(&irq_mapping_update_lock);
 
-        return irq;
+        return ret;
 }
 
 #ifdef CONFIG_PCI_MSI
@@ -1108,6 +1085,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
                              int pirq, int nvec, const char *name, domid_t domid)
 {
         int i, irq, ret;
+        struct irq_info *info;
 
         mutex_lock(&irq_mapping_update_lock);
 
@@ -1116,12 +1094,13 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
                 goto out;
 
         for (i = 0; i < nvec; i++) {
-                if (!xen_irq_init(irq + i))
+                info = xen_irq_init(irq + i);
+                if (!info)
                         goto error_irq;
 
                 irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
 
-                ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
+                ret = xen_irq_info_pirq_setup(info, 0, pirq + i, 0, domid,
                                               i == 0 ? 0 : PIRQ_MSI_GROUP);
                 if (ret < 0)
                         goto error_irq;
@@ -1133,9 +1112,12 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 out:
         mutex_unlock(&irq_mapping_update_lock);
         return irq;
+
 error_irq:
-        while (nvec--)
-                __unbind_from_irq(irq + nvec);
+        while (nvec--) {
+                info = info_for_irq(irq + nvec);
+                __unbind_from_irq(info, irq + nvec);
+        }
         mutex_unlock(&irq_mapping_update_lock);
         return ret;
 }
@@ -1171,7 +1153,7 @@ int xen_destroy_irq(int irq)
                 }
         }
 
-        xen_free_irq(irq);
+        xen_free_irq(info);
 
 out:
         mutex_unlock(&irq_mapping_update_lock);
@@ -1187,8 +1169,7 @@ EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
 static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
                                    struct xenbus_device *dev)
 {
-        int irq;
-        int ret;
+        int ret = -ENOMEM;
         struct irq_info *info;
 
         if (evtchn >= xen_evtchn_max_channels())
@@ -1199,17 +1180,16 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
         info = evtchn_to_info(evtchn);
 
         if (!info) {
-                irq = xen_allocate_irq_dynamic();
-                if (irq < 0)
+                info = xen_allocate_irq_dynamic();
+                if (!info)
                         goto out;
 
-                irq_set_chip_and_handler_name(irq, chip,
+                irq_set_chip_and_handler_name(info->irq, chip,
                                               handle_edge_irq, "event");
 
-                ret = xen_irq_info_evtchn_setup(irq, evtchn, dev);
+                ret = xen_irq_info_evtchn_setup(info, evtchn, dev);
                 if (ret < 0) {
-                        __unbind_from_irq(irq);
-                        irq = ret;
+                        __unbind_from_irq(info, info->irq);
                         goto out;
                 }
                 /*
@@ -1219,17 +1199,17 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
                  * affinity setting is not invoked on them so nothing would
                  * bind the channel.
                  */
-                bind_evtchn_to_cpu(evtchn, 0, false);
-        } else {
-                if (!WARN_ON(info->type != IRQT_EVTCHN))
-                        info->refcnt++;
-                irq = info->irq;
+                bind_evtchn_to_cpu(info, 0, false);
+        } else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
+                info->refcnt++;
         }
 
+        ret = info->irq;
+
 out:
         mutex_unlock(&irq_mapping_update_lock);
 
-        return irq;
+        return ret;
 }
 
 int bind_evtchn_to_irq(evtchn_port_t evtchn)
@@ -1248,18 +1228,19 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
         struct evtchn_bind_ipi bind_ipi;
         evtchn_port_t evtchn;
-        int ret, irq;
+        struct irq_info *info;
+        int ret;
 
         mutex_lock(&irq_mapping_update_lock);
 
-        irq = per_cpu(ipi_to_irq, cpu)[ipi];
+        ret = per_cpu(ipi_to_irq, cpu)[ipi];
 
-        if (irq == -1) {
-                irq = xen_allocate_irq_dynamic();
-                if (irq < 0)
+        if (ret == -1) {
+                info = xen_allocate_irq_dynamic();
+                if (!info)
                         goto out;
 
-                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+                irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
                                               handle_percpu_irq, "ipi");
 
                 bind_ipi.vcpu = xen_vcpu_nr(cpu);
@@ -1268,25 +1249,25 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                         BUG();
                 evtchn = bind_ipi.port;
 
-                ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
+                ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
                 if (ret < 0) {
-                        __unbind_from_irq(irq);
-                        irq = ret;
+                        __unbind_from_irq(info, info->irq);
                         goto out;
                 }
                 /*
                  * Force the affinity mask to the target CPU so proc shows
                  * the correct target.
                  */
-                bind_evtchn_to_cpu(evtchn, cpu, true);
+                bind_evtchn_to_cpu(info, cpu, true);
+                ret = info->irq;
         } else {
-                struct irq_info *info = info_for_irq(irq);
+                info = info_for_irq(ret);
                 WARN_ON(info == NULL || info->type != IRQT_IPI);
         }
 
 out:
         mutex_unlock(&irq_mapping_update_lock);
-        return irq;
+        return ret;
 }
 
 static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
@@ -1354,22 +1335,23 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
 {
         struct evtchn_bind_virq bind_virq;
         evtchn_port_t evtchn = 0;
-        int irq, ret;
+        struct irq_info *info;
+        int ret;
 
         mutex_lock(&irq_mapping_update_lock);
 
-        irq = per_cpu(virq_to_irq, cpu)[virq];
+        ret = per_cpu(virq_to_irq, cpu)[virq];
 
-        if (irq == -1) {
-                irq = xen_allocate_irq_dynamic();
-                if (irq < 0)
+        if (ret == -1) {
+                info = xen_allocate_irq_dynamic();
+                if (!info)
                         goto out;
 
                 if (percpu)
-                        irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+                        irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
                                                       handle_percpu_irq, "virq");
                 else
-                        irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
+                        irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip,
                                                       handle_edge_irq, "virq");
 
                 bind_virq.virq = virq;
@@ -1384,10 +1366,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
                         BUG_ON(ret < 0);
                 }
 
-                ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
+                ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
                 if (ret < 0) {
-                        __unbind_from_irq(irq);
-                        irq = ret;
+                        __unbind_from_irq(info, info->irq);
                         goto out;
                 }
 
@@ -1395,22 +1376,26 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
                  * Force the affinity mask for percpu interrupts so proc
                  * shows the correct target.
                  */
-                bind_evtchn_to_cpu(evtchn, cpu, percpu);
+                bind_evtchn_to_cpu(info, cpu, percpu);
+                ret = info->irq;
         } else {
-                struct irq_info *info = info_for_irq(irq);
+                info = info_for_irq(ret);
                 WARN_ON(info == NULL || info->type != IRQT_VIRQ);
         }
 
 out:
         mutex_unlock(&irq_mapping_update_lock);
 
-        return irq;
+        return ret;
 }
 
 static void unbind_from_irq(unsigned int irq)
 {
+        struct irq_info *info;
+
         mutex_lock(&irq_mapping_update_lock);
-        __unbind_from_irq(irq);
+        info = info_for_irq(irq);
+        __unbind_from_irq(info, irq);
         mutex_unlock(&irq_mapping_update_lock);
 }
 
@@ -1744,11 +1729,11 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
         BUG_ON(info->type == IRQT_UNBOUND);
 
         info->irq = irq;
-        (void)xen_irq_info_evtchn_setup(irq, evtchn, NULL);
+        (void)xen_irq_info_evtchn_setup(info, evtchn, NULL);
 
         mutex_unlock(&irq_mapping_update_lock);
 
-        bind_evtchn_to_cpu(evtchn, info->cpu, false);
+        bind_evtchn_to_cpu(info, info->cpu, false);
 
         /* Unmask the event channel. */
         enable_irq(irq);
@@ -1782,7 +1767,7 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
          * it, but don't do the xenlinux-level rebind in that case.
          */
         if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
-                bind_evtchn_to_cpu(evtchn, tcpu, false);
+                bind_evtchn_to_cpu(info, tcpu, false);
 
         do_unmask(info, EVT_MASK_REASON_TEMPORARY);
 
@@ -1933,7 +1918,7 @@ static void restore_pirqs(void)
                 if (rc) {
                         pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
                                 gsi, irq, pirq, rc);
-                        xen_free_irq(irq);
+                        xen_free_irq(info);
                         continue;
                 }
 
@@ -1947,13 +1932,15 @@ static void restore_cpu_virqs(unsigned int cpu)
 {
         struct evtchn_bind_virq bind_virq;
         evtchn_port_t evtchn;
+        struct irq_info *info;
         int virq, irq;
 
         for (virq = 0; virq < NR_VIRQS; virq++) {
                 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                         continue;
+                info = info_for_irq(irq);
 
-                BUG_ON(virq_from_irq(irq) != virq);
+                BUG_ON(virq_from_irq(info) != virq);
 
                 /* Get a new binding from Xen. */
                 bind_virq.virq = virq;
@@ -1964,9 +1951,9 @@ static void restore_cpu_virqs(unsigned int cpu)
                 evtchn = bind_virq.port;
 
                 /* Record the new mapping. */
-                (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
+                xen_irq_info_virq_setup(info, cpu, evtchn, virq);
                 /* The affinity mask is still valid */
-                bind_evtchn_to_cpu(evtchn, cpu, false);
+                bind_evtchn_to_cpu(info, cpu, false);
         }
 }
 
@@ -1974,13 +1961,15 @@ static void restore_cpu_ipis(unsigned int cpu)
 {
         struct evtchn_bind_ipi bind_ipi;
         evtchn_port_t evtchn;
+        struct irq_info *info;
         int ipi, irq;
 
         for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                         continue;
+                info = info_for_irq(irq);
 
-                BUG_ON(ipi_from_irq(irq) != ipi);
+                BUG_ON(ipi_from_irq(info) != ipi);
 
                 /* Get a new binding from Xen. */
                 bind_ipi.vcpu = xen_vcpu_nr(cpu);
@@ -1990,9 +1979,9 @@ static void restore_cpu_ipis(unsigned int cpu)
                 evtchn = bind_ipi.port;
 
                 /* Record the new mapping. */
-                (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
+                xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
                 /* The affinity mask is still valid */
-                bind_evtchn_to_cpu(evtchn, cpu, false);
+                bind_evtchn_to_cpu(info, cpu, false);
         }
 }
 