KVM: Let ioapic know the irq line status
Userspace may deliver an RTC interrupt without querying the line status, so we want to track RTC EOI for this case. Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com> Reviewed-by: Gleb Natapov <gleb@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
parent
f3bff6318f
commit
aa2fbe6d44
9 changed files with 54 additions and 37 deletions
|
@ -290,8 +290,8 @@ static void pit_do_work(struct kthread_work *work)
|
||||||
}
|
}
|
||||||
spin_unlock(&ps->inject_lock);
|
spin_unlock(&ps->inject_lock);
|
||||||
if (inject) {
|
if (inject) {
|
||||||
kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
|
kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
|
||||||
kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
|
kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Provides NMI watchdog support via Virtual Wire mode.
|
* Provides NMI watchdog support via Virtual Wire mode.
|
||||||
|
|
|
@ -3491,13 +3491,15 @@ out:
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
|
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
|
||||||
|
bool line_status)
|
||||||
{
|
{
|
||||||
if (!irqchip_in_kernel(kvm))
|
if (!irqchip_in_kernel(kvm))
|
||||||
return -ENXIO;
|
return -ENXIO;
|
||||||
|
|
||||||
irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
|
irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
|
||||||
irq_event->irq, irq_event->level);
|
irq_event->irq, irq_event->level,
|
||||||
|
line_status);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -292,7 +292,8 @@ struct kvm_kernel_irq_routing_entry {
|
||||||
u32 gsi;
|
u32 gsi;
|
||||||
u32 type;
|
u32 type;
|
||||||
int (*set)(struct kvm_kernel_irq_routing_entry *e,
|
int (*set)(struct kvm_kernel_irq_routing_entry *e,
|
||||||
struct kvm *kvm, int irq_source_id, int level);
|
struct kvm *kvm, int irq_source_id, int level,
|
||||||
|
bool line_status);
|
||||||
union {
|
union {
|
||||||
struct {
|
struct {
|
||||||
unsigned irqchip;
|
unsigned irqchip;
|
||||||
|
@ -591,7 +592,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
|
||||||
|
|
||||||
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
|
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
|
||||||
struct kvm_userspace_memory_region *mem);
|
struct kvm_userspace_memory_region *mem);
|
||||||
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
|
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
|
||||||
|
bool line_status);
|
||||||
long kvm_arch_vm_ioctl(struct file *filp,
|
long kvm_arch_vm_ioctl(struct file *filp,
|
||||||
unsigned int ioctl, unsigned long arg);
|
unsigned int ioctl, unsigned long arg);
|
||||||
|
|
||||||
|
@ -722,10 +724,11 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
|
||||||
union kvm_ioapic_redirect_entry *entry,
|
union kvm_ioapic_redirect_entry *entry,
|
||||||
unsigned long *deliver_bitmask);
|
unsigned long *deliver_bitmask);
|
||||||
#endif
|
#endif
|
||||||
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
|
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
|
||||||
|
bool line_status);
|
||||||
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
|
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
|
||||||
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
|
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
|
||||||
int irq_source_id, int level);
|
int irq_source_id, int level, bool line_status);
|
||||||
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
|
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
|
||||||
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
|
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
|
||||||
void kvm_register_irq_ack_notifier(struct kvm *kvm,
|
void kvm_register_irq_ack_notifier(struct kvm *kvm,
|
||||||
|
|
|
@ -80,11 +80,12 @@ kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
|
||||||
spin_lock(&assigned_dev->intx_mask_lock);
|
spin_lock(&assigned_dev->intx_mask_lock);
|
||||||
if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
|
if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
|
||||||
kvm_set_irq(assigned_dev->kvm,
|
kvm_set_irq(assigned_dev->kvm,
|
||||||
assigned_dev->irq_source_id, vector, 1);
|
assigned_dev->irq_source_id, vector, 1,
|
||||||
|
false);
|
||||||
spin_unlock(&assigned_dev->intx_mask_lock);
|
spin_unlock(&assigned_dev->intx_mask_lock);
|
||||||
} else
|
} else
|
||||||
kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
|
kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
|
||||||
vector, 1);
|
vector, 1, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
|
static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
|
||||||
|
@ -165,7 +166,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
|
||||||
container_of(kian, struct kvm_assigned_dev_kernel,
|
container_of(kian, struct kvm_assigned_dev_kernel,
|
||||||
ack_notifier);
|
ack_notifier);
|
||||||
|
|
||||||
kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
|
kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false);
|
||||||
|
|
||||||
spin_lock(&dev->intx_mask_lock);
|
spin_lock(&dev->intx_mask_lock);
|
||||||
|
|
||||||
|
@ -188,7 +189,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
|
||||||
|
|
||||||
if (reassert)
|
if (reassert)
|
||||||
kvm_set_irq(dev->kvm, dev->irq_source_id,
|
kvm_set_irq(dev->kvm, dev->irq_source_id,
|
||||||
dev->guest_irq, 1);
|
dev->guest_irq, 1, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock(&dev->intx_mask_lock);
|
spin_unlock(&dev->intx_mask_lock);
|
||||||
|
@ -202,7 +203,7 @@ static void deassign_guest_irq(struct kvm *kvm,
|
||||||
&assigned_dev->ack_notifier);
|
&assigned_dev->ack_notifier);
|
||||||
|
|
||||||
kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
|
kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
|
||||||
assigned_dev->guest_irq, 0);
|
assigned_dev->guest_irq, 0, false);
|
||||||
|
|
||||||
if (assigned_dev->irq_source_id != -1)
|
if (assigned_dev->irq_source_id != -1)
|
||||||
kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
|
kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
|
||||||
|
@ -901,7 +902,7 @@ static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
|
||||||
if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
|
if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
|
||||||
if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
|
if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
|
||||||
kvm_set_irq(match->kvm, match->irq_source_id,
|
kvm_set_irq(match->kvm, match->irq_source_id,
|
||||||
match->guest_irq, 0);
|
match->guest_irq, 0, false);
|
||||||
/*
|
/*
|
||||||
* Masking at hardware-level is performed on demand,
|
* Masking at hardware-level is performed on demand,
|
||||||
* i.e. when an IRQ actually arrives at the host.
|
* i.e. when an IRQ actually arrives at the host.
|
||||||
|
|
|
@ -100,11 +100,13 @@ irqfd_inject(struct work_struct *work)
|
||||||
struct kvm *kvm = irqfd->kvm;
|
struct kvm *kvm = irqfd->kvm;
|
||||||
|
|
||||||
if (!irqfd->resampler) {
|
if (!irqfd->resampler) {
|
||||||
kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
|
kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
|
||||||
kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
|
false);
|
||||||
|
kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
|
||||||
|
false);
|
||||||
} else
|
} else
|
||||||
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
|
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
|
||||||
irqfd->gsi, 1);
|
irqfd->gsi, 1, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -121,7 +123,7 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
|
||||||
resampler = container_of(kian, struct _irqfd_resampler, notifier);
|
resampler = container_of(kian, struct _irqfd_resampler, notifier);
|
||||||
|
|
||||||
kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
|
kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
|
||||||
resampler->notifier.gsi, 0);
|
resampler->notifier.gsi, 0, false);
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
|
|
||||||
|
@ -146,7 +148,7 @@ irqfd_resampler_shutdown(struct _irqfd *irqfd)
|
||||||
list_del(&resampler->link);
|
list_del(&resampler->link);
|
||||||
kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
|
kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
|
||||||
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
|
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
|
||||||
resampler->notifier.gsi, 0);
|
resampler->notifier.gsi, 0, false);
|
||||||
kfree(resampler);
|
kfree(resampler);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -225,7 +227,8 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
|
||||||
irq = rcu_dereference(irqfd->irq_entry);
|
irq = rcu_dereference(irqfd->irq_entry);
|
||||||
/* An event has been signaled, inject an interrupt */
|
/* An event has been signaled, inject an interrupt */
|
||||||
if (irq)
|
if (irq)
|
||||||
kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
|
kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
|
||||||
|
false);
|
||||||
else
|
else
|
||||||
schedule_work(&irqfd->inject);
|
schedule_work(&irqfd->inject);
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
|
|
|
@ -50,7 +50,8 @@
|
||||||
#else
|
#else
|
||||||
#define ioapic_debug(fmt, arg...)
|
#define ioapic_debug(fmt, arg...)
|
||||||
#endif
|
#endif
|
||||||
static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
|
static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq,
|
||||||
|
bool line_status);
|
||||||
|
|
||||||
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
|
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
|
||||||
unsigned long addr,
|
unsigned long addr,
|
||||||
|
@ -146,7 +147,8 @@ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
|
||||||
__rtc_irq_eoi_tracking_restore_one(vcpu);
|
__rtc_irq_eoi_tracking_restore_one(vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
|
static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx,
|
||||||
|
bool line_status)
|
||||||
{
|
{
|
||||||
union kvm_ioapic_redirect_entry *pent;
|
union kvm_ioapic_redirect_entry *pent;
|
||||||
int injected = -1;
|
int injected = -1;
|
||||||
|
@ -154,7 +156,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
|
||||||
pent = &ioapic->redirtbl[idx];
|
pent = &ioapic->redirtbl[idx];
|
||||||
|
|
||||||
if (!pent->fields.mask) {
|
if (!pent->fields.mask) {
|
||||||
injected = ioapic_deliver(ioapic, idx);
|
injected = ioapic_deliver(ioapic, idx, line_status);
|
||||||
if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
|
if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
|
||||||
pent->fields.remote_irr = 1;
|
pent->fields.remote_irr = 1;
|
||||||
}
|
}
|
||||||
|
@ -248,13 +250,13 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
|
||||||
kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
|
kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
|
||||||
if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
|
if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
|
||||||
&& ioapic->irr & (1 << index))
|
&& ioapic->irr & (1 << index))
|
||||||
ioapic_service(ioapic, index);
|
ioapic_service(ioapic, index, false);
|
||||||
kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
|
kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
|
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
|
||||||
{
|
{
|
||||||
union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
|
union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
|
||||||
struct kvm_lapic_irq irqe;
|
struct kvm_lapic_irq irqe;
|
||||||
|
@ -277,7 +279,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
|
||||||
}
|
}
|
||||||
|
|
||||||
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
|
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
|
||||||
int level)
|
int level, bool line_status)
|
||||||
{
|
{
|
||||||
u32 old_irr;
|
u32 old_irr;
|
||||||
u32 mask = 1 << irq;
|
u32 mask = 1 << irq;
|
||||||
|
@ -300,7 +302,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
|
||||||
ioapic->irr |= mask;
|
ioapic->irr |= mask;
|
||||||
if ((edge && old_irr != ioapic->irr) ||
|
if ((edge && old_irr != ioapic->irr) ||
|
||||||
(!edge && !entry.fields.remote_irr))
|
(!edge && !entry.fields.remote_irr))
|
||||||
ret = ioapic_service(ioapic, irq);
|
ret = ioapic_service(ioapic, irq, line_status);
|
||||||
else
|
else
|
||||||
ret = 0; /* report coalesced interrupt */
|
ret = 0; /* report coalesced interrupt */
|
||||||
}
|
}
|
||||||
|
@ -349,7 +351,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
|
||||||
ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
|
ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
|
||||||
ent->fields.remote_irr = 0;
|
ent->fields.remote_irr = 0;
|
||||||
if (!ent->fields.mask && (ioapic->irr & (1 << i)))
|
if (!ent->fields.mask && (ioapic->irr & (1 << i)))
|
||||||
ioapic_service(ioapic, i);
|
ioapic_service(ioapic, i, false);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -89,7 +89,7 @@ bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
|
||||||
int kvm_ioapic_init(struct kvm *kvm);
|
int kvm_ioapic_init(struct kvm *kvm);
|
||||||
void kvm_ioapic_destroy(struct kvm *kvm);
|
void kvm_ioapic_destroy(struct kvm *kvm);
|
||||||
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
|
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
|
||||||
int level);
|
int level, bool line_status);
|
||||||
void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
|
void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
|
||||||
void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
|
void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
|
||||||
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
|
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
|
||||||
|
|
|
@ -35,7 +35,8 @@
|
||||||
#include "ioapic.h"
|
#include "ioapic.h"
|
||||||
|
|
||||||
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
|
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
|
||||||
struct kvm *kvm, int irq_source_id, int level)
|
struct kvm *kvm, int irq_source_id, int level,
|
||||||
|
bool line_status)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_X86
|
#ifdef CONFIG_X86
|
||||||
struct kvm_pic *pic = pic_irqchip(kvm);
|
struct kvm_pic *pic = pic_irqchip(kvm);
|
||||||
|
@ -46,10 +47,12 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
|
||||||
}
|
}
|
||||||
|
|
||||||
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
|
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
|
||||||
struct kvm *kvm, int irq_source_id, int level)
|
struct kvm *kvm, int irq_source_id, int level,
|
||||||
|
bool line_status)
|
||||||
{
|
{
|
||||||
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
|
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
|
||||||
return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level);
|
return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
|
||||||
|
line_status);
|
||||||
}
|
}
|
||||||
|
|
||||||
inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
|
inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
|
||||||
|
@ -121,7 +124,7 @@ static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
|
||||||
}
|
}
|
||||||
|
|
||||||
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
|
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
|
||||||
struct kvm *kvm, int irq_source_id, int level)
|
struct kvm *kvm, int irq_source_id, int level, bool line_status)
|
||||||
{
|
{
|
||||||
struct kvm_lapic_irq irq;
|
struct kvm_lapic_irq irq;
|
||||||
|
|
||||||
|
@ -159,7 +162,7 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
|
||||||
route.msi.address_hi = msi->address_hi;
|
route.msi.address_hi = msi->address_hi;
|
||||||
route.msi.data = msi->data;
|
route.msi.data = msi->data;
|
||||||
|
|
||||||
return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
|
return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -168,7 +171,8 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
|
||||||
* = 0 Interrupt was coalesced (previous irq is still pending)
|
* = 0 Interrupt was coalesced (previous irq is still pending)
|
||||||
* > 0 Number of CPUs interrupt was delivered to
|
* > 0 Number of CPUs interrupt was delivered to
|
||||||
*/
|
*/
|
||||||
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
|
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
|
||||||
|
bool line_status)
|
||||||
{
|
{
|
||||||
struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
|
struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
|
||||||
int ret = -1, i = 0;
|
int ret = -1, i = 0;
|
||||||
|
@ -189,7 +193,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
|
||||||
|
|
||||||
while(i--) {
|
while(i--) {
|
||||||
int r;
|
int r;
|
||||||
r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
|
r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
|
||||||
|
line_status);
|
||||||
if (r < 0)
|
if (r < 0)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
|
|
@ -2258,7 +2258,8 @@ static long kvm_vm_ioctl(struct file *filp,
|
||||||
if (copy_from_user(&irq_event, argp, sizeof irq_event))
|
if (copy_from_user(&irq_event, argp, sizeof irq_event))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
r = kvm_vm_ioctl_irq_line(kvm, &irq_event);
|
r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
|
||||||
|
ioctl == KVM_IRQ_LINE_STATUS);
|
||||||
if (r)
|
if (r)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue