KVM: SVM: Disable preemption across AVIC load/put during APICv refresh
Disable preemption when loading/putting the AVIC during an APICv refresh.
If the vCPU task is preempted and migrated to a different pCPU, the
unprotected avic_vcpu_load() could set the wrong pCPU in the physical ID
cache/table.

Pull the necessary code out of avic_vcpu_{,un}blocking() and into a new
helper to reduce the probability of introducing this exact bug a third
time.
Fixes: df7e4827c5 ("KVM: SVM: call avic_vcpu_load/avic_vcpu_put when enabling/disabling AVIC")
Cc: stable@vger.kernel.org
Reported-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent aa9f58415a
commit b652de1e3d
3 changed files with 59 additions and 50 deletions
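For orientation, here is a minimal sketch of the preemption-safe wrapper pattern the patch adds; the helper bodies mirror the new code in the diff below, with the surrounding kernel context elided:

    static void avic_vcpu_load(struct kvm_vcpu *vcpu)
    {
            int cpu = get_cpu();    /* disables preemption, returns this pCPU */

            /* Migration is impossible while preemption is off, so the
             * pCPU written into the physical ID table must be vcpu->cpu. */
            WARN_ON(cpu != vcpu->cpu);

            __avic_vcpu_load(vcpu, cpu);

            put_cpu();              /* re-enables preemption */
    }

    static void avic_vcpu_put(struct kvm_vcpu *vcpu)
    {
            preempt_disable();      /* no pCPU needed, just block migration */

            __avic_vcpu_put(vcpu);

            preempt_enable();
    }

svm_vcpu_load() and svm_vcpu_put() keep calling the bare __avic_vcpu_{load,put}() variants because the common vcpu_load()/vcpu_put() paths already invoke them with preemption disabled.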
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -616,38 +616,6 @@ out:
 	return ret;
 }
 
-void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	struct vmcb *vmcb = svm->vmcb01.ptr;
-	bool activated = kvm_vcpu_apicv_active(vcpu);
-
-	if (!enable_apicv)
-		return;
-
-	if (activated) {
-		/**
-		 * During AVIC temporary deactivation, guest could update
-		 * APIC ID, DFR and LDR registers, which would not be trapped
-		 * by avic_unaccelerated_access_interception(). In this case,
-		 * we need to check and update the AVIC logical APIC ID table
-		 * accordingly before re-activating.
-		 */
-		avic_apicv_post_state_restore(vcpu);
-		vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
-	} else {
-		vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
-	}
-	vmcb_mark_dirty(vmcb, VMCB_AVIC);
-
-	if (activated)
-		avic_vcpu_load(vcpu, vcpu->cpu);
-	else
-		avic_vcpu_put(vcpu);
-
-	avic_set_pi_irte_mode(vcpu, activated);
-}
-
 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
 {
 	unsigned long flags;
@@ -899,7 +867,7 @@ out:
 	return ret;
 }
 
-void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	u64 entry;
 	/* ID = 0xff (broadcast), ID > 0xff (reserved) */
@@ -936,7 +904,7 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
 }
 
-void avic_vcpu_put(struct kvm_vcpu *vcpu)
+void __avic_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	u64 entry;
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -955,13 +923,63 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
 }
 
+static void avic_vcpu_load(struct kvm_vcpu *vcpu)
+{
+	int cpu = get_cpu();
+
+	WARN_ON(cpu != vcpu->cpu);
+
+	__avic_vcpu_load(vcpu, cpu);
+
+	put_cpu();
+}
+
+static void avic_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+
+	__avic_vcpu_put(vcpu);
+
+	preempt_enable();
+}
+
+void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *vmcb = svm->vmcb01.ptr;
+	bool activated = kvm_vcpu_apicv_active(vcpu);
+
+	if (!enable_apicv)
+		return;
+
+	if (activated) {
+		/**
+		 * During AVIC temporary deactivation, guest could update
+		 * APIC ID, DFR and LDR registers, which would not be trapped
+		 * by avic_unaccelerated_access_interception(). In this case,
+		 * we need to check and update the AVIC logical APIC ID table
+		 * accordingly before re-activating.
+		 */
+		avic_apicv_post_state_restore(vcpu);
+		vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
+	} else {
+		vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
+	}
+	vmcb_mark_dirty(vmcb, VMCB_AVIC);
+
+	if (activated)
+		avic_vcpu_load(vcpu);
+	else
+		avic_vcpu_put(vcpu);
+
+	avic_set_pi_irte_mode(vcpu, activated);
+}
+
 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_vcpu_apicv_active(vcpu))
 		return;
 
-	preempt_disable();
-
 	/*
 	 * Unload the AVIC when the vCPU is about to block, _before_
 	 * the vCPU actually blocks.
@@ -976,21 +994,12 @@ void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
 	 * the cause of errata #1235).
 	 */
 	avic_vcpu_put(vcpu);
-
-	preempt_enable();
 }
 
 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-	int cpu;
-
 	if (!kvm_vcpu_apicv_active(vcpu))
 		return;
 
-	cpu = get_cpu();
-	WARN_ON(cpu != vcpu->cpu);
-
-	avic_vcpu_load(vcpu, cpu);
-
-	put_cpu();
+	avic_vcpu_load(vcpu);
 }
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1318,13 +1318,13 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		indirect_branch_prediction_barrier();
 	}
 	if (kvm_vcpu_apicv_active(vcpu))
-		avic_vcpu_load(vcpu, cpu);
+		__avic_vcpu_load(vcpu, cpu);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	if (kvm_vcpu_apicv_active(vcpu))
-		avic_vcpu_put(vcpu);
+		__avic_vcpu_put(vcpu);
 
 	svm_prepare_host_switch(vcpu);
 
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -576,8 +576,8 @@ void avic_init_vmcb(struct vcpu_svm *svm);
 int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
 int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
 int avic_init_vcpu(struct vcpu_svm *svm);
-void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-void avic_vcpu_put(struct kvm_vcpu *vcpu);
+void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void __avic_vcpu_put(struct kvm_vcpu *vcpu);
 void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
 void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);