KVM: x86/mmu: Handle no-slot faults at the beginning of kvm_faultin_pfn()
Handle the "no memslot" case at the beginning of kvm_faultin_pfn(), just after the private versus shared check, so that there's no need to repeatedly query whether or not a slot exists. This also makes it more obvious that, except for private vs. shared attributes, the process of faulting in a pfn simply doesn't apply to gfns without a slot. Opportunistically stuff @fault's metadata in kvm_handle_noslot_fault() so that it doesn't need to be duplicated in all paths that invoke kvm_handle_noslot_fault(), and to minimize the probability of not stuffing the right fields. Leave the existing handle behind, but convert it to a WARN, to guard against __kvm_faultin_pfn() unexpectedly nullifying fault->slot. Cc: David Matlack <dmatlack@google.com> Signed-off-by: Sean Christopherson <seanjc@google.com> Reviewed-by: Kai Huang <kai.huang@intel.com> Message-ID: <20240228024147.41573-14-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cd272fc439
commit f6adeae81f

1 changed file with 17 additions and 12 deletions: arch/x86/kvm/mmu/mmu.c
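For orientation before the hunks: a condensed sketch of kvm_faultin_pfn()'s post-patch entry flow, abridged from the diff below. The declaration of slot and the assumed tail are reconstructions for readability, not part of the patch.

static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
			   unsigned int access)
{
	struct kvm_memory_slot *slot = fault->slot;

	/* ... private vs. shared check, may return -EFAULT ... */

	/* New: handle "no memslot" once, up front. */
	if (unlikely(!slot))
		return kvm_handle_noslot_fault(vcpu, fault, access);

	/* Everything below can now assume the slot exists. */
	if (slot->flags & KVM_MEMSLOT_INVALID)
		return RET_PF_RETRY;

	/* ... APIC-access memslot handling, mmu_seq snapshot, __kvm_faultin_pfn() ... */

	return RET_PF_CONTINUE;	/* assumed tail, elided in this sketch */
}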
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3269,6 +3269,10 @@ static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
 	vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
 			     access & shadow_mmio_access_mask);
 
+	fault->slot = NULL;
+	fault->pfn = KVM_PFN_NOSLOT;
+	fault->map_writable = false;
+
 	/*
 	 * If MMIO caching is disabled, emulate immediately without
 	 * touching the shadow page tables as attempting to install an
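With the metadata stuffed in one place, every caller can simply return kvm_handle_noslot_fault(vcpu, fault, access) without first poking at @fault. An abridged post-patch shape of the function follows; the gva computation and the elided tail are assumptions from surrounding context, not shown in this hunk:

static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
				   struct kvm_page_fault *fault,
				   unsigned int access)
{
	gva_t gva = fault->is_tdp ? 0 : fault->addr;	/* assumed, not in the hunk */

	vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
			     access & shadow_mmio_access_mask);

	/* Stuff the "no slot" state once, for every caller. */
	fault->slot = NULL;
	fault->pfn = KVM_PFN_NOSLOT;
	fault->map_writable = false;

	/* ... MMIO-caching check and MMIO SPTE handling elided ... */
}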
@@ -4349,15 +4353,18 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 		return -EFAULT;
 	}
 
+	if (unlikely(!slot))
+		return kvm_handle_noslot_fault(vcpu, fault, access);
+
 	/*
 	 * Retry the page fault if the gfn hit a memslot that is being deleted
 	 * or moved.  This ensures any existing SPTEs for the old memslot will
 	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
 	 */
-	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
+	if (slot->flags & KVM_MEMSLOT_INVALID)
 		return RET_PF_RETRY;
 
-	if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) {
+	if (slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) {
 		/*
 		 * Don't map L1's APIC access page into L2, KVM doesn't support
 		 * using APICv/AVIC to accelerate L2 accesses to L1's APIC,
|
@ -4369,12 +4376,9 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
|
||||||
* uses different roots for L1 vs. L2, i.e. there is no danger
|
* uses different roots for L1 vs. L2, i.e. there is no danger
|
||||||
* of breaking APICv/AVIC for L1.
|
* of breaking APICv/AVIC for L1.
|
||||||
*/
|
*/
|
||||||
if (is_guest_mode(vcpu)) {
|
if (is_guest_mode(vcpu))
|
||||||
fault->slot = NULL;
|
return kvm_handle_noslot_fault(vcpu, fault, access);
|
||||||
fault->pfn = KVM_PFN_NOSLOT;
|
|
||||||
fault->map_writable = false;
|
|
||||||
goto faultin_done;
|
|
||||||
}
|
|
||||||
/*
|
/*
|
||||||
* If the APIC access page exists but is disabled, go directly
|
* If the APIC access page exists but is disabled, go directly
|
||||||
* to emulation without caching the MMIO access or creating a
|
* to emulation without caching the MMIO access or creating a
|
||||||
|
@ -4385,6 +4389,9 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
|
||||||
return RET_PF_EMULATE;
|
return RET_PF_EMULATE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
|
||||||
|
smp_rmb();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Check for a relevant mmu_notifier invalidation event before getting
|
* Check for a relevant mmu_notifier invalidation event before getting
|
||||||
* the pfn from the primary MMU, and before acquiring mmu_lock.
|
* the pfn from the primary MMU, and before acquiring mmu_lock.
|
||||||
|
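The snapshot of mmu_invalidate_seq plus smp_rmb() orders the sequence read before the pfn lookup, so a racing invalidation forces a retry before any stale pfn can be installed. As a rough illustration only, here is a minimal user-space model of that snapshot-then-recheck pattern, using C11 atomics as stand-ins for the kernel's barriers; none of these names are KVM code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Sequence count bumped by the (simulated) invalidation side. */
static atomic_ulong mmu_seq;

/* Release-increment models "an invalidation finished". */
static void invalidate_end(void)
{
	atomic_fetch_add_explicit(&mmu_seq, 1, memory_order_release);
}

/* Acquire-reload models the unsafe recheck: if the count moved past the
 * snapshot, the fault work raced with an invalidation and must retry. */
static bool invalidate_retry(unsigned long snapshot)
{
	return atomic_load_explicit(&mmu_seq, memory_order_acquire) != snapshot;
}

int main(void)
{
	/* Snapshot first (fault->mmu_seq in the patch), then do the work. */
	unsigned long snapshot = atomic_load_explicit(&mmu_seq, memory_order_acquire);

	invalidate_end();	/* simulate a concurrent invalidation */

	puts(invalidate_retry(snapshot) ? "RET_PF_RETRY" : "continue faulting in pfn");
	return 0;
}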
@@ -4406,19 +4413,17 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	 * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
 	 * to detect retry guarantees the worst case latency for the vCPU.
 	 */
-	if (fault->slot &&
-	    mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
+	if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
 		return RET_PF_RETRY;
 
 	ret = __kvm_faultin_pfn(vcpu, fault);
 	if (ret != RET_PF_CONTINUE)
 		return ret;
 
-faultin_done:
 	if (unlikely(is_error_pfn(fault->pfn)))
 		return kvm_handle_error_pfn(vcpu, fault);
 
-	if (unlikely(!fault->slot))
+	if (WARN_ON_ONCE(!fault->slot))
 		return kvm_handle_noslot_fault(vcpu, fault, access);
 
 	/*
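Demoting the leftover tail check to WARN_ON_ONCE() keeps the graceful handling while loudly flagging the supposedly impossible case of __kvm_faultin_pfn() nullifying fault->slot. For readers unfamiliar with the idiom, a user-space approximation (GNU C statement expressions; a hypothetical stand-in, not the kernel's macro):

#include <stdbool.h>
#include <stdio.h>

/* Rough model of the kernel's WARN_ON_ONCE(): evaluate the condition,
 * print a one-time warning if it is true, and hand the result back so
 * the caller can still handle the condition gracefully. */
#define WARN_ON_ONCE(cond) ({						\
	static bool __warned;						\
	bool __ret = !!(cond);						\
	if (__ret && !__warned) {					\
		__warned = true;					\
		fprintf(stderr, "WARNING: %s:%d: %s\n",			\
			__FILE__, __LINE__, #cond);			\
	}								\
	__ret;								\
})

int main(void)
{
	const void *slot = NULL;

	/* Warns once on the first iteration, then stays silent, but the
	 * "no slot" path is still taken every time. */
	for (int i = 0; i < 3; i++) {
		if (WARN_ON_ONCE(!slot))
			continue;
	}
	return 0;
}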