KVM: VMX: Fold Hyper-V EPTP checking into its only caller
Fold check_ept_pointer_match() into hv_remote_flush_tlb_with_range() in preparation for combining the kvm_for_each_vcpu loops of the ==CHECK and !=MATCH statements. No functional change intended.

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210305183123.3978098-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b68aa15cca
commit 288bee2809
1 changed file with 20 additions and 24 deletions
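For illustration only, here is a standalone userspace sketch of the EPTP-consistency walk that this patch folds into hv_remote_flush_tlb_with_range(). It is not kernel code: the names struct fake_vcpu, check_eptp_consistency, INVALID_EPTP and NUM_VCPUS are hypothetical stand-ins. The logic mirrors the folded check shown in the second hunk below: skip vCPUs whose EPT pointer is not yet valid, latch the first valid EPTP, and flag a mismatch as soon as a different one is seen.

/* Illustrative sketch only; mirrors the walk added to hv_remote_flush_tlb_with_range(). */
#include <stdint.h>
#include <stdio.h>

#define INVALID_EPTP 0ULL   /* stand-in for the kernel's INVALID_PAGE */
#define NUM_VCPUS    4

struct fake_vcpu {
        uint64_t ept_pointer;   /* 0 means "no valid EPTP yet" */
};

/*
 * Walk every vCPU, skip those without a valid EPTP, remember the first
 * valid EPTP, and return INVALID_EPTP as soon as a different one is
 * found (the "mismatch" case).
 */
static uint64_t check_eptp_consistency(const struct fake_vcpu *vcpus, int nr)
{
        uint64_t hv_tlb_eptp = INVALID_EPTP;
        int i;

        for (i = 0; i < nr; i++) {
                uint64_t tmp_eptp = vcpus[i].ept_pointer;

                if (tmp_eptp == INVALID_EPTP)
                        continue;
                if (hv_tlb_eptp == INVALID_EPTP)
                        hv_tlb_eptp = tmp_eptp;
                else if (hv_tlb_eptp != tmp_eptp)
                        return INVALID_EPTP;
        }
        return hv_tlb_eptp;
}

int main(void)
{
        struct fake_vcpu vcpus[NUM_VCPUS] = {
                { 0x1000 }, { 0x1000 }, { 0 }, { 0x1000 },
        };
        uint64_t eptp = check_eptp_consistency(vcpus, NUM_VCPUS);

        if (eptp != INVALID_EPTP)
                printf("all valid EPTPs match: %#llx\n", (unsigned long long)eptp);
        else
                printf("EPTP mismatch, per-vCPU flushes needed\n");
        return 0;
}

In the kernel, the same walk runs under ept_pointer_lock and updates kvm_vmx->hv_tlb_eptp and kvm_vmx->ept_pointers_match directly, as the diff below shows.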
@@ -472,28 +472,6 @@ static const u32 vmx_uret_msrs_list[] = {
 static bool __read_mostly enlightened_vmcs = true;
 module_param(enlightened_vmcs, bool, 0444);
 
-/* check_ept_pointer() should be under protection of ept_pointer_lock. */
-static void check_ept_pointer_match(struct kvm *kvm)
-{
-	struct kvm_vcpu *vcpu;
-	u64 tmp_eptp = INVALID_PAGE;
-	int i;
-
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (!VALID_PAGE(tmp_eptp)) {
-			tmp_eptp = to_vmx(vcpu)->ept_pointer;
-		} else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
-			to_kvm_vmx(kvm)->hv_tlb_eptp = INVALID_PAGE;
-			to_kvm_vmx(kvm)->ept_pointers_match
-				= EPT_POINTERS_MISMATCH;
-			return;
-		}
-	}
-
-	to_kvm_vmx(kvm)->hv_tlb_eptp = tmp_eptp;
-	to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
-}
-
 static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
 		void *data)
 {
@@ -523,11 +501,29 @@ static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
 	struct kvm_vcpu *vcpu;
 	int ret = 0, i;
+	u64 tmp_eptp;
 
 	spin_lock(&kvm_vmx->ept_pointer_lock);
 
-	if (kvm_vmx->ept_pointers_match == EPT_POINTERS_CHECK)
-		check_ept_pointer_match(kvm);
+	if (kvm_vmx->ept_pointers_match == EPT_POINTERS_CHECK) {
+		kvm_vmx->ept_pointers_match = EPT_POINTERS_MATCH;
+		kvm_vmx->hv_tlb_eptp = INVALID_PAGE;
+
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			tmp_eptp = to_vmx(vcpu)->ept_pointer;
+			if (!VALID_PAGE(tmp_eptp))
+				continue;
+
+			if (!VALID_PAGE(kvm_vmx->hv_tlb_eptp)) {
+				kvm_vmx->hv_tlb_eptp = tmp_eptp;
+			} else if (kvm_vmx->hv_tlb_eptp != tmp_eptp) {
+				kvm_vmx->hv_tlb_eptp = INVALID_PAGE;
+				kvm_vmx->ept_pointers_match
+					= EPT_POINTERS_MISMATCH;
+				break;
+			}
+		}
+	}
 
 	if (kvm_vmx->ept_pointers_match != EPT_POINTERS_MATCH) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {