KVM: VMX: Move MSR_KERNEL_GS_BASE out of the vmx autoload msr area
Currently MSR_KERNEL_GS_BASE is saved and restored as part of the
guest/host msr reloading. Since we wish to lazy-restore all the other
msrs, save and reload MSR_KERNEL_GS_BASE explicitly instead of using
the common code.

Signed-off-by: Avi Kivity <avi@redhat.com>
parent 3ce672d484
commit 44ea2b1758
1 changed file with 26 additions and 13 deletions
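Before the diff itself, here is the lazy-switch pattern the patch adopts, in miniature: keep a host copy and a guest copy of the MSR in the vcpu, swap them with direct reads and writes around guest execution, and defer the swap back until the host actually needs its value. The following is a stand-alone sketch of that pattern, not the kernel's own code; the stubbed MSR accessors and the guest_state_loaded flag are illustrative stand-ins so it compiles outside the kernel.

#include <stdint.h>

static uint64_t hw_msr;                     /* stand-in for the CPU's MSR */
static void rdmsrl_stub(uint64_t *val) { *val = hw_msr; }
static void wrmsrl_stub(uint64_t val)  { hw_msr = val; }

struct vcpu_state {
	uint64_t msr_host_kernel_gs_base;   /* host value, saved on entry  */
	uint64_t msr_guest_kernel_gs_base;  /* guest value, cached on exit */
	int guest_state_loaded;             /* is the guest value live in hw? */
};

/* Entering the guest: save the host value, install the cached guest value. */
static void save_host_state(struct vcpu_state *v)
{
	rdmsrl_stub(&v->msr_host_kernel_gs_base);
	wrmsrl_stub(v->msr_guest_kernel_gs_base);
	v->guest_state_loaded = 1;
}

/*
 * Switching back lazily: only when the host needs its value again
 * (preemption, return to userspace) cache the live guest value and
 * restore the host one.
 */
static void load_host_state(struct vcpu_state *v)
{
	if (!v->guest_state_loaded)
		return;
	rdmsrl_stub(&v->msr_guest_kernel_gs_base);
	wrmsrl_stub(v->msr_host_kernel_gs_base);
	v->guest_state_loaded = 0;
}

The payoff of this arrangement is that a vmexit which never returns to userspace never touches the MSR at all; the swap back happens only on the slow path.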
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -99,7 +99,8 @@ struct vcpu_vmx {
 	int save_nmsrs;
 	int msr_offset_efer;
 #ifdef CONFIG_X86_64
-	int msr_offset_kernel_gs_base;
+	u64 msr_host_kernel_gs_base;
+	u64 msr_guest_kernel_gs_base;
 #endif
 	struct vmcs *vmcs;
 	struct {
@@ -202,7 +203,7 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
  */
 static const u32 vmx_msr_index[] = {
 #ifdef CONFIG_X86_64
-	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
+	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
 #endif
 	MSR_EFER, MSR_K6_STAR,
 };
@@ -674,10 +675,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu))
-		save_msrs(vmx->host_msrs +
-			  vmx->msr_offset_kernel_gs_base, 1);
-
+	if (is_long_mode(&vmx->vcpu)) {
+		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	}
 #endif
 	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	load_transition_efer(vmx);
@@ -711,6 +712,12 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
 	reload_host_efer(vmx);
+#ifdef CONFIG_X86_64
+	if (is_long_mode(&vmx->vcpu)) {
+		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+	}
+#endif
 }
 
 static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -938,9 +945,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		if (index >= 0)
 			move_msr_up(vmx, index, save_nmsrs++);
 		index = __find_msr_index(vmx, MSR_CSTAR);
-		if (index >= 0)
-			move_msr_up(vmx, index, save_nmsrs++);
-		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
 		if (index >= 0)
 			move_msr_up(vmx, index, save_nmsrs++);
 		/*
@@ -954,10 +958,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 #endif
 	vmx->save_nmsrs = save_nmsrs;
 
-#ifdef CONFIG_X86_64
-	vmx->msr_offset_kernel_gs_base =
-		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
-#endif
 	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
 
 	if (cpu_has_vmx_msr_bitmap()) {
@@ -1015,6 +1015,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 	case MSR_GS_BASE:
 		data = vmcs_readl(GUEST_GS_BASE);
 		break;
+	case MSR_KERNEL_GS_BASE:
+		vmx_load_host_state(to_vmx(vcpu));
+		data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
+		break;
 	case MSR_EFER:
 		return kvm_get_msr_common(vcpu, msr_index, pdata);
 #endif
@@ -1068,6 +1072,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	case MSR_GS_BASE:
 		vmcs_writel(GUEST_GS_BASE, data);
 		break;
+	case MSR_KERNEL_GS_BASE:
+		vmx_load_host_state(vmx);
+		vmx->msr_guest_kernel_gs_base = data;
+		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
 		vmcs_write32(GUEST_SYSENTER_CS, data);
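The two accessor hunks above rely on the same invariant: the cached msr_guest_kernel_gs_base is only current while host state is loaded, since while the guest runs the live value sits in the hardware MSR. Continuing the sketch from above, with hypothetical accessors in the style of vmx_get_msr()/vmx_set_msr():

/* Force the lazy switch back before reading the cache; otherwise the
 * live guest value is still in the hardware MSR and the cache is stale. */
static uint64_t get_guest_kernel_gs_base(struct vcpu_state *v)
{
	load_host_state(v);
	return v->msr_guest_kernel_gs_base;
}

/* Same on the write side: flush first, then update the cache. The new
 * value reaches the hardware MSR on the next save_host_state(). */
static void set_guest_kernel_gs_base(struct vcpu_state *v, uint64_t data)
{
	load_host_state(v);
	v->msr_guest_kernel_gs_base = data;
}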
@@ -1559,6 +1567,11 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
+	/*
+	 * Force kernel_gs_base reloading before EFER changes, as control
+	 * of this msr depends on is_long_mode().
+	 */
+	vmx_load_host_state(to_vmx(vcpu));
 	vcpu->arch.shadow_efer = efer;
 	if (!msr)
 		return;
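The last hunk defends against a subtle ordering issue: the lazy swap in __vmx_load_host_state() runs only under is_long_mode(), and writing EFER can change that predicate. Flushing the lazy state before the EFER shadow is updated keeps the swap-back decision consistent with the mode under which the guest value was installed. In terms of the earlier sketch (set_efer() is a hypothetical stand-in, not the kernel function):

/* Changing EFER can flip the long-mode predicate that guards the lazy
 * swap, so flush the lazy MSR state while the old mode still applies;
 * otherwise a guest value left live in the MSR might never be swapped
 * back to the host value. */
static void set_efer(struct vcpu_state *v, uint64_t new_efer)
{
	load_host_state(v);
	/* ... now update the EFER shadow and dependent state ... */
	(void)new_efer;
}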