KVM: arm64: nv: Use guest hypervisor's max VL when running nested guest
The max VL for nested guests is additionally constrained by the max VL
selected by the guest hypervisor. Use that instead of KVM's max VL when
running a nested guest.

Note that the guest hypervisor's ZCR_EL2 is sanitised against the VM's
max VL at the time of access, so there's no additional handling required
at the time of use.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240620164653.1130714-7-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent b7e5c94264
commit 9092aca9fe
1 changed file with 12 additions and 0 deletions
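As background for the note in the commit message about ZCR_EL2 being sanitised against the VM's max VL at the time of access, the sketch below shows the clamping idea in isolation. It is not the kernel's actual accessor; the function name and the standalone form are illustrative assumptions. The LEN field written by the guest hypervisor encodes the vector length in 128-bit quadwords minus one, and is capped at the vCPU's maximum before being stored.

/*
 * Illustrative sketch only: clamp a guest hypervisor's ZCR_EL2.LEN to the
 * vCPU's maximum vector length (in quadwords) on write, so the stored
 * value can later be programmed into hardware unmodified.
 */
static unsigned long sanitise_guest_zcr_el2(unsigned long regval,
					    unsigned int vcpu_max_vq)
{
	unsigned int vq = (regval & 0xf) + 1;	/* ZCR_ELx.LEN lives in bits [3:0] */

	if (vq > vcpu_max_vq)
		vq = vcpu_max_vq;

	return vq - 1;				/* store LEN, i.e. vq - 1 */
}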
@@ -314,11 +314,23 @@ static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
 
 static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 {
+	/*
+	 * The vCPU's saved SVE state layout always matches the max VL of the
+	 * vCPU. Start off with the max VL so we can load the SVE state.
+	 */
 	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
 	__sve_restore_state(vcpu_sve_pffr(vcpu),
 			    &vcpu->arch.ctxt.fp_regs.fpsr,
 			    true);
+
+	/*
+	 * The effective VL for a VM could differ from the max VL when running a
+	 * nested guest, as the guest hypervisor could select a smaller VL. Slap
+	 * that into hardware before wrapping up.
+	 */
+	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
+		sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);
+
 	write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
 }
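To make the effect of the new hunk concrete, here is a small self-contained example of the vector length a nested guest ends up with: the smaller of the vCPU's max VL and whatever the guest hypervisor programmed into its ZCR_EL2. This is illustrative only; the helper name and the example values are made up and are not part of the patch.

#include <stdio.h>

/*
 * Illustrative only: the VL seen by a nested guest is bounded both by the
 * vCPU's maximum VL and by the LEN the guest hypervisor wrote to ZCR_EL2.
 * Vector lengths are counted in 128-bit quadwords (vq); LEN encodes vq - 1.
 */
static unsigned int nested_guest_vq(unsigned int vcpu_max_vq,
				    unsigned int guest_hyp_zcr_len)
{
	unsigned int guest_hyp_vq = guest_hyp_zcr_len + 1;

	return guest_hyp_vq < vcpu_max_vq ? guest_hyp_vq : vcpu_max_vq;
}

int main(void)
{
	/* vCPU max VL of 512 bits (vq = 4); guest hypervisor programs LEN = 1,
	 * i.e. 256 bits: the nested guest runs with 256-bit SVE vectors. */
	printf("nested guest VL: %u bits\n", nested_guest_vq(4, 1) * 128);
	return 0;
}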