KVM: arm64: Exclude FP ownership from kvm_vcpu_arch
In retrospect, it is fairly obvious that the FP state ownership is only meaningful for a given CPU, and that locating this information in the vcpu was just a mistake. Move the ownership tracking into the host data structure, and rename it from fp_state to fp_owner, which is a better description (name suggested by Mark Brown). Reviewed-by: Mark Brown <broonie@kernel.org> Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
parent
51e09b5572
commit
5294afdbf4
8 changed files with 19 additions and 27 deletions
|
@ -588,7 +588,7 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
|
||||||
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
|
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
|
||||||
|
|
||||||
if (!vcpu_has_sve(vcpu) ||
|
if (!vcpu_has_sve(vcpu) ||
|
||||||
(vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
|
(*host_data_ptr(fp_owner) != FP_STATE_GUEST_OWNED))
|
||||||
val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
|
val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
|
||||||
if (cpus_have_final_cap(ARM64_SME))
|
if (cpus_have_final_cap(ARM64_SME))
|
||||||
val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
|
val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
|
||||||
|
@ -596,7 +596,7 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
|
||||||
val = CPTR_NVHE_EL2_RES1;
|
val = CPTR_NVHE_EL2_RES1;
|
||||||
|
|
||||||
if (vcpu_has_sve(vcpu) &&
|
if (vcpu_has_sve(vcpu) &&
|
||||||
(vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
|
(*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED))
|
||||||
val |= CPTR_EL2_TZ;
|
val |= CPTR_EL2_TZ;
|
||||||
if (cpus_have_final_cap(ARM64_SME))
|
if (cpus_have_final_cap(ARM64_SME))
|
||||||
val &= ~CPTR_EL2_TSM;
|
val &= ~CPTR_EL2_TSM;
|
||||||
|
|
|
@ -545,6 +545,13 @@ struct kvm_host_data {
|
||||||
struct kvm_cpu_context host_ctxt;
|
struct kvm_cpu_context host_ctxt;
|
||||||
struct user_fpsimd_state *fpsimd_state; /* hyp VA */
|
struct user_fpsimd_state *fpsimd_state; /* hyp VA */
|
||||||
|
|
||||||
|
/* Ownership of the FP regs */
|
||||||
|
enum {
|
||||||
|
FP_STATE_FREE,
|
||||||
|
FP_STATE_HOST_OWNED,
|
||||||
|
FP_STATE_GUEST_OWNED,
|
||||||
|
} fp_owner;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* host_debug_state contains the host registers which are
|
* host_debug_state contains the host registers which are
|
||||||
* saved and restored during world switches.
|
* saved and restored during world switches.
|
||||||
|
@ -622,13 +629,6 @@ struct kvm_vcpu_arch {
|
||||||
/* Exception Information */
|
/* Exception Information */
|
||||||
struct kvm_vcpu_fault_info fault;
|
struct kvm_vcpu_fault_info fault;
|
||||||
|
|
||||||
/* Ownership of the FP regs */
|
|
||||||
enum {
|
|
||||||
FP_STATE_FREE,
|
|
||||||
FP_STATE_HOST_OWNED,
|
|
||||||
FP_STATE_GUEST_OWNED,
|
|
||||||
} fp_state;
|
|
||||||
|
|
||||||
/* Configuration flags, set once and for all before the vcpu can run */
|
/* Configuration flags, set once and for all before the vcpu can run */
|
||||||
u8 cflags;
|
u8 cflags;
|
||||||
|
|
||||||
|
|
|
@ -378,12 +378,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
|
||||||
|
|
||||||
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
|
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
|
||||||
|
|
||||||
/*
|
|
||||||
* Default value for the FP state, will be overloaded at load
|
|
||||||
* time if we support FP (pretty likely)
|
|
||||||
*/
|
|
||||||
vcpu->arch.fp_state = FP_STATE_FREE;
|
|
||||||
|
|
||||||
/* Set up the timer */
|
/* Set up the timer */
|
||||||
kvm_timer_vcpu_init(vcpu);
|
kvm_timer_vcpu_init(vcpu);
|
||||||
|
|
||||||
|
|
|
@ -84,7 +84,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
|
||||||
* guest in kvm_arch_vcpu_ctxflush_fp() and override this to
|
* guest in kvm_arch_vcpu_ctxflush_fp() and override this to
|
||||||
* FP_STATE_FREE if the flag set.
|
* FP_STATE_FREE if the flag set.
|
||||||
*/
|
*/
|
||||||
vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
|
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
|
||||||
*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
|
*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
|
||||||
|
|
||||||
vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
|
vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
|
||||||
|
@ -109,7 +109,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
|
||||||
* been saved, this is very unlikely to happen.
|
* been saved, this is very unlikely to happen.
|
||||||
*/
|
*/
|
||||||
if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
|
if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
|
||||||
vcpu->arch.fp_state = FP_STATE_FREE;
|
*host_data_ptr(fp_owner) = FP_STATE_FREE;
|
||||||
fpsimd_save_and_flush_cpu_state();
|
fpsimd_save_and_flush_cpu_state();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -125,7 +125,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
|
||||||
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
|
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
|
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
|
||||||
vcpu->arch.fp_state = FP_STATE_FREE;
|
*host_data_ptr(fp_owner) = FP_STATE_FREE;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -141,7 +141,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
|
||||||
|
|
||||||
WARN_ON_ONCE(!irqs_disabled());
|
WARN_ON_ONCE(!irqs_disabled());
|
||||||
|
|
||||||
if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
|
if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED) {
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Currently we do not support SME guests so SVCR is
|
* Currently we do not support SME guests so SVCR is
|
||||||
|
@ -195,7 +195,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
|
||||||
isb();
|
isb();
|
||||||
}
|
}
|
||||||
|
|
||||||
if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
|
if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED) {
|
||||||
if (vcpu_has_sve(vcpu)) {
|
if (vcpu_has_sve(vcpu)) {
|
||||||
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
|
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
|
||||||
|
|
||||||
|
|
|
@ -42,7 +42,7 @@ extern struct kvm_exception_table_entry __stop___kvm_ex_table;
|
||||||
/* Check whether the FP regs are owned by the guest */
|
/* Check whether the FP regs are owned by the guest */
|
||||||
static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
|
static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
|
return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Save the 32-bit only FPSIMD system register state */
|
/* Save the 32-bit only FPSIMD system register state */
|
||||||
|
@ -376,7 +376,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||||
isb();
|
isb();
|
||||||
|
|
||||||
/* Write out the host state if it's in the registers */
|
/* Write out the host state if it's in the registers */
|
||||||
if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
|
if (*host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED)
|
||||||
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
|
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
|
||||||
|
|
||||||
/* Restore the guest state */
|
/* Restore the guest state */
|
||||||
|
@ -389,7 +389,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||||
if (!(read_sysreg(hcr_el2) & HCR_RW))
|
if (!(read_sysreg(hcr_el2) & HCR_RW))
|
||||||
write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
|
write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
|
||||||
|
|
||||||
vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;
|
*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
|
@ -39,7 +39,6 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||||
hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
|
hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
|
||||||
|
|
||||||
hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
|
hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
|
||||||
hyp_vcpu->vcpu.arch.fp_state = host_vcpu->arch.fp_state;
|
|
||||||
|
|
||||||
hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
|
hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
|
||||||
|
|
||||||
|
@ -63,7 +62,6 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||||
host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
|
host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
|
||||||
|
|
||||||
host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
|
host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
|
||||||
host_vcpu->arch.fp_state = hyp_vcpu->vcpu.arch.fp_state;
|
|
||||||
|
|
||||||
host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
|
host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
|
||||||
for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
|
for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
|
||||||
|
|
|
@ -337,7 +337,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
|
||||||
|
|
||||||
__sysreg_restore_state_nvhe(host_ctxt);
|
__sysreg_restore_state_nvhe(host_ctxt);
|
||||||
|
|
||||||
if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
|
if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED)
|
||||||
__fpsimd_save_fpexc32(vcpu);
|
__fpsimd_save_fpexc32(vcpu);
|
||||||
|
|
||||||
__debug_switch_to_host(vcpu);
|
__debug_switch_to_host(vcpu);
|
||||||
|
|
|
@ -258,7 +258,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
|
||||||
|
|
||||||
sysreg_restore_host_state_vhe(host_ctxt);
|
sysreg_restore_host_state_vhe(host_ctxt);
|
||||||
|
|
||||||
if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
|
if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED)
|
||||||
__fpsimd_save_fpexc32(vcpu);
|
__fpsimd_save_fpexc32(vcpu);
|
||||||
|
|
||||||
__debug_switch_to_host(vcpu);
|
__debug_switch_to_host(vcpu);
|
||||||
|
|
Loading…
Add table
Reference in a new issue