KVM: arm64: Handle counter access early in non-HYP context
We already deal with CNTPCT_EL0 accesses in non-HYP context. Let's add CNTVCT_EL0 as a good measure. This is also an opportunity to simplify things and make it plain that this code is only for non-HYP context handling. Acked-by: Oliver Upton <oliver.upton@linux.dev> Link: https://lore.kernel.org/r/20241217142321.763801-8-maz@kernel.org Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
parent
9b3b2f0029
commit
b86fc215dc
1 changed file with 21 additions and 13 deletions
|
@@ -506,7 +506,7 @@ static inline u64 compute_counter_value(struct arch_timer_context *ctxt)
|
||||||
return arch_timer_read_cntpct_el0() - timer_get_offset(ctxt);
|
return arch_timer_read_cntpct_el0() - timer_get_offset(ctxt);
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
|
static bool kvm_handle_cntxct(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
struct arch_timer_context *ctxt;
|
struct arch_timer_context *ctxt;
|
||||||
u32 sysreg;
|
u32 sysreg;
|
||||||
|
@@ -516,18 +516,19 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
|
||||||
* We only get here for 64bit guests, 32bit guests will hit
|
* We only get here for 64bit guests, 32bit guests will hit
|
||||||
* the long and winding road all the way to the standard
|
* the long and winding road all the way to the standard
|
||||||
* handling. Yes, it sucks to be irrelevant.
|
* handling. Yes, it sucks to be irrelevant.
|
||||||
|
*
|
||||||
|
* Also, we only deal with non-hypervisor context here (either
|
||||||
|
* an EL1 guest, or a non-HYP context of an EL2 guest).
|
||||||
*/
|
*/
|
||||||
|
if (is_hyp_ctxt(vcpu))
|
||||||
|
return false;
|
||||||
|
|
||||||
sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
|
sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
|
||||||
|
|
||||||
switch (sysreg) {
|
switch (sysreg) {
|
||||||
case SYS_CNTPCT_EL0:
|
case SYS_CNTPCT_EL0:
|
||||||
case SYS_CNTPCTSS_EL0:
|
case SYS_CNTPCTSS_EL0:
|
||||||
if (vcpu_has_nv(vcpu)) {
|
if (vcpu_has_nv(vcpu)) {
|
||||||
if (is_hyp_ctxt(vcpu)) {
|
|
||||||
ctxt = vcpu_hptimer(vcpu);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check for guest hypervisor trapping */
|
/* Check for guest hypervisor trapping */
|
||||||
val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
|
val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
|
||||||
if (!vcpu_el2_e2h_is_set(vcpu))
|
if (!vcpu_el2_e2h_is_set(vcpu))
|
||||||
|
@@ -539,16 +540,23 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
|
||||||
|
|
||||||
ctxt = vcpu_ptimer(vcpu);
|
ctxt = vcpu_ptimer(vcpu);
|
||||||
break;
|
break;
|
||||||
|
case SYS_CNTVCT_EL0:
|
||||||
|
case SYS_CNTVCTSS_EL0:
|
||||||
|
if (vcpu_has_nv(vcpu)) {
|
||||||
|
/* Check for guest hypervisor trapping */
|
||||||
|
val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
|
||||||
|
|
||||||
|
if (val & CNTHCTL_EL1TVCT)
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
ctxt = vcpu_vtimer(vcpu);
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
val = arch_timer_read_cntpct_el0();
|
val = compute_counter_value(ctxt);
|
||||||
|
|
||||||
if (ctxt->offset.vm_offset)
|
|
||||||
val -= *kern_hyp_va(ctxt->offset.vm_offset);
|
|
||||||
if (ctxt->offset.vcpu_offset)
|
|
||||||
val -= *kern_hyp_va(ctxt->offset.vcpu_offset);
|
|
||||||
|
|
||||||
vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
|
vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
|
||||||
__kvm_skip_instr(vcpu);
|
__kvm_skip_instr(vcpu);
|
||||||
|
@@ -593,7 +601,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||||
__vgic_v3_perform_cpuif_access(vcpu) == 1)
|
__vgic_v3_perform_cpuif_access(vcpu) == 1)
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
if (kvm_hyp_handle_cntpct(vcpu))
|
if (kvm_handle_cntxct(vcpu))
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
|
|
Loading…
Add table
Reference in a new issue