Merge branch kvm-arm64/mops into kvmarm/next
* kvm-arm64/mops:
  : KVM support for MOPS, courtesy of Kristina Martsenko
  :
  : MOPS adds new instructions for accelerating memcpy(), memset(), and
  : memmove() operations in hardware. This series brings virtualization
  : support for KVM guests, and allows VMs to run on asymmetric systems
  : that may have different MOPS implementations.
  KVM: arm64: Expose MOPS instructions to guests
  KVM: arm64: Add handler for MOPS exceptions

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 53ce49ea75
8 changed files with 78 additions and 53 deletions
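For background (not part of the diff): a FEAT_MOPS operation is a prologue/main/epilogue instruction triple that updates its dst/src/size registers in place, and the intermediate register format between the three instructions is the per-CPU choice the Arm ARM calls "Option A" or "Option B". A minimal sketch of a forward copy, assuming a toolchain that accepts armv8.8-a+mops; the helper name is illustrative:

/*
 * Illustrative sketch only (not from this series): a FEAT_MOPS forward
 * copy. All three instructions update dst/src/size in place; the
 * in-between register format is the per-CPU Option A/Option B choice.
 */
static inline void mops_memcpy_fwd(void *dst, const void *src, unsigned long size)
{
	asm volatile(".arch armv8.8-a+mops\n"
		     "cpyfp [%0]!, [%1]!, %2!\n"	/* prologue */
		     "cpyfm [%0]!, [%1]!, %2!\n"	/* main */
		     "cpyfe [%0]!, [%1]!, %2!"		/* epilogue */
		     : "+r" (dst), "+r" (src), "+r" (size)
		     :
		     : "memory", "cc");
}

A vCPU migrated mid-sequence between cores that implement different options traps with ESR_ELx_MOPS_ISS_WRONG_OPTION set, which is the case the new handler addresses.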
arch/arm64/include/asm/kvm_arm.h
@@ -102,7 +102,9 @@
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
-#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
+#define HCRX_GUEST_FLAGS \
+	(HCRX_EL2_SMPME | HCRX_EL2_TCR2En | \
+	 (cpus_have_final_cap(ARM64_HAS_MOPS) ? (HCRX_EL2_MSCEn | HCRX_EL2_MCE2) : 0))
 #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En)
 
 /* TCR_EL2 Registers bits */
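Roughly how these flag sets are consumed (a sketch, not the exact upstream call sites): HCRX_EL2_MSCEn enables the MOPS instructions for the guest, and HCRX_EL2_MCE2 routes the guest's EL1 MOPS exceptions to EL2 so the new handler can fix them up. The cpus_have_final_cap() check above keeps the macro cheap to evaluate at guest entry:

/*
 * Simplified sketch of how the flag sets are consumed on FEAT_HCX
 * hardware; the real code lives in KVM's trap (de)activation paths.
 */
static inline void activate_traps_hcrx(void)
{
	if (cpus_have_final_cap(ARM64_HAS_HCX))
		write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2);
}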
arch/arm64/include/asm/traps.h
@@ -9,10 +9,9 @@
 
 #include <linux/list.h>
 #include <asm/esr.h>
+#include <asm/ptrace.h>
 #include <asm/sections.h>
 
-struct pt_regs;
-
 #ifdef CONFIG_ARMV8_DEPRECATED
 bool try_emulate_armv8_deprecated(struct pt_regs *regs, u32 insn);
 #else
@@ -101,4 +100,55 @@ static inline unsigned long arm64_ras_serror_get_severity(unsigned long esr)
 
 bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr);
 void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr);
+
+static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned long esr)
+{
+	bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION;
+	bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A;
+	int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
+	int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
+	int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
+	unsigned long dst, src, size;
+
+	dst = regs->regs[dstreg];
+	src = regs->regs[srcreg];
+	size = regs->regs[sizereg];
+
+	/*
+	 * Put the registers back in the original format suitable for a
+	 * prologue instruction, using the generic return routine from the
+	 * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH.
+	 */
+	if (esr & ESR_ELx_MOPS_ISS_MEM_INST) {
+		/* SET* instruction */
+		if (option_a ^ wrong_option) {
+			/* Format is from Option A; forward set */
+			regs->regs[dstreg] = dst + size;
+			regs->regs[sizereg] = -size;
+		}
+	} else {
+		/* CPY* instruction */
+		if (!(option_a ^ wrong_option)) {
+			/* Format is from Option B */
+			if (regs->pstate & PSR_N_BIT) {
+				/* Backward copy */
+				regs->regs[dstreg] = dst - size;
+				regs->regs[srcreg] = src - size;
+			}
+		} else {
+			/* Format is from Option A */
+			if (size & BIT(63)) {
+				/* Forward copy */
+				regs->regs[dstreg] = dst + size;
+				regs->regs[srcreg] = src + size;
+				regs->regs[sizereg] = -size;
+			}
+		}
+	}
+
+	if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE)
+		regs->pc -= 8;
+	else
+		regs->pc -= 4;
+}
 #endif
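To see the rewind arithmetic concretely, a standalone worked example for the Option A forward-copy case (values are arbitrary; this is not kernel code). After an Option A prologue, the registers hold dst+size, src+size, and -size, so adding the (negative) size register back and negating it recovers the prologue-format values:

#include <assert.h>

int main(void)
{
	unsigned long dst = 0x1000, src = 0x2000, size = 0x100;
	/* Register state after an Option A forward-copy prologue: */
	unsigned long xd = dst + size, xs = src + size, xn = -size;

	assert(xn & (1UL << 63));	/* the "size & BIT(63)" forward check */
	assert(xd + xn == dst);		/* dstreg rewind */
	assert(xs + xn == src);		/* srcreg rewind */
	assert(-xn == size);		/* sizereg rewind */
	return 0;
}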
arch/arm64/kernel/traps.c
@@ -516,53 +516,7 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
 
 void do_el0_mops(struct pt_regs *regs, unsigned long esr)
 {
-	bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION;
-	bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A;
-	int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
-	int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
-	int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
-	unsigned long dst, src, size;
-
-	dst = pt_regs_read_reg(regs, dstreg);
-	src = pt_regs_read_reg(regs, srcreg);
-	size = pt_regs_read_reg(regs, sizereg);
-
-	/*
-	 * Put the registers back in the original format suitable for a
-	 * prologue instruction, using the generic return routine from the
-	 * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH.
-	 */
-	if (esr & ESR_ELx_MOPS_ISS_MEM_INST) {
-		/* SET* instruction */
-		if (option_a ^ wrong_option) {
-			/* Format is from Option A; forward set */
-			pt_regs_write_reg(regs, dstreg, dst + size);
-			pt_regs_write_reg(regs, sizereg, -size);
-		}
-	} else {
-		/* CPY* instruction */
-		if (!(option_a ^ wrong_option)) {
-			/* Format is from Option B */
-			if (regs->pstate & PSR_N_BIT) {
-				/* Backward copy */
-				pt_regs_write_reg(regs, dstreg, dst - size);
-				pt_regs_write_reg(regs, srcreg, src - size);
-			}
-		} else {
-			/* Format is from Option A */
-			if (size & BIT(63)) {
-				/* Forward copy */
-				pt_regs_write_reg(regs, dstreg, dst + size);
-				pt_regs_write_reg(regs, srcreg, src + size);
-				pt_regs_write_reg(regs, sizereg, -size);
-			}
-		}
-	}
-
-	if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE)
-		regs->pc -= 8;
-	else
-		regs->pc -= 4;
+	arm64_mops_reset_regs(&regs->user_regs, esr);
 
 	/*
 	 * If single stepping then finish the step before executing the
arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -30,6 +30,7 @@
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
 #include <asm/processor.h>
+#include <asm/traps.h>
 
 struct kvm_exception_table_entry {
 	int insn, fixup;
@@ -265,6 +266,22 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
 }
 
+static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
+	arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
+	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
+
+	/*
+	 * Finish potential single step before executing the prologue
+	 * instruction.
+	 */
+	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
+
+	return true;
+}
+
 static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 {
 	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
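For orientation, a simplified sketch of how entries in the exit handler tables below get invoked; the helper name is illustrative and the real dispatch lives in this header's guest-exit fixup path. A handler returning true means the exit was handled entirely at EL2 and the vCPU re-enters the guest directly:

/* Simplified dispatch sketch; helper name is illustrative. */
static inline bool handle_exit_at_hyp(struct kvm_vcpu *vcpu, u64 *exit_code,
				      const exit_handler_fn *handlers)
{
	exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];

	/* true: handled in the hypervisor, re-enter the guest */
	return fn && fn(vcpu, exit_code);
}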
arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -197,7 +197,8 @@
 
 #define PVM_ID_AA64ISAR2_ALLOW (\
 	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) \
+	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) \
 	)
 
 u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
arch/arm64/kvm/hyp/nvhe/switch.c
@@ -192,6 +192,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
 static const exit_handler_fn pvm_exit_handlers[] = {
@@ -203,6 +204,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
arch/arm64/kvm/hyp/vhe/switch.c
@@ -139,6 +139,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
arch/arm64/kvm/sys_regs.c
@@ -1348,7 +1348,6 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
 		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
 			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS);
 		break;
 	case SYS_ID_AA64MMFR2_EL1:
 		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
@@ -2099,7 +2098,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 				       ID_AA64ISAR1_EL1_API |
 				       ID_AA64ISAR1_EL1_APA)),
 	ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
-					ID_AA64ISAR2_EL1_MOPS |
 					ID_AA64ISAR2_EL1_APA3 |
 					ID_AA64ISAR2_EL1_GPA3)),
 	ID_UNALLOCATED(6,3),
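Since ID_AA64ISAR2_EL1.MOPS is now writable from userspace, a VMM that needs a homogeneous migration pool can still hide MOPS from its guests. A hedged userspace sketch (helper name illustrative; the register encoding op0=3, op1=0, CRn=0, CRm=6, op2=2 and the MOPS field at bits [19:16] are from the Arm ARM):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hedged sketch: clear ID_AA64ISAR2_EL1.MOPS for one vCPU. */
static int hide_mops(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = ARM64_SYS_REG(3, 0, 0, 6, 2),	/* ID_AA64ISAR2_EL1 */
		.addr = (uint64_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;
	val &= ~(UINT64_C(0xf) << 16);			/* MOPS, bits [19:16] */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}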