x86/fpu: Provide infrastructure for KVM FPU cleanup
For the upcoming AMX support it's necessary to do a proper integration
with KVM. Currently KVM allocates two FPU structs which are used for
saving the user state of the vCPU thread and restoring the guest state
when entering vcpu_run() and doing the reverse operation before leaving
vcpu_run().

With the new fpstate mechanism this can be reduced to one extra buffer
by swapping the fpstate pointer in current::thread::fpu. This makes the
upcoming support for AMX and XFD simpler because then the fpstate
information (features, sizes, xfd) is always consistent and it does not
require any nasty workarounds.

Provide:

  - An allocator which initializes the state properly

  - A replacement for the existing FPU swap mechanism

Aside from the reduced memory footprint, this also makes state switching
more efficient when TIF_NEED_FPU_LOAD is set. It does not require a
memcpy as the state is already correct in the to-be-swapped-out fpstate.

The existing interfaces will be removed once KVM is converted over.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20211022185312.954684740@linutronix.de
commit 69f6ed1d14
parent 75c52dad5e

2 changed files with 92 additions and 6 deletions
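To make the intended call flow concrete, below is a minimal sketch of how
a KVM-style caller could drive the three new entry points once the
conversion mentioned above lands. The stub_vcpu structure and the
stub_*() helpers are illustrative assumptions, not part of this patch or
of KVM:

#include <linux/errno.h>
#include <asm/fpu/api.h>

struct stub_vcpu {
	struct fpu_guest guest_fpu;
};

static int stub_vcpu_create(struct stub_vcpu *vcpu)
{
	/* One vzalloc'ed guest fpstate instead of two full FPU structs */
	if (!fpu_alloc_guest_fpstate(&vcpu->guest_fpu))
		return -ENOMEM;
	return 0;
}

static void stub_vcpu_run(struct stub_vcpu *vcpu)
{
	/* Swap current->thread.fpu.fpstate to the guest buffer ... */
	fpu_swap_kvm_fpstate(&vcpu->guest_fpu, true);

	/* ... enter the guest here ... */

	/* ... then swap back to the task's own fpstate on the way out */
	fpu_swap_kvm_fpstate(&vcpu->guest_fpu, false);
}

static void stub_vcpu_destroy(struct stub_vcpu *vcpu)
{
	/* Legal only after the final swap-out, when in_use is false */
	fpu_free_guest_fpstate(&vcpu->guest_fpu);
}

Because only the fpstate pointer is exchanged, neither transition needs a
memcpy of the register contents.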
arch/x86/include/asm/fpu/api.h
@@ -135,9 +135,22 @@ extern void fpu_init_fpstate_user(struct fpu *fpu);
 
 extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature);
 
+/* KVM specific functions */
+extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu);
+extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu);
+extern int fpu_swap_kvm_fpstate(struct fpu_guest *gfpu, bool enter_guest);
 extern void fpu_swap_kvm_fpu(struct fpu *save, struct fpu *rstor, u64 restore_mask);
 
 extern int fpu_copy_kvm_uabi_to_fpstate(struct fpu *fpu, const void *buf, u64 xcr0, u32 *pkru);
 extern void fpu_copy_fpstate_to_kvm_uabi(struct fpu *fpu, void *buf, unsigned int size, u32 pkru);
 
+static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
+{
+	gfpu->fpstate->is_confidential = true;
+}
+
+static inline bool fpstate_is_confidential(struct fpu_guest *gfpu)
+{
+	return gfpu->fpstate->is_confidential;
+}
 #endif /* _ASM_X86_FPU_API_H */
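The two confidential-state helpers in the hunk above pair with the checks
in fpu_swap_kvm_fpstate() below: once a guest fpstate is marked
confidential (such as for an encrypted guest whose register state the
host cannot access), the swap code no longer saves or restores the actual
register contents. A hedged sketch of the presumed usage; the stub_*()
functions here are hypothetical, not part of this patch:

#include <linux/errno.h>
#include <asm/fpu/api.h>

static void stub_init_encrypted_guest(struct fpu_guest *gfpu)
{
	/* From here on the host must not touch the guest's FPU registers */
	fpstate_set_confidential(gfpu);
}

static int stub_copy_guest_fpu_to_user(struct fpu_guest *gfpu, void *buf)
{
	/* Confidential (encrypted) register state is not host-accessible */
	if (fpstate_is_confidential(gfpu))
		return -EINVAL;

	/* ... otherwise the state could be copied out for userspace ... */
	return 0;
}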
arch/x86/kernel/fpu/core.c
@@ -176,6 +176,75 @@ void fpu_reset_from_exception_fixup(void)
 }
 
 #if IS_ENABLED(CONFIG_KVM)
+static void __fpstate_reset(struct fpstate *fpstate);
+
+bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
+{
+	struct fpstate *fpstate;
+	unsigned int size;
+
+	size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
+	fpstate = vzalloc(size);
+	if (!fpstate)
+		return false;
+
+	__fpstate_reset(fpstate);
+	fpstate_init_user(fpstate);
+	fpstate->is_valloc = true;
+	fpstate->is_guest = true;
+
+	gfpu->fpstate = fpstate;
+	return true;
+}
+EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate);
+
+void fpu_free_guest_fpstate(struct fpu_guest *gfpu)
+{
+	struct fpstate *fps = gfpu->fpstate;
+
+	if (!fps)
+		return;
+
+	if (WARN_ON_ONCE(!fps->is_valloc || !fps->is_guest || fps->in_use))
+		return;
+
+	gfpu->fpstate = NULL;
+	vfree(fps);
+}
+EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate);
+
+int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
+{
+	struct fpstate *guest_fps = guest_fpu->fpstate;
+	struct fpu *fpu = &current->thread.fpu;
+	struct fpstate *cur_fps = fpu->fpstate;
+
+	fpregs_lock();
+	if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD))
+		save_fpregs_to_fpstate(fpu);
+
+	/* Swap fpstate */
+	if (enter_guest) {
+		fpu->__task_fpstate = cur_fps;
+		fpu->fpstate = guest_fps;
+		guest_fps->in_use = true;
+	} else {
+		guest_fps->in_use = false;
+		fpu->fpstate = fpu->__task_fpstate;
+		fpu->__task_fpstate = NULL;
+	}
+
+	cur_fps = fpu->fpstate;
+
+	if (!cur_fps->is_confidential)
+		restore_fpregs_from_fpstate(cur_fps, XFEATURE_MASK_FPSTATE);
+
+	fpregs_mark_activate();
+	fpregs_unlock();
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
+
 void fpu_swap_kvm_fpu(struct fpu *save, struct fpu *rstor, u64 restore_mask)
 {
 	fpregs_lock();
@@ -352,16 +421,20 @@ void fpstate_init_user(struct fpstate *fpstate)
 	fpstate_init_fstate(fpstate);
 }
 
+static void __fpstate_reset(struct fpstate *fpstate)
+{
+	/* Initialize sizes and feature masks */
+	fpstate->size = fpu_kernel_cfg.default_size;
+	fpstate->user_size = fpu_user_cfg.default_size;
+	fpstate->xfeatures = fpu_kernel_cfg.default_features;
+	fpstate->user_xfeatures = fpu_user_cfg.default_features;
+}
+
 void fpstate_reset(struct fpu *fpu)
 {
 	/* Set the fpstate pointer to the default fpstate */
 	fpu->fpstate = &fpu->__fpstate;
-
-	/* Initialize sizes and feature masks */
-	fpu->fpstate->size = fpu_kernel_cfg.default_size;
-	fpu->fpstate->user_size = fpu_user_cfg.default_size;
-	fpu->fpstate->xfeatures = fpu_kernel_cfg.default_features;
-	fpu->fpstate->user_xfeatures = fpu_user_cfg.default_features;
+	__fpstate_reset(fpu->fpstate);
 }
 
 #if IS_ENABLED(CONFIG_KVM)