x86/asm: Carve out a generic movdir64b() helper for general usage
Carve out the MOVDIR64B inline asm primitive into a generic helper so that
it can be used by other functions. Move it to special_insns.h and have
iosubmit_cmds512() call it.

 [ bp: Massage commit message. ]

Suggested-by: Michael Matz <matz@suse.de>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20201005151126.657029-2-dave.jiang@intel.com
parent 20f0afd1fb
commit 0888e1030d

2 changed files with 25 additions and 14 deletions
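MOVDIR64B performs a single 64-byte direct store from a source memory operand
to a 64-byte-aligned destination, with 64-byte write-completion atomicity,
which is what makes it useful for submitting work descriptors to devices. As a
quick illustration of what the carve-out enables (a sketch, not part of this
commit; desc64 and copy_desc are hypothetical names, and the caller must have
verified CPU support first):

        #include <linux/types.h>
        #include <asm/special_insns.h>  /* movdir64b() */

        struct desc64 {
                u8 bytes[64];
        } __aligned(64);        /* MOVDIR64B needs a 64-byte aligned destination */

        static void copy_desc(struct desc64 *dst, const struct desc64 *src)
        {
                movdir64b(dst, src);    /* one atomic 64-byte direct store */
        }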
arch/x86/include/asm/io.h

@@ -401,7 +401,7 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr,
 
 /**
  * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
- * @__dst: destination, in MMIO space (must be 512-bit aligned)
+ * @dst: destination, in MMIO space (must be 512-bit aligned)
  * @src: source
  * @count: number of 512 bits quantities to submit
  *
@@ -412,25 +412,14 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr,
  * Warning: Do not use this helper unless your driver has checked that the CPU
  * instruction is supported on the platform.
  */
-static inline void iosubmit_cmds512(void __iomem *__dst, const void *src,
+static inline void iosubmit_cmds512(void __iomem *dst, const void *src,
                                     size_t count)
 {
-        /*
-         * Note that this isn't an "on-stack copy", just definition of "dst"
-         * as a pointer to 64-bytes of stuff that is going to be overwritten.
-         * In the MOVDIR64B case that may be needed as you can use the
-         * MOVDIR64B instruction to copy arbitrary memory around. This trick
-         * lets the compiler know how much gets clobbered.
-         */
-        volatile struct { char _[64]; } *dst = __dst;
         const u8 *from = src;
         const u8 *end = from + count * 64;
 
         while (from < end) {
-                /* MOVDIR64B [rdx], rax */
-                asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
-                             : "=m" (dst)
-                             : "d" (from), "a" (dst));
+                movdir64b(dst, from);
                 from += 64;
         }
 }
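With the sized-lvalue trick moved into movdir64b(), iosubmit_cmds512() reduces
to a plain loop over 64-byte chunks. Per the Warning in the kerneldoc above,
callers are expected to gate on CPU support first; a minimal sketch of such a
caller (submit_batch and portal are hypothetical names, not part of this
commit):

        #include <linux/errno.h>
        #include <linux/types.h>
        #include <asm/cpufeature.h>
        #include <asm/io.h>

        static int submit_batch(void __iomem *portal, const void *descs,
                                size_t count)
        {
                /* driver must verify the instruction exists on this CPU */
                if (!boot_cpu_has(X86_FEATURE_MOVDIR64B))
                        return -EOPNOTSUPP;

                iosubmit_cmds512(portal, descs, count); /* count 64-byte writes */
                return 0;
        }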
arch/x86/include/asm/special_insns.h

@@ -234,6 +234,28 @@ static inline void clwb(volatile void *__p)
 
 #define nop() asm volatile ("nop")
 
+/* The dst parameter must be 64-bytes aligned */
+static inline void movdir64b(void *dst, const void *src)
+{
+        const struct { char _[64]; } *__src = src;
+        struct { char _[64]; } *__dst = dst;
+
+        /*
+         * MOVDIR64B %(rdx), rax.
+         *
+         * Both __src and __dst must be memory constraints in order to tell the
+         * compiler that no other memory accesses should be reordered around
+         * this one.
+         *
+         * Also, both must be supplied as lvalues because this tells
+         * the compiler what the object is (its size) the instruction accesses.
+         * I.e., not the pointers but what they point to, thus the deref'ing '*'.
+         */
+        asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+                     : "+m" (*__dst)
+                     :  "m" (*__src), "a" (__dst), "d" (__src));
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_SPECIAL_INSNS_H */
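The raw .byte sequence above encodes MOVDIR64B (%rdx), %rax, so the file still
assembles with toolchains whose assembler predates the instruction. For
readability, an equivalent form using the mnemonic, assuming a new-enough
binutils (a sketch, not part of the patch):

        static inline void movdir64b_mnemonic(void *dst, const void *src)
        {
                const struct { char _[64]; } *__src = src;
                struct { char _[64]; } *__dst = dst;

                /* same constraints as the patch; only the encoding differs */
                asm volatile("movdir64b (%%rdx), %%rax"
                             : "+m" (*__dst)
                             : "m" (*__src), "a" (__dst), "d" (__src));
        }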