x86/cpufeatures: Add support for fast short REP; MOVSB
From the Intel Optimization Reference Manual: 3.7.6.1 Fast Short REP MOVSB Beginning with processors based on Ice Lake Client microarchitecture, REP MOVSB performance of short operations is enhanced. The enhancement applies to string lengths between 1 and 128 bytes long. Support for fast-short REP MOVSB is enumerated by the CPUID feature flag: CPUID.(EAX=7H, ECX=0H):EDX.FAST_SHORT_REP_MOVSB[bit 4] = 1. There is no change in the REP STOS performance. Add an X86_FEATURE_FSRM flag for this. memmove() avoids REP MOVSB for short (< 32 byte) copies. Check FSRM and use REP MOVSB for short copies on systems that support it. [ bp: Massage and add comment. ] Signed-off-by: Tony Luck <tony.luck@intel.com> Signed-off-by: Borislav Petkov <bp@suse.de> Link: https://lkml.kernel.org/r/20191216214254.26492-1-tony.luck@intel.com
This commit is contained in:
parent
50cc02e599
commit
f444a5ff95
2 changed files with 5 additions and 3 deletions
|
@@ -357,6 +357,7 @@
|
||||||
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
|
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
|
||||||
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
|
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
|
||||||
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
|
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
|
||||||
|
#define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */
|
||||||
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
|
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
|
||||||
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
|
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
|
||||||
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
|
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
|
||||||
|
|
|
@@ -29,10 +29,7 @@
|
||||||
SYM_FUNC_START_ALIAS(memmove)
|
SYM_FUNC_START_ALIAS(memmove)
|
||||||
SYM_FUNC_START(__memmove)
|
SYM_FUNC_START(__memmove)
|
||||||
|
|
||||||
/* Handle more 32 bytes in loop */
|
|
||||||
mov %rdi, %rax
|
mov %rdi, %rax
|
||||||
cmp $0x20, %rdx
|
|
||||||
jb 1f
|
|
||||||
|
|
||||||
/* Decide forward/backward copy mode */
|
/* Decide forward/backward copy mode */
|
||||||
cmp %rdi, %rsi
|
cmp %rdi, %rsi
|
||||||
|
@@ -42,7 +39,9 @@ SYM_FUNC_START(__memmove)
|
||||||
cmp %rdi, %r8
|
cmp %rdi, %r8
|
||||||
jg 2f
|
jg 2f
|
||||||
|
|
||||||
|
/* FSRM implies ERMS => no length checks, do the copy directly */
|
||||||
.Lmemmove_begin_forward:
|
.Lmemmove_begin_forward:
|
||||||
|
ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
|
||||||
ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
|
ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -114,6 +113,8 @@ SYM_FUNC_START(__memmove)
|
||||||
*/
|
*/
|
||||||
.p2align 4
|
.p2align 4
|
||||||
2:
|
2:
|
||||||
|
cmp $0x20, %rdx
|
||||||
|
jb 1f
|
||||||
cmp $680, %rdx
|
cmp $680, %rdx
|
||||||
jb 6f
|
jb 6f
|
||||||
cmp %dil, %sil
|
cmp %dil, %sil
|
||||||
|
|
Loading…
Add table
Reference in a new issue