Make use of an upcoming GCC feature to mitigate straight-line-speculation
for x86:

  https://gcc.gnu.org/g:53a643f8568067d7700a9f2facc8ba39974973d3
  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102952
  https://bugs.llvm.org/show_bug.cgi?id=52323

It's build-tested on x86_64-allyesconfig using GCC-12 and GCC-11.

Maintenance overhead of this should be fairly low due to objtool validation.

Size overhead of all these additional int3 instructions comes to:

        text     data      bss       dec      hex  filename
    22267751  6933356  2011368  31212475  1dc43bb  defconfig-build/vmlinux
    22804126  6933356  1470696  31208178  1dc32f2  defconfig-build/vmlinux.sls

Or roughly 2.4% additional text.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20211204134908.140103474@infradead.org
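For reference, the 2.4% figure follows directly from the text column above: (22804126 - 22267751) / 22267751 ≈ 0.024, i.e. about 2.4% more text. int3 is a single-byte instruction, so the growth is roughly one byte per padded return (and, with the full option, indirect-branch) site.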
arch/x86/include/asm/linkage.h · 38 lines · 720 B · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_LINKAGE_H
#define _ASM_X86_LINKAGE_H

#include <linux/stringify.h>

#undef notrace
#define notrace __attribute__((no_instrument_function))

#ifdef CONFIG_X86_32
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
#endif /* CONFIG_X86_32 */

#ifdef __ASSEMBLY__

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16)
#define __ALIGN		.p2align 4, 0x90
#define __ALIGN_STR	__stringify(__ALIGN)
#endif

#ifdef CONFIG_SLS
#define RET	ret; int3
#else
#define RET	ret
#endif

#else /* __ASSEMBLY__ */

#ifdef CONFIG_SLS
#define ASM_RET	"ret; int3\n\t"
#else
#define ASM_RET	"ret\n\t"
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_LINKAGE_H */
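Compiler-generated returns are covered by the GCC change linked above; the RET and ASM_RET macros here cover hand-written assembly and inline-asm templates, which the compiler cannot rewrite. A minimal usage sketch follows (illustrative only; the file and function names are hypothetical and not part of this commit):

	/* example.S -- hypothetical fragment, for illustration only */
	#include <linux/linkage.h>

	SYM_FUNC_START(example_return_zero)
		xorl	%eax, %eax
		RET		/* "ret; int3" with CONFIG_SLS=y, plain "ret" otherwise */
	SYM_FUNC_END(example_return_zero)

On the C side, inline-asm templates that open-code a return use the ASM_RET string instead of a bare "ret" for the same reason.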