When compiling Linux 6.1.0-rc3 configured with CONFIG_64BIT=y and
CONFIG_PARAVIRT_SPINLOCKS=y on x86_64 using LLVM 11.0, the build failed with:

  <inline asm> error: changed section flags for .spinlock.text, expected:: 0x6

The reason is that the .spinlock.text section in kernel/locking/qspinlock.o
is entered multiple times, but its flags are omitted in the subsequent uses,
and the LLVM 11.0 assembler does not permit the flags of an already-defined
section to be left out on later uses.

So this patch adds the corresponding flags to avoid the above error.
Fixes: 501f7f69bc ("locking: Add __lockfunc to slow path functions")
Signed-off-by: Guo Jin <guoj17@chinatelecom.cn>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Nathan Chancellor <nathan@kernel.org>
Link: https://lore.kernel.org/r/20221108060126.2505-1-guoj17@chinatelecom.cn
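
As an illustration of the failure mode (a minimal sketch, not taken from the
patch; the function name below is hypothetical): kernel/locking/qspinlock.o
ends up with C code placed in .spinlock.text via __lockfunc and with
file-scope inline asm that re-enters the same section, so the asm has to
repeat the "ax" flags for LLVM 11's integrated assembler to accept it.
Ordering and the exact diagnostic may differ slightly from the kernel build.

/*
 * Illustration only -- not part of the patch.  A hypothetical translation
 * unit mimicking kernel/locking/qspinlock.o: the function below is placed
 * in .spinlock.text (what __lockfunc does), and file-scope inline asm
 * re-enters the same section.
 */
__attribute__((section(".spinlock.text")))
void spinlock_text_user(void)		/* hypothetical name */
{
}

/* Flags repeated -- accepted by the LLVM 11 integrated assembler: */
asm(".pushsection .spinlock.text, \"ax\";"
    ".popsection");

/*
 * Flags omitted -- per the commit message, LLVM 11 rejects this with the
 * "changed section flags for .spinlock.text" error (0x6 is
 * SHF_ALLOC | SHF_EXECINSTR, the flags of a code section):
 *
 * asm(".pushsection .spinlock.text;"
 *     ".popsection");
 */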
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

#include <asm/ibt.h>

/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
#ifdef CONFIG_64BIT

__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
#define __pv_queued_spin_unlock	__pv_queued_spin_unlock
#define PV_UNLOCK		"__raw_callee_save___pv_queued_spin_unlock"
#define PV_UNLOCK_SLOWPATH	"__raw_callee_save___pv_queued_spin_unlock_slowpath"

/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the registers saving trunk and the body of the following
 * C code. Note that it puts the code in the .spinlock.text section which
 * is equivalent to adding __lockfunc in the C code:
 *
 * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
 *
 *	if (likely(lockval == _Q_LOCKED_VAL))
 *		return;
 *	pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *   rdi = lock              (first argument)
 *   rsi = lockval           (second argument)
 *   rdx = internal variable (set to 0)
 */
asm    (".pushsection .spinlock.text, \"ax\";"
	".globl " PV_UNLOCK ";"
	".type " PV_UNLOCK ", @function;"
	".align 4,0x90;"
	PV_UNLOCK ": "
	ASM_ENDBR
	FRAME_BEGIN
	"push  %rdx;"
	"mov   $0x1,%eax;"
	"xor   %edx,%edx;"
	LOCK_PREFIX "cmpxchg %dl,(%rdi);"
	"cmp   $0x1,%al;"
	"jne   .slowpath;"
	"pop   %rdx;"
	FRAME_END
	ASM_RET
	".slowpath: "
	"push  %rsi;"
	"movzbl %al,%esi;"
	"call " PV_UNLOCK_SLOWPATH ";"
	"pop   %rsi;"
	"pop   %rdx;"
	FRAME_END
	ASM_RET
	".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
	".popsection");

#else /* CONFIG_64BIT */

extern void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock);
__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock, ".spinlock.text");

#endif /* CONFIG_64BIT */
#endif