Michael reported that we are seeing an ftrace bug on bootup when KASAN
is enabled and we are using -fpatchable-function-entry:
ftrace: allocating 47780 entries in 18 pages
ftrace-powerpc: 0xc0000000020b3d5c: No module provided for non-kernel address
------------[ ftrace bug ]------------
ftrace faulted on modifying
[<c0000000020b3d5c>] 0xc0000000020b3d5c
Initializing ftrace call sites
ftrace record flags: 0
(0)
expected tramp: c00000000008cef4
------------[ cut here ]------------
WARNING: CPU: 0 PID: 0 at kernel/trace/ftrace.c:2180 ftrace_bug+0x3c0/0x424
Modules linked in:
CPU: 0 PID: 0 Comm: swapper Not tainted 6.5.0-rc3-00120-g0f71dcfb4aef #860
Hardware name: IBM pSeries (emulated by qemu) POWER9 (raw) 0x4e1202 0xf000005 of:SLOF,HEAD hv:linux,kvm pSeries
NIP: c0000000003aa81c LR: c0000000003aa818 CTR: 0000000000000000
REGS: c0000000033cfab0 TRAP: 0700 Not tainted (6.5.0-rc3-00120-g0f71dcfb4aef)
MSR: 8000000002021033 <SF,VEC,ME,IR,DR,RI,LE> CR: 28028240 XER: 00000000
CFAR: c0000000002781a8 IRQMASK: 3
...
NIP [c0000000003aa81c] ftrace_bug+0x3c0/0x424
LR [c0000000003aa818] ftrace_bug+0x3bc/0x424
Call Trace:
ftrace_bug+0x3bc/0x424 (unreliable)
ftrace_process_locs+0x5f4/0x8a0
ftrace_init+0xc0/0x1d0
start_kernel+0x1d8/0x484
With CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y and
CONFIG_KASAN=y, the compiler emits nops in the functions that it
generates for registering and unregistering global variables (unlike
with -pg and -mprofile-kernel, where calls to _mcount() are not
generated in those functions). Those functions then end up in INIT_TEXT
and EXIT_TEXT respectively. We don't expect to see any profiled
functions in EXIT_TEXT, so ftrace_init_nop() assumes that all addresses
that aren't in the core kernel text belong to a module. Since these
functions fall outside core kernel text but do not belong to a module
either, we see the above bug.
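
For reference, the check that trips here has roughly this shape
(illustrative only, not the exact code in
arch/powerpc/kernel/trace/ftrace.c):

	/*
	 * Roughly the existing assumption: any patchable call site
	 * outside core kernel text is expected to come with a module,
	 * which the KASAN-generated exit-text functions don't.
	 */
	if (!core_kernel_text(ip)) {
		if (!mod) {
			pr_err("0x%lx: No module provided for non-kernel address\n", ip);
			return -EFAULT;
		}
	}
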
Address this by having ftrace ignore all locations in the text exit
sections of vmlinux.
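
A minimal sketch of that approach, built on the
__exittext_begin/__exittext_end markers declared in asm/sections.h
below (the helper name is illustrative, not necessarily what the patch
uses):

	static inline bool is_vmlinux_exittext(unsigned long addr)
	{
		/* vmlinux exit text is bounded by the __exittext_begin/end markers */
		return addr >= (unsigned long)__exittext_begin &&
		       addr < (unsigned long)__exittext_end;
	}

ftrace_init_nop() can then skip such locations instead of treating them
as module addresses.
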
Fixes: 0f71dcfb4aef ("powerpc/ftrace: Add support for -fpatchable-function-entry")
Cc: stable@vger.kernel.org # v6.6+
Reported-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Naveen N Rao <naveen@kernel.org>
Reviewed-by: Benjamin Gray <bgray@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240213175410.1091313-1-naveen@kernel.org
arch/powerpc/include/asm/sections.h:
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_SECTIONS_H
#define _ASM_POWERPC_SECTIONS_H
#ifdef __KERNEL__

#include <linux/elf.h>
#include <linux/uaccess.h>

#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
typedef struct func_desc func_desc_t;
#endif

#include <asm-generic/sections.h>

extern char __head_end[];
extern char __srwx_boundary[];
extern char __exittext_begin[], __exittext_end[];

/* Patch sites */
extern s32 patch__call_flush_branch_caches1;
extern s32 patch__call_flush_branch_caches2;
extern s32 patch__call_flush_branch_caches3;
extern s32 patch__flush_count_cache_return;
extern s32 patch__flush_link_stack_return;
extern s32 patch__call_kvm_flush_link_stack;
extern s32 patch__call_kvm_flush_link_stack_p9;
extern s32 patch__memset_nocache, patch__memcpy_nocache;

extern long flush_branch_caches;
extern long kvm_flush_link_stack;

#ifdef __powerpc64__

extern char __start_interrupts[];
extern char __end_interrupts[];

#ifdef CONFIG_PPC_POWERNV
extern char start_real_trampolines[];
extern char end_real_trampolines[];
extern char start_virt_trampolines[];
extern char end_virt_trampolines[];
#endif

/*
 * This assumes the kernel is never compiled -mcmodel=small or
 * the total .toc is always less than 64k.
 */
static inline unsigned long kernel_toc_addr(void)
{
#ifdef CONFIG_PPC_KERNEL_PCREL
	BUILD_BUG();
	return -1UL;
#else
	unsigned long toc_ptr;

	asm volatile("mr %0, 2" : "=r" (toc_ptr));
	return toc_ptr;
#endif
}

static inline int overlaps_interrupt_vector_text(unsigned long start,
						 unsigned long end)
{
	unsigned long real_start, real_end;
	real_start = __start_interrupts - _stext;
	real_end = __end_interrupts - _stext;

	return start < (unsigned long)__va(real_end) &&
			(unsigned long)__va(real_start) < end;
}

static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
{
	return start < (unsigned long)__init_end &&
			(unsigned long)_stext < end;
}

#else
static inline unsigned long kernel_toc_addr(void) { BUILD_BUG(); return -1UL; }
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SECTIONS_H */