Michael reported that we are seeing an ftrace bug on bootup when KASAN
is enabled and we are using -fpatchable-function-entry:
ftrace: allocating 47780 entries in 18 pages
ftrace-powerpc: 0xc0000000020b3d5c: No module provided for non-kernel address
------------[ ftrace bug ]------------
ftrace faulted on modifying
[<c0000000020b3d5c>] 0xc0000000020b3d5c
Initializing ftrace call sites
ftrace record flags: 0
(0)
expected tramp: c00000000008cef4
------------[ cut here ]------------
WARNING: CPU: 0 PID: 0 at kernel/trace/ftrace.c:2180 ftrace_bug+0x3c0/0x424
Modules linked in:
CPU: 0 PID: 0 Comm: swapper Not tainted 6.5.0-rc3-00120-g0f71dcfb4aef #860
Hardware name: IBM pSeries (emulated by qemu) POWER9 (raw) 0x4e1202 0xf000005 of:SLOF,HEAD hv:linux,kvm pSeries
NIP: c0000000003aa81c LR: c0000000003aa818 CTR: 0000000000000000
REGS: c0000000033cfab0 TRAP: 0700 Not tainted (6.5.0-rc3-00120-g0f71dcfb4aef)
MSR: 8000000002021033 <SF,VEC,ME,IR,DR,RI,LE> CR: 28028240 XER: 00000000
CFAR: c0000000002781a8 IRQMASK: 3
...
NIP [c0000000003aa81c] ftrace_bug+0x3c0/0x424
LR [c0000000003aa818] ftrace_bug+0x3bc/0x424
Call Trace:
ftrace_bug+0x3bc/0x424 (unreliable)
ftrace_process_locs+0x5f4/0x8a0
ftrace_init+0xc0/0x1d0
start_kernel+0x1d8/0x484
With CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y and
CONFIG_KASAN=y, the compiler emits nops in the functions it generates
for registering and unregistering global variables (unlike with -pg and
-mprofile-kernel, where calls to _mcount() are not generated in those
functions). Those functions then end up in INIT_TEXT and EXIT_TEXT
respectively. We don't expect to see any profiled functions in
EXIT_TEXT, so ftrace_init_nop() assumes that all addresses that aren't
in the core kernel text belong to a module. Since these functions do
not meet that criterion, we see the above bug.
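For reference, the check that trips here is the one printing the "No
module provided for non-kernel address" line in the log above. A rough,
hedged approximation of that path in powerpc's ftrace_init_nop() follows
(illustrative only, not a verbatim excerpt of
arch/powerpc/kernel/trace/ftrace.c):

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long addr = rec->ip;

	/*
	 * Call sites outside core kernel text are expected to come from
	 * a module. The KASAN destructor stubs placed in EXIT_TEXT break
	 * that assumption, since EXIT_TEXT is not core kernel text.
	 */
	if (!core_kernel_text(addr) && !mod) {
		pr_err("0x%lx: No module provided for non-kernel address\n", addr);
		return -EFAULT;
	}

	/* ... patch the call site to a nop ... */
	return 0;
}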
Address this by having ftrace ignore all locations in the text exit
sections of vmlinux.
Fixes: 0f71dcfb4aef ("powerpc/ftrace: Add support for -fpatchable-function-entry")
Cc: stable@vger.kernel.org # v6.6+
Reported-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Naveen N Rao <naveen@kernel.org>
Reviewed-by: Benjamin Gray <bgray@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240213175410.1091313-1-naveen@kernel.org
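The fix referenced above takes effect in ftrace_call_adjust(), which is
declared near the bottom of the header shown below. A minimal sketch of
the approach, assuming __exittext_begin/__exittext_end are the linker
symbols bounding vmlinux's exit text (an illustration of the idea, not
the verbatim patch):

/* Sketch: skip any call site that lands in vmlinux's exit text. */
extern char __exittext_begin[], __exittext_end[];

unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* Returning 0 makes the ftrace core drop this record. */
	if (addr >= (unsigned long)__exittext_begin &&
	    addr < (unsigned long)__exittext_end)
		return 0;

	return addr;
}

With exit-text locations filtered out here, ftrace_init_nop() never sees
the KASAN-generated destructor stubs and the boot-time warning above no
longer triggers.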
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_FTRACE
#define _ASM_POWERPC_FTRACE

#include <asm/types.h>

#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR		((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */

#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

/* Ignore unused weak functions which will have larger offsets */
#if defined(CONFIG_MPROFILE_KERNEL) || defined(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)
#define FTRACE_MCOUNT_MAX_OFFSET	16
#elif defined(CONFIG_PPC32)
#define FTRACE_MCOUNT_MAX_OFFSET	8
#endif

#ifndef __ASSEMBLY__
extern void _mcount(void);

unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp);

struct module;
struct dyn_ftrace;
struct dyn_arch_ftrace {
	struct module *mod;
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define ftrace_need_init_nop()	(true)
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop

struct ftrace_regs {
	struct pt_regs regs;
};

static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
	/* We clear regs.msr in ftrace_call */
	return fregs->regs.msr ? &fregs->regs : NULL;
}

static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
				    unsigned long ip)
{
	regs_set_return_ip(&fregs->regs, ip);
}

static __always_inline unsigned long
ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs)
{
	return instruction_pointer(&fregs->regs);
}

#define ftrace_regs_get_argument(fregs, n) \
	regs_get_kernel_argument(&(fregs)->regs, n)
#define ftrace_regs_get_stack_pointer(fregs) \
	kernel_stack_pointer(&(fregs)->regs)
#define ftrace_regs_return_value(fregs) \
	regs_return_value(&(fregs)->regs)
#define ftrace_regs_set_return_value(fregs, ret) \
	regs_set_return_value(&(fregs)->regs, ret)
#define ftrace_override_function_with_return(fregs) \
	override_function_with_return(&(fregs)->regs)
#define ftrace_regs_query_register_offset(name) \
	regs_query_register_offset(name)

struct ftrace_ops;

#define ftrace_graph_func ftrace_graph_func
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef __ASSEMBLY__
#ifdef CONFIG_FTRACE_SYSCALLS
/*
 * Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
 * for instance) or ppc32_/ppc64_. We should also match the sys_ variant with
 * those.
 */
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	return !strcmp(sym, name) ||
	       (!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
	       (!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
	       (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
	       (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
}
#endif /* CONFIG_FTRACE_SYSCALLS */

#if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER)
#include <asm/paca.h>

static inline void this_cpu_disable_ftrace(void)
{
	get_paca()->ftrace_enabled = 0;
}

static inline void this_cpu_enable_ftrace(void)
{
	get_paca()->ftrace_enabled = 1;
}

/* Disable ftrace on this CPU if possible (may not be implemented) */
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled)
{
	get_paca()->ftrace_enabled = ftrace_enabled;
}

static inline u8 this_cpu_get_ftrace_enabled(void)
{
	return get_paca()->ftrace_enabled;
}
#else /* CONFIG_PPC64 */
static inline void this_cpu_disable_ftrace(void) { }
static inline void this_cpu_enable_ftrace(void) { }
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { }
static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_FUNCTION_TRACER
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
void ftrace_free_init_tramp(void);
unsigned long ftrace_call_adjust(unsigned long addr);
#else
static inline void ftrace_free_init_tramp(void) { }
static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; }
#endif
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_FTRACE */