tracing: Remove definition of trace_*_rcuidle()
The trace_*_rcuidle() variant of a tracepoint was used to handle places
where a tracepoint was located but RCU was not "watching". All those
locations have been removed, and RCU should now be watching wherever a
tracepoint is located. The trace_*_rcuidle() variant can therefore be
removed.

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Link: https://lore.kernel.org/20241003181629.36209057@gandalf.local.home
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
parent 4a8840af5f
commit 48bcda6848

4 changed files with 8 additions and 78 deletions
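For orientation, a minimal before/after sketch of what the change means for a caller; the tracepoint name "sample_event" below is hypothetical and used only for illustration, it is not taken from this patch:

/* Hypothetical caller, before this change: code that could run while RCU was
 * not watching (e.g. the idle path) had to use the _rcuidle variant, which
 * took an SRCU read lock and called ct_irq_enter_irqson() around the probe.
 */
static void sample_idle_path_old(int state)
{
        if (!in_nmi())                  /* the _rcuidle variant was not NMI-safe */
                trace_sample_event_rcuidle(state);
}

/* After this change: RCU is watching at every remaining tracepoint location,
 * so every caller uses the plain variant; with CONFIG_LOCKDEP the tracepoint
 * WARNs once if rcu_is_watching() is false at the call site.
 */
static void sample_idle_path_new(int state)
{
        trace_sample_event(state);
}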
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -196,67 +196,25 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 #define __DO_TRACE_CALL(name, args)     __traceiter_##name(NULL, args)
 #endif /* CONFIG_HAVE_STATIC_CALL */
 
-/*
- * ARCH_WANTS_NO_INSTR archs are expected to have sanitized entry and idle
- * code that disallow any/all tracing/instrumentation when RCU isn't watching.
- */
-#ifdef CONFIG_ARCH_WANTS_NO_INSTR
-#define RCUIDLE_COND(rcuidle)   (rcuidle)
-#else
-/* srcu can't be used from NMI */
-#define RCUIDLE_COND(rcuidle)   (rcuidle && in_nmi())
-#endif
-
 /*
  * it_func[0] is never NULL because there is at least one element in the array
  * when the array itself is non NULL.
  */
-#define __DO_TRACE(name, args, cond, rcuidle)                           \
+#define __DO_TRACE(name, args, cond)                                    \
         do {                                                            \
                 int __maybe_unused __idx = 0;                           \
                                                                         \
                 if (!(cond))                                            \
                         return;                                         \
                                                                         \
-                if (WARN_ONCE(RCUIDLE_COND(rcuidle),                    \
-                              "Bad RCU usage for tracepoint"))          \
-                        return;                                         \
-                                                                        \
                 /* keep srcu and sched-rcu usage consistent */          \
                 preempt_disable_notrace();                              \
                                                                         \
-                /*                                                      \
-                 * For rcuidle callers, use srcu since sched-rcu        \
-                 * doesn't work from the idle path.                     \
-                 */                                                     \
-                if (rcuidle) {                                          \
-                        __idx = srcu_read_lock_notrace(&tracepoint_srcu);\
-                        ct_irq_enter_irqson();                          \
-                }                                                       \
-                                                                        \
                 __DO_TRACE_CALL(name, TP_ARGS(args));                   \
                                                                         \
-                if (rcuidle) {                                          \
-                        ct_irq_exit_irqson();                           \
-                        srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
-                }                                                       \
-                                                                        \
                 preempt_enable_notrace();                               \
         } while (0)
 
-#ifndef MODULE
-#define __DECLARE_TRACE_RCU(name, proto, args, cond)                    \
-        static inline void trace_##name##_rcuidle(proto)                \
-        {                                                               \
-                if (static_branch_unlikely(&__tracepoint_##name.key))   \
-                        __DO_TRACE(name,                                \
-                                TP_ARGS(args),                          \
-                                TP_CONDITION(cond), 1);                 \
-        }
-#else
-#define __DECLARE_TRACE_RCU(name, proto, args, cond)
-#endif
-
 /*
  * Make sure the alignment of the structure in the __tracepoints section will
  * not add unwanted padding between the beginning of the section and the
@@ -277,14 +235,12 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
                 if (static_branch_unlikely(&__tracepoint_##name.key))  \
                         __DO_TRACE(name,                                \
                                 TP_ARGS(args),                          \
-                                TP_CONDITION(cond), 0);                 \
+                                TP_CONDITION(cond));                    \
                 if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {             \
                         WARN_ONCE(!rcu_is_watching(),                   \
                                   "RCU not watching for tracepoint");   \
                 }                                                       \
         }                                                               \
-        __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args),          \
-                            PARAMS(cond))                               \
         static inline int                                               \
         register_trace_##name(void (*probe)(data_proto), void *data)    \
         {                                                               \
@@ -375,8 +331,6 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 #define __DECLARE_TRACE(name, proto, args, cond, data_proto)            \
         static inline void trace_##name(proto)                          \
         { }                                                             \
-        static inline void trace_##name##_rcuidle(proto)                \
-        { }                                                             \
         static inline int                                               \
         register_trace_##name(void (*probe)(data_proto),                \
                               void *data)                               \
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -43,8 +43,6 @@ DEFINE_EVENT(preemptirq_template, irq_enable,
 #else
 #define trace_irq_enable(...)
 #define trace_irq_disable(...)
-#define trace_irq_enable_rcuidle(...)
-#define trace_irq_disable_rcuidle(...)
 #endif
 
 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
@@ -58,8 +56,6 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
 #else
 #define trace_preempt_enable(...)
 #define trace_preempt_disable(...)
-#define trace_preempt_enable_rcuidle(...)
-#define trace_preempt_disable_rcuidle(...)
 #endif
 
 #endif /* _TRACE_PREEMPTIRQ_H */
@@ -69,10 +65,6 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
 #else /* !CONFIG_PREEMPTIRQ_TRACEPOINTS */
 #define trace_irq_enable(...)
 #define trace_irq_disable(...)
-#define trace_irq_enable_rcuidle(...)
-#define trace_irq_disable_rcuidle(...)
 #define trace_preempt_enable(...)
 #define trace_preempt_disable(...)
-#define trace_preempt_enable_rcuidle(...)
-#define trace_preempt_disable_rcuidle(...)
 #endif
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -15,20 +15,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/preemptirq.h>
 
-/*
- * Use regular trace points on architectures that implement noinstr
- * tooling: these calls will only happen with RCU enabled, which can
- * use a regular tracepoint.
- *
- * On older architectures, use the rcuidle tracing methods (which
- * aren't NMI-safe - so exclude NMI contexts):
- */
-#ifdef CONFIG_ARCH_WANTS_NO_INSTR
-#define trace(point)    trace_##point
-#else
-#define trace(point)    if (!in_nmi()) trace_##point##_rcuidle
-#endif
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 /* Per-cpu variable to prevent redundant calls when IRQs already off */
 static DEFINE_PER_CPU(int, tracing_irq_cpu);
@@ -42,7 +28,7 @@ static DEFINE_PER_CPU(int, tracing_irq_cpu);
 void trace_hardirqs_on_prepare(void)
 {
         if (this_cpu_read(tracing_irq_cpu)) {
-                trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
+                trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
                 tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
                 this_cpu_write(tracing_irq_cpu, 0);
         }
@@ -53,7 +39,7 @@ NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
 void trace_hardirqs_on(void)
 {
         if (this_cpu_read(tracing_irq_cpu)) {
-                trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
+                trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
                 tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
                 this_cpu_write(tracing_irq_cpu, 0);
         }
@@ -75,7 +61,7 @@ void trace_hardirqs_off_finish(void)
         if (!this_cpu_read(tracing_irq_cpu)) {
                 this_cpu_write(tracing_irq_cpu, 1);
                 tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
-                trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
+                trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
         }
 
 }
@@ -89,7 +75,7 @@ void trace_hardirqs_off(void)
         if (!this_cpu_read(tracing_irq_cpu)) {
                 this_cpu_write(tracing_irq_cpu, 1);
                 tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
-                trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
+                trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
         }
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
@@ -100,13 +86,13 @@ NOKPROBE_SYMBOL(trace_hardirqs_off);
 
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-        trace(preempt_enable)(a0, a1);
+        trace_preempt_enable(a0, a1);
         tracer_preempt_on(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-        trace(preempt_disable)(a0, a1);
+        trace_preempt_disable(a0, a1);
         tracer_preempt_off(a0, a1);
 }
 #endif
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -152,9 +152,7 @@ regex_c=(
         '/^BPF_CALL_[0-9]([[:space:]]*\([[:alnum:]_]*\).*/\1/'
         '/^COMPAT_SYSCALL_DEFINE[0-9]([[:space:]]*\([[:alnum:]_]*\).*/compat_sys_\1/'
         '/^TRACE_EVENT([[:space:]]*\([[:alnum:]_]*\).*/trace_\1/'
-        '/^TRACE_EVENT([[:space:]]*\([[:alnum:]_]*\).*/trace_\1_rcuidle/'
         '/^DEFINE_EVENT([^,)]*,[[:space:]]*\([[:alnum:]_]*\).*/trace_\1/'
-        '/^DEFINE_EVENT([^,)]*,[[:space:]]*\([[:alnum:]_]*\).*/trace_\1_rcuidle/'
         '/^DEFINE_INSN_CACHE_OPS([[:space:]]*\([[:alnum:]_]*\).*/get_\1_slot/'
         '/^DEFINE_INSN_CACHE_OPS([[:space:]]*\([[:alnum:]_]*\).*/free_\1_slot/'
         '/^PAGEFLAG([[:space:]]*\([[:alnum:]_]*\).*/Page\1/'