linux/arch/powerpc/include/asm/irq.h
Douglas Anderson 8d539b84f1 nmi_backtrace: allow excluding an arbitrary CPU
The APIs that allow backtracing across CPUs have always had a way to
exclude the current CPU.  This convenience meant callers didn't need to
find a place to allocate a CPU mask just to handle the common case.

Let's extend the API to take a CPU ID to exclude instead of just a
boolean.  This isn't any more complex for the API to handle and allows the
hardlockup detector to exclude a different CPU (the one it already did a
trace for) without needing to find space for a CPU mask.

Arguably, this new API also encourages safer behavior.  Specifically, if
the caller wants to avoid tracing the current CPU (maybe because they have
already traced it), this makes it more obvious that they need to make sure
the current CPU ID can't change.

[akpm@linux-foundation.org: fix trigger_allbutcpu_cpu_backtrace() stub]
Link: https://lkml.kernel.org/r/20230804065935.v4.1.Ia35521b91fc781368945161d7b28538f9996c182@changeid
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Lecopzer Chen <lecopzer.chen@mediatek.com>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Pingfan Liu <kernelfans@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2023-08-18 10:19:00 -07:00
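
For illustration, here is a minimal caller-side sketch of the reshaped API.  It
assumes the trigger_allbutcpu_cpu_backtrace() helper mentioned in the fix-up
note above takes the CPU ID to exclude; the dump_other_cpus() wrapper is purely
hypothetical:

#include <linux/nmi.h>
#include <linux/smp.h>

/* Hypothetical wrapper: backtrace every CPU except the one we run on. */
static void dump_other_cpus(void)
{
	/*
	 * Pin the current CPU ID with get_cpu() (disables preemption) so
	 * that "exclude this CPU" stays meaningful while the backtrace is
	 * triggered.
	 */
	int this_cpu = get_cpu();

	trigger_allbutcpu_cpu_backtrace(this_cpu);
	put_cpu();
}

Because the argument is a CPU ID rather than a boolean, a caller such as the
hardlockup detector can just as easily exclude a CPU it has already traced,
even when that is not the CPU it is currently running on.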


/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifdef __KERNEL__
#ifndef _ASM_POWERPC_IRQ_H
#define _ASM_POWERPC_IRQ_H
/*
*/
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <asm/types.h>
#include <linux/atomic.h>
extern atomic_t ppc_n_lost_interrupts;
/* Total number of virq in the platform */
#define NR_IRQS CONFIG_NR_IRQS
/* Number of irqs reserved for a legacy isa controller */
#define NR_IRQS_LEGACY 16
extern irq_hw_number_t virq_to_hw(unsigned int virq);
static __inline__ int irq_canonicalize(int irq)
{
	return irq;
}
extern int distribute_irqs;
struct pt_regs;
#ifdef CONFIG_BOOKE_OR_40x
/*
* Per-cpu stacks for handling critical, debug and machine check
* level interrupts.
*/
extern void *critirq_ctx[NR_CPUS];
extern void *dbgirq_ctx[NR_CPUS];
extern void *mcheckirq_ctx[NR_CPUS];
#endif
/*
* Per-cpu stacks for handling hard and soft interrupts.
*/
extern void *hardirq_ctx[NR_CPUS];
extern void *softirq_ctx[NR_CPUS];
void __do_IRQ(struct pt_regs *regs);
int irq_choose_cpu(const struct cpumask *mask);
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
					   int exclude_cpu);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif
#endif /* _ASM_IRQ_H */
#endif /* __KERNEL__ */
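
As a rough sketch of how generic code might reach the powerpc hook declared
above (the wrapper name below is hypothetical; the real dispatch lives in
<linux/nmi.h> and differs in detail):

#include <linux/cpumask.h>

/* Hypothetical helper: ask the arch to backtrace all online CPUs but one. */
static void backtrace_online_cpus_except(int exclude_cpu)
{
#ifdef arch_trigger_cpumask_backtrace
	/*
	 * The self-referencing #define in the header lets generic code
	 * detect the presence of the arch hook with a simple #ifdef.
	 */
	arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);
#endif
}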