powerpc/64s/perf: add power_pmu_wants_prompt_pmi to say whether perf wants PMIs to be soft-NMI
Interrupt code enables MSR[EE] in some irq handlers while keeping local irqs disabled via soft-mask, allowing PMI interrupts to be taken as soft-NMI to improve profiling of irq handlers. When perf is not enabled, there is no point to doing this, it's additional overhead. So provide a function that can say if PMIs should be taken promptly if possible. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/20210922145452.352571-4-npiggin@gmail.com
This commit is contained in:
parent
ff0b0d6e1a
commit
5a7745b96f
2 changed files with 33 additions and 0 deletions
|
@ -342,6 +342,8 @@ static inline bool lazy_irq_pending_nocheck(void)
|
||||||
return __lazy_irq_pending(local_paca->irq_happened);
|
return __lazy_irq_pending(local_paca->irq_happened);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool power_pmu_wants_prompt_pmi(void);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This is called by asynchronous interrupts to conditionally
|
* This is called by asynchronous interrupts to conditionally
|
||||||
* re-enable hard interrupts after having cleared the source
|
* re-enable hard interrupts after having cleared the source
|
||||||
|
|
|
@ -17,6 +17,7 @@
|
||||||
#include <asm/firmware.h>
|
#include <asm/firmware.h>
|
||||||
#include <asm/ptrace.h>
|
#include <asm/ptrace.h>
|
||||||
#include <asm/code-patching.h>
|
#include <asm/code-patching.h>
|
||||||
|
#include <asm/hw_irq.h>
|
||||||
#include <asm/interrupt.h>
|
#include <asm/interrupt.h>
|
||||||
|
|
||||||
#ifdef CONFIG_PPC64
|
#ifdef CONFIG_PPC64
|
||||||
|
@ -2437,6 +2438,36 @@ static void perf_event_interrupt(struct pt_regs *regs)
|
||||||
perf_sample_event_took(sched_clock() - start_clock);
|
perf_sample_event_took(sched_clock() - start_clock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * If the perf subsystem wants performance monitor interrupts as soon as
 * possible (e.g., to sample the instruction address and stack chain),
 * this should return true. The IRQ masking code can then enable MSR[EE]
 * in some places (e.g., interrupt handlers) that allows PMI interrupts
 * though to improve accuracy of profiles, at the cost of some performance.
 *
 * The PMU counters can be enabled by other means (e.g., sysfs raw SPR
 * access), but in that case there is no need for prompt PMI handling.
 *
 * This currently returns true if any perf counter is being used. It
 * could possibly return false if only events are being counted rather than
 * samples being taken, but for now this is good enough.
 */
bool power_pmu_wants_prompt_pmi(void)
{
	struct cpu_hw_events *cpuhw;

	/*
	 * This could simply test local_paca->pmcregs_in_use if that were not
	 * under ifdef KVM.
	 */

	/*
	 * NOTE(review): ppmu appears to be the pointer to the registered PMU
	 * backend for this platform — confirm. No backend means perf cannot
	 * be using the PMU, so prompt PMIs are never wanted.
	 */
	if (!ppmu)
		return false;

	/*
	 * Nonzero n_events (count of perf events active on this CPU's
	 * hardware PMU) converts to true; zero converts to false.
	 */
	cpuhw = this_cpu_ptr(&cpu_hw_events);
	return cpuhw->n_events;
}
|
||||||
|
|
||||||
static int power_pmu_prepare_cpu(unsigned int cpu)
|
static int power_pmu_prepare_cpu(unsigned int cpu)
|
||||||
{
|
{
|
||||||
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
|
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
|
||||||
|
|
Loading…
Add table
Reference in a new issue