The current perf code assumes that the counters which support PEBS are contiguous. That is no longer guaranteed with the newly introduced CPUID leaf 0x23: the counters are enumerated with a counter mask, and there may be holes in that mask on future platforms or in a virtualization environment.

Store the PEBS event mask rather than the maximum number of PEBS counters in the x86 PMU structures.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Link: https://lkml.kernel.org/r/20240626143545.480761-2-kan.liang@linux.intel.com
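The shape of the change is easier to see with a small, self-contained sketch. This is not the kernel patch itself; the mask value and the loop below only illustrate why a counter mask (which may contain holes) has to replace a plain "number of PEBS counters":

/*
 * Illustrative userspace sketch only, not the kernel code: with a
 * counter mask enumerated from CPUID leaf 0x23, PEBS-capable counters
 * are found by testing bits, not by assuming indices 0..n-1 are all
 * valid. The example mask value is made up.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_PEBS_EVENTS	32

int main(void)
{
	/* Hypothetical enumeration: counters 0-3 and 8-11 support PEBS. */
	uint64_t pebs_events_mask = 0x0f0fULL;
	int idx;

	for (idx = 0; idx < MAX_PEBS_EVENTS; idx++) {
		if (!(pebs_events_mask & (1ULL << idx)))
			continue;	/* hole in the mask: skip it */
		printf("counter %d supports PEBS\n", idx);
	}
	return 0;
}

A loop bounded only by a "maximum number of counters" would either miss the counters above a hole or treat non-PEBS counters as PEBS-capable; iterating the mask handles both cases.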
#ifndef _ASM_INTEL_DS_H
#define _ASM_INTEL_DS_H

#include <linux/percpu-defs.h>

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	(PAGE_SIZE << 4)

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS_FMT4	8
#define MAX_PEBS_EVENTS		32
#define MAX_PEBS_EVENTS_MASK	GENMASK_ULL(MAX_PEBS_EVENTS - 1, 0)
#define MAX_FIXED_PEBS_EVENTS	16

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS + MAX_FIXED_PEBS_EVENTS];
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);

struct debug_store_buffers {
	char	bts_buffer[BTS_BUFFER_SIZE];
	char	pebs_buffer[PEBS_BUFFER_SIZE];
};

#endif
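As a quick sanity check on the new define, a standalone sketch (the GENMASK_ULL() here is a simplified stand-in for the kernel macro, used only to show the resulting value):

/*
 * Standalone sketch: MAX_PEBS_EVENTS_MASK sets bits 0-31, one bit per
 * possible general-purpose PEBS counter.
 */
#include <assert.h>

#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define MAX_PEBS_EVENTS		32
#define MAX_PEBS_EVENTS_MASK	GENMASK_ULL(MAX_PEBS_EVENTS - 1, 0)

int main(void)
{
	assert(MAX_PEBS_EVENTS_MASK == 0xffffffffULL);
	return 0;
}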