perf evsel: Fallback to "task-clock" when not system wide
When the "cycles" event isn't available evsel will fall back to the "cpu-clock" software event. "task-clock" is similar to "cpu-clock" but only runs when the process is running. Falling back to "cpu-clock" when not system wide leads to confusion; by falling back to "task-clock" it is hoped the confusion is less. Pass the target to determine if "task-clock" is more appropriate. Update a nearby comment and debug string for the change. Signed-off-by: Ian Rogers <irogers@google.com> Tested-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com> Acked-by: Namhyung Kim <namhyung@kernel.org> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Ajay Kaher <akaher@vmware.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Alexey Makhalov <amakhalov@vmware.com> Cc: Ian Rogers <irogers@google.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Kan Liang <kan.liang@linux.intel.com> Cc: Leo Yan <leo.yan@linaro.org> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Ravi Bangoria <ravi.bangoria@amd.com> Cc: Sandipan Das <sandipan.das@amd.com> Cc: Yang Jihong <yangjihong1@huawei.com> Link: https://lore.kernel.org/r/20231121000420.368075-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
parent
018b042485
commit
eb2eac0c7b
5 changed files with 15 additions and 12 deletions
|
@ -1360,7 +1360,7 @@ static int record__open(struct record *rec)
|
||||||
evlist__for_each_entry(evlist, pos) {
|
evlist__for_each_entry(evlist, pos) {
|
||||||
try_again:
|
try_again:
|
||||||
if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
|
if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
|
||||||
if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
|
if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) {
|
||||||
if (verbose > 0)
|
if (verbose > 0)
|
||||||
ui__warning("%s\n", msg);
|
ui__warning("%s\n", msg);
|
||||||
goto try_again;
|
goto try_again;
|
||||||
|
|
|
@ -653,7 +653,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
|
||||||
if ((evsel__leader(counter) != counter) ||
|
if ((evsel__leader(counter) != counter) ||
|
||||||
!(counter->core.leader->nr_members > 1))
|
!(counter->core.leader->nr_members > 1))
|
||||||
return COUNTER_SKIP;
|
return COUNTER_SKIP;
|
||||||
} else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
|
} else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) {
|
||||||
if (verbose > 0)
|
if (verbose > 0)
|
||||||
ui__warning("%s\n", msg);
|
ui__warning("%s\n", msg);
|
||||||
return COUNTER_RETRY;
|
return COUNTER_RETRY;
|
||||||
|
|
|
@ -1044,7 +1044,7 @@ try_again:
|
||||||
perf_top_overwrite_fallback(top, counter))
|
perf_top_overwrite_fallback(top, counter))
|
||||||
goto try_again;
|
goto try_again;
|
||||||
|
|
||||||
if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
|
if (evsel__fallback(counter, &opts->target, errno, msg, sizeof(msg))) {
|
||||||
if (verbose > 0)
|
if (verbose > 0)
|
||||||
ui__warning("%s\n", msg);
|
ui__warning("%s\n", msg);
|
||||||
goto try_again;
|
goto try_again;
|
||||||
|
|
|
@ -2853,7 +2853,8 @@ u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
|
bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
|
||||||
|
char *msg, size_t msgsize)
|
||||||
{
|
{
|
||||||
int paranoid;
|
int paranoid;
|
||||||
|
|
||||||
|
@ -2861,18 +2862,19 @@ bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
|
||||||
evsel->core.attr.type == PERF_TYPE_HARDWARE &&
|
evsel->core.attr.type == PERF_TYPE_HARDWARE &&
|
||||||
evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
|
evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
|
||||||
/*
|
/*
|
||||||
* If it's cycles then fall back to hrtimer based
|
* If it's cycles then fall back to hrtimer based cpu-clock sw
|
||||||
* cpu-clock-tick sw counter, which is always available even if
|
* counter, which is always available even if no PMU support.
|
||||||
* no PMU support.
|
|
||||||
*
|
*
|
||||||
* PPC returns ENXIO until 2.6.37 (behavior changed with commit
|
* PPC returns ENXIO until 2.6.37 (behavior changed with commit
|
||||||
* b0a873e).
|
* b0a873e).
|
||||||
*/
|
*/
|
||||||
scnprintf(msg, msgsize, "%s",
|
|
||||||
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
|
|
||||||
|
|
||||||
evsel->core.attr.type = PERF_TYPE_SOFTWARE;
|
evsel->core.attr.type = PERF_TYPE_SOFTWARE;
|
||||||
evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;
|
evsel->core.attr.config = target__has_cpu(target)
|
||||||
|
? PERF_COUNT_SW_CPU_CLOCK
|
||||||
|
: PERF_COUNT_SW_TASK_CLOCK;
|
||||||
|
scnprintf(msg, msgsize,
|
||||||
|
"The cycles event is not supported, trying to fall back to %s",
|
||||||
|
target__has_cpu(target) ? "cpu-clock" : "task-clock");
|
||||||
|
|
||||||
zfree(&evsel->name);
|
zfree(&evsel->name);
|
||||||
return true;
|
return true;
|
||||||
|
|
|
@ -460,7 +460,8 @@ static inline bool evsel__is_clock(const struct evsel *evsel)
|
||||||
evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
|
evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize);
|
bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
|
||||||
|
char *msg, size_t msgsize);
|
||||||
int evsel__open_strerror(struct evsel *evsel, struct target *target,
|
int evsel__open_strerror(struct evsel *evsel, struct target *target,
|
||||||
int err, char *msg, size_t size);
|
int err, char *msg, size_t size);
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue