1
0
Fork 0
mirror, synced 2025-03-06 20:59:54 +01:00

cpufreq/amd-pstate: Add trace event for EPP perf updates

In "active" mode the most important thing for debugging whether
an issue is hardware or software based is to look at what was the
last thing written to the CPPC request MSR or shared memory region.

The 'amd_pstate_epp_perf' trace event shows the values being written
for all CPUs.

Reviewed-by: Perry Yuan <perry.yuan@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Link: https://lore.kernel.org/r/20241209185248.16301-4-mario.limonciello@amd.com
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
This commit is contained in:
Mario Limonciello 2024-12-09 12:52:36 -06:00
parent 53ec2101df
commit 4dcd130151
2 changed files with 73 additions and 0 deletions

View file

@@ -88,6 +88,51 @@ TRACE_EVENT(amd_pstate_perf,
)
);
/*
 * amd_pstate_epp_perf - trace per-CPU EPP ("active" mode) perf updates.
 *
 * Records, for one CPU, the values being applied: highest perf, the
 * energy-performance preference (epp), the min/max perf limits and the
 * boost state.  NOTE(review): per the commit message, callers emit this
 * around writes to the CPPC request MSR / shared memory region, so the
 * last event per CPU reflects the last request handed to hardware --
 * confirm against the call sites in amd-pstate.c.
 */
TRACE_EVENT(amd_pstate_epp_perf,

/* Tracepoint call signature. */
TP_PROTO(unsigned int cpu_id,
unsigned int highest_perf,
unsigned int epp,
unsigned int min_perf,
unsigned int max_perf,
bool boost
),

TP_ARGS(cpu_id,
highest_perf,
epp,
min_perf,
max_perf,
boost),

/* Ring-buffer record layout: one field per traced value. */
TP_STRUCT__entry(
__field(unsigned int, cpu_id)
__field(unsigned int, highest_perf)
__field(unsigned int, epp)
__field(unsigned int, min_perf)
__field(unsigned int, max_perf)
__field(bool, boost)
),

/* Copy each argument into the ring-buffer entry verbatim. */
TP_fast_assign(
__entry->cpu_id = cpu_id;
__entry->highest_perf = highest_perf;
__entry->epp = epp;
__entry->min_perf = min_perf;
__entry->max_perf = max_perf;
__entry->boost = boost;
),

/* Rendered as: "cpuN: [min<->max]/highest, epp=E, boost=B". */
TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
(unsigned int)__entry->cpu_id,
(unsigned int)__entry->min_perf,
(unsigned int)__entry->max_perf,
(unsigned int)__entry->highest_perf,
(unsigned int)__entry->epp,
(bool)__entry->boost
)
);
#endif /* _AMD_PSTATE_TRACE_H */
/* This part must be outside protection */

View file

@@ -324,6 +324,14 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
return -EBUSY;
}
if (trace_amd_pstate_epp_perf_enabled()) {
trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
epp,
AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
AMD_CPPC_MAX_PERF(cpudata->cppc_req_cached),
cpudata->boost_state);
}
ret = amd_pstate_set_epp(cpudata, epp);
return ret;
@@ -1598,6 +1606,13 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
WRITE_ONCE(cpudata->cppc_req_cached, value);
if (trace_amd_pstate_epp_perf_enabled()) {
trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
cpudata->min_limit_perf,
cpudata->max_limit_perf,
policy->boost_enabled);
}
amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
cpudata->max_limit_perf, false);
@@ -1641,6 +1656,13 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
max_perf = READ_ONCE(cpudata->highest_perf);
if (trace_amd_pstate_epp_perf_enabled()) {
trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
cpudata->epp_cached,
AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
max_perf, cpudata->boost_state);
}
amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
amd_pstate_set_epp(cpudata, cpudata->epp_cached);
}
@@ -1669,6 +1691,12 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
mutex_lock(&amd_pstate_limits_lock);
if (trace_amd_pstate_epp_perf_enabled()) {
trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
AMD_CPPC_EPP_BALANCE_POWERSAVE,
min_perf, min_perf, policy->boost_enabled);
}
amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);