[01/15] cpufreq/amd-pstate: Add trace event for EPP perf updates

Message ID 20241205222847.7889-2-mario.limonciello@amd.com
State Superseded
Series amd-pstate 6.14 cleanups and improvements

Commit Message

Mario Limonciello Dec. 5, 2024, 10:28 p.m. UTC
In "active" mode the most important thing for debugging whether
an issue is hardware or software based is to look at what was the
last thing written to the CPPC request MSR or shared memory region.

The 'amd_pstate_epp_perf' trace event shows the values being written
for all CPUs.

Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
 drivers/cpufreq/amd-pstate-trace.h | 45 ++++++++++++++++++++++++++++++
 drivers/cpufreq/amd-pstate.c       | 28 +++++++++++++++++++
 2 files changed, 73 insertions(+)
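
For reference, a minimal sketch of how the new tracepoint can be enabled at
runtime, assuming it is registered under the existing amd_pstate trace system
in tracefs (the path below is the default tracefs mount point):

    # enable only the new EPP tracepoint and stream the records
    echo 1 > /sys/kernel/tracing/events/amd_pstate/amd_pstate_epp_perf/enable
    cat /sys/kernel/tracing/trace_pipe

Based on the TP_printk() format in the patch, each record should look roughly
like "cpu2: [10<->166]/166, epp=128, boost=1" (illustrative values), i.e. the
min/max perf values, the highest perf, the EPP value and the boost state that
were last requested for that CPU.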

Comments

Yuan, Perry Dec. 6, 2024, 5:05 a.m. UTC | #1

> -----Original Message-----
> From: Limonciello, Mario <Mario.Limonciello@amd.com>
> Sent: Friday, December 6, 2024 6:29 AM
> To: Shenoy, Gautham Ranjal <gautham.shenoy@amd.com>
> Cc: Yuan, Perry <Perry.Yuan@amd.com>; linux-kernel@vger.kernel.org;
> linux-pm@vger.kernel.org; Ugwekar, Dhananjay <Dhananjay.Ugwekar@amd.com>;
> Limonciello, Mario <Mario.Limonciello@amd.com>
> Subject: [PATCH 01/15] cpufreq/amd-pstate: Add trace event for EPP perf updates
>
> In "active" mode the most important thing for debugging whether an issue is
> hardware or software based is to look at what was the last thing written to the CPPC
> request MSR or shared memory region.
>
> The 'amd_pstate_epp_perf' trace event shows the values being written for all CPUs.
>
> Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>

LGTM

Reviewed-by: Perry Yuan <perry.yuan@amd.com>

Gautham R. Shenoy Dec. 6, 2024, 5:44 a.m. UTC | #2
On Thu, Dec 05, 2024 at 04:28:33PM -0600, Mario Limonciello wrote:
> In "active" mode the most important thing for debugging whether
> an issue is hardware or software based is to look at what was the
> last thing written to the CPPC request MSR or shared memory region.
> 
> The 'amd_pstate_epp_perf' trace event shows the values being written
> for all CPUs.
> 
> Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>

Thank you, Mario, for adding this. This is useful for correlating the
frequencies requested by userspace with the min_perf/max_perf/epp
values chosen by the hardware.

The patch looks good to me.

Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>

--
Thanks and Regards
gautham.
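
To make the correlation Gautham mentions concrete, one possible check (a
sketch only; the cpufreq sysfs attributes used here are the standard ones and
are not part of this patch) is to change the policy from userspace with the
event enabled as above and confirm that the next traced record reflects it:

    # request a different energy/performance preference for cpu0
    echo balance_power > /sys/devices/system/cpu/cpu0/cpufreq/energy_performance_preference
    # lower the maximum frequency, which should shrink max_perf in the event
    echo 2000000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
    # the most recent cpu0 record shows the min/max perf and epp actually written
    grep 'cpu0:' /sys/kernel/tracing/trace | tail -n 1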

Patch

diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 35f38ae67fb13..e2221a4b6901c 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -88,6 +88,51 @@  TRACE_EVENT(amd_pstate_perf,
 		 )
 );
 
+TRACE_EVENT(amd_pstate_epp_perf,
+
+	TP_PROTO(unsigned int cpu_id,
+		 unsigned int highest_perf,
+		 unsigned int epp,
+		 unsigned int min_perf,
+		 unsigned int max_perf,
+		 bool boost
+		 ),
+
+	TP_ARGS(cpu_id,
+		highest_perf,
+		epp,
+		min_perf,
+		max_perf,
+		boost),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu_id)
+		__field(unsigned int, highest_perf)
+		__field(unsigned int, epp)
+		__field(unsigned int, min_perf)
+		__field(unsigned int, max_perf)
+		__field(bool, boost)
+		),
+
+	TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+		__entry->highest_perf = highest_perf;
+		__entry->epp = epp;
+		__entry->min_perf = min_perf;
+		__entry->max_perf = max_perf;
+		__entry->boost = boost;
+		),
+
+	TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+		  (unsigned int)__entry->cpu_id,
+		  (unsigned int)__entry->min_perf,
+		  (unsigned int)__entry->max_perf,
+		  (unsigned int)__entry->highest_perf,
+		  (unsigned int)__entry->epp,
+		  (bool)__entry->boost
+		 )
+);
+
 #endif /* _AMD_PSTATE_TRACE_H */
 
 /* This part must be outside protection */
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 66fb7aee95d24..4d1da49d345ec 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -324,6 +324,14 @@  static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
 		return -EBUSY;
 	}
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+					  epp,
+					  AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
+					  AMD_CPPC_MAX_PERF(cpudata->cppc_req_cached),
+					  cpudata->boost_state);
+	}
+
 	ret = amd_pstate_set_epp(cpudata, epp);
 
 	return ret;
@@ -1596,6 +1604,13 @@  static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 
 	WRITE_ONCE(cpudata->cppc_req_cached, value);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
+					  cpudata->min_limit_perf,
+					  cpudata->max_limit_perf,
+					  cpudata->boost_state);
+	}
+
 	amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
 			       cpudata->max_limit_perf, false);
 
@@ -1639,6 +1654,13 @@  static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
 
 	max_perf = READ_ONCE(cpudata->highest_perf);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+					  cpudata->epp_cached,
+					  AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
+					  max_perf, cpudata->boost_state);
+	}
+
 	amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
 	amd_pstate_set_epp(cpudata, cpudata->epp_cached);
 }
@@ -1667,6 +1689,12 @@  static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
 
 	mutex_lock(&amd_pstate_limits_lock);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+					  AMD_CPPC_EPP_BALANCE_POWERSAVE,
+					  min_perf, min_perf, cpudata->boost_state);
+	}
+
 	amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
 	amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);