diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -118,6 +118,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DEFINE_PER_CPU_READ_MOSTLY(u64, dvfs_update_delay);
#ifdef CONFIG_SCHED_DEBUG
/*
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -187,13 +187,28 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
* to run at adequate performance point.
*
* This function provides enough headroom to provide adequate performance
- * assuming the CPU continues to be busy.
+ * assuming the CPU continues to be busy. The headroom is based on the
+ * dvfs_update_delay of the cpufreq governor or min(curr.se.slice, TICK_USEC),
+ * whichever is higher.
*
- * At the moment it is a constant multiplication with 1.25.
+ * XXX: Should we provide headroom when the util is decaying?
*/
-static inline unsigned long sugov_apply_dvfs_headroom(unsigned long util)
+static inline unsigned long sugov_apply_dvfs_headroom(unsigned long util, int cpu)
{
- return util + (util >> 2);
+ struct rq *rq = cpu_rq(cpu);
+ u64 delay;
+
+ /*
+ * What is the worst case delay before util_avg gets updated again: a
+ * context switch or a tick?
+ */
+ if (rq->cfs.h_nr_running > 1)
+ delay = min(rq->curr->se.slice / NSEC_PER_USEC, TICK_USEC);
+ else
+ delay = TICK_USEC;
+ delay = max(delay, per_cpu(dvfs_update_delay, cpu));
+
+ return approximate_util_avg(util, delay);
}
unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
@@ -201,7 +216,7 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
unsigned long max)
{
/* Add dvfs headroom to actual utilization */
- actual = sugov_apply_dvfs_headroom(actual);
+ actual = sugov_apply_dvfs_headroom(actual, cpu);
/* Actually we don't need to target the max performance */
if (actual < max)
max = actual;
@@ -579,15 +594,21 @@ rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
struct sugov_policy *sg_policy;
unsigned int rate_limit_us;
+ int cpu;
if (kstrtouint(buf, 10, &rate_limit_us))
return -EINVAL;
tunables->rate_limit_us = rate_limit_us;
- list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
+ list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
+
+ for_each_cpu(cpu, sg_policy->policy->cpus)
+ per_cpu(dvfs_update_delay, cpu) = rate_limit_us;
+ }
+
return count;
}
@@ -868,6 +889,9 @@ static int sugov_start(struct cpufreq_policy *policy)
memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->cpu = cpu;
sg_cpu->sg_policy = sg_policy;
+
+ per_cpu(dvfs_update_delay, cpu) = sg_policy->tunables->rate_limit_us;
+
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
}
return 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3068,6 +3068,15 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
unsigned long approximate_util_avg(unsigned long util, u64 delta);
u64 approximate_runtime(unsigned long util);
+/*
+ * Any governor that relies on the util signal to drive DVFS must populate
+ * these percpu dvfs_update_delay variables.
+ *
+ * The value describes the rate/delay at which the governor sends DVFS
+ * frequency updates to the hardware, in us.
+ */
+DECLARE_PER_CPU_READ_MOSTLY(u64, dvfs_update_delay);
+
/*
* Verify the fitness of task @p to run on @cpu taking into account the
* CPU original capacity and the runtime/deadline ratio of the task.
Replace the 1.25 headroom in sugov_apply_dvfs_headroom() with better
dynamic logic.

Instead of the magical 1.25 headroom, use the new approximate_util_avg()
to provide headroom based on dvfs_update_delay, which is the period at
which the cpufreq governor will send DVFS updates to the hardware, or
min(curr.se.slice, TICK_USEC), which is the max delay for the util
signal to change and prompt a cpufreq update; whichever is higher.

Add a new percpu dvfs_update_delay that can be cheaply accessed whenever
sugov_apply_dvfs_headroom() is called. We expect cpufreq governors that
rely on util to drive their DVFS logic/algorithm to populate these
percpu variables. schedutil is the only such governor at the moment.

The behavior of schedutil will change. Some systems will experience a
faster DVFS ramp-up (because of a higher TICK or rate_limit_us), while
others will experience a slower one. The impact on performance should
not be visible were it not for the black hole effect of utilization
invariance, a problem that will be addressed in later patches.

Later patches will also address how to provide better control over how
fast or slow the system should respond, so that userspace can select its
power/perf/thermal trade-off.

Signed-off-by: Qais Yousef <qyousef@layalina.io>
---
 kernel/sched/core.c              |  1 +
 kernel/sched/cpufreq_schedutil.c | 36 ++++++++++++++++++++++++++------
 kernel/sched/sched.h             |  9 ++++++++
 3 files changed, 40 insertions(+), 6 deletions(-)
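As a rough illustration of how the dynamic headroom compares to the old
constant 1.25 multiplier, here is a small userspace C model. It is not
part of the patch: it assumes approximate_util_avg() projects PELT
forward in time, i.e. that while the CPU stays busy, util_avg converges
geometrically towards SCHED_CAPACITY_SCALE with the usual 32ms halflife;
model_util_avg() and the example delays (rate_limit_us = 2000us, a
4000us tick as on HZ=250) are hypothetical stand-ins.

/*
 * Illustrative userspace model (not part of the patch). Assumes
 * approximate_util_avg() behaves like PELT projected forward: util_avg
 * converges towards SCHED_CAPACITY_SCALE with a 32ms halflife while the
 * CPU remains busy.
 *
 * Build: gcc -O2 model.c -lm
 */
#include <stdio.h>
#include <math.h>

#define SCHED_CAPACITY_SCALE	1024
#define PELT_HALFLIFE_US	32000.0

/* Project util forward by delta_us, assuming the CPU stays 100% busy. */
static unsigned long model_util_avg(unsigned long util, double delta_us)
{
	double decay = pow(0.5, delta_us / PELT_HALFLIFE_US);

	return SCHED_CAPACITY_SCALE -
	       (unsigned long)((SCHED_CAPACITY_SCALE - util) * decay);
}

int main(void)
{
	/*
	 * Compare the old constant 1.25 headroom against the modelled
	 * dynamic one for two example delays: rate_limit_us = 2000us and
	 * a 4000us tick (HZ=250).
	 */
	unsigned long utils[] = { 128, 256, 512, 768 };

	for (int i = 0; i < 4; i++) {
		unsigned long util = utils[i];

		printf("util=%4lu old=%4lu 2000us=%4lu 4000us=%4lu\n",
		       util, util + (util >> 2),
		       model_util_avg(util, 2000.0),
		       model_util_avg(util, 4000.0));
	}

	return 0;
}

With these numbers, low utilization gains more headroom than the old
1.25 multiplier while high utilization gains less, and longer delays
(larger TICK or rate_limit_us) push the result up, which matches the
note above that ramp-up becomes faster on some systems and slower on
others.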