@@ -466,4 +466,24 @@ int update_irq_load_avg(struct rq *rq, u64 running)
return ret;
}
-#endif
+#endif /* CONFIG_HAVE_SCHED_AVG_IRQ */
+
+/*
+ * Approximate the new util_avg value assuming an entity has continued to run
+ * for @delta us.
+ */
+unsigned long approximate_util_avg(unsigned long util, u64 delta)
+{
+ struct sched_avg sa = {
+ .util_sum = util * PELT_MIN_DIVIDER,
+ .util_avg = util,
+ };
+
+ if (unlikely(!delta))
+ return util;
+
+ accumulate_sum(delta, &sa, 1, 0, 1);
+ ___update_load_avg(&sa, 0);
+
+ return sa.util_avg;
+}
@@ -3002,6 +3002,8 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
unsigned long min,
unsigned long max);
+unsigned long approximate_util_avg(unsigned long util, u64 delta);
+
/*
 * DVFS decisions are made at discrete points. If CPU stays busy, the util will
* continue to grow, which means it could need to run at a higher frequency
Given a util_avg value, approximate_util_avg() returns the expected
value after the entity has continued to run for a further @delta us.
This will be useful in later patches to help replace some magic margins
with more deterministic behavior.

Signed-off-by: Qais Yousef (Google) <qyousef@layalina.io>
---
 kernel/sched/pelt.c  | 22 +++++++++++++++++++++-
 kernel/sched/sched.h |  2 ++
 2 files changed, 23 insertions(+), 1 deletion(-)
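An illustrative note, not part of the patch: accumulate_sum() walks
@delta in 1024us PELT periods with a decay factor y where y^32 = 0.5,
so for a fully running entity util_avg converges geometrically toward
SCHED_CAPACITY_SCALE (1024). A hypothetical caller could use the new
helper to act on utilization growth one tick ahead. cpu_util_cfs(),
TICK_USEC and arch_scale_cpu_capacity() are existing kernel symbols,
but the snippet below is only a sketch of one possible use, not code
from this series:

	unsigned long util = cpu_util_cfs(cpu);
	/* util_avg expected if the CPU keeps running for one more tick */
	unsigned long future_util = approximate_util_avg(util, TICK_USEC);

	if (future_util >= arch_scale_cpu_capacity(cpu)) {
		/* e.g. request extra frequency headroom ahead of time */
	}

Passing TICK_USEC keeps the units consistent with the @delta us
expected by the helper, since a PELT period is 1024us (~1ms).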