@@ -1180,8 +1180,6 @@ static void update_curr_dl(struct rq *rq)
curr->se.exec_start = now;
cgroup_account_cputime(curr, delta_exec);
- sched_rt_avg_update(rq, delta_exec);
-
if (dl_entity_is_special(dl_se))
return;
@@ -7562,6 +7562,8 @@ static unsigned long scale_rt_capacity(int cpu)
used = div_u64(avg, total);
+ used += READ_ONCE(rq->avg_rt.util_avg);
+ used += READ_ONCE(rq->avg_dl.util_avg);
if (likely(used < SCHED_CAPACITY_SCALE))
return SCHED_CAPACITY_SCALE - used;
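
The effect of the two added lines is easiest to see with concrete numbers. Below is a
hypothetical, standalone illustration (not kernel code; the helper name and sample values
are made up) of how the rt/dl PELT utilization now eats into the capacity that
scale_rt_capacity() leaves over for CFS:

	/*
	 * Standalone sketch of the arithmetic in scale_rt_capacity() after
	 * this hunk: irq/steal contribution plus rt/dl PELT utilization are
	 * summed into "used" and subtracted from the full capacity.
	 */
	#include <stdio.h>

	#define SCHED_CAPACITY_SCALE 1024UL

	static unsigned long remaining_cfs_capacity(unsigned long irq_steal_contrib,
						    unsigned long rt_util_avg,
						    unsigned long dl_util_avg)
	{
		unsigned long used = irq_steal_contrib + rt_util_avg + dl_util_avg;

		if (used < SCHED_CAPACITY_SCALE)
			return SCHED_CAPACITY_SCALE - used;
		return 1;	/* floor, mirroring the clamp the real function applies */
	}

	int main(void)
	{
		/* e.g. ~5% irq/steal, ~20% rt tasks, ~10% dl tasks of a 1024-unit CPU */
		printf("capacity left for CFS: %lu\n",
		       remaining_cfs_capacity(51, 205, 102));
		return 0;
	}
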
@@ -237,7 +237,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna
*/
sa->load_avg = div_u64(load * sa->load_sum, divider);
sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
- sa->util_avg = sa->util_sum / divider;
+ WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
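
The WRITE_ONCE() is presumably needed because scale_rt_capacity() above can now read
avg_rt.util_avg and avg_dl.util_avg from another CPU's rq without holding that rq's lock,
so the store has to pair with those READ_ONCE() loads. A tiny userspace approximation of
the pairing (volatile casts standing in for the real kernel macros, which do more):

	#include <stdio.h>

	/* Crude stand-ins for READ_ONCE/WRITE_ONCE: force a single access the
	 * compiler cannot tear, elide, or re-read. */
	#define WRITE_ONCE_ISH(x, v)	(*(volatile unsigned long *)&(x) = (v))
	#define READ_ONCE_ISH(x)	(*(volatile unsigned long *)&(x))

	static unsigned long util_avg;

	int main(void)
	{
		WRITE_ONCE_ISH(util_avg, 205);			/* updater: ___update_load_avg() */
		printf("%lu\n", READ_ONCE_ISH(util_avg));	/* lockless reader: scale_rt_capacity() */
		return 0;
	}
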
/*
@@ -970,8 +970,6 @@ static void update_curr_rt(struct rq *rq)
curr->se.exec_start = now;
cgroup_account_cputime(curr, delta_exec);
- sched_rt_avg_update(rq, delta_exec);
-
if (!rt_bandwidth_enabled())
return;
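
As the changelog below notes, rt_avg is left tracking only interrupt and stolen virtual
time. For context, the remaining feeder should be the irq/steal accounting on the
rq-clock update path; a sketch from memory of update_rq_clock_task() in this kernel era
(config guard and feature names are my recollection, not part of this patch):

	#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
		if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
			sched_rt_avg_update(rq, irq_delta + steal);
	#endif
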
The utilization of the CPU by rt and dl tasks is now tracked with PELT, so we
can use these metrics and remove their contribution from rt_avg, which will
track only interrupt and stolen virtual time.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 kernel/sched/deadline.c | 2 --
 kernel/sched/fair.c     | 2 ++
 kernel/sched/pelt.c     | 2 +-
 kernel/sched/rt.c       | 2 --
 4 files changed, 3 insertions(+), 5 deletions(-)

-- 
2.7.4