Message ID | 1530200714-4504-9-git-send-email-vincent.guittot@linaro.org
---|---
State | Accepted
Commit | dfa444dc2ff62edbaf1ff95ed22dd2ce8a5715da
Series | track CPU utilization
On 28-06-18, 17:45, Vincent Guittot wrote:
> There is no reason why sugov_get_util() and sugov_aggregate_util()
> were in fact separate functions.
>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> [rebased after adding irq tracking and fixed some compilation errors]
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> ---
>  kernel/sched/cpufreq_schedutil.c | 44 ++++++++++++++--------------------------
>  kernel/sched/sched.h             |  2 +-
>  2 files changed, 16 insertions(+), 30 deletions(-)
>
> diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
> index b77bfef..d04f941 100644
> --- a/kernel/sched/cpufreq_schedutil.c
> +++ b/kernel/sched/cpufreq_schedutil.c
> @@ -53,12 +53,7 @@ struct sugov_cpu {
>  	unsigned int iowait_boost_max;
>  	u64 last_update;
>  
> -	/* The fields below are only needed when sharing a policy: */
> -	unsigned long util_cfs;
> -	unsigned long util_dl;
>  	unsigned long bw_dl;
> -	unsigned long util_rt;
> -	unsigned long util_irq;
>  	unsigned long max;
>  
>  	/* The field below is for single-CPU policies only: */
> @@ -182,38 +177,31 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
>  	return cpufreq_driver_resolve_freq(policy, freq);
>  }
>  
> -static void sugov_get_util(struct sugov_cpu *sg_cpu)
> +static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
>  {
>  	struct rq *rq = cpu_rq(sg_cpu->cpu);
> +	unsigned long util, irq, max;
>  
> -	sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
> -	sg_cpu->util_cfs = cpu_util_cfs(rq);
> -	sg_cpu->util_dl = cpu_util_dl(rq);
> -	sg_cpu->bw_dl = cpu_bw_dl(rq);
> -	sg_cpu->util_rt = cpu_util_rt(rq);
> -	sg_cpu->util_irq = cpu_util_irq(rq);
> -}
> -
> -static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
> -{
> -	struct rq *rq = cpu_rq(sg_cpu->cpu);
> -	unsigned long util, max = sg_cpu->max;
> +	sg_cpu->max = max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
> +	sg_cpu->bw_dl = cpu_bw_dl(rq);
>  
>  	if (rq->rt.rt_nr_running)
> -		return sg_cpu->max;
> +		return max;
> +
> +	irq = cpu_util_irq(rq);
>  
> -	if (unlikely(sg_cpu->util_irq >= max))
> +	if (unlikely(irq >= max))
>  		return max;
>  
>  	/* Sum rq utilization */
> -	util = sg_cpu->util_cfs;
> -	util += sg_cpu->util_rt;
> +	util = cpu_util_cfs(rq);
> +	util += cpu_util_rt(rq);
>  
>  	/*
>  	 * Interrupt time is not seen by rqs utilization nso we can compare
>  	 * them with the CPU capacity
>  	 */
> -	if ((util + sg_cpu->util_dl) >= max)
> +	if ((util + cpu_util_dl(rq)) >= max)
>  		return max;
>  
>  	/*
> @@ -231,11 +219,11 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
>  	 */
>  
>  	/* Weight rqs utilization to normal context window */
> -	util *= (max - sg_cpu->util_irq);
> +	util *= (max - irq);
>  	util /= max;
>  
>  	/* Add interrupt utilization */
> -	util += sg_cpu->util_irq;
> +	util += irq;
>  
>  	/* Add DL bandwidth requirement */
>  	util += sg_cpu->bw_dl;
> @@ -418,9 +406,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
>  
>  	busy = sugov_cpu_is_busy(sg_cpu);
>  
> -	sugov_get_util(sg_cpu);
> +	util = sugov_get_util(sg_cpu);
>  	max = sg_cpu->max;
> -	util = sugov_aggregate_util(sg_cpu);
>  	sugov_iowait_apply(sg_cpu, time, &util, &max);
>  	next_f = get_next_freq(sg_policy, util, max);
>  	/*
> @@ -459,9 +446,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
>  		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
>  		unsigned long j_util, j_max;
>  
> -		sugov_get_util(j_sg_cpu);
> +		j_util = sugov_get_util(j_sg_cpu);
>  		j_max = j_sg_cpu->max;
> -		j_util = sugov_aggregate_util(j_sg_cpu);
>  		sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
>  
>  		if (j_util * max > j_max * util) {
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 9438e68..59a633d 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -2219,7 +2219,7 @@ static inline unsigned long cpu_util_cfs(struct rq *rq)
>  
>  static inline unsigned long cpu_util_rt(struct rq *rq)
>  {
> -	return rq->avg_rt.util_avg;
> +	return READ_ONCE(rq->avg_rt.util_avg);
>  }
>  
>  #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)

Acked-by: Viresh Kumar <viresh.kumar@linaro.org>

-- 
viresh
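For readers who want the aggregation rule at a glance, the sketch below restates in plain userspace C the arithmetic performed by the merged sugov_get_util() above: the rq-side utilization (CFS + RT) is scaled to the share of capacity left over after interrupts, then the interrupt utilization and the DL bandwidth requirement are stacked on top, with early bail-outs when either side already saturates the CPU. This is a minimal illustration with hypothetical names (aggregate_util and raw numeric inputs), not the kernel implementation; it omits the rt_nr_running special case, which simply returns the full capacity while RT tasks are runnable, and it assumes a final clamp to the CPU capacity.

#include <stdio.h>

/*
 * Standalone sketch of the utilization aggregation done by the merged
 * sugov_get_util() -- NOT kernel code. Inputs are PELT-style utilization
 * values; "max" is the CPU capacity (e.g. 1024).
 */
static unsigned long aggregate_util(unsigned long cfs, unsigned long rt,
				    unsigned long dl, unsigned long irq,
				    unsigned long bw_dl, unsigned long max)
{
	unsigned long util;

	if (irq >= max)			/* interrupt time alone fills the CPU */
		return max;

	util = cfs + rt;		/* utilization seen by the rq clock */

	if (util + dl >= max)		/* rq-side classes already saturate it */
		return max;

	/* Scale rq utilization to the capacity left after interrupts ... */
	util = util * (max - irq) / max;

	/* ... then add interrupt time and the DL bandwidth requirement. */
	util += irq;
	util += bw_dl;

	return util < max ? util : max;
}

int main(void)
{
	/* 300 of CFS, 100 of RT, 64 of irq, 50 of DL bandwidth on a 1024-capacity CPU. */
	printf("aggregated util = %lu\n", aggregate_util(300, 100, 0, 64, 50, 1024));
	return 0;
}

The sched.h hunk switching cpu_util_rt() to READ_ONCE() is a separate detail: the governor can read rq->avg_rt.util_avg of another CPU while that CPU updates it, so the accessor is presumably annotated to avoid load tearing.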