@@ -1015,7 +1015,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 }
 
 static unsigned long weighted_cpuload(const int cpu);
-static unsigned long target_load(int cpu, int imbalance_pct);
+static unsigned long biased_load(int cpu, int imbalance_pct);
 static unsigned long power_of(int cpu);
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
 
@@ -3958,7 +3958,7 @@ static unsigned long weighted_cpuload(const int cpu)
  * Return a high guess at the load of a migration-target cpu weighted
  * according to the runnable time and "nice" value.
  */
-static unsigned long target_load(int cpu, int imbalance_pct)
+static unsigned long biased_load(int cpu, int imbalance_pct)
 {
 	unsigned long total = weighted_cpuload(cpu);
 
@@ -4286,7 +4286,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 			if (local_group)
 				load = weighted_cpuload(i);
 			else
-				load = target_load(i, imbalance);
+				load = biased_load(i, imbalance);
 
 			avg_load += load;
 		}
@@ -5737,7 +5737,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 		/* Bias balancing toward cpus of our domain */
 		if (local_group && env->idle != CPU_IDLE)
-			load = target_load(i, bias);
+			load = biased_load(i, bias);
 		else
 			load = weighted_cpuload(i);
 
There is no source_load() now, so it is better to rename target_load() to
reflect its real meaning: biased_load().

Suggested-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Alex Shi <alex.shi@linaro.org>
---
 kernel/sched/fair.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
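A note for readers of this patch: the hunk at line 3958 cuts off after the
first line of biased_load()'s body, so the biasing itself is not visible
here. Below is a rough standalone sketch of the idea; the
total * imbalance_pct / 100 scaling and the sample load values are
assumptions for illustration only, not code from this series.

#include <stdio.h>

/* Stand-in for the kernel's weighted_cpuload(); the values are made up. */
static unsigned long weighted_cpuload(const int cpu)
{
	static const unsigned long load[] = { 1024, 800, 1536, 512 };

	return load[cpu % 4];
}

/*
 * Assumed shape of biased_load(): inflate the raw runnable load of a
 * candidate cpu by imbalance_pct, a percentage such as 125 for +25%.
 */
static unsigned long biased_load(int cpu, int imbalance_pct)
{
	unsigned long total = weighted_cpuload(cpu);

	return total * imbalance_pct / 100;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%d: raw=%4lu biased=%4lu\n",
		       cpu, weighted_cpuload(cpu), biased_load(cpu, 125));

	return 0;
}

In both callers the bias acts as hysteresis: find_idlest_group() inflates
the estimate for remote groups so a waking task stays in its own domain
unless another group is clearly idler, while update_sg_lb_stats() inflates
the local group so a busy cpu pulls tasks only when the imbalance clearly
exceeds the bias. Either way, a load difference smaller than what
imbalance_pct expresses is not considered worth a migration.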