@@ -8456,22 +8456,32 @@ static struct rq *find_busiest_queue(struct lb_env *env,
  */
 #define MAX_PINNED_INTERVAL	512
 
-static int need_active_balance(struct lb_env *env)
+static inline bool
+asym_active_balance(enum cpu_idle_type idle, unsigned int flags, int dst, int src)
 {
-	struct sched_domain *sd = env->sd;
-
-	if (env->idle != CPU_NOT_IDLE) {
+	if (idle != CPU_NOT_IDLE) {
 
 		/*
 		 * ASYM_PACKING needs to force migrate tasks from busy but
 		 * lower priority CPUs in order to pack all tasks in the
 		 * highest priority CPUs.
 		 */
-		if ((sd->flags & SD_ASYM_PACKING) &&
-		    sched_asym_prefer(env->dst_cpu, env->src_cpu))
-			return 1;
+		if ((flags & SD_ASYM_PACKING) &&
+		    sched_asym_prefer(dst, src))
+			return true;
 	}
+	return false;
+}
+
+static int need_active_balance(struct lb_env *env)
+{
+	struct sched_domain *sd = env->sd;
+
+
+	if (asym_active_balance(env->idle, sd->flags, env->dst_cpu, env->src_cpu))
+		return 1;
+
 
 	/*
 	 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
 	 * It's worth migrating the task if the src_cpu's capacity is reduced
@@ -8749,7 +8759,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	} else
 		sd->nr_balance_failed = 0;
 
-	if (likely(!active_balance)) {
+	if (likely(!active_balance) ||
+	    asym_active_balance(env.idle, sd->flags, env.dst_cpu, env.src_cpu)) {
 		/* We were unbalanced, so reset the balancing interval */
 		sd->balance_interval = sd->min_interval;
 	} else {
In case of active balance, we increase the balance interval to cover
pinned tasks cases not covered by all_pinned logic. Nevertheless, the
active migration triggered by asym packing should be treated as the
normal unbalanced case and should reset the interval to its default
value; otherwise, active migration for asym packing can easily be
delayed for hundreds of ms because of this all_pinned detection
mechanism.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 kernel/sched/fair.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

--
2.7.4
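
For reviewers who want to see the interval behaviour in isolation, below is a
small stand-alone sketch (plain user-space C, not kernel code; the helper
names, the toy_domain struct and the 8 ms starting value are made up for
illustration) of how sd->balance_interval is updated at the end of
load_balance(): before this patch an asym packing active migration takes the
pinned-task back-off path and the interval keeps doubling up to
MAX_PINNED_INTERVAL, whereas with the patch it is reset to min_interval like
any other successful balance.

/*
 * Stand-alone model (not kernel code) of the balance_interval update at
 * the end of load_balance().  Field names mirror struct sched_domain,
 * the millisecond values are arbitrary.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_PINNED_INTERVAL	512

struct toy_domain {
	unsigned long min_interval;		/* default interval in ms */
	unsigned long balance_interval;		/* current interval in ms */
};

static void update_interval(struct toy_domain *sd, bool active_balance,
			    bool asym_migration)
{
	if (!active_balance || asym_migration) {
		/* Normal unbalanced case: reset to the default interval */
		sd->balance_interval = sd->min_interval;
	} else if (sd->balance_interval < MAX_PINNED_INTERVAL) {
		/* Pinned tasks forced an active balance: back off */
		sd->balance_interval *= 2;
	}
}

int main(void)
{
	struct toy_domain sd = { .min_interval = 8, .balance_interval = 8 };
	int i;

	/* Old behaviour: asym packing migrations hit the back-off path */
	for (i = 0; i < 6; i++)
		update_interval(&sd, true, false);
	printf("without the patch: %lu ms\n", sd.balance_interval);	/* 512 ms */

	/* New behaviour: an asym packing migration resets the interval */
	update_interval(&sd, true, true);
	printf("with the patch:    %lu ms\n", sd.balance_interval);	/* 8 ms */

	return 0;
}

The point of the sketch is only the design choice the changelog describes: an
asym packing migration is a wanted, successful rebalance, so it should follow
the "reset to min_interval" branch instead of the exponential back-off meant
for migrations blocked by pinned tasks.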