@@ -4686,67 +4686,89 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
return idlest;
}
-/*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
- */
-static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
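+/*
+ * CPUs currently in the idle loop. The mask is set and cleared from
+ * cpu_idle_loop() and read locklessly at wakeup time, so it is only a
+ * hint about which CPUs are idle.
+ */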
+struct cpumask idle_cpus_mask;
+
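+/*
+ * Pick the CPU in @cpus whose idle state has the smallest exit latency.
+ * Ties are broken in favour of the most recently idled CPU, which is
+ * more likely to still have a warm cache.
+ */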
+static int find_shallowest_idle_cpu(struct cpumask *cpus)
{
- unsigned long load, min_load = ULONG_MAX;
- unsigned int min_exit_latency = UINT_MAX;
u64 latest_idle_timestamp = 0;
- int least_loaded_cpu = this_cpu;
- int shallowest_idle_cpu = -1;
- int i;
+	unsigned int min_exit_latency = UINT_MAX;
+ int i, cpu = -1;
- /* Traverse only the allowed CPUs */
- for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
- if (idle_cpu(i)) {
- struct rq *rq = cpu_rq(i);
- struct cpuidle_state *idle = idle_get_state(rq);
-
- if (idle) {
- if (idle->exit_latency < min_exit_latency) {
- /*
- * We give priority to a CPU
- * whose idle state has the
- * smallest exit latency
- * irrespective of any idle
- * timestamp.
- */
- min_exit_latency = idle->exit_latency;
- latest_idle_timestamp = idle->idle_stamp;
- shallowest_idle_cpu = i;
- } else if (idle->exit_latency == min_exit_latency &&
- idle->idle_stamp > latest_idle_timestamp) {
- /*
- * If the CPU is in the same
- * idle state, choose the more
- * recent one as it might have
- * a warmer cache
- */
- latest_idle_timestamp = idle->idle_stamp;
- shallowest_idle_cpu = i;
- }
- } else if (rq->idle_stamp > latest_idle_timestamp) {
- /*
- * If no active idle state, then the
- * most recent idled CPU might have a
- * warmer cache
- */
+	for_each_cpu(i, cpus) {
+ struct rq *rq = cpu_rq(i);
+ struct cpuidle_state *state = idle_get_state(rq);
+
+		if (!state) {
+			/*
+			 * If no active idle state, then the most
+			 * recently idled CPU might have a warmer cache.
+			 */
+			if (rq->idle_stamp > latest_idle_timestamp) {
latest_idle_timestamp = rq->idle_stamp;
- shallowest_idle_cpu = i;
- }
- } else if (shallowest_idle_cpu == -1) {
- load = weighted_cpuload(i);
- if (load < min_load || (load == min_load && i == this_cpu)) {
- min_load = load;
- least_loaded_cpu = i;
+ cpu = i;
}
+
+ continue;
+ }
+
+		if (state->exit_latency < min_exit_latency) {
+			/*
+			 * We give priority to a CPU whose idle state
+			 * has the smallest exit latency irrespective
+			 * of any idle timestamp.
+			 */
+			min_exit_latency = state->exit_latency;
+			latest_idle_timestamp = rq->idle_stamp;
+			cpu = i;
+
+			continue;
+		}
+
+		if (state->exit_latency == min_exit_latency &&
+		    rq->idle_stamp > latest_idle_timestamp) {
+			/*
+			 * If the CPU is in the same idle state, choose
+			 * the most recently idled one as it might have
+			 * a warmer cache.
+			 */
+			latest_idle_timestamp = rq->idle_stamp;
+			cpu = i;
+		}
+ }
+
+ return cpu;
+}
+
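+/*
+ * Pick the CPU in @cpus with the smallest weighted load, preferring
+ * the local CPU when loads are equal.
+ */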
+static int find_least_loaded_cpu(struct cpumask *cpus)
+{
+	unsigned long load, min_load = ULONG_MAX;
+	int i, cpu, this_cpu;
+
+	cpu = this_cpu = smp_processor_id();
+
+	for_each_cpu(i, cpus) {
+		load = weighted_cpuload(i);
+		if (load < min_load || (load == min_load && i == this_cpu)) {
+ min_load = load;
+ cpu = i;
}
}
- return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
+ return cpu;
+}
+
+/*
+ * find_idlest_cpu - find the idlest cpu among the allowed cpus in group:
+ * the shallowest idle CPU if one exists, otherwise the least loaded
+ * running CPU.
+ */
+static int
+find_idlest_cpu(struct sched_group *group, struct task_struct *p)
+{
+ struct cpumask tmp, idle_cpus, running_cpus;
+
+	/* CPUs in the group that the task is allowed to run on */
+	cpumask_and(&tmp, sched_group_cpus(group), tsk_cpus_allowed(p));
+
+	/* Allowed CPUs that are currently idle */
+	cpumask_and(&idle_cpus, &idle_cpus_mask, &tmp);
+
+	/* Allowed CPUs that are currently running tasks */
+	cpumask_andnot(&running_cpus, &tmp, &idle_cpus_mask);
+
+ return !cpumask_empty(&idle_cpus) ?
+ find_shallowest_idle_cpu(&idle_cpus) :
+ find_least_loaded_cpu(&running_cpus);
}
/*
@@ -4887,7 +4909,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
continue;
}
- new_cpu = find_idlest_cpu(group, p, cpu);
+ new_cpu = find_idlest_cpu(group, p);
if (new_cpu == -1 || new_cpu == cpu) {
/* Now try balancing at a lower domain level of cpu */
sd = sd->child;
@@ -217,6 +217,8 @@ use_default:
*/
static void cpu_idle_loop(void)
{
+ int cpu = smp_processor_id();
+
while (1) {
/*
* If the arch has a polling bit, we maintain an invariant:
@@ -230,6 +232,8 @@ static void cpu_idle_loop(void)
__current_set_polling();
tick_nohz_idle_enter();
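+		/* Advertise this CPU as idle for wakeup placement */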
+ cpumask_set_cpu(cpu, &idle_cpus_mask);
+
while (!need_resched()) {
check_pgt_cache();
rmb();
@@ -257,6 +261,8 @@ static void cpu_idle_loop(void)
arch_cpu_idle_exit();
}
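+		/* Leaving the inner idle loop: stop advertising as idle */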
+ cpumask_clear_cpu(cpu, &idle_cpus_mask);
+
/*
* Since we fell out of the loop above, we know
* TIF_NEED_RESCHED must be set, propagate it into
@@ -544,6 +544,8 @@ struct root_domain {
extern struct root_domain def_root_domain;
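+/* CPUs currently in the idle loop, maintained by cpu_idle_loop() */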
+extern struct cpumask idle_cpus_mask;
+
#endif /* CONFIG_SMP */
/*