@@ -9176,6 +9176,63 @@ static void update_sg_lb_stats_scores(struct sg_lb_ipcc_stats *sgcs,
sgs->ipcc_score_before = before;
}
+/**
+ * sched_asym_ipcc_prefer - Select a sched group based on its IPCC score
+ * @a:	Load balancing statistics of a sched group
+ * @b:	Load balancing statistics of another sched group
+ *
+ * Returns: true if @a has a higher IPCC score than @b after load
+ * balancing. Returns false otherwise.
+ */
+static bool sched_asym_ipcc_prefer(struct sg_lb_stats *a,
+				   struct sg_lb_stats *b)
+{
+	if (!sched_ipcc_enabled())
+		return false;
+
+	/* @a increases overall throughput after load balance. */
+	if (a->ipcc_score_after > b->ipcc_score_after)
+		return true;
+
+	/*
+	 * If @a and @b yield the same overall throughput, pick @a if
+	 * its current throughput is lower than that of @b.
+	 */
+	if (a->ipcc_score_after == b->ipcc_score_after)
+		return a->ipcc_score_before < b->ipcc_score_before;
+
+	return false;
+}
+
+/**
+ * sched_asym_ipcc_pick - Select a sched group based on its IPCC score
+ * @a:		A scheduling group
+ * @b:		A second scheduling group
+ * @a_stats:	Load balancing statistics of @a
+ * @b_stats:	Load balancing statistics of @b
+ *
+ * Returns: true if @a and @b have the same priority and @a has tasks with
+ * IPCC classes that yield higher overall throughput after load balancing.
+ * Returns false otherwise.
+ */
+static bool sched_asym_ipcc_pick(struct sched_group *a,
+				 struct sched_group *b,
+				 struct sg_lb_stats *a_stats,
+				 struct sg_lb_stats *b_stats)
+{
+	/*
+	 * Only use the class-specific preference selection if both sched
+	 * groups have the same priority. We are not looking at a specific
+	 * CPU, so we do not care about the idle state of the groups'
+	 * preferred CPU.
+	 */
+	if (arch_asym_cpu_priority(a->asym_prefer_cpu, false) !=
+	    arch_asym_cpu_priority(b->asym_prefer_cpu, false))
+		return false;
+
+	return sched_asym_ipcc_prefer(a_stats, b_stats);
+}
+
#else /* CONFIG_IPC_CLASSES */
static void update_sg_lb_ipcc_stats(struct sg_lb_ipcc_stats *sgcs,
				    struct rq *rq)
@@ -9193,6 +9250,14 @@ static void update_sg_lb_stats_scores(struct sg_lb_ipcc_stats *sgcs,
{
}
+static bool sched_asym_ipcc_pick(struct sched_group *a,
+				 struct sched_group *b,
+				 struct sg_lb_stats *a_stats,
+				 struct sg_lb_stats *b_stats)
+{
+	return false;
+}
+
#endif /* CONFIG_IPC_CLASSES */
/**
@@ -9452,6 +9517,16 @@ static bool update_sd_pick_busiest(struct lb_env *env,
				      sds->busiest->asym_prefer_cpu,
				      false))
			return false;
+
+		/*
+		 * Unlike other callers of sched_asym_prefer(), here both @sg
+		 * and @sds::busiest have tasks running. When they have equal
+		 * priority, their IPC class scores can be used to select a
+		 * better busiest.
+		 */
+		if (sched_asym_ipcc_pick(sds->busiest, sg, &sds->busiest_stat, sgs))
+			return false;
+
break;
case group_misfit_task:

As it iterates, update_sd_pick_busiest() keeps on selecting as busiest
sched groups of identical priority. Since both groups have the same
priority, either group is a good choice. The IPCC score of the tasks
placed in a sched group can break this tie. Pick as busiest the sched
group that yields a higher IPCC score after load balancing.

Cc: Ben Segall <bsegall@google.com>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: Len Brown <len.brown@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim C. Chen <tim.c.chen@intel.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: x86@kernel.org
Cc: linux-pm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
---
Changes since v1:
 * Added a comment to clarify why sched_asym_prefer() needs a tie
   breaker only in update_sd_pick_busiest(). (PeterZ)
 * Renamed functions for accuracy:
   sched_asym_class_prefer() >> sched_asym_ipcc_prefer()
   sched_asym_class_pick() >> sched_asym_ipcc_pick()
 * Reworded commit message for clarity.
---
 kernel/sched/fair.c | 75 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)
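
For readers who want to poke at the tie-break ordering outside the kernel
tree, here is a minimal userspace sketch of what sched_asym_ipcc_prefer()
does; the sg_scores struct, ipcc_prefer() helper, and sample values are
made up for illustration (in the kernel the scores live in struct
sg_lb_stats and are filled in by update_sg_lb_stats_scores(), and the
sched_ipcc_enabled() gate is omitted here):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the IPCC fields of struct sg_lb_stats. */
	struct sg_scores {
		unsigned long ipcc_score_before;	/* throughput before balancing */
		unsigned long ipcc_score_after;		/* throughput after balancing */
	};

	/* Same ordering as sched_asym_ipcc_prefer() in the patch above. */
	static bool ipcc_prefer(const struct sg_scores *a, const struct sg_scores *b)
	{
		/* A strictly higher post-balance score wins outright. */
		if (a->ipcc_score_after > b->ipcc_score_after)
			return true;

		/*
		 * On a post-balance tie, prefer the group with the lower
		 * current score: it has more to gain from being balanced.
		 */
		if (a->ipcc_score_after == b->ipcc_score_after)
			return a->ipcc_score_before < b->ipcc_score_before;

		return false;
	}

	int main(void)
	{
		/* Both groups would score 100 after balancing. */
		struct sg_scores a = { .ipcc_score_before = 80, .ipcc_score_after = 100 };
		struct sg_scores b = { .ipcc_score_before = 90, .ipcc_score_after = 100 };

		printf("prefer a: %s\n", ipcc_prefer(&a, &b) ? "yes" : "no");
		return 0;
	}

Built with any C compiler, this prints "prefer a: yes": on equal
post-balance throughput, the group whose current throughput is lower is
picked, which is exactly the tie-break the patch adds to
update_sd_pick_busiest().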