@@ -760,6 +760,41 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
+#ifdef CONFIG_SCHED_SMT
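+/*
+ * Flags for the SMT sibling domain.  On CPUs with asymmetric SMT
+ * (e.g. POWER7, where a thread runs faster when its lower-numbered
+ * siblings are idle), SD_ASYM_PACKING makes the scheduler pack
+ * tasks onto the lowest-numbered idle threads.
+ */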
+static inline int arch_cpu_smt_flags(int cpu)
+{
+	int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+
+	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
+		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
+		flags |= SD_ASYM_PACKING;
+	}
+
+	return flags;
+}
+#endif
+
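+/*
+ * Arch topology table, ordered from the smallest to the largest
+ * domain span: SMT siblings, then cores sharing a package, then all
+ * CPUs.  Handed to the scheduler core from smp_cpus_done() below.
+ */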
+static struct sched_domain_topology_info topology_info[] = {
+#ifdef CONFIG_SCHED_SMT
+	{ cpu_smt_mask, arch_cpu_smt_flags, SD_NAME(SIBLING) },
+#endif
+#ifdef CONFIG_SCHED_MC
+	{ cpu_coregroup_mask, cpu_coregroup_flags, SD_NAME(MC) },
+#endif
+	{ cpu_cpu_mask, SD_NAME(CPU) },
+};
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	cpumask_var_t old_mask;
@@ -784,15 +819,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 		smp_ops->bringup_done();
 
 	dump_numa_cpu_topology();
-}
-
-int arch_sd_sibling_asym_packing(void)
-{
-	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
-		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
-		return SD_ASYM_PACKING;
-	}
-	return 0;
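+	/* Hand the arch topology table to the scheduler core. */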
+	set_sd_topology_info(topology_info, ARRAY_SIZE(topology_info));
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5233,11 +5233,6 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
 }
 
-int __weak arch_sd_sibling_asym_packing(void)
-{
-	return 0*SD_ASYM_PACKING;
-}
-
 /*
  * Initializers for schedule domains
  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
@@ -5662,14 +5657,6 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
 	if (sd->flags & SD_SHARE_CPUPOWER) {
 		sd->imbalance_pct = 110;
 		sd->smt_gain = 1178; /* ~15% */
-
-		/*
-		 * Call SMT specific arch topology function.
-		 * This goes away once the powerpc arch uses
-		 * the new interface for scheduler domain
-		 * setup.
-		 */
-		sd->flags |= arch_sd_sibling_asym_packing();
 	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
 		sd->cache_nice_tries = 1;
 		sd->busy_idx = 2;