@@ -111,7 +111,7 @@ static void __init parse_dt_topology(void)
continue;
}
- if (parse_cpu_capacity(cn, cpu)) {
+ if (topology_parse_cpu_capacity(cn, cpu)) {
of_node_put(cn);
continue;
}
@@ -160,7 +160,7 @@ static void __init parse_dt_topology(void)
>> (SCHED_CAPACITY_SHIFT-1)) + 1;
if (cap_from_dt)
- normalize_cpu_capacity();
+ topology_normalize_cpu_scale();
}
/*
@@ -173,10 +173,10 @@ static void update_cpu_capacity(unsigned int cpu)
if (!cpu_capacity(cpu) || cap_from_dt)
return;
- set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+ topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
pr_info("CPU%u: update cpu_capacity %lu\n",
- cpu, arch_scale_cpu_capacity(NULL, cpu));
+ cpu, topology_get_cpu_scale(NULL, cpu));
}
#else
@@ -39,7 +39,7 @@ static int __init get_cpu_for_node(struct device_node *node)
for_each_possible_cpu(cpu) {
if (of_get_cpu_node(cpu, NULL) == cpu_node) {
- parse_cpu_capacity(cpu_node, cpu);
+ topology_parse_cpu_capacity(cpu_node, cpu);
of_node_put(cpu_node);
return cpu;
}
@@ -191,7 +191,7 @@ static int __init parse_dt_topology(void)
if (ret != 0)
goto out_map;
- normalize_cpu_capacity();
+ topology_normalize_cpu_scale();
/*
* Check that all cores are in the topology; the SMP code will
@@ -25,12 +25,12 @@
static DEFINE_MUTEX(cpu_scale_mutex);
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
{
return per_cpu(cpu_scale, cpu);
}
-void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
per_cpu(cpu_scale, cpu) = capacity;
}
@@ -42,7 +42,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
struct cpu *cpu = container_of(dev, struct cpu, dev);
return sprintf(buf, "%lu\n",
- arch_scale_cpu_capacity(NULL, cpu->dev.id));
+ topology_get_cpu_scale(NULL, cpu->dev.id));
}
static ssize_t cpu_capacity_store(struct device *dev,
@@ -67,7 +67,7 @@ static ssize_t cpu_capacity_store(struct device *dev,
mutex_lock(&cpu_scale_mutex);
for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
- set_capacity_scale(i, new_capacity);
+ topology_set_cpu_scale(i, new_capacity);
mutex_unlock(&cpu_scale_mutex);
return count;
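
The two sysfs handlers touched above keep their behaviour; only the accessor names change. For orientation, the attribute they back is the per-CPU cpu_capacity file, readable (and, for core siblings, writable) from userspace. A minimal userspace reader, assuming the usual /sys/devices/system/cpu/cpuN/cpu_capacity path (the path itself is not part of this diff):

/* Illustrative only: dump each CPU's capacity as exposed by cpu_capacity_show(). */
#include <stdio.h>

int main(void)
{
        char path[64];
        unsigned long cap;
        FILE *f;
        int cpu;

        for (cpu = 0; ; cpu++) {
                snprintf(path, sizeof(path),
                         "/sys/devices/system/cpu/cpu%d/cpu_capacity", cpu);
                f = fopen(path, "r");
                if (!f)
                        break;  /* no more CPUs, or the attribute is absent */
                if (fscanf(f, "%lu", &cap) == 1)
                        printf("cpu%d: capacity %lu\n", cpu, cap);
                fclose(f);
        }
        return 0;
}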
@@ -98,7 +98,7 @@ static u32 capacity_scale;
static u32 *raw_capacity;
static bool cap_parsing_failed;
-void normalize_cpu_capacity(void)
+void topology_normalize_cpu_scale(void)
{
u64 capacity;
int cpu;
@@ -113,14 +113,14 @@ void normalize_cpu_capacity(void)
cpu, raw_capacity[cpu]);
capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
/ capacity_scale;
- set_capacity_scale(cpu, capacity);
+ topology_set_cpu_scale(cpu, capacity);
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
- cpu, arch_scale_cpu_capacity(NULL, cpu));
+ cpu, topology_get_cpu_scale(NULL, cpu));
}
mutex_unlock(&cpu_scale_mutex);
}
-int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
int ret = 1;
u32 cpu_capacity;
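
topology_normalize_cpu_scale() is plain fixed-point scaling: each raw capacity is shifted up by SCHED_CAPACITY_SHIFT (multiplied by 1024) and divided by the largest raw value seen, so the biggest CPU lands at SCHED_CAPACITY_SCALE and the others are proportional. A self-contained sketch of the same arithmetic, with invented raw values (578/1024, as a capacity-dmips-mhz property might supply):

/* Userspace sketch of the scaling in topology_normalize_cpu_scale();
 * the raw values are invented examples, not taken from this patch. */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10
#define SCHED_CAPACITY_SCALE    (1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
        unsigned long raw_capacity[] = { 578, 578, 1024, 1024 };
        unsigned long capacity_scale = 0, capacity;
        int cpu, ncpus = sizeof(raw_capacity) / sizeof(raw_capacity[0]);

        /* capacity_scale tracks the largest raw value, as in the driver */
        for (cpu = 0; cpu < ncpus; cpu++)
                if (raw_capacity[cpu] > capacity_scale)
                        capacity_scale = raw_capacity[cpu];

        for (cpu = 0; cpu < ncpus; cpu++) {
                capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
                           / capacity_scale;
                /* prints 578 for the little CPUs and 1024 (SCHED_CAPACITY_SCALE) for the big ones */
                printf("cpu%d: raw %lu -> scale %lu\n",
                       cpu, raw_capacity[cpu], capacity);
        }
        return 0;
}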
@@ -185,12 +185,12 @@ init_cpu_capacity_callback(struct notifier_block *nb,
cpus_to_visit,
policy->related_cpus);
for_each_cpu(cpu, policy->related_cpus) {
- raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
+ raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
policy->cpuinfo.max_freq / 1000UL;
capacity_scale = max(raw_capacity[cpu], capacity_scale);
}
if (cpumask_empty(cpus_to_visit)) {
- normalize_cpu_capacity();
+ topology_normalize_cpu_scale();
kfree(raw_capacity);
pr_debug("cpu_capacity: parsing done\n");
cap_parsing_done = true;
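
The arithmetic in init_cpu_capacity_callback() folds the maximum frequency reported by cpufreq into the DT-derived scale: raw_capacity = topology_get_cpu_scale() * max_freq[kHz] / 1000, i.e. the scale times the frequency in MHz. With invented numbers, a little CPU with scale 578 at 1,400,000 kHz gives 578 * 1,400,000 / 1000 = 809,200, while a big CPU with scale 1024 at 2,000,000 kHz gives 2,048,000; once cpus_to_visit is empty, topology_normalize_cpu_scale() rescales against the largest value, leaving roughly 809,200 * 1024 / 2,048,000 ≈ 404 for the little CPU and 1024 for the big one.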
@@ -4,14 +4,14 @@
#ifndef _LINUX_ARCH_TOPOLOGY_H_
#define _LINUX_ARCH_TOPOLOGY_H_
-void normalize_cpu_capacity(void);
+void topology_normalize_cpu_scale(void);
struct device_node;
-int parse_cpu_capacity(struct device_node *cpu_node, int cpu);
+int topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
struct sched_domain;
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu);
+unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
-void set_capacity_scale(unsigned int cpu, unsigned long capacity);
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
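
Taken together, the header now exports the driver-neutral names; the mapping from the old arch-local ones used in the hunks above is:

    parse_cpu_capacity()      -> topology_parse_cpu_capacity()
    normalize_cpu_capacity()  -> topology_normalize_cpu_scale()
    set_capacity_scale()      -> topology_set_cpu_scale()
    arch_scale_cpu_capacity() -> topology_get_cpu_scale()

The calling order is unchanged: topology_parse_cpu_capacity() for each DT cpu node while walking the topology, then topology_normalize_cpu_scale() once parsing is complete, with topology_set_cpu_scale()/topology_get_cpu_scale() as the per-CPU accessors.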