@@ -57,12 +57,18 @@ struct target_cache {
struct node_cache_attrs cache_attrs;
};
+enum {
+ NODE_ACCESS_CLASS_0 = 0,
+ NODE_ACCESS_CLASS_1,
+ NODE_ACCESS_CLASS_MAX,
+};
+
struct memory_target {
struct list_head node;
unsigned int memory_pxm;
unsigned int processor_pxm;
struct resource memregions;
- struct node_hmem_attrs hmem_attrs[2];
+ struct node_hmem_attrs hmem_attrs[NODE_ACCESS_CLASS_MAX];
struct list_head caches;
struct node_cache_attrs cache_attrs;
bool registered;
@@ -338,10 +344,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
if (mem_hier == ACPI_HMAT_MEMORY) {
target = find_mem_target(targs[targ]);
if (target && target->processor_pxm == inits[init]) {
- hmat_update_target_access(target, type, value, 0);
+ hmat_update_target_access(target, type, value,
+ NODE_ACCESS_CLASS_0);
/* If the node has a CPU, update access 1 */
if (node_state(pxm_to_node(inits[init]), N_CPU))
- hmat_update_target_access(target, type, value, 1);
+ hmat_update_target_access(target, type, value,
+ NODE_ACCESS_CLASS_1);
}
}
}
@@ -600,10 +608,12 @@ static void hmat_register_target_initiators(struct memory_target *target)
*/
if (target->processor_pxm != PXM_INVAL) {
cpu_nid = pxm_to_node(target->processor_pxm);
- register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
+ register_memory_node_under_compute_node(mem_nid, cpu_nid,
+ NODE_ACCESS_CLASS_0);
access0done = true;
if (node_state(cpu_nid, N_CPU)) {
- register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
+ register_memory_node_under_compute_node(mem_nid, cpu_nid,
+ NODE_ACCESS_CLASS_1);
return;
}
}
@@ -644,12 +654,13 @@ static void hmat_register_target_initiators(struct memory_target *target)
}
if (best)
hmat_update_target_access(target, loc->hmat_loc->data_type,
- best, 0);
+ best, NODE_ACCESS_CLASS_0);
}
for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
cpu_nid = pxm_to_node(i);
- register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
+ register_memory_node_under_compute_node(mem_nid, cpu_nid,
+ NODE_ACCESS_CLASS_0);
}
}
@@ -681,11 +692,13 @@ static void hmat_register_target_initiators(struct memory_target *target)
clear_bit(initiator->processor_pxm, p_nodes);
}
if (best)
- hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
+ hmat_update_target_access(target, loc->hmat_loc->data_type, best,
+ NODE_ACCESS_CLASS_1);
}
for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
cpu_nid = pxm_to_node(i);
- register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
+ register_memory_node_under_compute_node(mem_nid, cpu_nid,
+ NODE_ACCESS_CLASS_1);
}
}
@@ -746,8 +759,8 @@ static void hmat_register_target(struct memory_target *target)
if (!target->registered) {
hmat_register_target_initiators(target);
hmat_register_target_cache(target);
- hmat_register_target_perf(target, 0);
- hmat_register_target_perf(target, 1);
+ hmat_register_target_perf(target, NODE_ACCESS_CLASS_0);
+ hmat_register_target_perf(target, NODE_ACCESS_CLASS_1);
target->registered = true;
}
mutex_unlock(&target_lock);
Create enums to provide named indexing for the hmem_attrs array. This is
in preparation for adding generic port support, which will add a third
member to the array to keep the generic port attributes separate from the
memory attributes.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 drivers/acpi/numa/hmat.c | 35 ++++++++++++++++++++++++-----------
 1 file changed, 24 insertions(+), 11 deletions(-)
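
Not part of the patch: below is a minimal, self-contained C sketch of the
pattern this change introduces, i.e. sizing and indexing the per-target
attribute array through a named access-class enum instead of bare 0/1
constants. The struct fields and the commented-out generic-port class name
are placeholders for illustration, not the kernel's actual definitions.

#include <stdio.h>

/* Illustrative stand-in for struct node_hmem_attrs; the real structure
 * carries read/write bandwidth and latency values for an access class. */
struct node_hmem_attrs {
	unsigned int read_bandwidth;
	unsigned int write_bandwidth;
	unsigned int read_latency;
	unsigned int write_latency;
};

/* Named access classes replace the bare 0/1 indices.  A future class for
 * generic port attributes (name below is hypothetical) can simply be
 * appended before the _MAX terminator. */
enum {
	NODE_ACCESS_CLASS_0 = 0,
	NODE_ACCESS_CLASS_1,
	/* NODE_ACCESS_CLASS_GENPORT, hypothetical third member */
	NODE_ACCESS_CLASS_MAX,
};

struct memory_target {
	struct node_hmem_attrs hmem_attrs[NODE_ACCESS_CLASS_MAX];
};

int main(void)
{
	struct memory_target target = { 0 };

	/* Indexing by name documents which access class is being updated. */
	target.hmem_attrs[NODE_ACCESS_CLASS_0].read_latency = 100;
	target.hmem_attrs[NODE_ACCESS_CLASS_1].read_latency = 80;

	printf("access classes: %d\n", NODE_ACCESS_CLASS_MAX);
	return 0;
}

Because NODE_ACCESS_CLASS_MAX is the last enumerator, appending a new class
grows hmem_attrs automatically without touching the call sites that already
index the array by name.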