[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC PATCH v2 23/25] ARM: NUMA: Initialize ACPI NUMA



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>

Invoke ACPI NUMA initialization when CONFIG_ACPI_NUMA is enabled.

Signed-off-by: Vijaya Kumar <Vijaya.Kumar@xxxxxxxxxx>
---
 xen/arch/arm/numa/acpi_numa.c | 28 +++++++++++++++++++++++++++-
 xen/arch/arm/numa/numa.c      |  6 ++++++
 xen/common/numa.c             | 14 ++++++++++++++
 xen/include/asm-arm/numa.h    |  1 +
 xen/include/xen/numa.h        |  1 +
 5 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/numa/acpi_numa.c b/xen/arch/arm/numa/acpi_numa.c
index 8f51ed0..574ed45 100644
--- a/xen/arch/arm/numa/acpi_numa.c
+++ b/xen/arch/arm/numa/acpi_numa.c
@@ -29,6 +29,7 @@
 #include <asm/acpi.h>
 
 extern nodemask_t processor_nodes_parsed;
+extern nodemask_t memory_nodes_parsed;
 
 /* Holds CPUID to MPIDR mapping read from MADT table. */
 struct cpuid_to_hwid {
@@ -183,7 +184,7 @@ acpi_numa_gicc_affinity_init(const struct acpi_srat_gicc_affinity *pa)
            pxm, mpidr, node);
 }
 
-void __init acpi_map_uid_to_mpidr(void)
+static void __init acpi_map_uid_to_mpidr(void)
 {
     acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                     acpi_parse_madt_handler, NR_CPUS);
@@ -211,6 +212,31 @@ void __init arch_table_parse_srat(void)
                           acpi_parse_gicc_affinity, NR_CPUS);
 }
 
+bool_t __init arch_acpi_numa_init(void)
+{
+    int ret;
+
+    if ( !acpi_disabled )
+    {
+        /*
+         * If the firmware has a DT, the process_memory_node() call
+         * would have added memory blocks, so reset them before
+         * the ACPI NUMA init.
+         */
+        numa_clear_memblks();
+        nodes_clear(memory_nodes_parsed);
+        acpi_map_uid_to_mpidr();
+        ret = acpi_numa_init();
+        if ( ret || srat_disabled() )
+            return 1;
+
+        /* Register acpi node_distance handler */
+        register_node_distance(&acpi_node_distance);
+    }
+
+    return 0;
+}
+
 void __init acpi_numa_arch_fixup(void) {}
 
 /*
diff --git a/xen/arch/arm/numa/numa.c b/xen/arch/arm/numa/numa.c
index 958085c..b5556c6 100644
--- a/xen/arch/arm/numa/numa.c
+++ b/xen/arch/arm/numa/numa.c
@@ -152,12 +152,18 @@ void __init numa_init(void)
     if ( is_numa_off() )
         goto no_numa;
 
+#ifdef CONFIG_ACPI_NUMA
+    ret = arch_acpi_numa_init();
+    if ( ret )
+        printk(XENLOG_WARNING "ACPI NUMA init failed\n");
+#else
     if ( !dt_numa )
         goto no_numa;
 
     ret = dt_numa_init();
     if ( ret )
         printk(XENLOG_WARNING "DT NUMA init failed\n");
+#endif
 
     for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
     {
diff --git a/xen/common/numa.c b/xen/common/numa.c
index f2ac726..aca2386 100644
--- a/xen/common/numa.c
+++ b/xen/common/numa.c
@@ -84,6 +84,20 @@ nodeid_t get_memblk_nodeid(int id)
     return memblk_nodeid[id];
 }
 
+void __init numa_clear_memblks(void)
+{
+    unsigned int i;
+
+    for ( i = 0; i < get_num_node_memblks(); i++ )
+    {
+        node_memblk_range[i].start = 0;
+        node_memblk_range[i].end = 0;
+        memblk_nodeid[i] = NUMA_NO_NODE;
+    }
+
+    num_node_memblks = 0;
+}
+
 int __init get_mem_nodeid(paddr_t start, paddr_t end)
 {
     unsigned int i;
diff --git a/xen/include/asm-arm/numa.h b/xen/include/asm-arm/numa.h
index 1d4dc98..f932ba3 100644
--- a/xen/include/asm-arm/numa.h
+++ b/xen/include/asm-arm/numa.h
@@ -24,6 +24,7 @@ static inline nodeid_t acpi_get_nodeid(uint64_t hwid)
 
 #ifdef CONFIG_NUMA
 extern void numa_init(void);
+extern bool_t arch_acpi_numa_init(void);
 extern int dt_numa_init(void);
 extern void numa_set_cpu_node(int cpu, unsigned int nid);
 extern void numa_add_cpu(int cpu);
diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
index c3b4adc..6c885bd 100644
--- a/xen/include/xen/numa.h
+++ b/xen/include/xen/numa.h
@@ -59,4 +59,5 @@ void set_acpi_numa(bool val);
 int get_numa_fake(void);
 extern int numa_emulation(uint64_t start_pfn, uint64_t end_pfn);
 extern void numa_dummy_init(uint64_t start_pfn, uint64_t end_pfn);
+extern void numa_clear_memblks(void);
 #endif /* _XEN_NUMA_H */
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.