
[Xen-devel] [PATCH v2 10/11] xen/hvmlite: Boot secondary CPUs

HVMlite secondary VCPUs use the baremetal bringup path (i.e. native_*
smp_ops), but some preparation still needs to be done in the PV code
first.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
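(Reviewer note, not part of the commit message: a rough sketch of the
secondary-CPU bring-up flow this patch produces for HVMlite guests,
pieced together from the xen_hvm_cpu_up() hunk below.
hvmlite_cpu_up_sketch() is a hypothetical name used for illustration
only; the spinlock-kicker setup that follows native_cpu_up() in the
real function is omitted here.)

static int hvmlite_cpu_up_sketch(unsigned int cpu, struct task_struct *tidle)
{
        int rc;

        /* Xen-specific per-CPU interrupts (event channels) come up first. */
        rc = xen_smp_intr_init(cpu);
        WARN_ON(rc);
        if (rc)
                return rc;

        /* PV-style preparation: register this vCPU's context with Xen. */
        rc = cpu_initialize_context(cpu, tidle);
        if (rc) {
                xen_smp_intr_free(cpu);
                return rc;
        }
        xen_pmu_init(cpu);

        /* Then hand off to the baremetal (native_* smp_ops) bringup. */
        rc = native_cpu_up(cpu, tidle);
        if (rc)
                xen_pmu_finish(cpu);

        return rc;
}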
 arch/x86/xen/enlighten.c |    2 +
 arch/x86/xen/pmu.c       |    4 +-
 arch/x86/xen/smp.c       |   64 +++++++++++++++++++++++++++++++++------------
 3 files changed, 51 insertions(+), 19 deletions(-)

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 1409de6..76fb0b2 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1933,6 +1933,8 @@ static void __init xen_hvm_guest_init(void)
                xen_have_vector_callback = 1;
        xen_hvm_smp_init();
        register_cpu_notifier(&xen_hvm_cpu_notifier);
+       if (xen_hvmlite)
+               smp_found_config = 1;
        xen_unplug_emulated_devices();
        x86_init.irqs.intr_init = xen_init_IRQ;
        xen_hvm_init_time_ops();
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 724a087..7bc209b 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -518,7 +518,7 @@ void xen_pmu_init(int cpu)
 
        BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE);
 
-       if (xen_hvm_domain())
+       if (xen_hvm_domain() && !xen_hvmlite)
                return;
 
        xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL);
@@ -556,7 +556,7 @@ void xen_pmu_finish(int cpu)
 {
        struct xen_pmu_params xp;
 
-       if (xen_hvm_domain())
+       if (xen_hvm_domain() && !xen_hvmlite)
                return;
 
        xp.vcpu = cpu;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index fb085ef..a22cae2 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -348,26 +348,31 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
        }
        xen_init_lock_cpu(0);
 
-       smp_store_boot_cpu_info();
-       cpu_data(0).x86_max_cores = 1;
+       if (!xen_hvmlite) {
+               smp_store_boot_cpu_info();
+               cpu_data(0).x86_max_cores = 1;
+
+               for_each_possible_cpu(i) {
+                       zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i),
+                                          GFP_KERNEL);
+                       zalloc_cpumask_var(&per_cpu(cpu_core_map, i),
+                                          GFP_KERNEL);
+                       zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i),
+                                          GFP_KERNEL);
+               }
+               set_cpu_sibling_map(0);
 
-       for_each_possible_cpu(i) {
-               zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-               zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-               zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
+               if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
+                       panic("could not allocate xen_cpu_initialized_map\n");
+
+               cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
        }
-       set_cpu_sibling_map(0);
 
        xen_pmu_init(0);
 
        if (xen_smp_intr_init(0))
                BUG();
 
-       if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
-               panic("could not allocate xen_cpu_initialized_map\n");
-
-       cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
-
        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
@@ -375,8 +380,11 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
                set_cpu_possible(cpu, false);
        }
 
-       for_each_possible_cpu(cpu)
+       for_each_possible_cpu(cpu) {
                set_cpu_present(cpu, true);
+               if (xen_hvmlite)
+                       physid_set(cpu, phys_cpu_present_map);
+       }
 }
 
 static int
@@ -810,10 +818,15 @@ void __init xen_smp_init(void)
 
 static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 {
+       if (xen_hvmlite)
+               xen_smp_prepare_cpus(max_cpus);
+
        native_smp_prepare_cpus(max_cpus);
-       WARN_ON(xen_smp_intr_init(0));
 
-       xen_init_lock_cpu(0);
+       if (!xen_hvmlite) {
+               WARN_ON(xen_smp_intr_init(0));
+               xen_init_lock_cpu(0);
+       }
 }
 
 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
@@ -836,8 +849,21 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
        */
        rc = xen_smp_intr_init(cpu);
        WARN_ON(rc);
-       if (!rc)
-               rc =  native_cpu_up(cpu, tidle);
+
+       if (xen_hvmlite) {
+               rc = cpu_initialize_context(cpu, tidle);
+               if (rc) {
+                       xen_smp_intr_free(cpu);
+                       return rc;
+               }
+               xen_pmu_init(cpu);
+       }
+
+       if (!rc) {
+               rc = native_cpu_up(cpu, tidle);
+               if (rc && xen_hvmlite)
+                       xen_pmu_finish(cpu);
+       }
 
        /*
         * We must initialize the slowpath CPU kicker _after_ the native
@@ -861,4 +887,8 @@ void __init xen_hvm_smp_init(void)
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
        smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
+       if (xen_hvmlite) {
+               smp_ops.play_dead = xen_play_dead;
+               xen_fill_possible_map();
+       }
 }
-- 
1.7.1

