WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] Fix CPU hotplug after percpu data handling changes.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Fix CPU hotplug after percpu data handling changes.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 19 May 2010 05:16:17 -0700
Delivery-date: Wed, 19 May 2010 05:24:44 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1274264126 -3600
# Node ID abc9f8e809e555b00d16a94fa4a1635803591fc9
# Parent  f67ae6f9d4107f091d062fc1501a96f873671d10
Fix CPU hotplug after percpu data handling changes.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/ia64/linux-xen/smpboot.c |    2 
 xen/arch/ia64/xen/xensetup.c      |   10 ---
 xen/arch/x86/setup.c              |   13 ----
 xen/arch/x86/smpboot.c            |    3 
 xen/common/domain.c               |    6 -
 xen/common/sched_sedf.c           |    2 
 xen/common/schedule.c             |  116 ++++++++++++++++++++++----------------
 xen/include/xen/domain.h          |    1 
 xen/include/xen/sched-if.h        |    4 -
 9 files changed, 73 insertions(+), 84 deletions(-)

diff -r f67ae6f9d410 -r abc9f8e809e5 xen/arch/ia64/linux-xen/smpboot.c
--- a/xen/arch/ia64/linux-xen/smpboot.c Wed May 19 08:22:06 2010 +0100
+++ b/xen/arch/ia64/linux-xen/smpboot.c Wed May 19 11:15:26 2010 +0100
@@ -526,7 +526,7 @@ do_rest:
 #else
        struct vcpu *v;
 
-       v = alloc_idle_vcpu(cpu);
+       v = idle_vcpu[cpu];
        BUG_ON(v == NULL);
 
        //printf ("do_boot_cpu: cpu=%d, domain=%p, vcpu=%p\n", cpu, idle, v);
diff -r f67ae6f9d410 -r abc9f8e809e5 xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c      Wed May 19 08:22:06 2010 +0100
+++ b/xen/arch/ia64/xen/xensetup.c      Wed May 19 11:15:26 2010 +0100
@@ -341,7 +341,6 @@ void __init start_kernel(void)
     unsigned long dom0_memory_start, dom0_memory_size;
     unsigned long dom0_initrd_start, dom0_initrd_size;
     unsigned long md_end, relo_start, relo_end, relo_size = 0;
-    struct domain *idle_domain;
     struct vcpu *dom0_vcpu0;
     efi_memory_desc_t *kern_md, *last_md, *md;
     unsigned long xenheap_phys_end;
@@ -560,15 +559,8 @@ skip_move:
 
     late_setup_arch(&cmdline);
 
+    idle_vcpu[0] = (struct vcpu*) ia64_r13;
     scheduler_init();
-    idle_vcpu[0] = (struct vcpu*) ia64_r13;
-    idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
-    if ( idle_domain == NULL )
-        BUG();
-    idle_domain->vcpu = idle_vcpu;
-    idle_domain->max_vcpus = NR_CPUS;
-    if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
-        BUG();
 
     alloc_dom_xen_and_dom_io();
     setup_per_cpu_areas();
diff -r f67ae6f9d410 -r abc9f8e809e5 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Wed May 19 08:22:06 2010 +0100
+++ b/xen/arch/x86/setup.c      Wed May 19 11:15:26 2010 +0100
@@ -189,22 +189,9 @@ extern char __init_begin[], __init_end[]
 
 static void __init init_idle_domain(void)
 {
-    struct domain *idle_domain;
-
-    /* Domain creation requires that scheduler structures are initialised. */
     scheduler_init();
-
-    idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
-    if ( idle_domain == NULL )
-        BUG();
-    idle_domain->vcpu = idle_vcpu;
-    idle_domain->max_vcpus = NR_CPUS;
-    if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
-        BUG();
-
     set_current(idle_vcpu[0]);
     this_cpu(curr_vcpu) = current;
-
     setup_idle_pagetable();
 }
 
diff -r f67ae6f9d410 -r abc9f8e809e5 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Wed May 19 08:22:06 2010 +0100
+++ b/xen/arch/x86/smpboot.c    Wed May 19 11:15:26 2010 +0100
@@ -678,9 +678,6 @@ static int cpu_smpboot_alloc(unsigned in
     struct page_info *page;
 #endif
 
-    if ( alloc_idle_vcpu(cpu) == NULL )
-        goto oom;
-
     stack_base[cpu] = alloc_xenheap_pages(STACK_ORDER, 0);
     if ( stack_base[cpu] == NULL )
         goto oom;
diff -r f67ae6f9d410 -r abc9f8e809e5 xen/common/domain.c
--- a/xen/common/domain.c       Wed May 19 08:22:06 2010 +0100
+++ b/xen/common/domain.c       Wed May 19 11:15:26 2010 +0100
@@ -194,12 +194,6 @@ struct vcpu *alloc_vcpu(
     return v;
 }
 
-struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
-{
-    return idle_vcpu[cpu_id] ?: alloc_vcpu(idle_vcpu[0]->domain,
-                                           cpu_id, cpu_id);
-}
-
 static unsigned int __read_mostly extra_dom0_irqs = 256;
 static unsigned int __read_mostly extra_domU_irqs = 32;
 static void __init parse_extra_guest_irqs(const char *s)
diff -r f67ae6f9d410 -r abc9f8e809e5 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Wed May 19 08:22:06 2010 +0100
+++ b/xen/common/sched_sedf.c   Wed May 19 11:15:26 2010 +0100
@@ -123,7 +123,7 @@ struct sedf_cpu_info {
 #define RUNQ(cpu)      (&CPU_INFO(cpu)->runnableq)
 #define WAITQ(cpu)     (&CPU_INFO(cpu)->waitq)
 #define EXTRAQ(cpu,i)  (&(CPU_INFO(cpu)->extraq[i]))
-#define IDLETASK(cpu)  ((struct vcpu *)per_cpu(schedule_data, cpu).idle)
+#define IDLETASK(cpu)  (idle_vcpu[cpu])
 
 #define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period)
 
diff -r f67ae6f9d410 -r abc9f8e809e5 xen/common/schedule.c
--- a/xen/common/schedule.c     Wed May 19 08:22:06 2010 +0100
+++ b/xen/common/schedule.c     Wed May 19 11:15:26 2010 +0100
@@ -211,27 +211,14 @@ int sched_init_vcpu(struct vcpu *v, unsi
     if ( is_idle_domain(d) )
     {
         per_cpu(schedule_data, v->processor).curr = v;
-        per_cpu(schedule_data, v->processor).idle = v;
         v->is_running = 1;
     }
 
     TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
-
-    if ( unlikely(per_cpu(schedule_data, v->processor).sched_priv == NULL)
-         && (DOM2OP(d)->alloc_pdata != NULL) )
-    {
-        per_cpu(schedule_data, v->processor).sched_priv =
-            SCHED_OP(DOM2OP(d), alloc_pdata, processor);
-        if ( per_cpu(schedule_data, v->processor).sched_priv == NULL )
-            return 1;
-    }
 
     v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
     if ( v->sched_priv == NULL )
         return 1;
-
-    if ( is_idle_domain(d) )
-        per_cpu(schedule_data, v->processor).sched_idlevpriv = v->sched_priv;
 
     return 0;
 }
@@ -1090,39 +1077,73 @@ const struct scheduler *scheduler_get_by
     return NULL;
 }
 
-static int cpu_callback(
+static int cpu_schedule_up(unsigned int cpu)
+{
+    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+
+    per_cpu(scheduler, cpu) = &ops;
+    spin_lock_init(&sd->_lock);
+    sd->schedule_lock = &sd->_lock;
+    sd->curr = idle_vcpu[cpu];
+    init_timer(&sd->s_timer, s_timer_fn, NULL, cpu);
+    atomic_set(&sd->urgent_count, 0);
+
+    /* Boot CPU is dealt with later in schedule_init(). */
+    if ( cpu == 0 )
+        return 0;
+
+    if ( idle_vcpu[cpu] == NULL )
+        alloc_vcpu(idle_vcpu[0]->domain, cpu, cpu);
+    if ( idle_vcpu[cpu] == NULL )
+        return -ENOMEM;
+
+    if ( (ops.alloc_pdata != NULL) &&
+         ((sd->sched_priv = ops.alloc_pdata(&ops, cpu)) == NULL) )
+        return -ENOMEM;
+
+    return 0;
+}
+
+static void cpu_schedule_down(unsigned int cpu)
+{
+    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+
+    if ( sd->sched_priv != NULL )
+        SCHED_OP(&ops, free_pdata, sd->sched_priv, cpu);
+
+    kill_timer(&sd->s_timer);
+}
+
+static int cpu_schedule_callback(
     struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
     unsigned int cpu = (unsigned long)hcpu;
+    int rc = 0;
 
     switch ( action )
     {
     case CPU_UP_PREPARE:
-        per_cpu(scheduler, cpu) = &ops;
-        spin_lock_init(&per_cpu(schedule_data, cpu)._lock);
-        per_cpu(schedule_data, cpu).schedule_lock
-            = &per_cpu(schedule_data, cpu)._lock;
-        init_timer(&per_cpu(schedule_data, cpu).s_timer,
-                   s_timer_fn, NULL, cpu);
+        rc = cpu_schedule_up(cpu);
         break;
+    case CPU_UP_CANCELED:
     case CPU_DEAD:
-        kill_timer(&per_cpu(schedule_data, cpu).s_timer);
+        cpu_schedule_down(cpu);
         break;
     default:
         break;
     }
 
-    return NOTIFY_DONE;
-}
-
-static struct notifier_block cpu_nfb = {
-    .notifier_call = cpu_callback
+    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
+}
+
+static struct notifier_block cpu_schedule_nfb = {
+    .notifier_call = cpu_schedule_callback
 };
 
 /* Initialise the data structures. */
 void __init scheduler_init(void)
 {
-    void *hcpu = (void *)(long)smp_processor_id();
+    struct domain *idle_domain;
     int i;
 
     open_softirq(SCHEDULE_SOFTIRQ, schedule);
@@ -1140,53 +1161,54 @@ void __init scheduler_init(void)
         ops = *schedulers[0];
     }
 
-    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
-    register_cpu_notifier(&cpu_nfb);
+    if ( cpu_schedule_up(0) )
+        BUG();
+    register_cpu_notifier(&cpu_schedule_nfb);
 
     printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
     if ( SCHED_OP(&ops, init) )
         panic("scheduler returned error on init\n");
+
+    idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
+    BUG_ON(idle_domain == NULL);
+    idle_domain->vcpu = idle_vcpu;
+    idle_domain->max_vcpus = NR_CPUS;
+    if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
+        BUG();
+    if ( ops.alloc_pdata &&
+         !(this_cpu(schedule_data).sched_priv = ops.alloc_pdata(&ops, 0)) )
+        BUG();
 }
 
 void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     unsigned long flags;
-    struct vcpu *v;
-    void *ppriv, *ppriv_old, *vpriv = NULL;
+    struct vcpu *idle;
+    void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
 
     if ( old_ops == new_ops )
         return;
 
-    v = per_cpu(schedule_data, cpu).idle;
+    idle = idle_vcpu[cpu];
     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
-    if ( c != NULL )
-        vpriv = SCHED_OP(new_ops, alloc_vdata, v, v->domain->sched_priv);
+    vpriv = SCHED_OP(new_ops, alloc_vdata, idle, idle->domain->sched_priv);
 
     spin_lock_irqsave(per_cpu(schedule_data, cpu).schedule_lock, flags);
 
-    if ( c == NULL )
-    {
-        vpriv = v->sched_priv;
-        v->sched_priv = per_cpu(schedule_data, cpu).sched_idlevpriv;
-    }
-    else
-    {
-        v->sched_priv = vpriv;
-        vpriv = NULL;
-    }
     SCHED_OP(old_ops, tick_suspend, cpu);
+    vpriv_old = idle->sched_priv;
+    idle->sched_priv = vpriv;
     per_cpu(scheduler, cpu) = new_ops;
     ppriv_old = per_cpu(schedule_data, cpu).sched_priv;
     per_cpu(schedule_data, cpu).sched_priv = ppriv;
     SCHED_OP(new_ops, tick_resume, cpu);
-    SCHED_OP(new_ops, insert_vcpu, v);
+    SCHED_OP(new_ops, insert_vcpu, idle);
 
     spin_unlock_irqrestore(per_cpu(schedule_data, cpu).schedule_lock, flags);
 
-    if ( vpriv != NULL )
-        SCHED_OP(old_ops, free_vdata, vpriv);
+    SCHED_OP(old_ops, free_vdata, vpriv);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
 }
 
diff -r f67ae6f9d410 -r abc9f8e809e5 xen/include/xen/domain.h
--- a/xen/include/xen/domain.h  Wed May 19 08:22:06 2010 +0100
+++ b/xen/include/xen/domain.h  Wed May 19 11:15:26 2010 +0100
@@ -14,7 +14,6 @@ struct vcpu *alloc_vcpu(
     struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
 int boot_vcpu(
     struct domain *d, int vcpuid, vcpu_guest_context_u ctxt);
-struct vcpu *alloc_idle_vcpu(unsigned int cpu_id);
 struct vcpu *alloc_dom0_vcpu0(void);
 void vcpu_reset(struct vcpu *v);
 
diff -r f67ae6f9d410 -r abc9f8e809e5 xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h        Wed May 19 08:22:06 2010 +0100
+++ b/xen/include/xen/sched-if.h        Wed May 19 11:15:26 2010 +0100
@@ -30,12 +30,10 @@ struct schedule_data {
     spinlock_t         *schedule_lock,
                        _lock;
     struct vcpu        *curr;           /* current task                    */
-    struct vcpu        *idle;           /* idle task for this cpu          */
     void               *sched_priv;
-    void               *sched_idlevpriv; /* default scheduler vcpu data    */
     struct timer        s_timer;        /* scheduling timer                */
     atomic_t            urgent_count;   /* how many urgent vcpus           */
-} __cacheline_aligned;
+};
 
 DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] Fix CPU hotplug after percpu data handling changes., Xen patchbot-unstable <=