
[Xen-devel] [PATCH 3/9] xen: sched: make locking for {insert, remove}_vcpu consistent



The insert_vcpu() scheduler hook is called with an
inconsistent locking strategy: it is sometimes invoked
while holding the runqueue lock, and sometimes without
it.

In other words, some call sites seem to imply that
locking should be handled by the callers, in schedule.c
--e.g., schedule_cpu_switch(), which acquires the
runqueue lock before calling the hook; others imply
that the specific schedulers should be responsible for
the locking themselves --e.g., sched_move_domain(),
which does not acquire any lock before calling the hook.

The right thing to do seems to be to always defer
locking to the specific schedulers, as they are the
ones that know what, how and when it is best to lock
(as in: runqueue locks, vs. private scheduler locks,
vs. both, etc.), as sketched below.
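
For illustration, this is the rough shape each hook ends
up with (sched_foo_vcpu_insert() is a made-up name for
this sketch; vcpu_schedule_lock_irq() and
vcpu_schedule_unlock_irq() are the existing helpers used
in the hunks below):

    static void
    sched_foo_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
    {
        /* Locking is now entirely the hook's own business. */
        spinlock_t *lock = vcpu_schedule_lock_irq(vc);

        /* ... queue the vCPU on the runqueue, under the lock ... */

        vcpu_schedule_unlock_irq(lock, vc);
    }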

This patch, therefore:
 - removes any locking around insert_vcpu() from
   generic code (schedule.c);
 - adds the _proper_ locking in the hook implementations,
   depending on the scheduler (for instance, credit2
   does that already; credit1 and RTDS need to grab
   the runqueue lock while manipulating runqueues).

In the case of credit1, remove_vcpu() needs some fixing
too, i.e.:
 - it manipulates runqueues, so the runqueue lock must
   be acquired;
 - *_lock_irq() is enough; there is no need for
   _irqsave(), as shown in the sketch after this list.
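
Roughly, the resulting nesting in csched_vcpu_remove()
looks as follows (a sketch of the actual hunk below;
since the outer runqueue lock already disables IRQs,
the inner private lock can be a plain spin_lock()):

    lock = vcpu_schedule_lock_irq(vc);    /* disables IRQs */

    if ( __vcpu_on_runq(svc) )
        __runq_remove(svc);

    spin_lock(&prv->lock);                /* IRQs already off: no _irqsave() */

    if ( !list_empty(&svc->active_vcpu_elem) )
        __csched_vcpu_acct_stop_locked(prv, svc);

    spin_unlock(&prv->lock);
    vcpu_schedule_unlock_irq(lock, vc);   /* re-enables IRQs */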

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Meng Xu <mengxu@xxxxxxxxxxxxx>
---
 xen/common/sched_credit.c |   24 +++++++++++++++++++-----
 xen/common/sched_rt.c     |   10 +++++++++-
 xen/common/schedule.c     |    6 ------
 3 files changed, 28 insertions(+), 12 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index a1945ac..557efaa 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -905,8 +905,19 @@ csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched_vcpu *svc = vc->sched_priv;
 
-    if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
-        __runq_insert(vc->processor, svc);
+    /*
+     * For the idle domain, this is called, during boot, before
+     * alloc_pdata() has been called for the pCPU.
+     */
+    if ( !is_idle_vcpu(vc) )
+    {
+        spinlock_t *lock = vcpu_schedule_lock_irq(vc);
+
+        if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
+            __runq_insert(vc->processor, svc);
+
+        vcpu_schedule_unlock_irq(lock, vc);
+    }
 }
 
 static void
@@ -925,7 +936,7 @@ csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
     struct csched_private *prv = CSCHED_PRIV(ops);
     struct csched_vcpu * const svc = CSCHED_VCPU(vc);
     struct csched_dom * const sdom = svc->sdom;
-    unsigned long flags;
+    spinlock_t *lock;
 
     SCHED_STAT_CRANK(vcpu_destroy);
 
@@ -935,15 +946,18 @@ csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
         vcpu_unpause(svc->vcpu);
     }
 
+    lock = vcpu_schedule_lock_irq(vc);
+
     if ( __vcpu_on_runq(svc) )
         __runq_remove(svc);
 
-    spin_lock_irqsave(&(prv->lock), flags);
+    spin_lock(&(prv->lock));
 
     if ( !list_empty(&svc->active_vcpu_elem) )
         __csched_vcpu_acct_stop_locked(prv, svc);
 
-    spin_unlock_irqrestore(&(prv->lock), flags);
+    spin_unlock(&(prv->lock));
+    vcpu_schedule_unlock_irq(lock, vc);
 
     BUG_ON( sdom == NULL );
     BUG_ON( !list_empty(&svc->runq_elem) );
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 4372486..89f29b2 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -621,18 +621,26 @@ static void
 rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
 {
     struct rt_vcpu *svc = rt_vcpu(vc);
+    spinlock_t *lock;
     s_time_t now = NOW();
 
-    /* not addlocate idle vcpu to dom vcpu list */
+    /*
+     * For the idle domain, this is called, during boot, before
+     * alloc_pdata() has been called for the pCPU.
+     */
     if ( is_idle_vcpu(vc) )
         return;
 
+    lock = vcpu_schedule_lock_irq(vc);
+
     if ( now >= svc->cur_deadline )
         rt_update_deadline(now, svc);
 
     if ( !__vcpu_on_q(svc) && vcpu_runnable(vc) && !vc->is_running )
         __runq_insert(ops, svc);
 
+    vcpu_schedule_unlock_irq(lock, vc);
+
     /* add rt_vcpu svc to scheduler-specific vcpu list of the dom */
     list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
 }
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 3eefed7..4a89222 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1488,9 +1488,7 @@ void __init scheduler_init(void)
 
 int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
-    unsigned long flags;
     struct vcpu *idle;
-    spinlock_t *lock;
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
@@ -1509,8 +1507,6 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
         return -ENOMEM;
     }
 
-    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
-
     SCHED_OP(old_ops, tick_suspend, cpu);
     vpriv_old = idle->sched_priv;
     idle->sched_priv = vpriv;
@@ -1520,8 +1516,6 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     SCHED_OP(new_ops, tick_resume, cpu);
     SCHED_OP(new_ops, insert_vcpu, idle);
 
-    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
-
     SCHED_OP(old_ops, free_vdata, vpriv_old);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
 

