commit cbabd44e171d0bd2169f1c7100e69a9e48289980
Author: Dario Faggioli
Date:   Tue Apr 26 18:56:56 2016 +0200

    xen: sched: avoid spuriously re-enabling IRQs in csched2_switch_sched()

    Interrupts are already disabled when calling the hook (from
    schedule_cpu_switch()), so we must use spin_lock() and spin_unlock().

    Add an ASSERT(), so we will notice if this code and its caller get out
    of sync with respect to disabling interrupts (and add one at the same
    exact occurrence of this pattern in Credit1 too).

    Signed-off-by: Dario Faggioli
---
Cc: George Dunlap
Cc: Wei Liu
--
Changes from v1:
 * add the ASSERT(), as requested by George
 * add the ASSERT in Credit1 too
--
For Wei:
 - the Credit2 spin_lock_irq()-->spin_lock() change needs to go in, as it
   fixes a bug;
 - adding the ASSERT was requested during review;
 - adding the ASSERT in Credit1 is not strictly necessary, but improves
   code quality and consistency at zero cost and risk, so I think we
   should just go for it now, instead of waiting for 4.8 (it's basically
   like I'm adding a comment!).
---
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index db4d42a..a38a63d 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -615,6 +615,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * schedule_cpu_switch()). It actually may or may not be the 'right'
      * one for this cpu, but that is ok for preventing races.
      */
+    ASSERT(!local_irq_is_enabled());
     spin_lock(&prv->lock);
     init_pdata(prv, pdata, cpu);
     spin_unlock(&prv->lock);
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index f3b62ac..f95e509 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2238,7 +2238,8 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * And owning exactly that one (the lock of the old scheduler of this
      * cpu) is what is necessary to prevent races.
      */
-    spin_lock_irq(&prv->lock);
+    ASSERT(!local_irq_is_enabled());
+    spin_lock(&prv->lock);
 
     idle_vcpu[cpu]->sched_priv = vdata;
 
@@ -2263,7 +2264,7 @@
     smp_mb();
     per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
 
-    spin_unlock_irq(&prv->lock);
+    spin_unlock(&prv->lock);
 }
 
 static void
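
For anyone reviewing who wants to see the failure mode in isolation, below
is a minimal standalone sketch. The primitives are toy stand-ins that reuse
the Xen names (they are not the real implementations), and assert() stands
in for Xen's ASSERT(). It shows how spin_unlock_irq() re-enables interrupts
unconditionally, behind the back of a caller that had already disabled
them, which is exactly what switching to plain spin_lock()/spin_unlock()
plus the ASSERT() avoids.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy models of the CPU interrupt flag and of a spinlock. */
static bool irqs_enabled = true;
static bool lock_held;

static void local_irq_disable(void)    { irqs_enabled = false; }
static void local_irq_enable(void)     { irqs_enabled = true; }
static bool local_irq_is_enabled(void) { return irqs_enabled; }

static void spin_lock(void)   { assert(!lock_held); lock_held = true; }
static void spin_unlock(void) { assert(lock_held);  lock_held = false; }

/* The _irq variants disable/enable interrupts unconditionally. */
static void spin_lock_irq(void)   { local_irq_disable(); spin_lock(); }
static void spin_unlock_irq(void) { spin_unlock(); local_irq_enable(); }

/* The old Credit2 hook: unlocking spuriously re-enables interrupts. */
static void switch_sched_buggy(void)
{
    spin_lock_irq();
    /* ... update scheduler state ... */
    spin_unlock_irq();       /* IRQs back on, behind the caller's back */
}

/* The fixed hook: assert the caller's invariant, leave IRQs untouched. */
static void switch_sched_fixed(void)
{
    assert(!local_irq_is_enabled()); /* Xen's ASSERT() plays this role */
    spin_lock();
    /* ... update scheduler state ... */
    spin_unlock();           /* interrupts stay exactly as we found them */
}

int main(void)
{
    local_irq_disable();     /* as schedule_cpu_switch() does before the hook */
    switch_sched_buggy();
    printf("after buggy hook: IRQs %s\n",
           local_irq_is_enabled() ? "enabled (wrong!)" : "disabled");

    local_irq_disable();
    switch_sched_fixed();
    printf("after fixed hook: IRQs %s\n",
           local_irq_is_enabled() ? "enabled (wrong!)" : "disabled");
    return 0;
}

Running this prints "enabled (wrong!)" after the buggy hook and "disabled"
after the fixed one, i.e. only the fixed variant preserves the interrupt
state the caller established.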