
[Xen-devel] [PATCH 3/7] xen: rework locking for dump of scheduler info (debug-key r)



so that it is taken care of by the various schedulers, rather
than happening in schedule.c. In fact, it is the schedulers
that know best which locks are necessary for the specific
dumping operations.

While there, fix a few style issues (indentation, trailing
whitespace, superfluous parentheses and missing blank lines
after variable declarations).

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Meng Xu <xumengpanda@xxxxxxxxx>
Cc: Jan Beulich <JBeulich@xxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
---
 xen/common/sched_credit.c  |   42 ++++++++++++++++++++++++++++++++++++++++--
 xen/common/sched_credit2.c |   40 ++++++++++++++++++++++++++++++++--------
 xen/common/sched_rt.c      |    7 +++++--
 xen/common/schedule.c      |    5 ++---
 4 files changed, 79 insertions(+), 15 deletions(-)
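
For reference, the locking discipline that the hunks below implement in
the dump paths is, schematically, the following. This is an illustrative
sketch only, not part of the patch: the function name dump_pcpu_sketch is
made up, the other names are taken from the credit scheduler hunks, and
the actual printing is elided.

    /*
     * Sketch: dumping one pcpu with the new locking scheme. The private
     * scheduler lock is taken first, then the runqueue (per-cpu schedule)
     * lock of the cpu being dumped; they are released in reverse order.
     */
    static void dump_pcpu_sketch(const struct scheduler *ops, int cpu)
    {
        struct csched_private *prv = CSCHED_PRIV(ops);
        spinlock_t *lock;
        unsigned long flags;

        spin_lock_irqsave(&prv->lock, flags); /* private lock first */
        lock = pcpu_schedule_lock(cpu);       /* then this cpu's runqueue lock */

        /* ... walk the runqueue and print each vcpu ... */

        pcpu_schedule_unlock(lock, cpu);
        spin_unlock_irqrestore(&prv->lock, flags);
    }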

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index bec67ff..953ecb0 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -26,6 +26,23 @@
 
 
 /*
+ * Locking:
+ * - Scheduler-lock (a.k.a. runqueue lock):
+ *  + is per-runqueue, and there is one runqueue per-cpu;
+ *  + serializes all runqueue manipulation operations;
+ * - Private data lock (a.k.a. private scheduler lock):
+ *  + serializes accesses to the scheduler global state (weight,
+ *    credit, balance_credit, etc);
+ *  + serializes updates to the domains' scheduling parameters.
+ *
+ * Ordering is "private lock always comes first":
+ *  + if we need both locks, we must acquire the private
+ *    scheduler lock first;
+ *  + if we already own a runqueue lock, we must never acquire
+ *    the private scheduler lock.
+ */
+
+/*
  * Basic constants
  */
 #define CSCHED_DEFAULT_WEIGHT       256
@@ -1750,11 +1767,24 @@ static void
 csched_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct list_head *runq, *iter;
+    struct csched_private *prv = CSCHED_PRIV(ops);
     struct csched_pcpu *spc;
     struct csched_vcpu *svc;
+    spinlock_t *lock;
+    unsigned long flags;
     int loop;
 #define cpustr keyhandler_scratch
 
+    /*
+     * We need both locks:
+     * - csched_dump_vcpu() wants to access domains' scheduling
+     *   parameters, which are protected by the private scheduler lock;
+     * - we scan through the runqueue, so we need the proper runqueue
+     *   lock (the one of this cpu's runqueue).
+     */
+    spin_lock_irqsave(&prv->lock, flags);
+    lock = pcpu_schedule_lock(cpu);
+
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
 
@@ -1781,6 +1811,9 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
             csched_dump_vcpu(svc);
         }
     }
+
+    pcpu_schedule_unlock(lock, cpu);
+    spin_unlock_irqrestore(&prv->lock, flags);
 #undef cpustr
 }
 
@@ -1792,7 +1825,7 @@ csched_dump(const struct scheduler *ops)
     int loop;
     unsigned long flags;
 
-    spin_lock_irqsave(&(prv->lock), flags);
+    spin_lock_irqsave(&prv->lock, flags);
 
 #define idlers_buf keyhandler_scratch
 
@@ -1835,15 +1868,20 @@ csched_dump(const struct scheduler *ops)
         list_for_each( iter_svc, &sdom->active_vcpu )
         {
             struct csched_vcpu *svc;
+            spinlock_t *lock;
+
             svc = list_entry(iter_svc, struct csched_vcpu, active_vcpu_elem);
+            lock = vcpu_schedule_lock(svc->vcpu);
 
             printk("\t%3d: ", ++loop);
             csched_dump_vcpu(svc);
+
+            vcpu_schedule_unlock(lock, svc->vcpu);
         }
     }
 #undef idlers_buf
 
-    spin_unlock_irqrestore(&(prv->lock), flags);
+    spin_unlock_irqrestore(&prv->lock, flags);
 }
 
 static int
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index f0e2c82..564f890 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -51,8 +51,6 @@
  * credit2 wiki page:
  *  http://wiki.xen.org/wiki/Credit2_Scheduler_Development
  * TODO:
- * + Immediate bug-fixes
- *  - Do per-runqueue, grab proper lock for dump debugkey
  * + Multiple sockets
  *  - Detect cpu layout and make runqueue map, one per L2 (make_runq_map())
  *  - Simple load balancer / runqueue assignment
@@ -1800,12 +1798,24 @@ csched2_dump_vcpu(struct csched2_vcpu *svc)
 static void
 csched2_dump_pcpu(const struct scheduler *ops, int cpu)
 {
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     struct list_head *runq, *iter;
     struct csched2_vcpu *svc;
+    unsigned long flags;
+    spinlock_t *lock;
     int loop;
     char cpustr[100];
 
-    /* FIXME: Do locking properly for access to runqueue structures */
+    /*
+     * We need both locks:
+     * - csched2_dump_vcpu() wants to access domains' scheduling
+     *   parameters, which are protected by the private scheduler lock;
+     * - we scan through the runqueue, so we need the proper runqueue
+     *   lock (the one of the runqueue this cpu is associated with).
+     */
+    spin_lock_irqsave(&prv->lock, flags);
+    lock = per_cpu(schedule_data, cpu).schedule_lock;
+    spin_lock(lock);
 
     runq = &RQD(ops, cpu)->runq;
 
@@ -1832,6 +1842,9 @@ csched2_dump_pcpu(const struct scheduler *ops, int cpu)
             csched2_dump_vcpu(svc);
         }
     }
+
+    spin_unlock(lock);
+    spin_unlock_irqrestore(&prv->lock, flags);
 }
 
 static void
@@ -1839,8 +1852,13 @@ csched2_dump(const struct scheduler *ops)
 {
     struct list_head *iter_sdom, *iter_svc;
     struct csched2_private *prv = CSCHED2_PRIV(ops);
+    unsigned long flags;
     int i, loop;
 
+    /* We need the private lock as we access global scheduler data
+     * and (below) the list of active domains. */
+    spin_lock_irqsave(&prv->lock, flags);
+
     printk("Active queues: %d\n"
            "\tdefault-weight     = %d\n",
            cpumask_weight(&prv->active_queues),
@@ -1863,7 +1881,6 @@ csched2_dump(const struct scheduler *ops)
                fraction);
 
     }
-    /* FIXME: Locking! */
 
     printk("Domain info:\n");
     loop = 0;
@@ -1872,20 +1889,27 @@ csched2_dump(const struct scheduler *ops)
         struct csched2_dom *sdom;
         sdom = list_entry(iter_sdom, struct csched2_dom, sdom_elem);
 
-       printk("\tDomain: %d w %d v %d\n\t", 
-              sdom->dom->domain_id, 
-              sdom->weight, 
-              sdom->nr_vcpus);
+        printk("\tDomain: %d w %d v %d\n\t",
+               sdom->dom->domain_id,
+               sdom->weight,
+               sdom->nr_vcpus);
 
         list_for_each( iter_svc, &sdom->vcpu )
         {
             struct csched2_vcpu *svc;
+            spinlock_t *lock;
+
             svc = list_entry(iter_svc, struct csched2_vcpu, sdom_elem);
+            lock = vcpu_schedule_lock(svc->vcpu);
 
             printk("\t%3d: ", ++loop);
             csched2_dump_vcpu(svc);
+
+            vcpu_schedule_unlock(lock, svc->vcpu);
         }
     }
+
+    spin_unlock_irqrestore(&prv->lock, flags);
 }
 
 static void activate_runqueue(struct csched2_private *prv, int rqi)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 3e37e62..167a484 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -253,9 +253,12 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
 static void
 rt_dump_pcpu(const struct scheduler *ops, int cpu)
 {
-    struct rt_vcpu *svc = rt_vcpu(curr_on_cpu(cpu));
+    struct rt_private *prv = rt_priv(ops);
+    unsigned long flags;
 
-    rt_dump_vcpu(ops, svc);
+    spin_lock_irqsave(&prv->lock, flags);
+    rt_dump_vcpu(ops, rt_vcpu(curr_on_cpu(cpu)));
+    spin_unlock_irqrestore(&prv->lock, flags);
 }
 
 static void
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index ef79847..30eec0f 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1515,6 +1515,8 @@ void schedule_dump(struct cpupool *c)
     struct scheduler *sched;
     cpumask_t        *cpus;
 
+    /* Locking, if necessary, must be handled within each scheduler */
+
     sched = (c == NULL) ? &ops : c->sched;
     cpus = cpupool_scheduler_cpumask(c);
     printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
@@ -1522,11 +1524,8 @@ void schedule_dump(struct cpupool *c)
 
     for_each_cpu (i, cpus)
     {
-        spinlock_t *lock = pcpu_schedule_lock(i);
-
         printk("CPU[%02d] ", i);
         SCHED_OP(sched, dump_cpu_state, i);
-        pcpu_schedule_unlock(lock, i);
     }
 }
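
With the pcpu_schedule_lock()/pcpu_schedule_unlock() pair gone from
schedule_dump(), each scheduler's dump hook is now responsible for all of
its own serialization, following the "private lock first, then runqueue
lock" ordering documented in the credit hunk above (taking the two in the
opposite order in some other path could deadlock against the dump). At
vcpu granularity, the same ordering shows up in csched_dump() and
csched2_dump(): with the private lock held, each vcpu's runqueue lock is
taken and dropped around the per-vcpu printout. The sketch below just
restates that inner loop from the credit hunk, with the surrounding
domain walk elided:

    /* Per-vcpu printing, private scheduler lock already held by the caller. */
    list_for_each( iter_svc, &sdom->active_vcpu )
    {
        struct csched_vcpu *svc;
        spinlock_t *lock;

        svc = list_entry(iter_svc, struct csched_vcpu, active_vcpu_elem);
        lock = vcpu_schedule_lock(svc->vcpu);  /* runqueue lock of svc's cpu */

        csched_dump_vcpu(svc);                 /* needs both locks held */

        vcpu_schedule_unlock(lock, svc->vcpu); /* drop before the next vcpu */
    }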
 

