
[Xen-devel] [PATCH] credit2: use unique names


  • To: xen-devel@xxxxxxxxxxxxx, george.dunlap@xxxxxxxxxxxxx
  • From: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
  • Date: Wed, 30 Apr 2014 13:35:30 +0200
  • Cc: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
  • Delivery-date: Wed, 30 Apr 2014 11:36:12 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

Avoid names duplicated with the credit scheduler. This makes life easier when
debugging with tools like cscope or crash.
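
Both sched_credit.c and sched_credit2.c defined the same identifiers, e.g.
struct csched_vcpu, struct csched_private and the CSCHED_PRIV() macro, so a
symbol lookup could resolve to either scheduler. A minimal sketch of the
collision this rename removes (illustration only, not part of the patch):

    /* sched_credit.c keeps the old prefix: */
    #define CSCHED_PRIV(_ops) \
        ((struct csched_private *)((_ops)->sched_data))

    /* sched_credit2.c now gets its own: */
    #define CSCHED2_PRIV(_ops) \
        ((struct csched2_private *)((_ops)->sched_data))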

Signed-off-by: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
---
 xen/common/sched_credit2.c |  474 ++++++++++++++++++++++----------------------
 1 file changed, 237 insertions(+), 237 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 97f4049..1ca521b 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -3,7 +3,7 @@
  * (C) 2009 - George Dunlap - Citrix Systems R&D UK, Ltd
  ****************************************************************************
  *
- *        File: common/csched_credit2.c
+ *        File: common/sched_credit2.c
  *      Author: George Dunlap
  *
  * Description: Credit-based SMP CPU scheduler
@@ -108,29 +108,29 @@
  * Basic constants
  */
 /* Default weight: How much a new domain starts with */
-#define CSCHED_DEFAULT_WEIGHT       256
+#define CSCHED2_DEFAULT_WEIGHT       256
 /* Min timer: Minimum length a timer will be set, to
  * achieve efficiency */
-#define CSCHED_MIN_TIMER            MICROSECS(500)
+#define CSCHED2_MIN_TIMER            MICROSECS(500)
 /* Amount of credit VMs begin with, and are reset to.
  * ATM, set so that highest-weight VMs can only run for 10ms
  * before a reset event. */
-#define CSCHED_CREDIT_INIT          MILLISECS(10)
+#define CSCHED2_CREDIT_INIT          MILLISECS(10)
 /* Carryover: How much "extra" credit may be carried over after
  * a reset. */
-#define CSCHED_CARRYOVER_MAX        CSCHED_MIN_TIMER
+#define CSCHED2_CARRYOVER_MAX        CSCHED2_MIN_TIMER
 /* Stickiness: Cross-L2 migration resistance.  Should be less than
  * MIN_TIMER. */
-#define CSCHED_MIGRATE_RESIST       ((opt_migrate_resist)*MICROSECS(1))
+#define CSCHED2_MIGRATE_RESIST       ((opt_migrate_resist)*MICROSECS(1))
 /* How much to "compensate" a vcpu for L2 migration */
-#define CSCHED_MIGRATE_COMPENSATION MICROSECS(50)
+#define CSCHED2_MIGRATE_COMPENSATION MICROSECS(50)
 /* Reset: Value below which credit will be reset. */
-#define CSCHED_CREDIT_RESET         0
+#define CSCHED2_CREDIT_RESET         0
 /* Max timer: Maximum time a guest can be run for. */
-#define CSCHED_MAX_TIMER            MILLISECS(2)
+#define CSCHED2_MAX_TIMER            MILLISECS(2)
 
 
-#define CSCHED_IDLE_CREDIT                 (-(1<<30))
+#define CSCHED2_IDLE_CREDIT                 (-(1<<30))
 
 /*
  * Flags
@@ -138,8 +138,8 @@
 /* CSFLAG_scheduled: Is this vcpu either running on, or context-switching off,
  * a physical cpu?
  * + Accessed only with runqueue lock held
- * + Set when chosen as next in csched_schedule().
- * + Cleared after context switch has been saved in csched_context_saved()
+ * + Set when chosen as next in csched2_schedule().
+ * + Cleared after context switch has been saved in csched2_context_saved()
  * + Checked in vcpu_wake to see if we can add to the runqueue, or if we should
  *   set CSFLAG_delayed_runq_add
  * + Checked to be false in runq_insert.
@@ -148,9 +148,9 @@
 #define CSFLAG_scheduled (1<<__CSFLAG_scheduled)
 /* CSFLAG_delayed_runq_add: Do we need to add this to the runqueue once it'd done
  * being context switched out?
- * + Set when scheduling out in csched_schedule() if prev is runnable
- * + Set in csched_vcpu_wake if it finds CSFLAG_scheduled set
- * + Read in csched_context_saved().  If set, it adds prev to the runqueue and
+ * + Set when scheduling out in csched2_schedule() if prev is runnable
+ * + Set in csched2_vcpu_wake if it finds CSFLAG_scheduled set
+ * + Read in csched2_context_saved().  If set, it adds prev to the runqueue and
  *   clears the bit.
  */
 #define __CSFLAG_delayed_runq_add 2
@@ -169,14 +169,14 @@ integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
 /*
  * Useful macros
  */
-#define CSCHED_PRIV(_ops)   \
-    ((struct csched_private *)((_ops)->sched_data))
-#define CSCHED_VCPU(_vcpu)  ((struct csched_vcpu *) (_vcpu)->sched_priv)
-#define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
+#define CSCHED2_PRIV(_ops)   \
+    ((struct csched2_private *)((_ops)->sched_data))
+#define CSCHED2_VCPU(_vcpu)  ((struct csched2_vcpu *) (_vcpu)->sched_priv)
+#define CSCHED2_DOM(_dom)    ((struct csched2_dom *) (_dom)->sched_priv)
 /* CPU to runq_id macro */
-#define c2r(_ops, _cpu)     (CSCHED_PRIV(_ops)->runq_map[(_cpu)])
+#define c2r(_ops, _cpu)     (CSCHED2_PRIV(_ops)->runq_map[(_cpu)])
 /* CPU to runqueue struct macro */
-#define RQD(_ops, _cpu)     (&CSCHED_PRIV(_ops)->rqd[c2r(_ops, _cpu)])
+#define RQD(_ops, _cpu)     (&CSCHED2_PRIV(_ops)->rqd[c2r(_ops, _cpu)])
 
 /*
  * Shifts for load average.
@@ -197,7 +197,7 @@ integer_param("credit2_balance_over", opt_overload_balance_tolerance);
 /*
  * Per-runqueue data
  */
-struct csched_runqueue_data {
+struct csched2_runqueue_data {
     int id;
 
     spinlock_t lock;      /* Lock for this runqueue. */
@@ -218,7 +218,7 @@ struct csched_runqueue_data {
 /*
  * System-wide private data
  */
-struct csched_private {
+struct csched2_private {
     spinlock_t lock;
     cpumask_t initialized; /* CPU is initialized for this pool */
     
@@ -226,7 +226,7 @@ struct csched_private {
 
     int runq_map[NR_CPUS];
     cpumask_t active_queues; /* Queues which may have active cpus */
-    struct csched_runqueue_data rqd[NR_CPUS];
+    struct csched2_runqueue_data rqd[NR_CPUS];
 
     int load_window_shift;
 };
@@ -234,14 +234,14 @@ struct csched_private {
 /*
  * Virtual CPU
  */
-struct csched_vcpu {
+struct csched2_vcpu {
     struct list_head rqd_elem;  /* On the runqueue data list */
     struct list_head sdom_elem; /* On the domain vcpu list */
     struct list_head runq_elem; /* On the runqueue         */
-    struct csched_runqueue_data *rqd; /* Up-pointer to the runqueue */
+    struct csched2_runqueue_data *rqd; /* Up-pointer to the runqueue */
 
     /* Up-pointers */
-    struct csched_dom *sdom;
+    struct csched2_dom *sdom;
     struct vcpu *vcpu;
 
     unsigned int weight;
@@ -255,13 +255,13 @@ struct csched_vcpu {
     s_time_t load_last_update;  /* Last time average was updated */
     s_time_t avgload;           /* Decaying queue load */
 
-    struct csched_runqueue_data *migrate_rqd; /* Pre-determined rqd to which to migrate */
+    struct csched2_runqueue_data *migrate_rqd; /* Pre-determined rqd to which to migrate */
 };
 
 /*
  * Domain
  */
-struct csched_dom {
+struct csched2_dom {
     struct list_head vcpu;
     struct list_head sdom_elem;
     struct domain *dom;
@@ -278,8 +278,8 @@ struct csched_dom {
  *
  * FIXME: Do pre-calculated division?
  */
-static void t2c_update(struct csched_runqueue_data *rqd, s_time_t time,
-                          struct csched_vcpu *svc)
+static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
+                          struct csched2_vcpu *svc)
 {
     uint64_t val = time * rqd->max_weight + svc->residual;
 
@@ -287,7 +287,7 @@ static void t2c_update(struct csched_runqueue_data *rqd, s_time_t time,
     svc->credit -= val;
 }
 
-static s_time_t c2t(struct csched_runqueue_data *rqd, s_time_t credit, struct csched_vcpu *svc)
+static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_vcpu *svc)
 {
     return credit * svc->weight / rqd->max_weight;
 }
@@ -297,22 +297,22 @@ static s_time_t c2t(struct csched_runqueue_data *rqd, s_time_t credit, struct cs
  */
 
 static /*inline*/ int
-__vcpu_on_runq(struct csched_vcpu *svc)
+__vcpu_on_runq(struct csched2_vcpu *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
 
-static /*inline*/ struct csched_vcpu *
+static /*inline*/ struct csched2_vcpu *
 __runq_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct csched_vcpu, runq_elem);
+    return list_entry(elem, struct csched2_vcpu, runq_elem);
 }
 
 static void
 __update_runq_load(const struct scheduler *ops,
-                  struct csched_runqueue_data *rqd, int change, s_time_t now)
+                  struct csched2_runqueue_data *rqd, int change, s_time_t now)
 {
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     s_time_t delta=-1;
 
     now >>= LOADAVG_GRANULARITY_SHIFT;
@@ -354,9 +354,9 @@ __update_runq_load(const struct scheduler *ops,
 
 static void
 __update_svc_load(const struct scheduler *ops,
-                  struct csched_vcpu *svc, int change, s_time_t now)
+                  struct csched2_vcpu *svc, int change, s_time_t now)
 {
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     s_time_t delta=-1;
     int vcpu_load;
 
@@ -399,8 +399,8 @@ __update_svc_load(const struct scheduler *ops,
 
 static void
 update_load(const struct scheduler *ops,
-            struct csched_runqueue_data *rqd,
-            struct csched_vcpu *svc, int change, s_time_t now)
+            struct csched2_runqueue_data *rqd,
+            struct csched2_vcpu *svc, int change, s_time_t now)
 {
     __update_runq_load(ops, rqd, change, now);
     if ( svc )
@@ -408,7 +408,7 @@ update_load(const struct scheduler *ops,
 }
 
 static int
-__runq_insert(struct list_head *runq, struct csched_vcpu *svc)
+__runq_insert(struct list_head *runq, struct csched2_vcpu *svc)
 {
     struct list_head *iter;
     int pos = 0;
@@ -423,7 +423,7 @@ __runq_insert(struct list_head *runq, struct csched_vcpu *svc)
 
     list_for_each( iter, runq )
     {
-        struct csched_vcpu * iter_svc = __runq_elem(iter);
+        struct csched2_vcpu * iter_svc = __runq_elem(iter);
 
         if ( svc->credit > iter_svc->credit )
         {
@@ -439,7 +439,7 @@ __runq_insert(struct list_head *runq, struct csched_vcpu *svc)
 }
 
 static void
-runq_insert(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *svc)
+runq_insert(const struct scheduler *ops, unsigned int cpu, struct csched2_vcpu *svc)
 {
     struct list_head * runq = &RQD(ops, cpu)->runq;
     int pos = 0;
@@ -468,24 +468,24 @@ runq_insert(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *s
 }
 
 static inline void
-__runq_remove(struct csched_vcpu *svc)
+__runq_remove(struct csched2_vcpu *svc)
 {
     BUG_ON( !__vcpu_on_runq(svc) );
     list_del_init(&svc->runq_elem);
 }
 
-void burn_credits(struct csched_runqueue_data *rqd, struct csched_vcpu *, s_time_t);
+void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *, s_time_t);
 
 /* Check to see if the item on the runqueue is higher priority than what's
  * currently running; if so, wake up the processor */
 static /*inline*/ void
-runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *new, s_time_t now)
+runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched2_vcpu *new, s_time_t now)
 {
     int i, ipid=-1;
     s_time_t lowest=(1<<30);
-    struct csched_runqueue_data *rqd = RQD(ops, cpu);
+    struct csched2_runqueue_data *rqd = RQD(ops, cpu);
     cpumask_t mask;
-    struct csched_vcpu * cur;
+    struct csched2_vcpu * cur;
 
     d2printk("rqt %pv curr %pv\n", new->vcpu, current);
 
@@ -493,7 +493,7 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *n
     BUG_ON(new->rqd != rqd);
 
     /* Look at the cpu it's running on first */
-    cur = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
+    cur = CSCHED2_VCPU(per_cpu(schedule_data, cpu).curr);
     burn_credits(rqd, cur, now);
 
     if ( cur->credit < new->credit )
@@ -520,13 +520,13 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *n
 
     for_each_cpu(i, &mask)
     {
-        struct csched_vcpu * cur;
+        struct csched2_vcpu * cur;
 
         /* Already looked at this one above */
         if ( i == cpu )
             continue;
 
-        cur = CSCHED_VCPU(per_cpu(schedule_data, i).curr);
+        cur = CSCHED2_VCPU(per_cpu(schedule_data, i).curr);
 
         BUG_ON(is_idle_vcpu(cur->vcpu));
 
@@ -555,7 +555,7 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *n
 
     /* Only switch to another processor if the credit difference is greater
      * than the migrate resistance */
-    if ( ipid == -1 || lowest + CSCHED_MIGRATE_RESIST > new->credit )
+    if ( ipid == -1 || lowest + CSCHED2_MIGRATE_RESIST > new->credit )
         goto no_tickle;
 
 tickle:
@@ -581,17 +581,17 @@ no_tickle:
  * Credit-related code
  */
 static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
-                         struct csched_vcpu *snext)
+                         struct csched2_vcpu *snext)
 {
-    struct csched_runqueue_data *rqd = RQD(ops, cpu);
+    struct csched2_runqueue_data *rqd = RQD(ops, cpu);
     struct list_head *iter;
     int m;
 
     /*
      * Under normal circumstances, snext->credit should never be less
-     * than -CSCHED_MIN_TIMER.  However, under some circumstances, a
+     * than -CSCHED2_MIN_TIMER.  However, under some circumstances, a
      * vcpu with low credits may be allowed to run long enough that
-     * its credits are actually less than -CSCHED_CREDIT_INIT.
+     * its credits are actually less than -CSCHED2_CREDIT_INIT.
      * (Instances have been observed, for example, where a vcpu with
      * 200us of credit was allowed to run for 11ms, giving it -10.8ms
      * of credit.  Thus it was still negative even after the reset.)
@@ -605,15 +605,15 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
      * case.
      */
     m = 1;
-    if ( snext->credit < -CSCHED_CREDIT_INIT )
-        m += (-snext->credit) / CSCHED_CREDIT_INIT;
+    if ( snext->credit < -CSCHED2_CREDIT_INIT )
+        m += (-snext->credit) / CSCHED2_CREDIT_INIT;
 
     list_for_each( iter, &rqd->svc )
     {
-        struct csched_vcpu * svc;
+        struct csched2_vcpu * svc;
         int start_credit;
 
-        svc = list_entry(iter, struct csched_vcpu, rqd_elem);
+        svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
 
         BUG_ON( is_idle_vcpu(svc->vcpu) );
         BUG_ON( svc->rqd != rqd );
@@ -623,13 +623,13 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
         /* And add INIT * m, avoiding integer multiplication in the
          * common case. */
         if ( likely(m==1) )
-            svc->credit += CSCHED_CREDIT_INIT;
+            svc->credit += CSCHED2_CREDIT_INIT;
         else
-            svc->credit += m * CSCHED_CREDIT_INIT;
+            svc->credit += m * CSCHED2_CREDIT_INIT;
 
         /* "Clip" credits to max carryover */
-        if ( svc->credit > CSCHED_CREDIT_INIT + CSCHED_CARRYOVER_MAX )
-            svc->credit = CSCHED_CREDIT_INIT + CSCHED_CARRYOVER_MAX;
+        if ( svc->credit > CSCHED2_CREDIT_INIT + CSCHED2_CARRYOVER_MAX )
+            svc->credit = CSCHED2_CREDIT_INIT + CSCHED2_CARRYOVER_MAX;
 
         svc->start_time = now;
 
@@ -653,16 +653,16 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
     /* No need to resort runqueue, as everyone's order should be the same. */
 }
 
-void burn_credits(struct csched_runqueue_data *rqd, struct csched_vcpu *svc, s_time_t now)
+void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *svc, s_time_t now)
 {
     s_time_t delta;
 
     /* Assert svc is current */
-    ASSERT(svc==CSCHED_VCPU(per_cpu(schedule_data, svc->vcpu->processor).curr));
+    ASSERT(svc==CSCHED2_VCPU(per_cpu(schedule_data, svc->vcpu->processor).curr));
 
     if ( is_idle_vcpu(svc->vcpu) )
     {
-        BUG_ON(svc->credit != CSCHED_IDLE_CREDIT);
+        BUG_ON(svc->credit != CSCHED2_IDLE_CREDIT);
         return;
     }
 
@@ -696,7 +696,7 @@ void burn_credits(struct csched_runqueue_data *rqd, struct csched_vcpu *svc, s_t
 }
 
 /* Find the domain with the highest weight. */
-static void update_max_weight(struct csched_runqueue_data *rqd, int new_weight,
+static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
                               int old_weight)
 {
     /* Try to avoid brute-force search:
@@ -717,7 +717,7 @@ static void update_max_weight(struct csched_runqueue_data *rqd, int new_weight,
 
         list_for_each( iter, &rqd->svc )
         {
-            struct csched_vcpu * svc = list_entry(iter, struct csched_vcpu, rqd_elem);
+            struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
 
             if ( svc->weight > max_weight )
                 max_weight = svc->weight;
@@ -730,13 +730,13 @@ static void update_max_weight(struct csched_runqueue_data *rqd, int new_weight,
 
 #ifndef NDEBUG
 static /*inline*/ void
-__csched_vcpu_check(struct vcpu *vc)
+__csched2_vcpu_check(struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
-    struct csched_dom * const sdom = svc->sdom;
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_dom * const sdom = svc->sdom;
 
     BUG_ON( svc->vcpu != vc );
-    BUG_ON( sdom != CSCHED_DOM(vc->domain) );
+    BUG_ON( sdom != CSCHED2_DOM(vc->domain) );
     if ( sdom )
     {
         BUG_ON( is_idle_vcpu(vc) );
@@ -747,18 +747,18 @@ __csched_vcpu_check(struct vcpu *vc)
         BUG_ON( !is_idle_vcpu(vc) );
     }
 }
-#define CSCHED_VCPU_CHECK(_vc)  (__csched_vcpu_check(_vc))
+#define CSCHED2_VCPU_CHECK(_vc)  (__csched2_vcpu_check(_vc))
 #else
-#define CSCHED_VCPU_CHECK(_vc)
+#define CSCHED2_VCPU_CHECK(_vc)
 #endif
 
 static void *
-csched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
+csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
 {
-    struct csched_vcpu *svc;
+    struct csched2_vcpu *svc;
 
     /* Allocate per-VCPU info */
-    svc = xzalloc(struct csched_vcpu);
+    svc = xzalloc(struct csched2_vcpu);
     if ( svc == NULL )
         return NULL;
 
@@ -774,16 +774,16 @@ csched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
     {
         BUG_ON( svc->sdom == NULL );
 
-        svc->credit = CSCHED_CREDIT_INIT;
+        svc->credit = CSCHED2_CREDIT_INIT;
         svc->weight = svc->sdom->weight;
         /* Starting load of 50% */
-        svc->avgload = 1ULL << (CSCHED_PRIV(ops)->load_window_shift - 1);
+        svc->avgload = 1ULL << (CSCHED2_PRIV(ops)->load_window_shift - 1);
         svc->load_last_update = NOW();
     }
     else
     {
         BUG_ON( svc->sdom != NULL );
-        svc->credit = CSCHED_IDLE_CREDIT;
+        svc->credit = CSCHED2_IDLE_CREDIT;
         svc->weight = 0;
     }
 
@@ -794,7 +794,7 @@ csched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
 
 /* Add and remove from runqueue assignment (not active run queue) */
 static void
-__runq_assign(struct csched_vcpu *svc, struct csched_runqueue_data *rqd)
+__runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
 {
 
     svc->rqd = rqd;
@@ -824,7 +824,7 @@ __runq_assign(struct csched_vcpu *svc, struct csched_runqueue_data *rqd)
 static void
 runq_assign(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu *svc = vc->sched_priv;
+    struct csched2_vcpu *svc = vc->sched_priv;
 
     BUG_ON(svc->rqd != NULL);
 
@@ -832,7 +832,7 @@ runq_assign(const struct scheduler *ops, struct vcpu *vc)
 }
 
 static void
-__runq_deassign(struct csched_vcpu *svc)
+__runq_deassign(struct csched2_vcpu *svc)
 {
     BUG_ON(__vcpu_on_runq(svc));
     BUG_ON(test_bit(__CSFLAG_scheduled, &svc->flags));
@@ -849,7 +849,7 @@ __runq_deassign(struct csched_vcpu *svc)
 static void
 runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu *svc = vc->sched_priv;
+    struct csched2_vcpu *svc = vc->sched_priv;
 
     BUG_ON(svc->rqd != RQD(ops, vc->processor));
 
@@ -857,10 +857,10 @@ runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 }
 
 static void
-csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
+csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu *svc = vc->sched_priv;
-    struct csched_dom * const sdom = svc->sdom;
+    struct csched2_vcpu *svc = vc->sched_priv;
+    struct csched2_dom * const sdom = svc->sdom;
 
     printk("%s: Inserting %pv\n", __func__, vc);
 
@@ -884,22 +884,22 @@ csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
         sdom->nr_vcpus++;
     }
 
-    CSCHED_VCPU_CHECK(vc);
+    CSCHED2_VCPU_CHECK(vc);
 }
 
 static void
-csched_free_vdata(const struct scheduler *ops, void *priv)
+csched2_free_vdata(const struct scheduler *ops, void *priv)
 {
-    struct csched_vcpu *svc = priv;
+    struct csched2_vcpu *svc = priv;
 
     xfree(svc);
 }
 
 static void
-csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
+csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
-    struct csched_dom * const sdom = svc->sdom;
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_dom * const sdom = svc->sdom;
 
     BUG_ON( sdom == NULL );
     BUG_ON( !list_empty(&svc->runq_elem) );
@@ -926,9 +926,9 @@ csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
 }
 
 static void
-csched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
+csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
 
     BUG_ON( is_idle_vcpu(vc) );
 
@@ -945,9 +945,9 @@ csched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 }
 
 static void
-csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
+csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
     s_time_t now = 0;
 
     /* Schedule lock should be held at this point. */
@@ -998,9 +998,9 @@ out:
 }
 
 static void
-csched_context_saved(const struct scheduler *ops, struct vcpu *vc)
+csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
     s_time_t now = NOW();
     spinlock_t *lock = vcpu_schedule_lock_irq(vc);
 
@@ -1035,9 +1035,9 @@ csched_context_saved(const struct scheduler *ops, struct vcpu *vc)
 static int
 choose_cpu(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     int i, min_rqi = -1, new_cpu;
-    struct csched_vcpu *svc = CSCHED_VCPU(vc);
+    struct csched2_vcpu *svc = CSCHED2_VCPU(vc);
     s_time_t min_avgload;
 
     BUG_ON(cpumask_empty(&prv->active_queues));
@@ -1094,7 +1094,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
     /* Find the runqueue with the lowest instantaneous load */
     for_each_cpu(i, &prv->active_queues)
     {
-        struct csched_runqueue_data *rqd;
+        struct csched2_runqueue_data *rqd;
         s_time_t rqd_avgload;
 
         rqd = prv->rqd + i;
@@ -1143,15 +1143,15 @@ out_up:
 typedef struct {
     /* NB: Modified by consider() */
     s_time_t load_delta;
-    struct csched_vcpu * best_push_svc, *best_pull_svc;
+    struct csched2_vcpu * best_push_svc, *best_pull_svc;
     /* NB: Read by consider() */
-    struct csched_runqueue_data *lrqd;
-    struct csched_runqueue_data *orqd;                  
+    struct csched2_runqueue_data *lrqd;
+    struct csched2_runqueue_data *orqd;                  
 } balance_state_t;
 
 static void consider(balance_state_t *st, 
-                     struct csched_vcpu *push_svc,
-                     struct csched_vcpu *pull_svc)
+                     struct csched2_vcpu *push_svc,
+                     struct csched2_vcpu *pull_svc)
 {
     s_time_t l_load, o_load, delta;
 
@@ -1184,8 +1184,8 @@ static void consider(balance_state_t *st,
 
 
 static void migrate(const struct scheduler *ops,
-                    struct csched_vcpu *svc, 
-                    struct csched_runqueue_data *trqd, 
+                    struct csched2_vcpu *svc, 
+                    struct csched2_runqueue_data *trqd, 
                     s_time_t now)
 {
     if ( test_bit(__CSFLAG_scheduled, &svc->flags) )
@@ -1222,7 +1222,7 @@ static void migrate(const struct scheduler *ops,
 
 static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
 {
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     int i, max_delta_rqi = -1;
     struct list_head *push_iter, *pull_iter;
 
@@ -1323,7 +1323,7 @@ retry:
     list_for_each( push_iter, &st.lrqd->svc )
     {
         int inner_load_updated = 0;
-        struct csched_vcpu * push_svc = list_entry(push_iter, struct csched_vcpu, rqd_elem);
+        struct csched2_vcpu * push_svc = list_entry(push_iter, struct csched2_vcpu, rqd_elem);
 
         __update_svc_load(ops, push_svc, 0, now);
 
@@ -1333,7 +1333,7 @@ retry:
 
         list_for_each( pull_iter, &st.orqd->svc )
         {
-            struct csched_vcpu * pull_svc = list_entry(pull_iter, struct csched_vcpu, rqd_elem);
+            struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
             
             if ( ! inner_load_updated )
             {
@@ -1355,7 +1355,7 @@ retry:
 
     list_for_each( pull_iter, &st.orqd->svc )
     {
-        struct csched_vcpu * pull_svc = list_entry(pull_iter, struct csched_vcpu, rqd_elem);
+        struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
         
         /* Skip this one if it's already been flagged to migrate */
         if ( test_bit(__CSFLAG_runq_migrate_request, &pull_svc->flags) )
@@ -1379,7 +1379,7 @@ out:
 }
 
 static int
-csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
+csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
 {
     int new_cpu;
 
@@ -1389,14 +1389,14 @@ csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
 }
 
 static void
-csched_vcpu_migrate(
+csched2_vcpu_migrate(
     const struct scheduler *ops, struct vcpu *vc, unsigned int new_cpu)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
-    struct csched_runqueue_data *trqd;
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_runqueue_data *trqd;
 
     /* Check if new_cpu is valid */
-    BUG_ON(!cpumask_test_cpu(new_cpu, &CSCHED_PRIV(ops)->initialized));
+    BUG_ON(!cpumask_test_cpu(new_cpu, &CSCHED2_PRIV(ops)->initialized));
 
     trqd = RQD(ops, new_cpu);
 
@@ -1405,16 +1405,16 @@ csched_vcpu_migrate(
 }
 
 static int
-csched_dom_cntl(
+csched2_dom_cntl(
     const struct scheduler *ops,
     struct domain *d,
     struct xen_domctl_scheduler_op *op)
 {
-    struct csched_dom * const sdom = CSCHED_DOM(d);
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_dom * const sdom = CSCHED2_DOM(d);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     unsigned long flags;
 
-    /* Must hold csched_priv lock to read and update sdom,
+    /* Must hold csched2_priv lock to read and update sdom,
      * runq lock to update csvcs. */
     spin_lock_irqsave(&prv->lock, flags);
 
@@ -1438,10 +1438,10 @@ csched_dom_cntl(
             /* Update weights for vcpus, and max_weight for runqueues on which they reside */
             list_for_each ( iter, &sdom->vcpu )
             {
-                struct csched_vcpu *svc = list_entry(iter, struct csched_vcpu, sdom_elem);
+                struct csched2_vcpu *svc = list_entry(iter, struct csched2_vcpu, sdom_elem);
 
                 /* NB: Locking order is important here.  Because we grab this lock here, we
-                 * must never lock csched_priv.lock if we're holding a runqueue lock.
+                 * must never lock csched2_priv.lock if we're holding a runqueue lock.
                 * Also, calling vcpu_schedule_lock() is enough, since IRQs have already
                  * been disabled. */
                 spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
@@ -1462,12 +1462,12 @@ csched_dom_cntl(
 }
 
 static void *
-csched_alloc_domdata(const struct scheduler *ops, struct domain *dom)
+csched2_alloc_domdata(const struct scheduler *ops, struct domain *dom)
 {
-    struct csched_dom *sdom;
+    struct csched2_dom *sdom;
     unsigned long flags;
 
-    sdom = xzalloc(struct csched_dom);
+    sdom = xzalloc(struct csched2_dom);
     if ( sdom == NULL )
         return NULL;
 
@@ -1475,29 +1475,29 @@ csched_alloc_domdata(const struct scheduler *ops, struct domain *dom)
     INIT_LIST_HEAD(&sdom->vcpu);
     INIT_LIST_HEAD(&sdom->sdom_elem);
     sdom->dom = dom;
-    sdom->weight = CSCHED_DEFAULT_WEIGHT;
+    sdom->weight = CSCHED2_DEFAULT_WEIGHT;
     sdom->nr_vcpus = 0;
 
-    spin_lock_irqsave(&CSCHED_PRIV(ops)->lock, flags);
+    spin_lock_irqsave(&CSCHED2_PRIV(ops)->lock, flags);
 
-    list_add_tail(&sdom->sdom_elem, &CSCHED_PRIV(ops)->sdom);
+    list_add_tail(&sdom->sdom_elem, &CSCHED2_PRIV(ops)->sdom);
 
-    spin_unlock_irqrestore(&CSCHED_PRIV(ops)->lock, flags);
+    spin_unlock_irqrestore(&CSCHED2_PRIV(ops)->lock, flags);
 
     return (void *)sdom;
 }
 
 static int
-csched_dom_init(const struct scheduler *ops, struct domain *dom)
+csched2_dom_init(const struct scheduler *ops, struct domain *dom)
 {
-    struct csched_dom *sdom;
+    struct csched2_dom *sdom;
 
     printk("%s: Initializing domain %d\n", __func__, dom->domain_id);
 
     if ( is_idle_domain(dom) )
         return 0;
 
-    sdom = csched_alloc_domdata(ops, dom);
+    sdom = csched2_alloc_domdata(ops, dom);
     if ( sdom == NULL )
         return -ENOMEM;
 
@@ -1507,41 +1507,41 @@ csched_dom_init(const struct scheduler *ops, struct domain *dom)
 }
 
 static void
-csched_free_domdata(const struct scheduler *ops, void *data)
+csched2_free_domdata(const struct scheduler *ops, void *data)
 {
     unsigned long flags;
-    struct csched_dom *sdom = data;
+    struct csched2_dom *sdom = data;
 
-    spin_lock_irqsave(&CSCHED_PRIV(ops)->lock, flags);
+    spin_lock_irqsave(&CSCHED2_PRIV(ops)->lock, flags);
 
     list_del_init(&sdom->sdom_elem);
 
-    spin_unlock_irqrestore(&CSCHED_PRIV(ops)->lock, flags);
+    spin_unlock_irqrestore(&CSCHED2_PRIV(ops)->lock, flags);
 
     xfree(data);
 }
 
 static void
-csched_dom_destroy(const struct scheduler *ops, struct domain *dom)
+csched2_dom_destroy(const struct scheduler *ops, struct domain *dom)
 {
-    struct csched_dom *sdom = CSCHED_DOM(dom);
+    struct csched2_dom *sdom = CSCHED2_DOM(dom);
 
     BUG_ON(!list_empty(&sdom->vcpu));
 
-    csched_free_domdata(ops, CSCHED_DOM(dom));
+    csched2_free_domdata(ops, CSCHED2_DOM(dom));
 }
 
 /* How long should we let this vcpu run for? */
 static s_time_t
-csched_runtime(const struct scheduler *ops, int cpu, struct csched_vcpu *snext)
+csched2_runtime(const struct scheduler *ops, int cpu, struct csched2_vcpu *snext)
 {
     s_time_t time; 
     int rt_credit; /* Proposed runtime measured in credits */
-    struct csched_runqueue_data *rqd = RQD(ops, cpu);
+    struct csched2_runqueue_data *rqd = RQD(ops, cpu);
     struct list_head *runq = &rqd->runq;
 
     if ( is_idle_vcpu(snext->vcpu) )
-        return CSCHED_MAX_TIMER;
+        return CSCHED2_MAX_TIMER;
 
     /* General algorithm:
      * 1) Run until snext's credit will be 0
@@ -1557,7 +1557,7 @@ csched_runtime(const struct scheduler *ops, int cpu, struct csched_vcpu *snext)
      * run until your credit ~= his */
     if ( ! list_empty(runq) )
     {
-        struct csched_vcpu *swait = __runq_elem(runq->next);
+        struct csched2_vcpu *swait = __runq_elem(runq->next);
 
         if ( ! is_idle_vcpu(swait->vcpu)
              && swait->credit > 0 )
@@ -1569,7 +1569,7 @@ csched_runtime(const struct scheduler *ops, int cpu, struct csched_vcpu *snext)
     /* The next guy may actually have a higher credit, if we've tried to
      * avoid migrating him from a different cpu.  DTRT.  */
     if ( rt_credit <= 0 )
-        time = CSCHED_MIN_TIMER;
+        time = CSCHED2_MIN_TIMER;
     else
     {
         /* FIXME: See if we can eliminate this conversion if we know time
@@ -1579,10 +1579,10 @@ csched_runtime(const struct scheduler *ops, int cpu, struct csched_vcpu *snext)
         time = c2t(rqd, rt_credit, snext);
 
         /* Check limits */
-        if ( time < CSCHED_MIN_TIMER )
-            time = CSCHED_MIN_TIMER;
-        else if ( time > CSCHED_MAX_TIMER )
-            time = CSCHED_MAX_TIMER;
+        if ( time < CSCHED2_MIN_TIMER )
+            time = CSCHED2_MIN_TIMER;
+        else if ( time > CSCHED2_MAX_TIMER )
+            time = CSCHED2_MAX_TIMER;
     }
 
     return time;
@@ -1593,28 +1593,28 @@ void __dump_execstate(void *unused);
 /*
  * Find a candidate.
  */
-static struct csched_vcpu *
-runq_candidate(struct csched_runqueue_data *rqd,
-               struct csched_vcpu *scurr,
+static struct csched2_vcpu *
+runq_candidate(struct csched2_runqueue_data *rqd,
+               struct csched2_vcpu *scurr,
                int cpu, s_time_t now)
 {
     struct list_head *iter;
-    struct csched_vcpu *snext = NULL;
+    struct csched2_vcpu *snext = NULL;
 
     /* Default to current if runnable, idle otherwise */
     if ( vcpu_runnable(scurr->vcpu) )
         snext = scurr;
     else
-        snext = CSCHED_VCPU(idle_vcpu[cpu]);
+        snext = CSCHED2_VCPU(idle_vcpu[cpu]);
 
     list_for_each( iter, &rqd->runq )
     {
-        struct csched_vcpu * svc = list_entry(iter, struct csched_vcpu, runq_elem);
+        struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, runq_elem);
 
         /* If this is on a different processor, don't pull it unless
-         * its credit is at least CSCHED_MIGRATE_RESIST higher. */
+         * its credit is at least CSCHED2_MIGRATE_RESIST higher. */
         if ( svc->vcpu->processor != cpu
-             && snext->credit + CSCHED_MIGRATE_RESIST > svc->credit )
+             && snext->credit + CSCHED2_MIGRATE_RESIST > svc->credit )
             continue;
 
         /* If the next one on the list has more credit than current
@@ -1635,21 +1635,21 @@ runq_candidate(struct csched_runqueue_data *rqd,
  * fast for the common case.
  */
 static struct task_slice
-csched_schedule(
+csched2_schedule(
     const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled)
 {
     const int cpu = smp_processor_id();
-    struct csched_runqueue_data *rqd;
-    struct csched_vcpu * const scurr = CSCHED_VCPU(current);
-    struct csched_vcpu *snext = NULL;
+    struct csched2_runqueue_data *rqd;
+    struct csched2_vcpu * const scurr = CSCHED2_VCPU(current);
+    struct csched2_vcpu *snext = NULL;
     struct task_slice ret;
 
     SCHED_STAT_CRANK(schedule);
-    CSCHED_VCPU_CHECK(current);
+    CSCHED2_VCPU_CHECK(current);
 
     d2printk("sc p%d c %pv now %"PRI_stime"\n", cpu, scurr->vcpu, now);
 
-    BUG_ON(!cpumask_test_cpu(cpu, &CSCHED_PRIV(ops)->initialized));
+    BUG_ON(!cpumask_test_cpu(cpu, &CSCHED2_PRIV(ops)->initialized));
 
     rqd = RQD(ops, cpu);
     BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
@@ -1665,9 +1665,9 @@ csched_schedule(
         {
             int rq;
             other_rqi = -2;
-            for_each_cpu ( rq, &CSCHED_PRIV(ops)->active_queues )
+            for_each_cpu ( rq, &CSCHED2_PRIV(ops)->active_queues )
             {
-                if ( scurr->rqd == &CSCHED_PRIV(ops)->rqd[rq] )
+                if ( scurr->rqd == &CSCHED2_PRIV(ops)->rqd[rq] )
                 {
                     other_rqi = rq;
                     break;
@@ -1710,7 +1710,7 @@ csched_schedule(
     if ( tasklet_work_scheduled )
     {
         trace_var(TRC_CSCHED2_SCHED_TASKLET, 0, 0,  NULL);
-        snext = CSCHED_VCPU(idle_vcpu[cpu]);
+        snext = CSCHED2_VCPU(idle_vcpu[cpu]);
     }
     else
         snext=runq_candidate(rqd, scurr, cpu, now);
@@ -1743,7 +1743,7 @@ csched_schedule(
         }
 
         /* Check for the reset condition */
-        if ( snext->credit <= CSCHED_CREDIT_RESET )
+        if ( snext->credit <= CSCHED2_CREDIT_RESET )
         {
             reset_credit(ops, cpu, now, snext);
             balance_load(ops, cpu, now);
@@ -1758,7 +1758,7 @@ csched_schedule(
         /* Safe because lock for old processor is held */
         if ( snext->vcpu->processor != cpu )
         {
-            snext->credit += CSCHED_MIGRATE_COMPENSATION;
+            snext->credit += CSCHED2_MIGRATE_COMPENSATION;
             snext->vcpu->processor = cpu;
             ret.migrated = 1;
         }
@@ -1776,15 +1776,15 @@ csched_schedule(
     /*
      * Return task to run next...
      */
-    ret.time = csched_runtime(ops, cpu, snext);
+    ret.time = csched2_runtime(ops, cpu, snext);
     ret.task = snext->vcpu;
 
-    CSCHED_VCPU_CHECK(ret.task);
+    CSCHED2_VCPU_CHECK(ret.task);
     return ret;
 }
 
 static void
-csched_dump_vcpu(struct csched_vcpu *svc)
+csched2_dump_vcpu(struct csched2_vcpu *svc)
 {
     printk("[%i.%i] flags=%x cpu=%i",
             svc->vcpu->domain->domain_id,
@@ -1798,10 +1798,10 @@ csched_dump_vcpu(struct csched_vcpu *svc)
 }
 
 static void
-csched_dump_pcpu(const struct scheduler *ops, int cpu)
+csched2_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct list_head *runq, *iter;
-    struct csched_vcpu *svc;
+    struct csched2_vcpu *svc;
     int loop;
     char cpustr[100];
 
@@ -1815,11 +1815,11 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     printk("core=%s\n", cpustr);
 
     /* current VCPU */
-    svc = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
+    svc = CSCHED2_VCPU(per_cpu(schedule_data, cpu).curr);
     if ( svc )
     {
         printk("\trun: ");
-        csched_dump_vcpu(svc);
+        csched2_dump_vcpu(svc);
     }
 
     loop = 0;
@@ -1829,22 +1829,22 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
         if ( svc )
         {
             printk("\t%3d: ", ++loop);
-            csched_dump_vcpu(svc);
+            csched2_dump_vcpu(svc);
         }
     }
 }
 
 static void
-csched_dump(const struct scheduler *ops)
+csched2_dump(const struct scheduler *ops)
 {
     struct list_head *iter_sdom, *iter_svc;
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     int i, loop;
 
     printk("Active queues: %d\n"
            "\tdefault-weight     = %d\n",
            cpumask_weight(&prv->active_queues),
-           CSCHED_DEFAULT_WEIGHT);
+           CSCHED2_DEFAULT_WEIGHT);
     for_each_cpu(i, &prv->active_queues)
     {
         s_time_t fraction;
@@ -1869,8 +1869,8 @@ csched_dump(const struct scheduler *ops)
     loop = 0;
     list_for_each( iter_sdom, &prv->sdom )
     {
-        struct csched_dom *sdom;
-        sdom = list_entry(iter_sdom, struct csched_dom, sdom_elem);
+        struct csched2_dom *sdom;
+        sdom = list_entry(iter_sdom, struct csched2_dom, sdom_elem);
 
        printk("\tDomain: %d w %d v %d\n\t", 
               sdom->dom->domain_id, 
@@ -1879,18 +1879,18 @@ csched_dump(const struct scheduler *ops)
 
         list_for_each( iter_svc, &sdom->vcpu )
         {
-            struct csched_vcpu *svc;
-            svc = list_entry(iter_svc, struct csched_vcpu, sdom_elem);
+            struct csched2_vcpu *svc;
+            svc = list_entry(iter_svc, struct csched2_vcpu, sdom_elem);
 
             printk("\t%3d: ", ++loop);
-            csched_dump_vcpu(svc);
+            csched2_dump_vcpu(svc);
         }
     }
 }
 
-static void activate_runqueue(struct csched_private *prv, int rqi)
+static void activate_runqueue(struct csched2_private *prv, int rqi)
 {
-    struct csched_runqueue_data *rqd;
+    struct csched2_runqueue_data *rqd;
 
     rqd = prv->rqd + rqi;
 
@@ -1905,9 +1905,9 @@ static void activate_runqueue(struct csched_private *prv, int rqi)
     cpumask_set_cpu(rqi, &prv->active_queues);
 }
 
-static void deactivate_runqueue(struct csched_private *prv, int rqi)
+static void deactivate_runqueue(struct csched2_private *prv, int rqi)
 {
-    struct csched_runqueue_data *rqd;
+    struct csched2_runqueue_data *rqd;
 
     rqd = prv->rqd + rqi;
 
@@ -1922,8 +1922,8 @@ static void init_pcpu(const struct scheduler *ops, int cpu)
 {
     int rqi;
     unsigned long flags;
-    struct csched_private *prv = CSCHED_PRIV(ops);
-    struct csched_runqueue_data *rqd;
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_runqueue_data *rqd;
     spinlock_t *old_lock;
 
     spin_lock_irqsave(&prv->lock, flags);
@@ -1984,7 +1984,7 @@ static void init_pcpu(const struct scheduler *ops, int cpu)
 }
 
 static void *
-csched_alloc_pdata(const struct scheduler *ops, int cpu)
+csched2_alloc_pdata(const struct scheduler *ops, int cpu)
 {
     /* Check to see if the cpu is online yet */
     /* Note: cpu 0 doesn't get a STARTING callback */
@@ -1998,11 +1998,11 @@ csched_alloc_pdata(const struct scheduler *ops, int cpu)
 }
 
 static void
-csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
+csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
     unsigned long flags;
-    struct csched_private *prv = CSCHED_PRIV(ops);
-    struct csched_runqueue_data *rqd;
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_runqueue_data *rqd;
     struct schedule_data *sd = &per_cpu(schedule_data, cpu);
     int rqi;
 
@@ -2046,14 +2046,14 @@ csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 }
 
 static int
-csched_cpu_starting(int cpu)
+csched2_cpu_starting(int cpu)
 {
     struct scheduler *ops;
 
     /* Hope this is safe from cpupools switching things around. :-) */
     ops = per_cpu(scheduler, cpu);
 
-    if ( ops->alloc_pdata == csched_alloc_pdata )
+    if ( ops->alloc_pdata == csched2_alloc_pdata )
         init_pcpu(ops, cpu);
 
     return NOTIFY_DONE;
@@ -2068,7 +2068,7 @@ static int cpu_credit2_callback(
     switch ( action )
     {
     case CPU_STARTING:
-        csched_cpu_starting(cpu);
+        csched2_cpu_starting(cpu);
         break;
     default:
         break;
@@ -2082,17 +2082,17 @@ static struct notifier_block cpu_credit2_nfb = {
 };
 
 static int
-csched_global_init(void)
+csched2_global_init(void)
 {
     register_cpu_notifier(&cpu_credit2_nfb);
     return 0;
 }
 
 static int
-csched_init(struct scheduler *ops)
+csched2_init(struct scheduler *ops)
 {
     int i;
-    struct csched_private *prv;
+    struct csched2_private *prv;
 
     printk("Initializing Credit2 scheduler\n" \
            " WARNING: This is experimental software in development.\n" \
@@ -2113,7 +2113,7 @@ csched_init(struct scheduler *ops)
      * set up basic structures, and a callback when the CPU info is
      * available. */
 
-    prv = xzalloc(struct csched_private);
+    prv = xzalloc(struct csched2_private);
     if ( prv == NULL )
         return -ENOMEM;
     ops->sched_data = prv;
@@ -2133,49 +2133,49 @@ csched_init(struct scheduler *ops)
 }
 
 static void
-csched_deinit(const struct scheduler *ops)
+csched2_deinit(const struct scheduler *ops)
 {
-    struct csched_private *prv;
+    struct csched2_private *prv;
 
-    prv = CSCHED_PRIV(ops);
+    prv = CSCHED2_PRIV(ops);
     if ( prv != NULL )
         xfree(prv);
 }
 
 
-static struct csched_private _csched_priv;
+static struct csched2_private _csched2_priv;
 
 const struct scheduler sched_credit2_def = {
     .name           = "SMP Credit Scheduler rev2",
     .opt_name       = "credit2",
     .sched_id       = XEN_SCHEDULER_CREDIT2,
-    .sched_data     = &_csched_priv,
-
-    .init_domain    = csched_dom_init,
-    .destroy_domain = csched_dom_destroy,
-
-    .insert_vcpu    = csched_vcpu_insert,
-    .remove_vcpu    = csched_vcpu_remove,
-
-    .sleep          = csched_vcpu_sleep,
-    .wake           = csched_vcpu_wake,
-
-    .adjust         = csched_dom_cntl,
-
-    .pick_cpu       = csched_cpu_pick,
-    .migrate        = csched_vcpu_migrate,
-    .do_schedule    = csched_schedule,
-    .context_saved  = csched_context_saved,
-
-    .dump_cpu_state = csched_dump_pcpu,
-    .dump_settings  = csched_dump,
-    .global_init    = csched_global_init,
-    .init           = csched_init,
-    .deinit         = csched_deinit,
-    .alloc_vdata    = csched_alloc_vdata,
-    .free_vdata     = csched_free_vdata,
-    .alloc_pdata    = csched_alloc_pdata,
-    .free_pdata     = csched_free_pdata,
-    .alloc_domdata  = csched_alloc_domdata,
-    .free_domdata   = csched_free_domdata,
+    .sched_data     = &_csched2_priv,
+
+    .init_domain    = csched2_dom_init,
+    .destroy_domain = csched2_dom_destroy,
+
+    .insert_vcpu    = csched2_vcpu_insert,
+    .remove_vcpu    = csched2_vcpu_remove,
+
+    .sleep          = csched2_vcpu_sleep,
+    .wake           = csched2_vcpu_wake,
+
+    .adjust         = csched2_dom_cntl,
+
+    .pick_cpu       = csched2_cpu_pick,
+    .migrate        = csched2_vcpu_migrate,
+    .do_schedule    = csched2_schedule,
+    .context_saved  = csched2_context_saved,
+
+    .dump_cpu_state = csched2_dump_pcpu,
+    .dump_settings  = csched2_dump,
+    .global_init    = csched2_global_init,
+    .init           = csched2_init,
+    .deinit         = csched2_deinit,
+    .alloc_vdata    = csched2_alloc_vdata,
+    .free_vdata     = csched2_free_vdata,
+    .alloc_pdata    = csched2_alloc_pdata,
+    .free_pdata     = csched2_free_pdata,
+    .alloc_domdata  = csched2_alloc_domdata,
+    .free_domdata   = csched2_free_domdata,
 };
-- 
1.7.10.4

