[Xen-devel] [PATCH v3 07/47] xen/sched: move per cpu scheduler private data into struct sched_resource



This prepares for supporting larger scheduling granularities, e.g. core
scheduling.

While at it, move sched_has_urgent_vcpu() from include/asm-x86/cpuidle.h
into sched.h, removing the need to include sched-if.h in cpuidle.h.
For that purpose, remove urgent_count from the scheduler private data
and make it a plain per-cpu variable.
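
Roughly, the declarations this boils down to (a condensed sketch; the
get_sched_res() lookup helper is assumed from earlier in this series and
simply wraps per_cpu(sched_res, cpu)):

    /* Per-cpu scheduling resource, now also carrying what used to live
     * in the per-cpu struct schedule_data. */
    struct sched_resource {
        spinlock_t        *schedule_lock, _lock;
        struct sched_unit *curr;
        void              *sched_priv;
        struct timer       s_timer;     /* scheduling timer */
        unsigned int       master_cpu;  /* lowest cpu id in the resource */
    };

    DECLARE_PER_CPU(struct sched_resource *, sched_res);

    static inline struct sched_resource *get_sched_res(unsigned int cpu)
    {
        return per_cpu(sched_res, cpu);
    }

    /* urgent_count leaves the scheduler data and becomes a plain per-cpu
     * variable, so cpuidle only needs sched.h in order to check it. */
    DECLARE_PER_CPU(atomic_t, sched_urgent_count);

    static inline bool sched_has_urgent_vcpu(void)
    {
        return atomic_read(&this_cpu(sched_urgent_count));
    }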

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V1:
- move sched_has_urgent_vcpu()
V2:
- make sched_has_urgent_vcpu() return bool (Jan Beulich)
V3:
- split out removing sched-if.h include in some C files (Jan Beulich)
- make urgent_count a plain percpu variable (Jan Beulich)
---
 xen/common/sched_arinc653.c   |  4 ++--
 xen/common/sched_credit.c     | 10 ++++----
 xen/common/sched_credit2.c    | 21 +++++++++--------
 xen/common/sched_null.c       |  4 ++--
 xen/common/sched_rt.c         |  9 +++----
 xen/common/schedule.c         | 55 ++++++++++++++++++++++---------------------
 xen/include/asm-x86/cpuidle.h | 11 ---------
 xen/include/xen/sched-if.h    | 23 ++++++++----------
 xen/include/xen/sched.h       | 11 +++++++++
 9 files changed, 74 insertions(+), 74 deletions(-)

diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index 8585d9c4fe..98cdd7f894 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -475,7 +475,7 @@ a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
      * If the VCPU being put to sleep is the same one that is currently
      * running, raise a softirq to invoke the scheduler to switch domains.
      */
-    if ( per_cpu(schedule_data, vc->processor).curr == unit )
+    if ( get_sched_res(vc->processor)->curr == unit )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
 }
 
@@ -643,7 +643,7 @@ static spinlock_t *
 a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                   void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     arinc653_vcpu_t *svc = vdata;
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vc));
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index b7d44efc8b..ee42de6bce 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -82,7 +82,7 @@
 #define CSCHED_PRIV(_ops)   \
     ((struct csched_private *)((_ops)->sched_data))
 #define CSCHED_PCPU(_c)     \
-    ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
+    ((struct csched_pcpu *)get_sched_res(_c)->sched_priv)
 #define CSCHED_UNIT(unit)   ((struct csched_unit *) (unit)->priv)
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu)          (&(CSCHED_PCPU(_cpu)->runq))
@@ -250,7 +250,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
     /*
      * We're peeking at cpu's runq, we must hold the proper lock.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     return list_empty(RUNQ(cpu)) ||
            is_idle_vcpu(__runq_elem(RUNQ(cpu)->next)->vcpu);
@@ -259,7 +259,7 @@ static inline bool_t is_runq_idle(unsigned int cpu)
 static inline void
 inc_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     CSCHED_PCPU(cpu)->nr_runnable++;
 
 }
@@ -267,7 +267,7 @@ inc_nr_runnable(unsigned int cpu)
 static inline void
 dec_nr_runnable(unsigned int cpu)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     ASSERT(CSCHED_PCPU(cpu)->nr_runnable >= 1);
     CSCHED_PCPU(cpu)->nr_runnable--;
 }
@@ -628,7 +628,7 @@ static spinlock_t *
 csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                     void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     struct csched_private *prv = CSCHED_PRIV(new_ops);
     struct csched_unit *svc = vdata;
 
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 4982f1ef36..849f3a916e 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -568,7 +568,7 @@ static inline struct csched2_private *csched2_priv(const struct scheduler *ops)
 
 static inline struct csched2_pcpu *csched2_pcpu(unsigned int cpu)
 {
-    return per_cpu(schedule_data, cpu).sched_priv;
+    return get_sched_res(cpu)->sched_priv;
 }
 
 static inline struct csched2_unit *csched2_unit(const struct sched_unit *unit)
@@ -1277,7 +1277,7 @@ runq_insert(const struct scheduler *ops, struct csched2_unit *svc)
     struct list_head * runq = &c2rqd(ops, cpu)->runq;
     int pos = 0;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     ASSERT(!vcpu_on_runq(svc));
     ASSERT(c2r(cpu) == c2r(svc->vcpu->processor));
@@ -1798,7 +1798,7 @@ static bool vcpu_grab_budget(struct csched2_unit *svc)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     if ( svc->budget > 0 )
         return true;
@@ -1845,7 +1845,7 @@ vcpu_return_budget(struct csched2_unit *svc, struct list_head *parked)
     struct csched2_dom *sdom = svc->sdom;
     unsigned int cpu = svc->vcpu->processor;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     ASSERT(list_empty(parked));
 
     /* budget_lock nests inside runqueue lock. */
@@ -2102,7 +2102,7 @@ csched2_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
     unsigned int cpu = vc->processor;
     s_time_t now;
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     ASSERT(!is_idle_vcpu(vc));
 
@@ -2230,7 +2230,7 @@ csched2_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
      * just grab the prv lock.  Instead, we'll have to trylock, and
      * do something else reasonable if we fail.
      */
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     if ( !read_trylock(&prv->lock) )
     {
@@ -2570,7 +2570,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
      * on either side may be empty).
      */
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
     st.lrqd = c2rqd(ops, cpu);
 
     update_runq_load(ops, st.lrqd, 0, now);
@@ -3476,7 +3476,7 @@ csched2_schedule(
     rqd = c2rqd(ops, cpu);
     BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
 
@@ -3867,7 +3867,7 @@ csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 
     rqi = init_pdata(prv, pdata, cpu);
     /* Move the scheduler lock to the new runq lock. */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
+    get_sched_res(cpu)->schedule_lock = &prv->rqd[rqi].lock;
 
     /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
     spin_unlock(old_lock);
@@ -3881,6 +3881,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 {
     struct csched2_private *prv = csched2_priv(new_ops);
     struct csched2_unit *svc = vdata;
+    struct sched_resource *sd = get_sched_res(cpu);
     unsigned rqi;
 
     ASSERT(pdata && svc && is_idle_vcpu(svc->vcpu));
@@ -3906,7 +3907,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * this scheduler, and so it's safe to have taken it /before/ our
      * private global lock.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->rqd[rqi].lock);
+    ASSERT(sd->schedule_lock != &prv->rqd[rqi].lock);
 
     write_unlock(&prv->lock);
 
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index d7d0e77cb5..e7ba55c650 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -269,7 +269,7 @@ pick_res(struct null_private *prv, const struct sched_unit *unit)
     unsigned int cpu = v->processor, new_cpu;
     cpumask_t *cpus = cpupool_domain_cpumask(v->domain);
 
-    ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
     for_each_affinity_balance_step( bs )
     {
@@ -419,7 +419,7 @@ static spinlock_t *null_switch_sched(struct scheduler *new_ops,
                                      unsigned int cpu,
                                      void *pdata, void *vdata)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     struct null_private *prv = null_priv(new_ops);
     struct null_unit *nvc = vdata;
 
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 115e4da24d..f31c7dc02c 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -75,7 +75,7 @@
 /*
  * Locking:
  * A global system lock is used to protect the RunQ and DepletedQ.
- * The global lock is referenced by schedule_data.schedule_lock
+ * The global lock is referenced by sched_res->schedule_lock
  * from all physical cpus.
  *
  * The lock is already grabbed when calling wake/sleep/schedule/ functions
@@ -176,7 +176,7 @@ static void repl_timer_handler(void *data);
 
 /*
  * System-wide private data, include global RunQueue/DepletedQ
- * Global lock is referenced by schedule_data.schedule_lock from all
+ * Global lock is referenced by sched_res->schedule_lock from all
  * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
  */
 struct rt_private {
@@ -722,7 +722,7 @@ rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
     }
 
     /* Move the scheduler lock to our global runqueue lock.  */
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;
+    get_sched_res(cpu)->schedule_lock = &prv->lock;
 
     /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
     spin_unlock_irqrestore(old_lock, flags);
@@ -735,6 +735,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 {
     struct rt_private *prv = rt_priv(new_ops);
     struct rt_unit *svc = vdata;
+    struct sched_resource *sd = get_sched_res(cpu);
 
     ASSERT(!pdata && svc && is_idle_vcpu(svc->vcpu));
 
@@ -744,7 +745,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * another scheduler, but that is how things need to be, for
      * preventing races.
      */
-    ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->lock);
+    ASSERT(sd->schedule_lock != &prv->lock);
 
     /*
      * If we are the absolute first cpu being switched toward this
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 865c50b378..42bca8df54 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -65,13 +65,15 @@ static void vcpu_singleshot_timer_fn(void *data);
 static void poll_timer_fn(void *data);
 
 /* This is global for now so that private implementations can reach it */
-DEFINE_PER_CPU(struct schedule_data, schedule_data);
 DEFINE_PER_CPU(struct scheduler *, scheduler);
 DEFINE_PER_CPU_READ_MOSTLY(struct sched_resource *, sched_res);
 
 /* Scratch space for cpumasks. */
 DEFINE_PER_CPU(cpumask_t, cpumask_scratch);
 
+/* How many urgent vcpus. */
+DEFINE_PER_CPU(atomic_t, sched_urgent_count);
+
 extern const struct scheduler *__start_schedulers_array[], *__end_schedulers_array[];
 #define NUM_SCHEDULERS (__end_schedulers_array - __start_schedulers_array)
 #define schedulers __start_schedulers_array
@@ -213,7 +215,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              !test_bit(v->vcpu_id, v->domain->poll_mask) )
         {
             v->is_urgent = 0;
-            atomic_dec(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_dec(&per_cpu(sched_urgent_count, v->processor));
         }
     }
     else
@@ -222,7 +224,7 @@ static inline void vcpu_urgent_count_update(struct vcpu *v)
              unlikely(test_bit(v->vcpu_id, v->domain->poll_mask)) )
         {
             v->is_urgent = 1;
-            atomic_inc(&per_cpu(schedule_data,v->processor).urgent_count);
+            atomic_inc(&per_cpu(sched_urgent_count, v->processor));
         }
     }
 }
@@ -233,7 +235,7 @@ static inline void vcpu_runstate_change(
     s_time_t delta;
 
     ASSERT(v->runstate.state != new_state);
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(v->processor)->schedule_lock));
 
     vcpu_urgent_count_update(v);
 
@@ -393,7 +395,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     /* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
     if ( is_idle_domain(d) )
     {
-        per_cpu(schedule_data, v->processor).curr = unit;
+        get_sched_res(v->processor)->curr = unit;
         v->is_running = 1;
     }
     else
@@ -518,7 +520,7 @@ void sched_destroy_vcpu(struct vcpu *v)
     kill_timer(&v->singleshot_timer);
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
-        atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
+        atomic_dec(&per_cpu(sched_urgent_count, v->processor));
     sched_remove_unit(vcpu_scheduler(v), unit);
     sched_free_vdata(vcpu_scheduler(v), unit->priv);
     sched_free_unit(unit);
@@ -565,7 +567,7 @@ void sched_destroy_domain(struct domain *d)
 
 void vcpu_sleep_nosync_locked(struct vcpu *v)
 {
-    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(get_sched_res(v->processor)->schedule_lock));
 
     if ( likely(!vcpu_runnable(v)) )
     {
@@ -660,8 +662,8 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
      */
     if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
     {
-        atomic_inc(&per_cpu(schedule_data, new_cpu).urgent_count);
-        atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
+        atomic_inc(&per_cpu(sched_urgent_count, new_cpu));
+        atomic_dec(&per_cpu(sched_urgent_count, old_cpu));
     }
 
     /*
@@ -727,20 +729,20 @@ static void vcpu_migrate_finish(struct vcpu *v)
          * are not correct any longer after evaluating old and new cpu holding
          * the locks.
          */
-        old_lock = per_cpu(schedule_data, old_cpu).schedule_lock;
-        new_lock = per_cpu(schedule_data, new_cpu).schedule_lock;
+        old_lock = get_sched_res(old_cpu)->schedule_lock;
+        new_lock = get_sched_res(new_cpu)->schedule_lock;
 
         sched_spin_lock_double(old_lock, new_lock, &flags);
 
         old_cpu = v->processor;
-        if ( old_lock == per_cpu(schedule_data, old_cpu).schedule_lock )
+        if ( old_lock == get_sched_res(old_cpu)->schedule_lock )
         {
             /*
              * If we selected a CPU on the previosu iteration, check if it
              * remains suitable for running this vCPU.
              */
             if ( pick_called &&
-                 (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+                 (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
@@ -748,7 +750,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
             /* Select a new CPU. */
             new_cpu = sched_pick_resource(vcpu_scheduler(v),
                                           v->sched_unit)->master_cpu;
-            if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+            if ( (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
             pick_called = 1;
@@ -1565,7 +1567,7 @@ static void schedule(void)
     struct scheduler     *sched;
     unsigned long        *tasklet_work = &this_cpu(tasklet_work_to_do);
     bool_t                tasklet_work_scheduled = 0;
-    struct schedule_data *sd;
+    struct sched_resource *sd;
     spinlock_t           *lock;
     struct task_slice     next_slice;
     int cpu = smp_processor_id();
@@ -1574,7 +1576,7 @@ static void schedule(void)
 
     SCHED_STAT_CRANK(sched_run);
 
-    sd = &this_cpu(schedule_data);
+    sd = get_sched_res(cpu);
 
     /* Update tasklet scheduling status. */
     switch ( *tasklet_work )
@@ -1715,20 +1717,19 @@ static void poll_timer_fn(void *data)
 
 static int cpu_schedule_up(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
-    struct sched_resource *res;
+    struct sched_resource *sd;
 
-    res = xzalloc(struct sched_resource);
-    if ( res == NULL )
+    sd = xzalloc(struct sched_resource);
+    if ( sd == NULL )
         return -ENOMEM;
-    res->master_cpu = cpu;
-    set_sched_res(cpu, res);
+    sd->master_cpu = cpu;
+    set_sched_res(cpu, sd);
 
     per_cpu(scheduler, cpu) = &sched_idle_ops;
     spin_lock_init(&sd->_lock);
     sd->schedule_lock = &sched_free_cpu_lock;
     init_timer(&sd->s_timer, s_timer_fn, NULL, cpu);
-    atomic_set(&sd->urgent_count, 0);
+    atomic_set(&per_cpu(sched_urgent_count, cpu), 0);
 
     /* Boot CPU is dealt with later in scheduler_init(). */
     if ( cpu == 0 )
@@ -1737,7 +1738,7 @@ static int cpu_schedule_up(unsigned int cpu)
     if ( idle_vcpu[cpu] == NULL )
         vcpu_create(idle_vcpu[0]->domain, cpu, cpu);
     else
-        idle_vcpu[cpu]->sched_unit->res = res;
+        idle_vcpu[cpu]->sched_unit->res = sd;
 
     if ( idle_vcpu[cpu] == NULL )
         return -ENOMEM;
@@ -1757,7 +1758,7 @@ static int cpu_schedule_up(unsigned int cpu)
 
 static void cpu_schedule_down(unsigned int cpu)
 {
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
 
     kill_timer(&sd->s_timer);
 
@@ -1916,7 +1917,7 @@ void __init scheduler_init(void)
     idle_domain->max_vcpus = nr_cpu_ids;
     if ( vcpu_create(idle_domain, 0, 0) == NULL )
         BUG();
-    this_cpu(schedule_data).curr = idle_vcpu[0]->sched_unit;
+    get_sched_res(0)->curr = idle_vcpu[0]->sched_unit;
 }
 
 /*
@@ -1933,7 +1934,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &sched_idle_ops : c->sched;
     struct cpupool *old_pool = per_cpu(cpupool, cpu);
-    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
     spinlock_t *old_lock, *new_lock;
     unsigned long flags;
 
diff --git a/xen/include/asm-x86/cpuidle.h b/xen/include/asm-x86/cpuidle.h
index 488f708305..5d7dffd228 100644
--- a/xen/include/asm-x86/cpuidle.h
+++ b/xen/include/asm-x86/cpuidle.h
@@ -4,7 +4,6 @@
 #include <xen/cpuidle.h>
 #include <xen/notifier.h>
 #include <xen/sched.h>
-#include <xen/sched-if.h>
 
 extern struct acpi_processor_power *processor_powers[];
 
@@ -27,14 +26,4 @@ void update_idle_stats(struct acpi_processor_power *,
 void update_last_cx_stat(struct acpi_processor_power *,
                          struct acpi_processor_cx *, uint64_t);
 
-/*
- * vcpu is urgent if vcpu is polling event channel
- *
- * if urgent vcpu exists, CPU should not enter deep C state
- */
-static inline int sched_has_urgent_vcpu(void)
-{
-    return atomic_read(&this_cpu(schedule_data).urgent_count);
-}
-
 #endif /* __X86_ASM_CPUIDLE_H__ */
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 6aa8d125c8..2b152d1c75 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -33,22 +33,19 @@ extern int sched_ratelimit_us;
  * For cache betterness, keep the actual lock in the same cache area
  * as the rest of the struct.  Just have the scheduler point to the
  * one it wants (This may be the one right in front of it).*/
-struct schedule_data {
+struct sched_resource {
     spinlock_t         *schedule_lock,
                        _lock;
     struct sched_unit  *curr;
     void               *sched_priv;
     struct timer        s_timer;        /* scheduling timer                */
-    atomic_t            urgent_count;   /* how many urgent vcpus           */
-};
-
-#define curr_on_cpu(c)    (per_cpu(schedule_data, c).curr)
 
-struct sched_resource {
-    unsigned int master_cpu;  /* Cpu with lowest id in scheduling resource. */
+    /* Cpu with lowest id in scheduling resource. */
+    unsigned int        master_cpu;
 };
 
-DECLARE_PER_CPU(struct schedule_data, schedule_data);
+#define curr_on_cpu(c)    (get_sched_res(c)->curr)
+
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 DECLARE_PER_CPU(struct sched_resource *, sched_res);
@@ -79,7 +76,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 { \
     for ( ; ; ) \
     { \
-        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
+        spinlock_t *lock = get_sched_res(cpu)->schedule_lock; \
         /* \
          * v->processor may change when grabbing the lock; but \
          * per_cpu(v->processor) may also change, if changing cpu pool \
@@ -89,7 +86,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
          * lock may be the same; this will succeed in that case. \
          */ \
         spin_lock##irq(lock, ## arg); \
-        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
+        if ( likely(lock == get_sched_res(cpu)->schedule_lock) ) \
             return lock; \
         spin_unlock##irq(lock, ## arg); \
     } \
@@ -99,7 +96,7 @@ static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
 static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
                                                EXTRA_TYPE(arg), param) \
 { \
-    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
+    ASSERT(lock == get_sched_res(cpu)->schedule_lock); \
     spin_unlock##irq(lock, ## arg); \
 }
 
@@ -128,11 +125,11 @@ sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
 
 static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
 {
-    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
+    spinlock_t *lock = get_sched_res(cpu)->schedule_lock;
 
     if ( !spin_trylock(lock) )
         return NULL;
-    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+    if ( lock == get_sched_res(cpu)->schedule_lock )
         return lock;
     spin_unlock(lock);
     return NULL;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 999e43e8cc..8780888603 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -881,6 +881,17 @@ static inline struct vcpu *domain_vcpu(const struct domain *d,
 
 void cpu_init(void);
 
+/*
+ * vcpu is urgent if vcpu is polling event channel
+ *
+ * if urgent vcpu exists, CPU should not enter deep C state
+ */
+DECLARE_PER_CPU(atomic_t, sched_urgent_count);
+static inline bool sched_has_urgent_vcpu(void)
+{
+    return atomic_read(&this_cpu(sched_urgent_count));
+}
+
 struct scheduler;
 
 struct scheduler *scheduler_get_default(void);
-- 
2.16.4

