[Xen-changelog] [xen-unstable] [XEN] Make per-cpu schedule data explicitly PER_CPU.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [XEN] Make per-cpu schedule data explicitly PER_CPU.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 08 Aug 2006 14:50:29 +0000
Delivery-date: Tue, 08 Aug 2006 07:55:11 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 5e8c254c9dcd5ef3b94902bdce8c00015faa24e4
# Parent  7ce412dde1be62b4fb1a418f5efd3ad276dd37e2
[XEN] Make per-cpu schedule data explicitly PER_CPU.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/common/sched_bvt.c     |   16 ++++++++------
 xen/common/sched_credit.c  |   31 +++++++++++++++------------
 xen/common/sched_sedf.c    |   21 +++++++++---------
 xen/common/schedule.c      |   50 +++++++++++++++++++++++----------------------
 xen/include/xen/sched-if.h |   10 +++++----
 5 files changed, 69 insertions(+), 59 deletions(-)
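
The conversion is mechanical throughout: the global schedule_data[NR_CPUS] array, indexed by CPU number, becomes a per-CPU variable accessed via per_cpu(schedule_data, cpu), or this_cpu(schedule_data) on the local CPU. A minimal sketch of the pattern, for orientation only (the real definitions are in the hunks below; the example() wrapper here is hypothetical):

    #include <xen/sched.h>      /* struct vcpu                              */
    #include <xen/sched-if.h>   /* struct schedule_data (see hunk below)    */
    #include <xen/percpu.h>     /* DEFINE_PER_CPU, per_cpu, this_cpu        */

    /* Old form: one statically sized global array.
     *     struct schedule_data schedule_data[NR_CPUS];
     *     schedule_data[cpu].curr = next;
     */

    /* New form: one instance per CPU, placed in the per-CPU data area. */
    DEFINE_PER_CPU(struct schedule_data, schedule_data);

    static void example(unsigned int cpu, struct vcpu *next)
    {
        per_cpu(schedule_data, cpu).curr = next;  /* a named CPU's instance   */
        this_cpu(schedule_data).curr = next;      /* the local CPU's instance */
    }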

diff -r 7ce412dde1be -r 5e8c254c9dcd xen/common/sched_bvt.c
--- a/xen/common/sched_bvt.c    Tue Aug 08 12:04:46 2006 +0100
+++ b/xen/common/sched_bvt.c    Tue Aug 08 13:55:22 2006 +0100
@@ -60,7 +60,8 @@ struct bvt_cpu_info
 
 #define BVT_INFO(p)   ((struct bvt_dom_info *)(p)->sched_priv)
 #define EBVT_INFO(p)  ((struct bvt_vcpu_info *)(p)->sched_priv)
-#define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
+#define CPU_INFO(cpu) \
+    ((struct bvt_cpu_info *)(per_cpu(schedule_data, cpu).sched_priv))
 #define RUNLIST(p)    ((struct list_head *)&(EBVT_INFO(p)->run_list))
 #define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
 #define CPU_SVT(cpu)  (CPU_INFO(cpu)->svt)
@@ -203,7 +204,8 @@ static int bvt_init_vcpu(struct vcpu *v)
     /* Allocate per-CPU context if this is the first domain to be added. */
     if ( CPU_INFO(v->processor) == NULL )
     {
-        schedule_data[v->processor].sched_priv = xmalloc(struct bvt_cpu_info);
+        per_cpu(schedule_data, v->processor).sched_priv =
+            xmalloc(struct bvt_cpu_info);
         BUG_ON(CPU_INFO(v->processor) == NULL);
         INIT_LIST_HEAD(RUNQUEUE(v->processor));
         CPU_SVT(v->processor) = 0;
@@ -251,7 +253,7 @@ static void bvt_wake(struct vcpu *v)
     /* Deal with warping here. */
     einf->evt = calc_evt(v, einf->avt);
     
-    curr = schedule_data[cpu].curr;
+    curr = per_cpu(schedule_data, cpu).curr;
     curr_evt = calc_evt(curr, calc_avt(curr, now));
     /* Calculate the time the current domain would run assuming
        the second smallest evt is of the newly woken domain */
@@ -261,14 +263,14 @@ static void bvt_wake(struct vcpu *v)
 
     if ( is_idle_vcpu(curr) || (einf->evt <= curr_evt) )
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
-    else if ( schedule_data[cpu].s_timer.expires > r_time )
-        set_timer(&schedule_data[cpu].s_timer, r_time);
+    else if ( per_cpu(schedule_data, cpu).s_timer.expires > r_time )
+        set_timer(&per_cpu(schedule_data, cpu).s_timer, r_time);
 }
 
 
 static void bvt_sleep(struct vcpu *v)
 {
-    if ( schedule_data[v->processor].curr == v )
+    if ( per_cpu(schedule_data, v->processor).curr == v )
         cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
     else  if ( __task_on_runqueue(v) )
         __del_from_runqueue(v);
@@ -418,7 +420,7 @@ static struct task_slice bvt_do_schedule
      * *and* the task the second lowest evt.
      * this code is O(n) but we expect n to be small.
      */
-    next_einf       = EBVT_INFO(schedule_data[cpu].idle);
+    next_einf       = EBVT_INFO(per_cpu(schedule_data, cpu).idle);
     next_prime_einf  = NULL;
 
     next_evt       = ~0U;
diff -r 7ce412dde1be -r 5e8c254c9dcd xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Tue Aug 08 12:04:46 2006 +0100
+++ b/xen/common/sched_credit.c Tue Aug 08 13:55:22 2006 +0100
@@ -55,7 +55,8 @@
 /*
  * Useful macros
  */
-#define CSCHED_PCPU(_c)     ((struct csched_pcpu *)schedule_data[_c].sched_priv)
+#define CSCHED_PCPU(_c)     \
+    ((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
 #define CSCHED_VCPU(_vcpu)  ((struct csched_vcpu *) (_vcpu)->sched_priv)
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu)          (&(CSCHED_PCPU(_cpu)->runq))
@@ -253,7 +254,8 @@ static inline void
 static inline void
 __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
 {
-    struct csched_vcpu * const cur = CSCHED_VCPU(schedule_data[cpu].curr);
+    struct csched_vcpu * const cur =
+        CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
     cpumask_t mask;
 
     ASSERT(cur);
@@ -318,10 +320,10 @@ csched_pcpu_init(int cpu)
 
     INIT_LIST_HEAD(&spc->runq);
     spc->runq_sort_last = csched_priv.runq_sort;
-    schedule_data[cpu].sched_priv = spc;
+    per_cpu(schedule_data, cpu).sched_priv = spc;
 
     /* Start off idling... */
-    BUG_ON( !is_idle_vcpu(schedule_data[cpu].curr) );
+    BUG_ON( !is_idle_vcpu(per_cpu(schedule_data, cpu).curr) );
     cpu_set(cpu, csched_priv.idlers);
 
     spin_unlock_irqrestore(&csched_priv.lock, flags);
@@ -533,7 +535,7 @@ csched_vcpu_sleep(struct vcpu *vc)
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( schedule_data[vc->processor].curr == vc )
+    if ( per_cpu(schedule_data, vc->processor).curr == vc )
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
     else if ( __vcpu_on_runq(svc) )
         __runq_remove(svc);
@@ -547,7 +549,7 @@ csched_vcpu_wake(struct vcpu *vc)
 
     BUG_ON( is_idle_vcpu(vc) );
 
-    if ( unlikely(schedule_data[cpu].curr == vc) )
+    if ( unlikely(per_cpu(schedule_data, cpu).curr == vc) )
     {
         CSCHED_STAT_CRANK(vcpu_wake_running);
         return;
@@ -599,7 +601,8 @@ csched_vcpu_set_affinity(struct vcpu *vc
 
             vc->processor = first_cpu(vc->cpu_affinity);
 
-            spin_unlock_irqrestore(&schedule_data[lcpu].schedule_lock, flags);
+            spin_unlock_irqrestore(&per_cpu(schedule_data, lcpu).schedule_lock,
+                                   flags);
         }
 
         vcpu_unpause(vc);
@@ -685,7 +688,7 @@ csched_runq_sort(unsigned int cpu)
 
     spc->runq_sort_last = sort_epoch;
 
-    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
+    spin_lock_irqsave(&per_cpu(schedule_data, cpu).schedule_lock, flags);
 
     runq = &spc->runq;
     elem = runq->next;
@@ -710,7 +713,7 @@ csched_runq_sort(unsigned int cpu)
         elem = next;
     }
 
-    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);
+    spin_unlock_irqrestore(&per_cpu(schedule_data, cpu).schedule_lock, flags);
 }
 
 static void
@@ -900,7 +903,7 @@ csched_tick(unsigned int cpu)
      * we could distribute or at the very least cycle the duty.
      */
     if ( (csched_priv.master == cpu) &&
-         (schedule_data[cpu].tick % CSCHED_ACCT_NTICKS) == 0 )
+         (per_cpu(schedule_data, cpu).tick % CSCHED_ACCT_NTICKS) == 0 )
     {
         csched_acct();
     }
@@ -984,7 +987,7 @@ csched_load_balance(int cpu, struct csch
          * cause a deadlock if the peer CPU is also load balancing and trying
          * to lock this CPU.
          */
-        if ( spin_trylock(&schedule_data[peer_cpu].schedule_lock) )
+        if ( spin_trylock(&per_cpu(schedule_data, peer_cpu).schedule_lock) )
         {
 
             spc = CSCHED_PCPU(peer_cpu);
@@ -998,7 +1001,7 @@ csched_load_balance(int cpu, struct csch
                 speer = csched_runq_steal(spc, cpu, snext->pri);
             }
 
-            spin_unlock(&schedule_data[peer_cpu].schedule_lock);
+            spin_unlock(&per_cpu(schedule_data, peer_cpu).schedule_lock);
 
             /* Got one! */
             if ( speer )
@@ -1120,11 +1123,11 @@ csched_dump_pcpu(int cpu)
     runq = &spc->runq;
 
     printk(" tick=%lu, sort=%d\n",
-            schedule_data[cpu].tick,
+            per_cpu(schedule_data, cpu).tick,
             spc->runq_sort_last);
 
     /* current VCPU */
-    svc = CSCHED_VCPU(schedule_data[cpu].curr);
+    svc = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
     if ( svc )
     {
         printk("\trun: ");
diff -r 7ce412dde1be -r 5e8c254c9dcd xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Tue Aug 08 12:04:46 2006 +0100
+++ b/xen/common/sched_sedf.c   Tue Aug 08 13:55:22 2006 +0100
@@ -113,13 +113,14 @@ struct sedf_cpu_info {
 };
 
 #define EDOM_INFO(d)   ((struct sedf_vcpu_info *)((d)->sched_priv))
-#define CPU_INFO(cpu)  ((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
+#define CPU_INFO(cpu)  \
+    ((struct sedf_cpu_info *)per_cpu(schedule_data, cpu).sched_priv)
 #define LIST(d)        (&EDOM_INFO(d)->list)
 #define EXTRALIST(d,i) (&(EDOM_INFO(d)->extralist[i]))
 #define RUNQ(cpu)      (&CPU_INFO(cpu)->runnableq)
 #define WAITQ(cpu)     (&CPU_INFO(cpu)->waitq)
 #define EXTRAQ(cpu,i)  (&(CPU_INFO(cpu)->extraq[i]))
-#define IDLETASK(cpu)  ((struct vcpu *)schedule_data[cpu].idle)
+#define IDLETASK(cpu)  ((struct vcpu *)per_cpu(schedule_data, cpu).idle)
 
 #define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period)
 
@@ -348,11 +349,11 @@ static int sedf_init_vcpu(struct vcpu *v
     inf->vcpu = v;
  
     /* Allocate per-CPU context if this is the first domain to be added. */
-    if ( unlikely(schedule_data[v->processor].sched_priv == NULL) )
-    {
-        schedule_data[v->processor].sched_priv = 
+    if ( unlikely(per_cpu(schedule_data, v->processor).sched_priv == NULL) )
+    {
+        per_cpu(schedule_data, v->processor).sched_priv = 
             xmalloc(struct sedf_cpu_info);
-        BUG_ON(schedule_data[v->processor].sched_priv == NULL);
+        BUG_ON(per_cpu(schedule_data, v->processor).sched_priv == NULL);
         memset(CPU_INFO(v->processor), 0, sizeof(*CPU_INFO(v->processor)));
         INIT_LIST_HEAD(WAITQ(v->processor));
         INIT_LIST_HEAD(RUNQ(v->processor));
@@ -847,7 +848,7 @@ static void sedf_sleep(struct vcpu *d)
 
     EDOM_INFO(d)->status |= SEDF_ASLEEP;
  
-    if ( schedule_data[d->processor].curr == d )
+    if ( per_cpu(schedule_data, d->processor).curr == d )
     {
         cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
     }
@@ -1167,9 +1168,9 @@ void sedf_wake(struct vcpu *d)
       Save approximation: Always switch to scheduler!*/
     ASSERT(d->processor >= 0);
     ASSERT(d->processor < NR_CPUS);
-    ASSERT(schedule_data[d->processor].curr);
-
-    if ( should_switch(schedule_data[d->processor].curr, d, now) )
+    ASSERT(per_cpu(schedule_data, d->processor).curr);
+
+    if ( should_switch(per_cpu(schedule_data, d->processor).curr, d, now) )
         cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
 }
 
diff -r 7ce412dde1be -r 5e8c254c9dcd xen/common/schedule.c
--- a/xen/common/schedule.c     Tue Aug 08 12:04:46 2006 +0100
+++ b/xen/common/schedule.c     Tue Aug 08 13:55:22 2006 +0100
@@ -46,7 +46,7 @@ static void poll_timer_fn(void *data);
 static void poll_timer_fn(void *data);
 
 /* This is global for now so that private implementations can reach it */
-struct schedule_data schedule_data[NR_CPUS];
+DEFINE_PER_CPU(struct schedule_data, schedule_data);
 
 extern struct scheduler sched_bvt_def;
 extern struct scheduler sched_sedf_def;
@@ -73,7 +73,7 @@ static inline void vcpu_runstate_change(
     struct vcpu *v, int new_state, s_time_t new_entry_time)
 {
     ASSERT(v->runstate.state != new_state);
-    ASSERT(spin_is_locked(&schedule_data[v->processor].schedule_lock));
+    ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));
 
     v->runstate.time[v->runstate.state] +=
         new_entry_time - v->runstate.state_entry_time;
@@ -107,8 +107,8 @@ int sched_init_vcpu(struct vcpu *v)
 
     if ( is_idle_vcpu(v) )
     {
-        schedule_data[v->processor].curr = v;
-        schedule_data[v->processor].idle = v;
+        per_cpu(schedule_data, v->processor).curr = v;
+        per_cpu(schedule_data, v->processor).idle = v;
         set_bit(_VCPUF_running, &v->vcpu_flags);
     }
 
@@ -500,19 +500,21 @@ long sched_adjdom(struct sched_adjdom_cm
  */
 static void __enter_scheduler(void)
 {
-    struct vcpu        *prev = current, *next = NULL;
-    int                 cpu = smp_processor_id();
-    s_time_t            now = NOW();
-    struct task_slice   next_slice;
-    s32                 r_time;     /* time for new dom to run */
+    struct vcpu          *prev = current, *next = NULL;
+    s_time_t              now = NOW();
+    struct schedule_data *sd;
+    struct task_slice     next_slice;
+    s32                   r_time;     /* time for new dom to run */
 
     ASSERT(!in_irq());
 
     perfc_incrc(sched_run);
 
-    spin_lock_irq(&schedule_data[cpu].schedule_lock);
-
-    stop_timer(&schedule_data[cpu].s_timer);
+    sd = &this_cpu(schedule_data);
+
+    spin_lock_irq(&sd->schedule_lock);
+
+    stop_timer(&sd->s_timer);
     
     /* get policy-specific decision on scheduling... */
     next_slice = ops.do_schedule(now);
@@ -520,13 +522,13 @@ static void __enter_scheduler(void)
     r_time = next_slice.time;
     next = next_slice.task;
 
-    schedule_data[cpu].curr = next;
+    sd->curr = next;
     
-    set_timer(&schedule_data[cpu].s_timer, now + r_time);
+    set_timer(&sd->s_timer, now + r_time);
 
     if ( unlikely(prev == next) )
     {
-        spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+        spin_unlock_irq(&sd->schedule_lock);
         return continue_running(prev);
     }
 
@@ -552,17 +554,17 @@ static void __enter_scheduler(void)
     ASSERT(!test_bit(_VCPUF_running, &next->vcpu_flags));
     set_bit(_VCPUF_running, &next->vcpu_flags);
 
-    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+    spin_unlock_irq(&sd->schedule_lock);
 
     perfc_incrc(sched_ctx);
 
-    prev->sleep_tick = schedule_data[cpu].tick;
+    prev->sleep_tick = sd->tick;
 
     /* Ensure that the domain has an up-to-date time base. */
     if ( !is_idle_vcpu(next) )
     {
         update_vcpu_system_time(next);
-        if ( next->sleep_tick != schedule_data[cpu].tick )
+        if ( next->sleep_tick != sd->tick )
             send_timer_event(next);
     }
 
@@ -594,7 +596,7 @@ static void t_timer_fn(void *unused)
     struct vcpu  *v   = current;
     unsigned int  cpu = smp_processor_id();
 
-    schedule_data[cpu].tick++;
+    per_cpu(schedule_data, cpu).tick++;
 
     if ( !is_idle_vcpu(v) )
     {
@@ -633,8 +635,8 @@ void __init scheduler_init(void)
 
     for ( i = 0; i < NR_CPUS; i++ )
     {
-        spin_lock_init(&schedule_data[i].schedule_lock);
-        init_timer(&schedule_data[i].s_timer, s_timer_fn, NULL, i);
+        spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
+        init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
         init_timer(&t_timer[i], t_timer_fn, NULL, i);
     }
 
@@ -676,10 +678,10 @@ void dump_runq(unsigned char key)
 
     for_each_online_cpu ( i )
     {
-        spin_lock(&schedule_data[i].schedule_lock);
+        spin_lock(&per_cpu(schedule_data, i).schedule_lock);
         printk("CPU[%02d] ", i);
-        SCHED_OP(dump_cpu_state,i);
-        spin_unlock(&schedule_data[i].schedule_lock);
+        SCHED_OP(dump_cpu_state, i);
+        spin_unlock(&per_cpu(schedule_data, i).schedule_lock);
     }
 
     local_irq_restore(flags);
diff -r 7ce412dde1be -r 5e8c254c9dcd xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h        Tue Aug 08 12:04:46 2006 +0100
+++ b/xen/include/xen/sched-if.h        Tue Aug 08 13:55:22 2006 +0100
@@ -8,6 +8,8 @@
 #ifndef __XEN_SCHED_IF_H__
 #define __XEN_SCHED_IF_H__
 
+#include <xen/percpu.h>
+
 struct schedule_data {
     spinlock_t          schedule_lock;  /* spinlock protecting curr        */
     struct vcpu        *curr;           /* current task                    */
@@ -17,7 +19,7 @@ struct schedule_data {
     unsigned long       tick;           /* current periodic 'tick'         */
 } __cacheline_aligned;
 
-extern struct schedule_data schedule_data[];
+DECLARE_PER_CPU(struct schedule_data, schedule_data);
 
 static inline void vcpu_schedule_lock(struct vcpu *v)
 {
@@ -26,10 +28,10 @@ static inline void vcpu_schedule_lock(st
     for ( ; ; )
     {
         cpu = v->processor;
-        spin_lock(&schedule_data[cpu].schedule_lock);
+        spin_lock(&per_cpu(schedule_data, cpu).schedule_lock);
         if ( likely(v->processor == cpu) )
             break;
-        spin_unlock(&schedule_data[cpu].schedule_lock);
+        spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
     }
 }
 
@@ -40,7 +42,7 @@ static inline void vcpu_schedule_lock(st
 
 static inline void vcpu_schedule_unlock(struct vcpu *v)
 {
-    spin_unlock(&schedule_data[v->processor].schedule_lock);
+    spin_unlock(&per_cpu(schedule_data, v->processor).schedule_lock);
 }
 
 #define vcpu_schedule_unlock_irq(v) \

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
