
[Xen-devel] [PATCH RFC 32/49] xen/sched: switch schedule() from vcpus to sched_items



Use sched_items instead of vcpus in schedule(). This includes the
introduction of sched_item_runstate_change() as a replacement for
vcpu_runstate_change() in schedule().

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
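Not part of the patch, for review context only: a minimal standalone
sketch of the runstate selection the new helper performs. All types
below (struct vcpu, struct sched_item, VPF_blocked, the "runnable"
field standing in for vcpu_runnable()) are reduced stand-ins for the
real Xen definitions, not the structures used by this series.

#include <stdbool.h>
#include <stdio.h>

typedef long long s_time_t;

enum runstate { RUNSTATE_running, RUNSTATE_runnable,
                RUNSTATE_blocked, RUNSTATE_offline };

#define VPF_blocked 0x1u               /* stand-in for the real pause flag */

struct vcpu {
    unsigned int pause_flags;
    bool runnable;                     /* stands in for vcpu_runnable(v)   */
    enum runstate state;
    s_time_t state_entry_time;
};

struct sched_item {
    struct vcpu *vcpu;                 /* single-vcpu item, as in this series */
};

/* Mirrors the selection done by the new helper: a running item always
 * moves to RUNSTATE_running; otherwise the vcpu's pause flags and
 * runnability pick blocked / runnable / offline. */
static void sched_item_runstate_change(struct sched_item *item,
                                       bool running, s_time_t now)
{
    struct vcpu *v = item->vcpu;

    v->state = running ? RUNSTATE_running
               : (v->pause_flags & VPF_blocked) ? RUNSTATE_blocked
               : v->runnable ? RUNSTATE_runnable
                             : RUNSTATE_offline;
    v->state_entry_time = now;
}

int main(void)
{
    struct vcpu v = { .pause_flags = VPF_blocked, .runnable = false };
    struct sched_item item = { .vcpu = &v };

    sched_item_runstate_change(&item, false, 100);
    printf("new state: %d (expect %d = RUNSTATE_blocked)\n",
           (int)v.state, (int)RUNSTATE_blocked);
    return 0;
}

The real helper delegates the state and entry-time update to
vcpu_runstate_change(); the sketch folds that in only to stay
self-contained.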
 xen/common/schedule.c | 70 +++++++++++++++++++++++++++++----------------------
 1 file changed, 40 insertions(+), 30 deletions(-)

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 0b5e5e566b..c16f548b63 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -196,6 +196,20 @@ static inline void vcpu_runstate_change(
     v->runstate.state = new_state;
 }
 
+static inline void sched_item_runstate_change(struct sched_item *item,
+    bool running, s_time_t new_entry_time)
+{
+    struct vcpu *v = item->vcpu;
+
+    if ( running )
+        vcpu_runstate_change(v, RUNSTATE_running, new_entry_time);
+    else
+        vcpu_runstate_change(v,
+            ((v->pause_flags & VPF_blocked) ? RUNSTATE_blocked :
+             (vcpu_runnable(v) ? RUNSTATE_runnable : RUNSTATE_offline)),
+            new_entry_time);
+}
+
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
 {
     spinlock_t *lock = likely(v == current)
@@ -1532,7 +1546,7 @@ static void vcpu_periodic_timer_work(struct vcpu *v)
  */
 static void schedule(void)
 {
-    struct vcpu          *prev = current, *next = NULL;
+    struct sched_item    *prev = current->sched_item, *next = NULL;
     s_time_t              now;
     struct scheduler     *sched;
     unsigned long        *tasklet_work = &this_cpu(tasklet_work_to_do);
@@ -1576,9 +1590,9 @@ static void schedule(void)
     sched = this_cpu(scheduler);
     next_slice = sched->do_schedule(sched, now, tasklet_work_scheduled);
 
-    next = next_slice.task->vcpu;
+    next = next_slice.task;
 
-    sd->curr = next->sched_item;
+    sd->curr = next;
 
     if ( next_slice.time >= 0 ) /* -ve means no limit */
         set_timer(&sd->s_timer, now + next_slice.time);
@@ -1587,59 +1601,55 @@ static void schedule(void)
     {
         pcpu_schedule_unlock_irq(lock, cpu);
         TRACE_4D(TRC_SCHED_SWITCH_INFCONT,
-                 next->domain->domain_id, next->vcpu_id,
-                 now - prev->runstate.state_entry_time,
+                 next->domain->domain_id, next->item_id,
+                 now - prev->state_entry_time,
                  next_slice.time);
-        trace_continue_running(next);
-        return continue_running(prev);
+        trace_continue_running(next->vcpu);
+        return continue_running(prev->vcpu);
     }
 
     TRACE_3D(TRC_SCHED_SWITCH_INFPREV,
-             prev->domain->domain_id, prev->vcpu_id,
-             now - prev->runstate.state_entry_time);
+             prev->domain->domain_id, prev->item_id,
+             now - prev->state_entry_time);
     TRACE_4D(TRC_SCHED_SWITCH_INFNEXT,
-             next->domain->domain_id, next->vcpu_id,
-             (next->runstate.state == RUNSTATE_runnable) ?
-             (now - next->runstate.state_entry_time) : 0,
+             next->domain->domain_id, next->item_id,
+             (next->vcpu->runstate.state == RUNSTATE_runnable) ?
+             (now - next->state_entry_time) : 0,
              next_slice.time);
 
-    ASSERT(prev->runstate.state == RUNSTATE_running);
+    ASSERT(prev->vcpu->runstate.state == RUNSTATE_running);
 
     TRACE_4D(TRC_SCHED_SWITCH,
-             prev->domain->domain_id, prev->vcpu_id,
-             next->domain->domain_id, next->vcpu_id);
+             prev->domain->domain_id, prev->item_id,
+             next->domain->domain_id, next->item_id);
 
-    vcpu_runstate_change(
-        prev,
-        ((prev->pause_flags & VPF_blocked) ? RUNSTATE_blocked :
-         (vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)),
-        now);
-    prev->sched_item->last_run_time = now;
+    sched_item_runstate_change(prev, false, now);
+    prev->last_run_time = now;
 
-    ASSERT(next->runstate.state != RUNSTATE_running);
-    vcpu_runstate_change(next, RUNSTATE_running, now);
+    ASSERT(next->vcpu->runstate.state != RUNSTATE_running);
+    sched_item_runstate_change(next, true, now);
 
     /*
      * NB. Don't add any trace records from here until the actual context
      * switch, else lost_records resume will not work properly.
      */
 
-    ASSERT(!vcpu_running(next));
-    next->sched_item->is_running = 1;
-    next->sched_item->state_entry_time = now;
+    ASSERT(!next->is_running);
+    next->is_running = 1;
+    next->state_entry_time = now;
 
     pcpu_schedule_unlock_irq(lock, cpu);
 
     SCHED_STAT_CRANK(sched_ctx);
 
-    stop_timer(&prev->periodic_timer);
+    stop_timer(&prev->vcpu->periodic_timer);
 
     if ( next_slice.migrated )
-        sched_move_irqs(next);
+        sched_move_irqs(next->vcpu);
 
-    vcpu_periodic_timer_work(next);
+    vcpu_periodic_timer_work(next->vcpu);
 
-    context_switch(prev, next);
+    context_switch(prev->vcpu, next->vcpu);
 }
 
 void context_saved(struct vcpu *prev)
-- 
2.16.4

