Re: [Xen-devel] [PATCH v3 35/47] xen/sched: make vcpu_wake() and vcpu_sleep() core scheduling aware
 
- To: Jan Beulich <jbeulich@xxxxxxxx>
 
- From: Jürgen Groß <jgross@xxxxxxxx>
 
- Date: Wed, 25 Sep 2019 15:07:18 +0200
 
- Cc: Tim Deegan <tim@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Ian Jackson <ian.jackson@xxxxxxxxxxxxx>, Dario Faggioli <dfaggioli@xxxxxxxx>, Julien Grall <julien.grall@xxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxxx
 
- Delivery-date: Wed, 25 Sep 2019 13:07:37 +0000
 
- List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
 
 
 
On 24.09.19 13:55, Jan Beulich wrote:
 
On 14.09.2019 10:52, Juergen Gross wrote:
 
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -724,8 +724,10 @@ void sched_destroy_domain(struct domain *d)
      }
  }
  
-void vcpu_sleep_nosync_locked(struct vcpu *v)
+static void vcpu_sleep_nosync_locked(struct vcpu *v)
  {
+    struct sched_unit *unit = v->sched_unit;
+
      ASSERT(spin_is_locked(get_sched_res(v->processor)->schedule_lock));
  
      if ( likely(!vcpu_runnable(v)) )
@@ -733,7 +735,14 @@ void vcpu_sleep_nosync_locked(struct vcpu *v)
          if ( v->runstate.state == RUNSTATE_runnable )
              vcpu_runstate_change(v, RUNSTATE_offline, NOW());
  
-        sched_sleep(vcpu_scheduler(v), v->sched_unit);
+        if ( likely(!unit_runnable(unit)) )
+            sched_sleep(vcpu_scheduler(v), unit);
 
 
unit_scheduler(unit) (also elsewhere)?
 
 
Yes.
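Something along these lines then (sketch only, exact form to be settled in
v4), using the existing unit_scheduler() helper instead of looking the
scheduler up via the vcpu:

        if ( likely(!unit_runnable(unit)) )
            sched_sleep(unit_scheduler(unit), unit);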
 
 
@@ -765,16 +774,22 @@ void vcpu_wake(struct vcpu *v)
  {
      unsigned long flags;
      spinlock_t *lock;
+    struct sched_unit *unit = v->sched_unit;
  
      TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
  
-    lock = unit_schedule_lock_irqsave(v->sched_unit, &flags);
+    lock = unit_schedule_lock_irqsave(unit, &flags);
   
      if ( likely(vcpu_runnable(v)) )
      {
          if ( v->runstate.state >= RUNSTATE_blocked )
              vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
-        sched_wake(vcpu_scheduler(v), v->sched_unit);
+        sched_wake(vcpu_scheduler(v), unit);
 
Is this correct / necessary when the unit is not asleep as a whole?
After all, the corresponding sched_sleep() further up is only called
conditionally.
 
 
Oh, indeed. Will change that.
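Probably something like only notifying the scheduler when the unit as a
whole was asleep, mirroring the conditional sched_sleep() above (sketch
only; unit_was_runnable is a hypothetical local capturing the unit's
runnability before this vcpu's wakeup):

        if ( v->runstate.state >= RUNSTATE_blocked )
            vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
        /* Sketch: skip the callback if the matching sched_sleep() was
         * never issued because another vcpu kept the unit runnable. */
        if ( !unit_was_runnable )
            sched_wake(unit_scheduler(unit), unit);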
 
 
@@ -1998,6 +2013,62 @@ static void sched_context_switch(struct vcpu *vprev, struct vcpu *vnext,
      context_switch(vprev, vnext);
  }
  
+/*
+ * Force a context switch of a single vcpu of a unit.
+ * Might be called either if a vcpu of an already running unit is woken up
+ * or if a vcpu of a running unit is put asleep with other vcpus of the same
+ * unit still running.
+ */
+static struct vcpu *sched_force_context_switch(struct vcpu *vprev,
+                                               struct vcpu *v,
+                                               int cpu, s_time_t now)
 
 
unsigned int cpu? (Aiui it's supposed to equal smp_processor_id()
anyway.)
 
 
Yes and yes.
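So the prototype would become something like (sketch):

    static struct vcpu *sched_force_context_switch(struct vcpu *vprev,
                                                   struct vcpu *v,
                                                   unsigned int cpu,
                                                   s_time_t now)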
 
 
+{
+    v->force_context_switch = false;
+
+    if ( vcpu_runnable(v) == v->is_running )
+        return NULL;
 
This and other NULL returns suggest that the comment ahead of the
function might better state what the return value here is / means.
 
 
Okay.
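A possible wording for the extended comment (sketch only, final wording
to be decided):

    /*
     * Force a context switch of a single vcpu of a unit.
     * Might be called either if a vcpu of an already running unit is woken up
     * or if a vcpu of a running unit is put asleep with other vcpus of the
     * same unit still running.
     *
     * Returns either NULL if no context switch is needed (the caller keeps
     * running vprev), or the vcpu to be context switched to on this cpu.
     */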
 
 
+    if ( vcpu_runnable(v) )
+    {
+        if ( is_idle_vcpu(vprev) )
+        {
+            vcpu_runstate_change(vprev, RUNSTATE_runnable, now);
+            vprev->sched_unit = get_sched_res(cpu)->sched_unit_idle;
+        }
+        vcpu_runstate_change(v, RUNSTATE_running, now);
+    }
+    else
+    {
+        /* Make sure not to switch last vcpu of an unit away. */
+        if ( unit_running(v->sched_unit) == 1 )
+            return NULL;
+
+        v->new_state = vcpu_runstate_blocked(v);
+        vcpu_runstate_change(v, v->new_state, now);
+        v = sched_unit2vcpu_cpu(vprev->sched_unit, cpu);
+        if ( v != vprev )
+        {
+            if ( is_idle_vcpu(vprev) )
+            {
+                vcpu_runstate_change(vprev, RUNSTATE_runnable, now);
+                vprev->sched_unit = get_sched_res(cpu)->sched_unit_idle;
+            }
+            else
+            {
+                v->sched_unit = vprev->sched_unit;
+                vcpu_runstate_change(v, RUNSTATE_running, now);
+            }
+        }
+    }
+
+    v->is_running = 1;
 
Besides this wanting to use "true": how come this is unconditional,
despite the function here being used both for waking a vCPU and for
putting one to sleep?
 
 
At that time v is the vcpu which will be running next, so it is either
the one that has just woken up or the idle vcpu. I can add a comment.
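Roughly like this (sketch of the comment plus the "true" fix):

        /*
         * At this point v is the vcpu which will be running next on this
         * cpu: either the one that has just been woken up, or the idle
         * vcpu. Hence is_running is set unconditionally.
         */
        v->is_running = true;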
 
 
@@ -2067,9 +2160,29 @@ static void sched_slave(void)
   
      now = NOW();
  
+    v = unit2vcpu_cpu(prev, cpu);
+    if ( v && v->force_context_switch )
+    {
+        v = sched_force_context_switch(vprev, v, cpu, now);
+
+        if ( v )
+        {
+            pcpu_schedule_unlock_irq(lock, cpu);
 
I can't figure out what it is that guarantees that this unlock isn't
going to be followed ...
 
+            sched_context_switch(vprev, v, false, now);
+        }
+
+        do_softirq = true;
+    }
+
      if ( !prev->rendezvous_in_cnt )
      {
          pcpu_schedule_unlock_irq(lock, cpu);
 
... by another unlock here. Or wait - is sched_context_switch()
(and perhaps other functions involved there) lacking a "noreturn"
annotation?
 
 
Indeed it is. Like context_switch() today. :-)
I'll annotate the functions.
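The annotation would then look roughly like this (sketch; using Xen's
noreturn attribute macro, with the trailing parameter names guessed from
the call site in sched_slave() quoted above):

    static void noreturn sched_context_switch(struct vcpu *vprev,
                                              struct vcpu *vnext,
                                              bool reset_idle_unit,
                                              s_time_t now)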
 
 
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -100,6 +100,11 @@ static inline bool unit_runnable(const struct sched_unit *unit)
      return false;
  }
  
+static inline int vcpu_runstate_blocked(struct vcpu *v)
 
const?
 
 
Yes.
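I.e. (sketch):

    static inline int vcpu_runstate_blocked(const struct vcpu *v)
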
Juergen
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel 
 
    