WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [RFC][PATCH 2/4] sched: change the handling of credits over

To: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [RFC][PATCH 2/4] sched: change the handling of credits over upper bound
From: NISHIGUCHI Naoki <nisiguti@xxxxxxxxxxxxxx>
Date: Thu, 18 Dec 2008 12:02:38 +0900
Cc: Ian.Pratt@xxxxxxxxxxxxx, disheng.su@xxxxxxxxx, aviv@xxxxxxxxxxxx, keir.fraser@xxxxxxxxxxxxx, sakaia@xxxxxxxxxxxxxx
Delivery-date: Wed, 17 Dec 2008 19:03:08 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <4949BC2C.4060302@xxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <4949BC2C.4060302@xxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Thunderbird 2.0.0.18 (Windows/20081105)
By applying this patch, the credit scheduler doesn't reset a vcpu's credit (set it to 0) when the credit would exceed the upper bound. This also prevents a vcpu from failing to become active.

The difference between this patch and the previous patch is when a vcpu is put back on the active list. This patch puts a vcpu back on the active list only in csched_acct().

Best regards,
Naoki Nishiguchi
diff -r b431367fc717 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Wed Dec 17 16:00:48 2008 +0900
+++ b/xen/common/sched_credit.c Wed Dec 17 16:01:30 2008 +0900
@@ -197,6 +197,7 @@ struct csched_vcpu {
 struct csched_vcpu {
     struct list_head runq_elem;
     struct list_head active_vcpu_elem;
+    struct list_head inactive_vcpu_elem;
     struct csched_dom *sdom;
     struct vcpu *vcpu;
     atomic_t credit;
@@ -232,6 +233,7 @@ struct csched_private {
 struct csched_private {
     spinlock_t lock;
     struct list_head active_sdom;
+    struct list_head inactive_vcpu;
     uint32_t ncpus;
     unsigned int master;
     cpumask_t idlers;
@@ -485,12 +487,9 @@ csched_cpu_pick(struct vcpu *vc)
 }
 
 static inline void
-__csched_vcpu_acct_start(struct csched_vcpu *svc)
+__csched_vcpu_acct_start_locked(struct csched_vcpu *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
-    unsigned long flags;
-
-    spin_lock_irqsave(&csched_priv.lock, flags);
 
     if ( list_empty(&svc->active_vcpu_elem) )
     {
@@ -499,14 +498,13 @@ __csched_vcpu_acct_start(struct csched_v
 
         sdom->active_vcpu_count++;
         list_add(&svc->active_vcpu_elem, &sdom->active_vcpu);
+        list_del_init(&svc->inactive_vcpu_elem);
         if ( list_empty(&sdom->active_sdom_elem) )
         {
             list_add(&sdom->active_sdom_elem, &csched_priv.active_sdom);
             csched_priv.weight += sdom->weight;
         }
     }
-
-    spin_unlock_irqrestore(&csched_priv.lock, flags);
 }
 
 static inline void
@@ -521,6 +519,7 @@ __csched_vcpu_acct_stop_locked(struct cs
 
     sdom->active_vcpu_count--;
     list_del_init(&svc->active_vcpu_elem);
+    list_add(&svc->inactive_vcpu_elem, &csched_priv.inactive_vcpu);
     if ( list_empty(&sdom->active_vcpu) )
     {
         BUG_ON( csched_priv.weight < sdom->weight );
@@ -546,18 +545,12 @@ csched_vcpu_acct(unsigned int cpu)
         svc->pri = CSCHED_PRI_TS_UNDER;
 
     /*
-     * Put this VCPU and domain back on the active list if it was
-     * idling.
-     *
      * If it's been active a while, check if we'd be better off
      * migrating it to run elsewhere (see multi-core and multi-thread
      * support in csched_cpu_pick()).
      */
-    if ( list_empty(&svc->active_vcpu_elem) )
-    {
-        __csched_vcpu_acct_start(svc);
-    }
-    else if ( csched_cpu_pick(current) != cpu )
+    if ( !list_empty(&svc->active_vcpu_elem) &&
+         csched_cpu_pick(current) != cpu )
     {
         CSCHED_VCPU_STAT_CRANK(svc, migrate_r);
         CSCHED_STAT_CRANK(migrate_running);
@@ -582,6 +575,7 @@ csched_vcpu_init(struct vcpu *vc)
 
     INIT_LIST_HEAD(&svc->runq_elem);
     INIT_LIST_HEAD(&svc->active_vcpu_elem);
+    INIT_LIST_HEAD(&svc->inactive_vcpu_elem);
     svc->sdom = sdom;
     svc->vcpu = vc;
     atomic_set(&svc->credit, 0);
@@ -597,6 +591,16 @@ csched_vcpu_init(struct vcpu *vc)
             return -1;
     }
 
+    /* Add vcpu to inactive queue in order to start acct */
+    if ( !is_idle_vcpu(vc) )
+    {
+        unsigned long flags;
+
+        spin_lock_irqsave(&csched_priv.lock, flags);
+        list_add(&svc->inactive_vcpu_elem, &csched_priv.inactive_vcpu);
+        spin_unlock_irqrestore(&csched_priv.lock, flags);
+    }
+
     CSCHED_VCPU_CHECK(vc);
     return 0;
 }
@@ -617,6 +621,9 @@ csched_vcpu_destroy(struct vcpu *vc)
 
     if ( !list_empty(&svc->active_vcpu_elem) )
         __csched_vcpu_acct_stop_locked(svc);
+
+    if ( !list_empty(&svc->inactive_vcpu_elem) )
+        list_del_init(&svc->inactive_vcpu_elem);
 
     spin_unlock_irqrestore(&csched_priv.lock, flags);
 
@@ -835,6 +842,18 @@ csched_acct(void)
 
     spin_lock_irqsave(&csched_priv.lock, flags);
 
+    /* Add vcpu to active list when its credit were consumed by one tick. */
+    list_for_each_safe( iter_vcpu, next_vcpu, &csched_priv.inactive_vcpu )
+    {
+        svc = list_entry(iter_vcpu, struct csched_vcpu, inactive_vcpu_elem);
+
+        if ( atomic_read(&svc->credit)
+             <= CSCHED_CREDITS_PER_TICK * (CSCHED_TICKS_PER_ACCT - 1) )
+        {
+            __csched_vcpu_acct_start_locked(svc);
+        }
+    }
+
     weight_total = csched_priv.weight;
     credit_total = csched_priv.credit;
 
@@ -991,7 +1010,7 @@ csched_acct(void)
                 if ( credit > CSCHED_CREDITS_PER_TSLICE )
                 {
                     __csched_vcpu_acct_stop_locked(svc);
-                    credit = 0;
+                    credit = CSCHED_CREDITS_PER_TSLICE;
                     atomic_set(&svc->credit, credit);
                 }
             }
@@ -1353,6 +1372,17 @@ csched_dump(void)
             csched_dump_vcpu(svc);
         }
     }
+
+    printk("inactive vcpus:\n");
+    loop = 0;
+    list_for_each( iter_svc, &csched_priv.inactive_vcpu )
+    {
+        struct csched_vcpu *svc;
+        svc = list_entry(iter_svc, struct csched_vcpu, inactive_vcpu_elem);
+
+        printk("\t%3d: ", ++loop);
+        csched_dump_vcpu(svc);
+    }
 }
 
 static void
@@ -1360,6 +1390,7 @@ csched_init(void)
 {
     spin_lock_init(&csched_priv.lock);
     INIT_LIST_HEAD(&csched_priv.active_sdom);
+    INIT_LIST_HEAD(&csched_priv.inactive_vcpu);
     csched_priv.ncpus = 0;
     csched_priv.master = UINT_MAX;
     cpus_clear(csched_priv.idlers);
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel