xen-devel

Re: [Xen-devel] [RFC][PATCH] scheduler: credit scheduler for client virt

Hi George,

George Dunlap wrote:
> I'm working on revising the scheduler right now, so it's probably best
> if you hold off patches for a little while.

OK. I'll wait until you finish your work.

> I'm also trying to understand the minimum that your client workloads
> actually need to run well.  There were components of the "boost"
> patch series that helped your workload:
>  (a) minimum cpu time,
>  (b) Shortened time slices (2ms)
>  (c) "boosted" priority for multimedia domains
>
> Is it possible that having (a) and (b), possibly with some other
> combinations, could work well without adding (c)?

Yes, it is possible.
I divided the rev2 "boost" patch as follows, without (c):
 (1) minimum cpu time (a): boost_1.patch + boost_1_tools.patch
 (2) Shortened time slices (b): boost_2.patch
 (3) alternative "boost" mechanism by boost_credit: boost_3.patch

These patches work in the following combinations.
  (1), (1)+(2), (1)+(2)+(3)
Please apply these patches in numerical order.
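
To make (1) concrete, here is a rough sketch (not part of the patches) of the arithmetic the csched_acct() hunk below adds: a domain's percent is turned into extra credits each accounting period, split across its active VCPUs, and the same amount is reserved out of the credit pool before the weight-based shares are handed out. The value 300 for CSCHED_CREDITS_PER_ACCT (100 credits per tick, 3 ticks per accounting period) is an assumption about the tree being patched.

# Sketch only -- mirrors the integer arithmetic added to csched_acct() below.
# CSCHED_CREDITS_PER_ACCT = CSCHED_CREDITS_PER_TICK * CSCHED_TICKS_PER_ACCT
# is assumed to be 100 * 3 = 300 here.
CSCHED_CREDITS_PER_ACCT = 300

def percent_credit_per_vcpu(percent, active_vcpu_count):
    """Extra credits each active VCPU receives per accounting period."""
    dom_extra = (percent * CSCHED_CREDITS_PER_ACCT) // 100
    # The real code rounds up the combined (weight + percent) fair share;
    # only the percent contribution is shown here.
    return (dom_extra + (active_vcpu_count - 1)) // active_vcpu_count

# A domain with percent=20 and two active VCPUs gets 30 extra credits per
# VCPU each period, i.e. a guaranteed ~10% of a physical CPU per VCPU.
print(percent_credit_per_vcpu(20, 2))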

Without (3), the problem described in the paper you showed is not solved.

Are these what you want?
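
If it helps, here is a minimal usage sketch, assuming the patched tools are installed (the binding signature comes from the xc.c hunk below and the command-line option from the xm hunk; the domain id and the 20% value are only illustrative):

# Sketch only: set a 20% per-CPU minimum for domain 1 via the patched
# python binding.  The xm equivalent would be: xm sched-credit -d <Domain> -p 20
from xen.lowlevel import xc

xch = xc.xc()
# Only "percent" is passed; csched_dom_cntl() then forces the domain's
# weight to 0, so its share comes entirely from the percent guarantee.
xch.sched_credit_domain_set(1, percent=20)
print(xch.sched_credit_domain_get(1))  # e.g. {'weight': 0, 'cap': 0, 'percent': 20}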

> At any rate, I'm going to start with a revised system that has a
> minimum cpu time, but no "high priority", and see if we can get things
> to work OK without it.
>
> Thanks for your work, BTW -- the scheduler has needed some attention
> for a long time, but I don't think it would have gotten it if you
> hadn't introduced these patches.

Thanks.

Best regards,
Naoki
diff -r 56032cbaf1e8 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Thu Jan 22 10:43:50 2009 +0900
+++ b/xen/common/sched_credit.c Thu Jan 22 13:06:24 2009 +0900
@@ -201,6 +201,7 @@ struct csched_vcpu {
     struct csched_dom *sdom;
     struct vcpu *vcpu;
     atomic_t credit;
+    int prev_credit;
     uint16_t flags;
     int16_t pri;
 #ifdef CSCHED_STATS
@@ -225,6 +226,7 @@ struct csched_dom {
     uint16_t active_vcpu_count;
     uint16_t weight;
     uint16_t cap;
+    uint16_t percent;
 };
 
 /*
@@ -239,6 +241,8 @@ struct csched_private {
     cpumask_t idlers;
     uint32_t weight;
     uint32_t credit;
+    uint32_t percent;
+    uint16_t total_percent;
     int credit_balance;
     uint32_t runq_sort;
     CSCHED_STATS_DEFINE()
@@ -503,6 +507,7 @@ __csched_vcpu_acct_start_locked(struct c
         {
             list_add(&sdom->active_sdom_elem, &csched_priv.active_sdom);
             csched_priv.weight += sdom->weight;
+            csched_priv.percent += sdom->percent;
         }
     }
 }
@@ -525,6 +530,7 @@ __csched_vcpu_acct_stop_locked(struct cs
         BUG_ON( csched_priv.weight < sdom->weight );
         list_del_init(&sdom->active_sdom_elem);
         csched_priv.weight -= sdom->weight;
+        csched_priv.percent -= sdom->percent;
     }
 }
 
@@ -579,6 +585,7 @@ csched_vcpu_init(struct vcpu *vc)
     svc->sdom = sdom;
     svc->vcpu = vc;
     atomic_set(&svc->credit, 0);
+    svc->prev_credit = 0;
     svc->flags = 0U;
     svc->pri = is_idle_domain(dom) ? CSCHED_PRI_IDLE : CSCHED_PRI_TS_UNDER;
     CSCHED_VCPU_STATS_RESET(svc);
@@ -712,25 +719,56 @@ csched_dom_cntl(
     {
         op->u.credit.weight = sdom->weight;
         op->u.credit.cap = sdom->cap;
+        op->u.credit.percent = sdom->percent;
     }
     else
     {
+        uint16_t weight = (uint16_t)~0U;
+
         ASSERT(op->cmd == XEN_DOMCTL_SCHEDOP_putinfo);
 
         spin_lock_irqsave(&csched_priv.lock, flags);
 
-        if ( op->u.credit.weight != 0 )
+        if ( (op->u.credit.weight != 0) &&
+             (sdom->percent == 0 || op->u.credit.percent == 0) )
+        {
+            weight = op->u.credit.weight;
+        }
+
+        if ( op->u.credit.cap != (uint16_t)~0U )
+            sdom->cap = op->u.credit.cap;
+
+        if ( (op->u.credit.percent != (uint16_t)~0U) &&
+             ((csched_priv.total_percent - sdom->percent +
+               op->u.credit.percent) <= 100 * csched_priv.ncpus) )
+        {
+            csched_priv.total_percent -= sdom->percent;
+            csched_priv.total_percent += op->u.credit.percent;
+
+            if ( !list_empty(&sdom->active_sdom_elem) )
+            {
+                csched_priv.percent -= sdom->percent;
+                csched_priv.percent += op->u.credit.percent;
+            }
+            sdom->percent = op->u.credit.percent;
+            if ( sdom->percent == 0 )
+            {
+                if ( sdom->weight == 0 )
+                    weight = CSCHED_DEFAULT_WEIGHT;
+            }
+            else
+                weight = 0;
+        }
+
+        if ( weight != (uint16_t)~0U )
         {
             if ( !list_empty(&sdom->active_sdom_elem) )
             {
                 csched_priv.weight -= sdom->weight;
-                csched_priv.weight += op->u.credit.weight;
+                csched_priv.weight += weight;
             }
-            sdom->weight = op->u.credit.weight;
-        }
-
-        if ( op->u.credit.cap != (uint16_t)~0U )
-            sdom->cap = op->u.credit.cap;
+            sdom->weight = weight;
+        }
 
         spin_unlock_irqrestore(&csched_priv.lock, flags);
     }
@@ -759,6 +797,7 @@ csched_dom_init(struct domain *dom)
     sdom->dom = dom;
     sdom->weight = CSCHED_DEFAULT_WEIGHT;
     sdom->cap = 0U;
+    sdom->percent = 0U;
     dom->sched_priv = sdom;
 
     return 0;
@@ -831,6 +870,7 @@ csched_acct(void)
     struct csched_dom *sdom;
     uint32_t credit_total;
     uint32_t weight_total;
+    uint32_t percent_credit;
     uint32_t weight_left;
     uint32_t credit_fair;
     uint32_t credit_peak;
@@ -857,6 +897,7 @@ csched_acct(void)
 
     weight_total = csched_priv.weight;
     credit_total = csched_priv.credit;
+    percent_credit = csched_priv.percent * CSCHED_CREDITS_PER_TSLICE / 100;
 
     /* Converge balance towards 0 when it drops negative */
     if ( csched_priv.credit_balance < 0 )
@@ -865,7 +906,7 @@ csched_acct(void)
         CSCHED_STAT_CRANK(acct_balance);
     }
 
-    if ( unlikely(weight_total == 0) )
+    if ( unlikely(weight_total == 0 && percent_credit == 0) )
     {
         csched_priv.credit_balance = 0;
         spin_unlock_irqrestore(&csched_priv.lock, flags);
@@ -880,22 +921,44 @@ csched_acct(void)
     credit_xtra = 0;
     credit_cap = 0U;
 
+    /* Firstly, subtract percent_credit from credit_total. */
+    if ( percent_credit != 0 )
+    {
+        credit_total -= percent_credit;
+        credit_balance += percent_credit;
+    }
+
+    /* Avoid division by zero */
+    if ( weight_total == 0 )
+        weight_total = 1;
+
     list_for_each_safe( iter_sdom, next_sdom, &csched_priv.active_sdom )
     {
         sdom = list_entry(iter_sdom, struct csched_dom, active_sdom_elem);
 
         BUG_ON( is_idle_domain(sdom->dom) );
         BUG_ON( sdom->active_vcpu_count == 0 );
-        BUG_ON( sdom->weight == 0 );
+        BUG_ON( sdom->weight == 0 && sdom->percent == 0 );
         BUG_ON( sdom->weight > weight_left );
 
-        /* Compute the average of active VCPUs. */
+        /*
+         * Compute the average of active VCPUs
+         * and adjust credit for comsumption too much.
+         */
         credit_sum = 0;
         list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
         {
+            int adjust;
+
             svc = list_entry(iter_vcpu, struct csched_vcpu, active_vcpu_elem);
             BUG_ON( sdom != svc->sdom );
 
+            credit = atomic_read(&svc->credit);
+            adjust = svc->prev_credit - credit - CSCHED_CREDITS_PER_TSLICE;
+            if ( adjust > 0 )
+            {
+                atomic_add(adjust, &svc->credit);
+            }
             credit_sum += atomic_read(&svc->credit);
         }
         credit_average = ( credit_sum + (sdom->active_vcpu_count - 1)
@@ -934,7 +997,9 @@ csched_acct(void)
 
         if ( credit_fair < credit_peak )
         {
-            credit_xtra = 1;
+            /* credit_fair is 0 if weight is 0. */
+            if ( sdom->weight != 0 )
+                credit_xtra = 1;
         }
         else
         {
@@ -963,9 +1028,9 @@ csched_acct(void)
         }
 
         /* Compute fair share per VCPU */
+        credit_fair += (sdom->percent * CSCHED_CREDITS_PER_ACCT)/100;
         credit_fair = ( credit_fair + ( sdom->active_vcpu_count - 1 )
                       ) / sdom->active_vcpu_count;
-
 
         list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
         {
@@ -1029,6 +1094,9 @@ csched_acct(void)
                 }
             }
 
+            /* save credit for adjustment */
+            svc->prev_credit = credit;
+
             CSCHED_VCPU_STAT_SET(svc, credit_last, credit);
             CSCHED_VCPU_STAT_SET(svc, credit_incr, credit_fair);
             credit_balance += credit;
@@ -1282,7 +1350,10 @@ csched_dump_vcpu(struct csched_vcpu *svc
 
     if ( sdom )
     {
-        printk(" credit=%i [w=%u]", atomic_read(&svc->credit), sdom->weight);
+        printk(" credit=%i [w=%u,p=%u]",
+               atomic_read(&svc->credit),
+               sdom->weight,
+               sdom->percent);
 #ifdef CSCHED_STATS
         printk(" (%d+%u) {a/i=%u/%u m=%u+%u}",
                 svc->stats.credit_last,
@@ -1348,6 +1419,8 @@ csched_dump(void)
            "\tcredit balance     = %d\n"
            "\tweight             = %u\n"
            "\trunq_sort          = %u\n"
+           "\tpercent            = %u\n"
+           "\ttotal_percent      = %u\n"
            "\tdefault-weight     = %d\n"
            "\tmsecs per tick     = %dms\n"
            "\tcredits per tick   = %d\n"
@@ -1359,6 +1432,8 @@ csched_dump(void)
            csched_priv.credit_balance,
            csched_priv.weight,
            csched_priv.runq_sort,
+           csched_priv.percent,
+           csched_priv.total_percent,
            CSCHED_DEFAULT_WEIGHT,
            CSCHED_MSECS_PER_TICK,
            CSCHED_CREDITS_PER_TICK,
@@ -1412,6 +1487,8 @@ csched_init(void)
     csched_priv.credit = 0U;
     csched_priv.credit_balance = 0;
     csched_priv.runq_sort = 0U;
+    csched_priv.percent = 0;
+    csched_priv.total_percent = 0;
     CSCHED_STATS_RESET();
 }
 
diff -r 56032cbaf1e8 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Thu Jan 22 10:43:50 2009 +0900
+++ b/xen/include/public/domctl.h       Thu Jan 22 13:06:24 2009 +0900
@@ -311,6 +311,7 @@ struct xen_domctl_scheduler_op {
         struct xen_domctl_sched_credit {
             uint16_t weight;
             uint16_t cap;
+            uint16_t percent;
         } credit;
     } u;
 };
diff -r 56032cbaf1e8 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Jan 22 10:43:50 2009 +0900
+++ b/tools/python/xen/lowlevel/xc/xc.c Thu Jan 22 12:31:18 2009 +0900
@@ -1285,18 +1285,21 @@ static PyObject *pyxc_sched_credit_domai
     uint32_t domid;
     uint16_t weight;
     uint16_t cap;
-    static char *kwd_list[] = { "domid", "weight", "cap", NULL };
-    static char kwd_type[] = "I|HH";
+    uint16_t percent;
+    static char *kwd_list[] = { "domid", "weight", "cap", "percent", NULL };
+    static char kwd_type[] = "I|HHh";
     struct xen_domctl_sched_credit sdom;
     
     weight = 0;
     cap = (uint16_t)~0U;
+    percent = (uint16_t)~0U;
     if( !PyArg_ParseTupleAndKeywords(args, kwds, kwd_type, kwd_list, 
-                                     &domid, &weight, &cap) )
+                                     &domid, &weight, &cap, &percent) )
         return NULL;
 
     sdom.weight = weight;
     sdom.cap = cap;
+    sdom.percent = percent;
 
     if ( xc_sched_credit_domain_set(self->xc_handle, domid, &sdom) != 0 )
         return pyxc_error_to_exception();
@@ -1316,9 +1319,10 @@ static PyObject *pyxc_sched_credit_domai
     if ( xc_sched_credit_domain_get(self->xc_handle, domid, &sdom) != 0 )
         return pyxc_error_to_exception();
 
-    return Py_BuildValue("{s:H,s:H}",
+    return Py_BuildValue("{s:H,s:H,s:i}",
                          "weight",  sdom.weight,
-                         "cap",     sdom.cap);
+                         "cap",     sdom.cap,
+                         "percent", sdom.percent);
 }
 
 static PyObject *pyxc_domain_setmaxmem(XcObject *self, PyObject *args)
@@ -1744,6 +1748,8 @@ static PyMethodDef pyxc_methods[] = {
       "SMP credit scheduler.\n"
       " domid     [int]:   domain id to set\n"
       " weight    [short]: domain's scheduling weight\n"
+      " cap       [short]: cap\n"
+      " percent   [short]; domain's scheduling percentage per a cpu\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
     { "sched_credit_domain_get",
@@ -1753,7 +1759,9 @@ static PyMethodDef pyxc_methods[] = {
       "SMP credit scheduler.\n"
       " domid     [int]:   domain id to get\n"
       "Returns:   [dict]\n"
-      " weight    [short]: domain's scheduling weight\n"},
+      " weight    [short]: domain's scheduling weight\n"
+      " cap       [short]: cap\n"
+      " percent   [short]: domain's scheduling percentage per a cpu\n"},
 
     { "evtchn_alloc_unbound", 
       (PyCFunction)pyxc_evtchn_alloc_unbound,
diff -r 56032cbaf1e8 tools/python/xen/xend/XendAPI.py
--- a/tools/python/xen/xend/XendAPI.py  Thu Jan 22 10:43:50 2009 +0900
+++ b/tools/python/xen/xend/XendAPI.py  Thu Jan 22 12:31:18 2009 +0900
@@ -1505,10 +1505,12 @@ class XendAPI(object):
 
         #need to update sched params aswell
         if 'weight' in xeninfo.info['vcpus_params'] \
-           and 'cap' in xeninfo.info['vcpus_params']:
+           and 'cap' in xeninfo.info['vcpus_params'] \
+           and 'percent' in xeninfo.info['vcpus_params']:
             weight = xeninfo.info['vcpus_params']['weight']
             cap = xeninfo.info['vcpus_params']['cap']
-            xendom.domain_sched_credit_set(xeninfo.getDomid(), weight, cap)
+            percent = xeninfo.info['vcpus_params']['percent']
+            xendom.domain_sched_credit_set(xeninfo.getDomid(), weight, cap, percent)
 
     def VM_set_VCPUs_number_live(self, _, vm_ref, num):
         dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
diff -r 56032cbaf1e8 tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py       Thu Jan 22 10:43:50 2009 +0900
+++ b/tools/python/xen/xend/XendConfig.py       Thu Jan 22 12:31:18 2009 +0900
@@ -591,6 +591,8 @@ class XendConfig(dict):
             int(sxp.child_value(sxp_cfg, "cpu_weight", 256))
         cfg["vcpus_params"]["cap"] = \
             int(sxp.child_value(sxp_cfg, "cpu_cap", 0))
+        cfg["vcpus_params"]["percent"] = \
+            int(sxp.child_value(sxp_cfg, "cpu_percent", 0))
 
         # Only extract options we know about.
         extract_keys = LEGACY_UNSUPPORTED_BY_XENAPI_CFG + \
diff -r 56032cbaf1e8 tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py       Thu Jan 22 10:43:50 2009 +0900
+++ b/tools/python/xen/xend/XendDomain.py       Thu Jan 22 12:31:18 2009 +0900
@@ -1536,7 +1536,7 @@ class XendDomain:
 
         @param domid: Domain ID or Name
         @type domid: int or string.
-        @rtype: dict with keys 'weight' and 'cap'
+        @rtype: dict with keys 'weight' and 'cap' and 'percent'
         @return: credit scheduler parameters
         """
         dominfo = self.domain_lookup_nr(domid)
@@ -1550,19 +1550,22 @@ class XendDomain:
                 raise XendError(str(ex))
         else:
             return {'weight' : dominfo.getWeight(),
-                    'cap'    : dominfo.getCap()} 
+                    'cap'    : dominfo.getCap(),
+                    'percent': dominfo.getPercent()} 
     
-    def domain_sched_credit_set(self, domid, weight = None, cap = None):
+    def domain_sched_credit_set(self, domid, weight = None, cap = None, percent = None):
         """Set credit scheduler parameters for a domain.
 
         @param domid: Domain ID or Name
         @type domid: int or string.
         @type weight: int
         @type cap: int
+        @type percent: int
         @rtype: 0
         """
         set_weight = False
         set_cap = False
+        set_percent = False
         dominfo = self.domain_lookup_nr(domid)
         if not dominfo:
             raise XendInvalidDomain(str(domid))
@@ -1581,17 +1584,27 @@ class XendDomain:
             else:
                 set_cap = True
 
+            if percent is None:
+                percent = int(~0)
+            elif percent < 0:
+                raise XendError("percent is out of range")
+            else:
+                set_percent = True
+
             assert type(weight) == int
             assert type(cap) == int
+            assert type(percent) == int
 
             rc = 0
             if dominfo._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
-                rc = xc.sched_credit_domain_set(dominfo.getDomid(), weight, cap)
+                rc = xc.sched_credit_domain_set(dominfo.getDomid(), weight, cap, percent)
             if rc == 0:
                 if set_weight:
                     dominfo.setWeight(weight)
                 if set_cap:
                     dominfo.setCap(cap)
+                if set_percent:
+                    dominfo.setPercent(percent)
                 self.managed_config_save(dominfo)
             return rc
         except Exception, ex:
diff -r 56032cbaf1e8 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Thu Jan 22 10:43:50 2009 +0900
+++ b/tools/python/xen/xend/XendDomainInfo.py   Thu Jan 22 12:31:18 2009 +0900
@@ -467,7 +467,8 @@ class XendDomainInfo:
                 if xennode.xenschedinfo() == 'credit':
                     xendomains.domain_sched_credit_set(self.getDomid(),
                                                        self.getWeight(),
-                                                       self.getCap())
+                                                       self.getCap(),
+                                                       self.getPercent())
             except:
                 log.exception('VM start failed')
                 self.destroy()
@@ -1705,6 +1706,12 @@ class XendDomainInfo:
     def setWeight(self, cpu_weight):
         self.info['vcpus_params']['weight'] = cpu_weight
 
+    def getPercent(self):
+        return self.info['vcpus_params']['percent']
+
+    def setPercent(self, cpu_percent):
+        self.info['vcpus_params']['percent'] = cpu_percent
+
     def getRestartCount(self):
         return self._readVm('xend/restart_count')
 
diff -r 56032cbaf1e8 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Thu Jan 22 10:43:50 2009 +0900
+++ b/tools/python/xen/xm/main.py       Thu Jan 22 12:31:18 2009 +0900
@@ -150,7 +150,7 @@ SUBCOMMAND_HELP = {
     'log'         : ('', 'Print Xend log'),
     'rename'      : ('<Domain> <NewDomainName>', 'Rename a domain.'),
     'sched-sedf'  : ('<Domain> [options]', 'Get/set EDF parameters.'),
-    'sched-credit': ('[-d <Domain> [-w[=WEIGHT]|-c[=CAP]]]',
+    'sched-credit': ('[-d <Domain> [-w[=WEIGHT]|-c[=CAP]|-p[=PERCENT]]]',
                      'Get/set credit scheduler parameters.'),
     'sysrq'       : ('<Domain> <letter>', 'Send a sysrq to a domain.'),
     'debug-keys'  : ('<Keys>', 'Send debug keys to Xen.'),
@@ -240,6 +240,7 @@ SUBCOMMAND_OPTIONS = {
        ('-d DOMAIN', '--domain=DOMAIN', 'Domain to modify'),
        ('-w WEIGHT', '--weight=WEIGHT', 'Weight (int)'),
        ('-c CAP',    '--cap=CAP',       'Cap (int)'),
+       ('-p PERCENT', '--percent=PERCENT', 'Percent per CPU (int)'),
     ),
     'list': (
        ('-l', '--long',         'Output all VM details in SXP'),
@@ -1578,8 +1579,8 @@ def xm_sched_credit(args):
     check_sched_type('credit')
 
     try:
-        opts, params = getopt.getopt(args, "d:w:c:",
-            ["domain=", "weight=", "cap="])
+        opts, params = getopt.getopt(args, "d:w:c:p:",
+            ["domain=", "weight=", "cap=", "percent="])
     except getopt.GetoptError, opterr:
         err(opterr)
         usage('sched-credit')
@@ -1587,6 +1588,7 @@ def xm_sched_credit(args):
     domid = None
     weight = None
     cap = None
+    percent = None
 
     for o, a in opts:
         if o in ["-d", "--domain"]:
@@ -1594,18 +1596,20 @@ def xm_sched_credit(args):
         elif o in ["-w", "--weight"]:
             weight = int(a)
         elif o in ["-c", "--cap"]:
-            cap = int(a);
+            cap = int(a)
+        elif o in ["-p", "--percent"]:
+            percent = int(a)
 
     doms = filter(lambda x : domid_match(domid, x),
                   [parse_doms_info(dom)
                   for dom in getDomains(None, 'all')])
 
-    if weight is None and cap is None:
+    if weight is None and cap is None and percent is None:
         if domid is not None and doms == []: 
             err("Domain '%s' does not exist." % domid)
             usage('sched-credit')
         # print header if we aren't setting any parameters
-        print '%-33s %4s %6s %4s' % ('Name','ID','Weight','Cap')
+        print '%-33s %4s %6s %4s %7s' % ('Name','ID','Weight','Cap','Percent')
         
         for d in doms:
             try:
@@ -1618,16 +1622,17 @@ def xm_sched_credit(args):
             except xmlrpclib.Fault:
                 pass
 
-            if 'weight' not in info or 'cap' not in info:
+            if 'weight' not in info or 'cap' not in info or 'percent' not in info:
                 # domain does not support sched-credit?
-                info = {'weight': -1, 'cap': -1}
+                info = {'weight': -1, 'cap': -1, 'percent': -1}
 
             info['weight'] = int(info['weight'])
             info['cap']    = int(info['cap'])
+            info['percent'] = int(info['percent'])
             
             info['name']  = d['name']
             info['domid'] = str(d['domid'])
-            print( ("%(name)-32s %(domid)5s %(weight)6d %(cap)4d") % info)
+            print( ("%(name)-32s %(domid)5s %(weight)6d %(cap)4d 
%(percent)6d") % info)
     else:
         if domid is None:
             # place holder for system-wide scheduler parameters
@@ -1644,6 +1649,10 @@ def xm_sched_credit(args):
                     get_single_vm(domid),
                     "cap",
                     cap)
+                server.xenapi.VM.add_to_VCPUs_params_live(
+                    get_single_vm(domid),
+                    "percent",
+                    percent)
             else:
                 server.xenapi.VM.add_to_VCPUs_params(
                     get_single_vm(domid),
@@ -1653,8 +1662,12 @@ def xm_sched_credit(args):
                     get_single_vm(domid),
                     "cap",
                     cap)
+                server.xenapi.VM.add_to_VCPUs_params(
+                    get_single_vm(domid),
+                    "percent",
+                    percent)
         else:
-            result = server.xend.domain.sched_credit_set(domid, weight, cap)
+            result = server.xend.domain.sched_credit_set(domid, weight, cap, percent)
             if result != 0:
                 err(str(result))
 
diff -r 116e2691c071 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Thu Jan 22 13:16:49 2009 +0900
+++ b/xen/common/sched_credit.c Thu Jan 22 13:21:28 2009 +0900
@@ -47,6 +47,7 @@
     (CSCHED_CREDITS_PER_TICK * CSCHED_TICKS_PER_TSLICE)
 #define CSCHED_CREDITS_PER_ACCT     \
     (CSCHED_CREDITS_PER_TICK * CSCHED_TICKS_PER_ACCT)
+#define CSCHED_MSECS_PER_BOOST_TSLICE 2
 
 
 /*
@@ -245,6 +246,7 @@ struct csched_private {
     uint16_t total_percent;
     int credit_balance;
     uint32_t runq_sort;
+    s_time_t boost_tslice;
     CSCHED_STATS_DEFINE()
 };
 
@@ -253,6 +255,10 @@ struct csched_private {
  * Global variables
  */
 static struct csched_private csched_priv;
+
+/* opt_credit_tslice: time slice for BOOST priority */
+static unsigned int opt_credit_tslice = CSCHED_MSECS_PER_BOOST_TSLICE;
+integer_param("credit_tslice", opt_credit_tslice);
 
 static void csched_tick(void *_cpu);
 
@@ -1327,7 +1333,17 @@ csched_schedule(s_time_t now)
     /*
      * Return task to run next...
      */
-    ret.time = MILLISECS(CSCHED_MSECS_PER_TSLICE);
+    if ( snext->pri == CSCHED_PRI_TS_BOOST )
+    {
+        struct csched_vcpu * const svc = __runq_elem(runq->next);
+
+        if ( svc->pri == CSCHED_PRI_TS_BOOST )
+            ret.time = csched_priv.boost_tslice;
+        else
+            ret.time = MILLISECS(CSCHED_MSECS_PER_TICK);
+    }
+    else
+        ret.time = MILLISECS(CSCHED_MSECS_PER_TSLICE);
     ret.task = snext->vcpu;
 
     spc->start_time = now;
@@ -1489,6 +1505,7 @@ csched_init(void)
     csched_priv.runq_sort = 0U;
     csched_priv.percent = 0;
     csched_priv.total_percent = 0;
+    csched_priv.boost_tslice = MILLISECS(opt_credit_tslice);
     CSCHED_STATS_RESET();
 }
 
diff -r 64618a20b9de xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Thu Jan 22 13:22:54 2009 +0900
+++ b/xen/common/sched_credit.c Thu Jan 22 13:42:04 2009 +0900
@@ -202,6 +202,7 @@ struct csched_vcpu {
     struct csched_dom *sdom;
     struct vcpu *vcpu;
     atomic_t credit;
+    atomic_t boost_credit;
     int prev_credit;
     uint16_t flags;
     int16_t pri;
@@ -549,14 +550,6 @@ csched_vcpu_acct(unsigned int cpu)
     ASSERT( svc->sdom != NULL );
 
     /*
-     * If this VCPU's priority was boosted when it last awoke, reset it.
-     * If the VCPU is found here, then it's consuming a non-negligeable
-     * amount of CPU resources and should no longer be boosted.
-     */
-    if ( svc->pri == CSCHED_PRI_TS_BOOST )
-        svc->pri = CSCHED_PRI_TS_UNDER;
-
-    /*
      * If it's been active a while, check if we'd be better off
      * migrating it to run elsewhere (see multi-core and multi-thread
      * support in csched_cpu_pick()).
@@ -591,6 +584,7 @@ csched_vcpu_init(struct vcpu *vc)
     svc->sdom = sdom;
     svc->vcpu = vc;
     atomic_set(&svc->credit, 0);
+    atomic_set(&svc->boost_credit, 0);
     svc->prev_credit = 0;
     svc->flags = 0U;
     svc->pri = is_idle_domain(dom) ? CSCHED_PRI_IDLE : CSCHED_PRI_TS_UNDER;
@@ -706,6 +700,8 @@ csched_vcpu_wake(struct vcpu *vc)
          !(svc->flags & CSCHED_FLAG_VCPU_PARKED) )
     {
         svc->pri = CSCHED_PRI_TS_BOOST;
+        atomic_add(CSCHED_CREDITS_PER_TICK, &svc->boost_credit);
+        atomic_sub(CSCHED_CREDITS_PER_TICK, &svc->credit);
     }
 
     /* Put the VCPU on the runq and tickle CPUs */
@@ -954,11 +950,14 @@ csched_acct(void)
         credit_sum = 0;
         list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
         {
-            int adjust;
+            int adjust, boost_credit;
 
             svc = list_entry(iter_vcpu, struct csched_vcpu, active_vcpu_elem);
             BUG_ON( sdom != svc->sdom );
 
+            boost_credit = atomic_read(&svc->boost_credit);
+            atomic_set(&svc->boost_credit, 0);
+            atomic_add(boost_credit, &svc->credit);
             credit = atomic_read(&svc->credit);
             adjust = svc->prev_credit - credit - CSCHED_CREDITS_PER_TSLICE;
             if ( adjust > 0 )
@@ -1290,6 +1289,22 @@ csched_schedule(s_time_t now)
                ) /
                ( MILLISECS(CSCHED_MSECS_PER_TSLICE) /
                  CSCHED_CREDITS_PER_TSLICE );
+    if ( scurr->pri == CSCHED_PRI_TS_BOOST )
+    {
+        int boost_credit = atomic_read(&scurr->boost_credit);
+
+        if ( boost_credit > consumed )
+        {
+            atomic_sub(consumed, &scurr->boost_credit);
+            consumed = 0;
+        }
+        else
+        {
+            atomic_sub(boost_credit, &scurr->boost_credit);
+            consumed -= boost_credit;
+            scurr->pri = CSCHED_PRI_TS_UNDER;
+        }
+    }
     if ( consumed > 0 && !is_idle_vcpu(current) )
         atomic_sub(consumed, &scurr->credit);
 
@@ -1366,8 +1381,9 @@ csched_dump_vcpu(struct csched_vcpu *svc
 
     if ( sdom )
     {
-        printk(" credit=%i [w=%u,p=%u]",
+        printk(" credit=%i bc=%i [w=%u,p=%u]",
                atomic_read(&svc->credit),
+               atomic_read(&svc->boost_credit),
                sdom->weight,
                sdom->percent);
 #ifdef CSCHED_STATS
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel