
[Xen-devel] [PATCH] 3/4 "nemesis" scheduling domains for Xen



Implements the tool interfaces for scheduling domains: libxenctrl, xm, and xend.

Signed-off-by: Mike D. Day <ncmike@xxxxxxxxxx>

--
libxc/xc_domain.c                   |   85 +++++++++++++++++++++++++++++++++---
libxc/xenctrl.h                     |   43 ++++++++++++++++--
python/xen/xend/XendDomain.py       |   78 +++++++++++++++++++++++++++++++++
python/xen/xend/server/SrvDomain.py |   27 +++++++++++
python/xen/xm/main.py               |   70 +++++++++++++++++++++++++++++
5 files changed, 294 insertions(+), 9 deletions(-)
--
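
For illustration, here is a minimal sketch of how a management tool might
drive the new libxenctrl calls. It is not part of the patch: it assumes the
hypervisor side from patches 1-2/4 of this series is applied, and the domain
IDs (1 as the scheduling domain, 2 as the activation domain) are
hypothetical.

    /* sketch: make domain 2 an activation domain of scheduling domain 1,
     * then read the association and flags back */
    #include <stdio.h>
    #include <stdint.h>
    #include <xenctrl.h>

    int main(void)
    {
        uint16_t reason = 0;
        domid_t sdom;
        uint32_t flags;

        int xc = xc_interface_open();
        if ( xc < 0 )
        {
            perror("xc_interface_open");
            return 1;
        }

        /* attach domain 2 as an activation domain of domain 1; on error,
         * 'reason' carries the scheduler's detailed return code */
        if ( xc_add_adom(xc, 2, 1, &reason) < 0 )
        {
            fprintf(stderr, "xc_add_adom failed, reason=%u\n", reason);
            xc_interface_close(xc);
            return 1;
        }

        /* read the association back */
        if ( xc_get_sdom(xc, 2, &sdom) >= 0 )
            printf("domain 2 is scheduled by domain %u\n", (unsigned)sdom);

        /* query the sdom/adom flags */
        if ( xc_get_sdom_flags(xc, 2, &flags) >= 0 )
            printf("sdom/adom flags: 0x%x\n", (unsigned)flags);

        xc_interface_close(xc);
        return 0;
    }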

diff -r baff9c7cc4b3 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Fri May 04 11:23:25 2007 -0400
+++ b/tools/libxc/xc_domain.c   Fri May 04 17:35:33 2007 -0400
@@ -108,7 +108,7 @@ int xc_vcpu_setaffinity(int xc_handle,
    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);

    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
-
+
    if ( lock_pages(local, sizeof(local)) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
@@ -139,7 +139,7 @@ int xc_vcpu_getaffinity(int xc_handle,

    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
-
+
    if ( lock_pages(local, sizeof(local)) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
@@ -253,13 +253,13 @@ int xc_domain_hvm_getcontext(int xc_hand
    domctl.u.hvmcontext.size = size;
    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);

-    if ( ctxt_buf ) 
+    if ( ctxt_buf )
        if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
            return ret;

    ret = do_domctl(xc_handle, &domctl);

-    if ( ctxt_buf ) 
+    if ( ctxt_buf )
        unlock_pages(ctxt_buf, size);

    return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
@@ -338,8 +338,8 @@ int xc_shadow_control(int xc_handle,
    if ( stats )
        memcpy(stats, &domctl.u.shadow_op.stats,
               sizeof(xc_shadow_op_stats_t));
-
-    if ( mb ) 
+
+    if ( mb )
        *mb = domctl.u.shadow_op.mb;

    return (rc == 0) ? domctl.u.shadow_op.pages : rc;
@@ -696,6 +696,79 @@ int xc_get_hvm_param(int handle, domid_t
    return rc;
}

+int xc_add_adom(int handle, domid_t adom, domid_t sdom, uint16_t *reason)
+{
+    int ret;
+
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_sdom;
+    domctl.u.scheduler_op.u.sdom.op = SDOM_add_adom;
+    domctl.u.scheduler_op.u.sdom.adom = adom;
+    domctl.u.scheduler_op.u.sdom.sdom = sdom;
+    ret = do_domctl(handle, &domctl);
+    if (ret < 0 && reason)
+        *reason = domctl.u.scheduler_op.u.sdom.reason;
+    return ret;
+}
+
+int xc_del_adom(int handle, domid_t adom, domid_t sdom, uint16_t *reason)
+{
+    int ret;
+
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_sdom;
+    domctl.u.scheduler_op.u.sdom.op = SDOM_del_adom;
+    domctl.u.scheduler_op.u.sdom.adom = adom;
+    domctl.u.scheduler_op.u.sdom.sdom = sdom;
+    ret = do_domctl(handle, &domctl);
+    if (ret < 0 && reason)
+        *reason = domctl.u.scheduler_op.u.sdom.reason;
+    return ret;
+}
+
+int xc_get_sdom(int handle, domid_t adom, domid_t *sdom)
+{
+    int ret;
+    DECLARE_DOMCTL;
+
+    if (sdom == NULL)
+        return -EINVAL;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_sdom;
+    domctl.u.scheduler_op.u.sdom.op = SDOM_get_sdom;
+    domctl.u.scheduler_op.u.sdom.adom = adom;
+    ret = do_domctl(handle, &domctl);
+    if (ret >= 0)
+        *sdom = domctl.u.scheduler_op.u.sdom.sdom;
+    return ret;
+}
+
+int xc_get_sdom_flags(int handle, domid_t dom, uint32_t *flags)
+{
+    int ret;
+    DECLARE_DOMCTL;
+
+    if (flags == NULL)
+        return -EINVAL;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_sdom;
+    domctl.u.scheduler_op.u.sdom.op = SDOM_get_flags;
+    domctl.u.scheduler_op.u.sdom.sdom = dom;
+    ret = do_domctl(handle, &domctl);
+    if (ret >= 0)
+        *flags = domctl.u.scheduler_op.u.sdom.flags;
+    return ret;
+}
+
/*
 * Local variables:
 * mode: C
diff -r baff9c7cc4b3 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Fri May 04 11:23:25 2007 -0400
+++ b/tools/libxc/xenctrl.h     Fri May 04 17:38:00 2007 -0400
@@ -425,7 +425,7 @@ int xc_sched_credit_domain_get(int xc_ha
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to send trigger
 * @parm trigger the trigger type
- * @parm vcpu the vcpu number to send trigger 
+ * @parm vcpu the vcpu number to send trigger
 * return 0 on success, -1 on failure
 */
int xc_domain_send_trigger(int xc_handle,
@@ -441,7 +441,7 @@ int xc_domain_send_trigger(int xc_handle
 * This function allocates an unbound port.  Ports are named endpoints used for
 * interdomain communication.  This function is most useful in opening a
 * well-known port within a domain to receive events on.
- * 
+ *
 * NOTE: If you are allocating a *local* unbound port, you probably want to
 * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
 * ports *only* during domain creation.
@@ -718,7 +718,7 @@ evtchn_port_t xc_evtchn_bind_virq(int xc

/*
 * Return the next event channel to become pending, or -1 on failure, in which
- * case errno will be set appropriately. 
+ * case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_pending(int xce_handle);

@@ -843,6 +843,43 @@ int xc_set_hvm_param(int handle, domid_t
int xc_set_hvm_param(int handle, domid_t dom, int param, unsigned long value);
int xc_get_hvm_param(int handle, domid_t dom, int param, unsigned long *value);

+/**
+ * Adds an activation domain to a scheduling domain.
+ * The activation domain must not already be in a different scheduling domain,
+ * and the scheduling domain must not already be an activation domain.
+ *
+ * @parm adom the domain that will be added as an activation domain
+ * @parm sdom the domain that will be the scheduling domain
+ * @parm reason holds the detailed return code in case of error
+ */
+int xc_add_adom(int handle, domid_t adom, domid_t sdom, uint16_t *reason);
+
+/**
+ * Removes an activation domain from a scheduling domain.
+ *
+ * @parm adom the domain that will be removed as an activation domain
+ * @parm sdom from which the activation domain will be removed
+ * @parm reason holds the detailed return code in case of error
+ */
+int xc_del_adom(int handle, domid_t adom, domid_t sdom, uint16_t *reason);
+
+/**
+ * Returns the scheduling domain for an activation domain
+ *
+ * @parm adom the activation domain
+ * @parm sdom location to hold the return value
+ */
+int xc_get_sdom(int handle, domid_t adom, domid_t *sdom);
+
+/**
+ * Returns the scheduling domain flags of an sdom or adom
+ *
+ * @parm dom the domain, which must be either an activation domain
+ *     or a scheduling domain.
+ * @parm flags location to hold the return value
+ */
+int xc_get_sdom_flags(int handle, domid_t dom, uint32_t *flags);
+
/* PowerPC specific. */
int xc_alloc_real_mode_area(int xc_handle,
                            uint32_t domid,
diff -r baff9c7cc4b3 tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py       Fri May 04 11:23:25 2007 -0400
+++ b/tools/python/xen/xend/XendDomain.py       Fri May 04 14:26:45 2007 -0400
@@ -1424,6 +1424,84 @@ class XendDomain:
            log.exception(ex)
            raise XendError(str(ex))

+    def domain_add_adom(self, adom, sdom):
+        """ Ad an activation domain to a scheduling domain.
+
+        @param adom: activation domain ID
+        @type adom: int or string
+        @param sdom: scheduling domain ID
+        @type sdom: int or string
+        @rtype: int
+        """
+        adominfo = self.domain_lookup_nr(adom)
+        if not adominfo:
+            raise XendInvalidDomain(str(adom))
+        sdominfo = self.domain_lookup_nr(sdom)
+        if not sdominfo:
+            raise XendInvalidDomain(str(sdom))
+        try:
+            return xc.add_adom(adominfo.getDomid(), sdominfo.getDomid())
+        except Exception, ex:
+            log.exception(ex)
+            raise XendError(str(ex))
+
+    def domain_del_adom(self, adom, sdom):
+        """ Remove an activation domain from a scheduling domain.
+
+        @param adom: activation domain ID
+        @type adom: int or string
+        @param sdom: scheduling domain ID
+        @type sdom: int or string
+        @rtype: int
+        """
+        adominfo = self.domain_lookup_nr(adom)
+        if not adominfo:
+            raise XendInvalidDomain(str(adom))
+        sdominfo = self.domain_lookup_nr(sdom)
+        if not sdominfo:
+            raise XendInvalidDomain(str(sdom))
+        adom = adominfo.getDomid()
+        sdom = sdominfo.getDomid()
+        try:
+            return xc.del_adom(adom, sdom)
+        except Exception, ex:
+            log.exception(ex)
+            raise XendError(str(ex))
+
+    def domain_get_sdom(self, adom):
+        """ Get scheduling domain for an activation domain.
+
+        @param adom: activation domain ID
+        @type adom: int or string
+        @return: Domain ID of scheduling domain
+        @rtype: int
+        """
+        adominfo = self.domain_lookup_nr(adom)
+        if not adominfo:
+            raise XendInvalidDomain(str(adom))
+        try:
+            return xc.get_sdom(adominfo.getDomid())
+        except Exception, ex:
+            log.exception(ex)
+            raise XendError(str(ex))
+
+    def domain_get_sdom_flags(self, dom):
+        """ Get domain Scheduling/Activation flags.
+
+        @param dom: domain ID
+        @type dom: int or string
+        @return: scheduling domain flags for the domain
+        @rtype: int
+        """
+        dominfo = self.domain_lookup_nr(dom)
+        if not dominfo:
+            raise XendInvalidDomain(str(dom))
+        try:
+            return xc.get_sdom_flags(dominfo.getDomid())
+        except Exception, ex:
+            log.exception(ex)
+            raise XendError(str(ex))
+
    def domain_maxmem_set(self, domid, mem):
        """Set the memory limit for a domain.

diff -r baff9c7cc4b3 tools/python/xen/xend/server/SrvDomain.py
--- a/tools/python/xen/xend/server/SrvDomain.py Fri May 04 11:23:25 2007 -0400
+++ b/tools/python/xen/xend/server/SrvDomain.py Fri May 04 14:26:45 2007 -0400
@@ -156,6 +156,33 @@ class SrvDomain(SrvDir):
        fn = FormFn(self.xd.domain_sched_credit_set,
                    [['dom', 'int'],
                     ['weight', 'int']])
+        val = fn(req.args, {'dom': self.dom.domid})
+        return val
+
+
+    def op_domain_add_adom(self, _, req):
+        fn = FormFn(self.xd.domain_add_adom,
+                    [['adom', 'int'],
+                     ['sdom', 'int']])
+        val = fn(req.args, {'dom': self.dom.domid})
+        return val
+
+    def op_domain_del_adom(self, _, req):
+        fn = FormFn(self.xd.domain_del_adom,
+                    [['adom', 'int'],
+                     ['sdom', 'int']])
+        val = fn(req.args, {'dom': self.dom.domid})
+        return val
+
+    def op_domain_get_sdom(self, _, req):
+        fn = FormFn(self.xd.domain_get_sdom,
+                    [['adom', 'int']])
+        val = fn(req.args, {'dom': self.dom.domid})
+        return val
+
+    def op_domain_get_sdom_flags(self, _, req):
+        fn = FormFn(self.xd.domain_get_sdom_flags,
+                    [['dom', 'int']])
        val = fn(req.args, {'dom': self.dom.domid})
        return val

diff -r baff9c7cc4b3 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Fri May 04 11:23:25 2007 -0400
+++ b/tools/python/xen/xm/main.py       Fri May 04 14:26:45 2007 -0400
@@ -134,6 +134,8 @@ SUBCOMMAND_HELP = {
    'sched-sedf'  : ('<Domain> [options]', 'Get/set EDF parameters.'),
    'sched-credit': ('[-d <Domain> [-w[=WEIGHT]|-c[=CAP]]]',
                     'Get/set credit scheduler parameters.'),
+    'sched-sdom'  : ('(-a|-r|-e|-f) -d <Domain> [-s <Domain>]',
+                     'Get/set nemesis scheduling domain parameters.'),
    'sysrq'       : ('<Domain> <letter>', 'Send a sysrq to a domain.'),
    'debug-keys'  : ('<Keys>', 'Send debug keys to Xen.'),
    'trigger'     : ('<Domain> <nmi|reset|init> [<VCPU>]',
@@ -207,6 +209,16 @@ SUBCOMMAND_OPTIONS = {
       ('-d DOMAIN', '--domain=DOMAIN', 'Domain to modify'),
       ('-w WEIGHT', '--weight=WEIGHT', 'Weight (int)'),
       ('-c CAP',    '--cap=CAP',       'Cap (int)'),
+    ),
+    'sched-sdom': (
+       ('-a -d DOMAIN -s DOMAIN', '--add --dom=DOMAIN --sdom=DOMAIN',
+        'Add DOMAIN to scheduling domain SDOM.'),
+       ('-r -d DOMAIN -s DOMAIN', '--remove --dom=DOMAIN --sdom=DOMAIN',
+        'Remove DOMAIN from scheduling domain SDOM.'),
+       ('-e -d DOMAIN', '--sched --dom=DOMAIN',
+        'Return the domain that schedules DOMAIN.'),
+       ('-f -d DOMAIN', '--flags --dom=DOMAIN',
+        'Return scheduling/activation domain flags for DOMAIN.'),
    ),
    'list': (
       ('-l', '--long',         'Output all VM details in SXP'),
@@ -312,6 +324,7 @@ scheduler_commands = [
scheduler_commands = [
    "sched-credit",
    "sched-sedf",
+    "sched-sdom",
    ]

device_commands = [
@@ -1532,6 +1545,62 @@ def xm_sched_credit(args):
                cap)
        else:
            result = server.xend.domain.sched_credit_set(domid, weight, cap)
+            if result != 0:
+                err(str(result))
+
+def xm_sdom(args):
+    opts = {}
+    try:
+        (options, params) = getopt.gnu_getopt(args, 'arefd:s:',
+          ['add', 'remove', 'sched', 'flags', 'dom=', 'sdom='])
+    except getopt.GetoptError, opterr:
+        err(opterr)
+        usage('sched-sdom')
+
+    for (k, v) in options:
+        if k in ['-a', '--add']:
+            opts['action'] = 'add'
+        elif k in ['-r', '--remove']:
+            opts['action'] = 'remove'
+        elif k in ['-e', '--sched']:
+            opts['action'] = 'sched'
+        elif k in ['-f', '--flags']:
+            opts['action'] = 'flags'
+        elif k in ['-d', '--dom']:
+            opts['dom'] = int(v)
+        elif k in ['-s', '--sdom']:
+            opts['sdom'] = int(v)
+
+    if 'action' not in opts or 'dom' not in opts:
+        usage('sched-sdom')
+        sys.exit(-1)
+
+    if 'add' in opts['action']:
+        if serverType == SERVER_XEN_API:
+            pass
+        else:
+            result = server.xend.domain.add_adom(opts['dom'], opts['sdom'])
+            if result != 0:
+                err(str(result))
+    elif 'remove' in opts['action']:
+        if serverType == SERVER_XEN_API:
+            pass
+        else:
+            result = server.xend.domain.del_adom(opts['dom'], opts['sdom'])
+            if result != 0:
+                err(str(result))
+    elif 'sched' in opts['action']:
+        if serverType == SERVER_XEN_API:
+            pass
+        else:
+            result = server.xend.domain.get_sdom(opts['dom'])
+            if result != 0:
+                err(str(result))
+    elif 'flags' in opts['action']:
+        if serverType == SERVER_XEN_API:
+            pass
+        else:
+            result = server.xend.domain.get_sdom_flags(opts['dom'])
            if result != 0:
                err(str(result))

@@ -2356,6 +2425,7 @@ commands = {
    # scheduler
    "sched-sedf": xm_sched_sedf,
    "sched-credit": xm_sched_credit,
+    "sched-sdom" : xm_sdom,
    # block
    "block-attach": xm_block_attach,
    "block-detach": xm_block_detach,


--
Mike D. Day
IBM LTC
Cell: 919 412-3900
Sametime: ncmike@xxxxxxxxxx AIM: ncmikeday  Yahoo: ultra.runner
PGP key: http://www.ncultra.org/ncmike/pubkey.asc
