WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

[Xen-devel] [Patch 4/6] xen: cpupool support - python stuff (xm, xend)

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [Patch 4/6] xen: cpupool support - python stuff (xm, xend)
From: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
Date: Fri, 17 Apr 2009 11:54:00 +0200
Organization: Fujitsu Technology Solutions
Signed-off-by: juergen.gross@xxxxxxxxxxxxxx
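
This part wires cpupools into the tools: new cpupool bindings in
xen/lowlevel/xc, cpupool handling in xend (XendAPI, XendConfig,
XendConstants, XendDomainInfo, XendError, XendNode, SrvServer,
XMLRPCServer), two new files (xen/util/sxputils.py and
xen/xend/XendCPUPool.py), and the xm subcommands pool-create, pool-new,
pool-start, pool-list, pool-destroy, pool-delete, pool-cpu-add,
pool-cpu-remove and pool-migrate. Short usage sketches (not part of the
patch) follow the hunks of the corresponding files below.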

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 636 47950
Fujitsu Technology Solutions              e-mail: juergen.gross@xxxxxxxxxxxxxx
Otto-Hahn-Ring 6                        Internet: ts.fujitsu.com
D-81739 Muenchen                 Company details: ts.fujitsu.com/imprint.html
diff -r 655dc3bc1d8e tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Thu Apr 09 10:34:53 2009 +0200
@@ -96,17 +96,18 @@ static PyObject *pyxc_domain_create(XcOb
                                     PyObject *args,
                                     PyObject *kwds)
 {
-    uint32_t dom = 0, ssidref = 0, flags = 0, target = 0;
+    uint32_t dom = 0, ssidref = 0, flags = 0, target = 0, cpupool = 0;
     int      ret, i;
     PyObject *pyhandle = NULL;
     xen_domain_handle_t handle = { 
         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };
 
-    static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", NULL };
-
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list,
-                                      &dom, &ssidref, &pyhandle, &flags, &target))
+    static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", "cpupool", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOiii", kwd_list, &dom,
+                                      &ssidref, &pyhandle, &flags, &target,
+                                      &cpupool))
         return NULL;
     if ( pyhandle != NULL )
     {
@@ -123,8 +124,9 @@ static PyObject *pyxc_domain_create(XcOb
         }
     }
 
+    flags |= XEN_DOMCTL_CDF_pool;
     if ( (ret = xc_domain_create(self->xc_handle, ssidref,
-                                 handle, flags, &dom)) < 0 )
+                                 handle, flags, &dom, cpupool)) < 0 )
         return pyxc_error_to_exception();
 
     if ( target )
@@ -315,7 +317,7 @@ static PyObject *pyxc_domain_getinfo(XcO
     {
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
-            ",s:L,s:L,s:L,s:i,s:i}",
+            ",s:L,s:L,s:L,s:i,s:i,s:i}",
             "domid",           (int)info[i].domid,
             "online_vcpus",    info[i].nr_online_vcpus,
             "max_vcpu_id",     info[i].max_vcpu_id,
@@ -330,7 +332,8 @@ static PyObject *pyxc_domain_getinfo(XcO
             "cpu_time",        (long long)info[i].cpu_time,
             "maxmem_kb",       (long long)info[i].max_memkb,
             "ssidref",         (int)info[i].ssidref,
-            "shutdown_reason", info[i].shutdown_reason);
+            "shutdown_reason", info[i].shutdown_reason,
+            "cpupool",         (int)info[i].cpupool);
         pyhandle = PyList_New(sizeof(xen_domain_handle_t));
         if ( (pyhandle == NULL) || (info_dict == NULL) )
         {
@@ -1503,6 +1506,175 @@ static PyObject *dom_op(XcObject *self, 
 
     Py_INCREF(zero);
     return zero;
+}
+
+static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+{
+    PyObject *cpulist = NULL;
+    uint32_t i;
+
+    cpulist = PyList_New(0);
+    for ( i = 0; cpumap != 0; i++ )
+    {
+        if ( cpumap & 1 )
+            PyList_Append(cpulist, PyInt_FromLong(i));
+        cpumap >>= 1;
+    }
+    return cpulist;
+}
+
+static PyObject *pyxc_cpupool_create(XcObject *self,
+                                     PyObject *args,
+                                     PyObject *kwds)
+{
+    uint32_t cpupool = 0, sched = XEN_SCHEDULER_CREDIT;
+
+    static char *kwd_list[] = { "pool", "sched", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list, &cpupool,
+                                      &sched))
+        return NULL;
+
+    if ( xc_cpupool_create(self->xc_handle, &cpupool, sched) < 0 )
+        return pyxc_error_to_exception();
+
+    return PyInt_FromLong(cpupool);
+}
+
+static PyObject *pyxc_cpupool_destroy(XcObject *self,
+                                      PyObject *args)
+{
+    uint32_t cpupool;
+
+    if (!PyArg_ParseTuple(args, "i", &cpupool))
+        return NULL;
+
+    if (xc_cpupool_destroy(self->xc_handle, cpupool) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_getinfo(XcObject *self,
+                                      PyObject *args,
+                                      PyObject *kwds)
+{
+    PyObject *list, *info_dict;
+
+    uint32_t first_pool = 0; 
+    int max_pools = 1024, nr_pools, i;
+    xc_cpupoolinfo_t *info;
+
+    static char *kwd_list[] = { "first_pool", "max_pools", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
+                                      &first_pool, &max_pools) )
+        return NULL;
+
+    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
+    if (info == NULL)
+        return PyErr_NoMemory();
+
+    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
+
+    if (nr_pools < 0)
+    {
+        free(info);
+        return pyxc_error_to_exception();
+    }
+
+    list = PyList_New(nr_pools);
+    for ( i = 0 ; i < nr_pools; i++ )
+    {
+        info_dict = Py_BuildValue(
+            "{s:i,s:i,s:i,s:N}",
+            "cpupool",         (int)info[i].cpupool_id,
+            "sched",           info[i].sched_id,
+            "n_dom",           info[i].n_dom,
+            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
+        if ( info_dict == NULL )
+        {
+            Py_DECREF(list);
+            if ( info_dict != NULL ) { Py_DECREF(info_dict); }
+            free(info);
+            return NULL;
+        }
+        PyList_SetItem(list, i, info_dict);
+    }
+
+    free(info);
+
+    return list;
+}
+
+static PyObject *pyxc_cpupool_addcpu(XcObject *self,
+                                     PyObject *args,
+                                     PyObject *kwds)
+{
+    uint32_t cpupool;
+    int cpu = -1;
+
+    static char *kwd_list[] = { "cpupool", "cpu", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
+                                      &cpupool, &cpu) )
+        return NULL;
+
+    if (xc_cpupool_addcpu(self->xc_handle, cpupool, cpu) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_removecpu(XcObject *self,
+                                        PyObject *args,
+                                        PyObject *kwds)
+{
+    uint32_t cpupool;
+    int cpu = -1;
+
+    static char *kwd_list[] = { "cpupool", "cpu", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
+                                      &cpupool, &cpu) )
+        return NULL;
+
+    if (xc_cpupool_removecpu(self->xc_handle, cpupool, cpu) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_movedomain(XcObject *self,
+                                         PyObject *args,
+                                         PyObject *kwds)
+{
+    uint32_t cpupool, domid;
+
+    static char *kwd_list[] = { "cpupool", "domid", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii|", kwd_list,
+                                      &cpupool, &domid) )
+        return NULL;
+
+    if (xc_cpupool_movedomain(self->xc_handle, cpupool, domid) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
+{
+    uint64_t cpumap;
+
+    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+        return pyxc_error_to_exception();
+
+    return cpumap_to_cpulist(cpumap);
 }
 
 static PyMethodDef pyxc_methods[] = {
@@ -1619,7 +1791,8 @@ static PyMethodDef pyxc_methods[] = {
       " maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
       " cpu_time [long]: CPU time consumed, in nanoseconds\n"
       " shutdown_reason [int]: Numeric code from guest OS, explaining "
-      "reason why it shut itself down.\n" },
+      "reason why it shut itself down.\n" 
+      " cpupool  [int]   Id of cpupool domain is bound to.\n" },
 
     { "vcpu_getinfo", 
       (PyCFunction)pyxc_vcpu_getinfo, 
@@ -1963,6 +2136,66 @@ static PyMethodDef pyxc_methods[] = {
       "Do not propagate spurious page faults to this guest.\n"
       " dom [int]: Identifier of domain.\n" },
 #endif
+
+    { "cpupool_create",
+      (PyCFunction)pyxc_cpupool_create,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Create new cpupool.\n"
+      " pool    [int, 0]: cpupool identifier to use (allocated if zero).\n"
+      " sched   [int]: scheduler to use (credit if unspecified).\n\n"
+      "Returns: [int] new cpupool identifier; -1 on error.\n" },
+
+    { "cpupool_destroy", 
+      (PyCFunction)pyxc_cpupool_destroy, 
+      METH_VARARGS, "\n"
+      "Destroy a cpupool.\n"
+      " pool [int]:    Identifier of cpupool to be destroyed.\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_getinfo", 
+      (PyCFunction)pyxc_cpupool_getinfo,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Get information regarding a set of cpupools, in increasing id order.\n"
+      " first_pool [int, 0]:    First cpupool to retrieve info about.\n"
+      " max_pools  [int, 1024]: Maximum number of cpupools to retrieve info"
+      " about.\n\n"
+      "Returns: [list of dicts] if list length is less than 'max_pools'\n"
+      "         parameter then there was an error, or the end of the\n"
+      "         cpupool-id space was reached.\n"
+      " pool     [int]: Identifier of cpupool to which this info pertains\n"
+      " sched    [int]:  Scheduler used for this cpupool\n"
+      " n_dom    [int]:  Number of Domains in this cpupool\n"
+      " cpulist  [list]: List of CPUs this cpupool is using\n" },
+
+    { "cpupool_addcpu",
+       (PyCFunction)pyxc_cpupool_addcpu,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Add a cpu to a cpupool.\n"
+      " pool    [int]: Identifier of cpupool.\n"
+      " cpu     [int, -1]: Cpu to add (lowest free if -1)\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_removecpu",
+       (PyCFunction)pyxc_cpupool_removecpu,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Remove a cpu from a cpupool.\n"
+      " pool    [int]: Identifier of cpupool.\n"
+      " cpu     [int, -1]: Cpu to remove (highest used if -1)\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_movedomain",
+       (PyCFunction)pyxc_cpupool_movedomain,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Move a domain to another cpupool.\n"
+      " pool    [int]: Identifier of cpupool to move doain to.\n"
+      " dom     [int]: Domain to move\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_freeinfo",
+      (PyCFunction)pyxc_cpupool_freeinfo,
+      METH_NOARGS, "\n"
+      "Get info about cpus not in any cpupool.\n"
+      "Returns: [list]: List of CPUs\n" },
 
     { NULL, NULL, 0, NULL }
 };
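
As a quick orientation, here is a sketch (not part of the patch) of how the
new xen.lowlevel.xc methods can be driven, assuming the libxc changes from
the earlier parts of this series are in place:

import xen.lowlevel.xc

xc = xen.lowlevel.xc.xc()

# create a pool using the credit scheduler; an id is allocated if pool is 0
pool = xc.cpupool_create(pool=0, sched=xen.lowlevel.xc.XEN_SCHEDULER_CREDIT)
xc.cpupool_addcpu(pool, cpu=2)          # add CPU 2 (lowest free CPU if -1)
xc.cpupool_movedomain(pool, domid=5)    # move domain 5 into the pool

for rec in xc.cpupool_getinfo():        # dicts: cpupool, sched, n_dom, cpulist
    print rec['cpupool'], rec['cpulist']
print xc.cpupool_freeinfo()             # CPUs not assigned to any pool

xc.cpupool_removecpu(pool, cpu=-1)      # remove highest used CPU
xc.cpupool_destroy(pool)                # pool must not contain domains any more
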
diff -r 655dc3bc1d8e tools/python/xen/xend/XendAPI.py
--- a/tools/python/xen/xend/XendAPI.py  Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xend/XendAPI.py  Thu Apr 09 10:42:59 2009 +0200
@@ -46,6 +46,7 @@ from XendPSCSI import XendPSCSI
 from XendPSCSI import XendPSCSI
 from XendDSCSI import XendDSCSI
 from XendXSPolicy import XendXSPolicy, XendACMPolicy
+from xen.xend.XendCPUPool import XendCPUPool
 
 from XendAPIConstants import *
 from xen.util.xmlrpclib2 import stringify
@@ -485,7 +486,8 @@ classes = {
     'PPCI'         : valid_object("PPCI"),
     'DPCI'         : valid_object("DPCI"),
     'PSCSI'        : valid_object("PSCSI"),
-    'DSCSI'        : valid_object("DSCSI")
+    'DSCSI'        : valid_object("DSCSI"),
+    'cpu_pool'     : valid_object("cpu_pool"),
 }
 
 autoplug_classes = {
@@ -500,6 +502,7 @@ autoplug_classes = {
     'DSCSI'       : XendDSCSI,
     'XSPolicy'    : XendXSPolicy,
     'ACMPolicy'   : XendACMPolicy,
+    'cpu_pool'    : XendCPUPool,
 }
 
 class XendAPI(object):
@@ -899,7 +902,8 @@ class XendAPI(object):
                     'API_version_minor',
                     'API_version_vendor',
                     'API_version_vendor_implementation',
-                    'enabled']
+                    'enabled',
+                    'resident_cpu_pools']
     
     host_attr_rw = ['name_label',
                     'name_description',
@@ -987,6 +991,8 @@ class XendAPI(object):
         return xen_api_todo()
     def host_get_logging(self, _, host_ref):
         return xen_api_todo()
+    def host_get_resident_cpu_pools(self, _, host_ref):
+        return xen_api_success(XendCPUPool.get_all())
 
     # object methods
     def host_disable(self, session, host_ref):
@@ -1048,7 +1054,9 @@ class XendAPI(object):
                   'PIFs': XendPIF.get_all(),
                   'PBDs': XendPBD.get_all(),
                   'PPCIs': XendPPCI.get_all(),
-                  'PSCSIs': XendPSCSI.get_all()}
+                  'PSCSIs': XendPSCSI.get_all(),
+                  'resident_cpu_pools': XendCPUPool.get_all(),
+                 }
         return xen_api_success(record)
 
     # class methods
@@ -1077,7 +1085,10 @@ class XendAPI(object):
                         'stepping',
                         'flags',
                         'utilisation',
-                        'features']
+                        'features',
+                        'cpu_pool']
+
+    host_cpu_funcs  = [('get_unassigned_cpus', 'Set(host_cpu)')]
 
     # attributes
     def _host_cpu_get(self, ref, field):
@@ -1102,21 +1113,28 @@ class XendAPI(object):
         return self._host_cpu_get(ref, 'flags')
     def host_cpu_get_utilisation(self, _, ref):
         return xen_api_success(XendNode.instance().get_host_cpu_load(ref))
+    def host_cpu_get_cpu_pool(self, _, ref):
+        return xen_api_success(XendCPUPool.get_cpu_pool_by_cpu_ref(ref))
 
     # object methods
     def host_cpu_get_record(self, _, ref):
         node = XendNode.instance()
         record = dict([(f, node.get_host_cpu_field(ref, f))
                        for f in self.host_cpu_attr_ro
-                       if f not in ['uuid', 'host', 'utilisation']])
+                       if f not in ['uuid', 'host', 'utilisation', 'cpu_pool']])
         record['uuid'] = ref
         record['host'] = node.uuid
         record['utilisation'] = node.get_host_cpu_load(ref)
+        record['cpu_pool'] = XendCPUPool.get_cpu_pool_by_cpu_ref(ref)
         return xen_api_success(record)
 
     # class methods
     def host_cpu_get_all(self, session):
         return xen_api_success(XendNode.instance().get_host_cpu_refs())
+    def host_cpu_get_unassigned_cpus(self, session):
+        return xen_api_success(
+            [ref for ref in XendNode.instance().get_host_cpu_refs()
+                 if len(XendCPUPool.get_cpu_pool_by_cpu_ref(ref)) == 0])
 
 
     # Xen API: Class host_metrics
@@ -1175,6 +1193,7 @@ class XendAPI(object):
                   'is_control_domain',
                   'metrics',
                   'crash_dumps',
+                  'cpu_pool',
                   ]
                   
     VM_attr_rw = ['name_label',
@@ -1203,7 +1222,9 @@ class XendAPI(object):
                   'platform',
                   'PCI_bus',
                   'other_config',
-                  'security_label']
+                  'security_label',
+                  'pool_name',
+                  ]
 
     VM_methods = [('clone', 'VM'),
                   ('start', None),
@@ -1231,7 +1252,9 @@ class XendAPI(object):
                   ('set_memory_dynamic_min_live', None),
                   ('send_trigger', None),
                   ('migrate', None),
-                  ('destroy', None)]
+                  ('destroy', None),
+                  ('cpu_pool_migrate', None),
+                  ]
     
     VM_funcs  = [('create', 'VM'),
                  ('restore', None),
@@ -1426,6 +1449,17 @@ class XendAPI(object):
         xd = XendDomain.instance()
         return xen_api_success(
             xd.get_vm_by_uuid(vm_ref) == xd.privilegedDomain())
+
+    def VM_get_cpu_pool(self, session, vm_ref):
+        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
+        pool_ref = XendCPUPool.query_pool_ref(dom.get_cpu_pool())
+        return xen_api_success(pool_ref)
+
+    def VM_get_pool_name(self, session, vm_ref):
+        return self.VM_get('pool_name', session, vm_ref)
+
+    def VM_set_pool_name(self, session, vm_ref, value):
+        return self.VM_set('pool_name', session, vm_ref, value)
 
     def VM_set_name_label(self, session, vm_ref, label):
         dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
@@ -1722,7 +1756,9 @@ class XendAPI(object):
             'is_control_domain': xeninfo.info['is_control_domain'],
             'metrics': xeninfo.get_metrics(),
             'security_label': xeninfo.get_security_label(),
-            'crash_dumps': []
+            'crash_dumps': [],
+            'pool_name': xeninfo.info.get('pool_name'),
+            'cpu_pool' : XendCPUPool.query_pool_ref(xeninfo.get_cpu_pool()),
         }
         return xen_api_success(record)
 
@@ -1817,6 +1853,25 @@ class XendAPI(object):
     def VM_restore(self, _, src, paused):
         xendom = XendDomain.instance()
         xendom.domain_restore(src, bool(paused))
+        return xen_api_success_void()
+
+    def VM_cpu_pool_migrate(self, session, vm_ref, cpu_pool_ref):
+        xendom = XendDomain.instance()
+        xeninfo = xendom.get_vm_by_uuid(vm_ref)
+        domid = xeninfo.getDomid()
+        pool = XendAPIStore.get(cpu_pool_ref, XendCPUPool.getClass())
+        if pool == None:
+            return xen_api_error(['HANDLE_INVALID', 'cpu_pool', cpu_pool_ref])
+        if domid is not None:
+            if domid == 0:
+                return xen_api_error(['OPERATION_NOT_ALLOWED',
+                    'could not move Domain-0'])
+            try:
+                XendCPUPool.move_domain(cpu_pool_ref, domid)
+            except Exception, ex:
+                return xen_api_error(['INTERNAL_ERROR',
+                    'could not move domain'])
+        self.VM_set('pool_name', session, vm_ref, pool.get_name_label())
         return xen_api_success_void()
 
 
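
Via the Xen-API the new calls are reached the same way xm does it further
down; a sketch, assuming 'session' is an authenticated XenAPI session object:

# look up a pool by name and move a running VM into it
pool_ref = session.xenapi.cpu_pool.get_by_name_label('Pool-0')[0]
vm_ref   = session.xenapi.VM.get_by_name_label('mydomain')[0]
session.xenapi.VM.cpu_pool_migrate(vm_ref, pool_ref)

# CPUs that are currently not assigned to any pool
free_refs = session.xenapi.host_cpu.get_unassigned_cpus()
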
diff -r 655dc3bc1d8e tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py       Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xend/XendConfig.py       Fri Apr 17 06:57:52 2009 +0200
@@ -123,6 +123,7 @@ XENAPI_CFG_TO_LEGACY_CFG = {
     'actions_after_crash': 'on_crash', 
     'PV_bootloader': 'bootloader',
     'PV_bootloader_args': 'bootloader_args',
+    'pool_name' : 'pool_name',
 }
 
 LEGACY_CFG_TO_XENAPI_CFG = reverse_dict(XENAPI_CFG_TO_LEGACY_CFG)
@@ -220,6 +221,7 @@ XENAPI_CFG_TYPES = {
     'machine_address_size': int,
     'suppress_spurious_page_faults': bool0,
     's3_integrity' : int,
+    'pool_name' : str,
 }
 
 # List of legacy configuration keys that have no equivalent in the
@@ -265,6 +267,7 @@ LEGACY_CFG_TYPES = {
     'rtc/timeoffset': str,
     'bootloader':    str,
     'bootloader_args': str,
+    'pool_name':     str,
 }
 
 # Values that should be stored in xenstore's /vm/<uuid> that is used
@@ -287,6 +290,7 @@ LEGACY_XENSTORE_VM_PARAMS = [
     'on_xend_stop',
     'bootloader',
     'bootloader_args',
+    'pool_name',
 ]
 
 ##
@@ -392,6 +396,7 @@ class XendConfig(dict):
             'other_config': {},
             'platform': {},
             'target': 0,
+            'pool_name' : 'Pool-0',
         }
         
         return defaults
diff -r 655dc3bc1d8e tools/python/xen/xend/XendConstants.py
--- a/tools/python/xen/xend/XendConstants.py    Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xend/XendConstants.py    Thu Apr 09 10:45:43 2009 +0200
@@ -136,6 +136,7 @@ VTPM_DELETE_SCRIPT = '/etc/xen/scripts/v
 #
 
 XS_VMROOT = "/vm/"
+XS_POOLROOT = "/local/pool/"
 
 NR_PCI_DEV = 32
 AUTO_PHP_SLOT = NR_PCI_DEV
diff -r 655dc3bc1d8e tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xend/XendDomainInfo.py   Fri Apr 17 07:40:55 2009 +0200
@@ -52,6 +52,7 @@ from xen.xend.xenstore.xswatch import xs
 from xen.xend.xenstore.xswatch import xswatch
 from xen.xend.XendConstants import *
 from xen.xend.XendAPIConstants import *
+from xen.xend.XendCPUPool import XendCPUPool
 from xen.xend.server.DevConstants import xenbusState
 
 from xen.xend.XendVMMetrics import XendVMMetrics
@@ -2306,6 +2307,19 @@ class XendDomainInfo:
                               "supported by your CPU and enabled in your "
                               "BIOS?")
 
+        # look-up pool id to use
+        pool_name = self.info['pool_name']
+        if len(pool_name) == 0:
+            pool_name = "Pool-0"
+
+        pool = XendCPUPool.lookup_pool(pool_name)
+
+        if pool is None:
+            raise VmError("unkown pool %s" % pool_name)
+        pool_id = pool.query_pool_id()
+        if pool_id is None:
+            raise VmError("pool %s not activated" % pool_name)
+
         # Hack to pre-reserve some memory for initial domain creation.
         # There is an implicit memory overhead for any domain creation. This
         # overhead is greater for some types of domain than others. For
@@ -2331,7 +2345,9 @@ class XendDomainInfo:
                 ssidref = ssidref,
                 handle = uuid.fromString(self.info['uuid']),
                 flags = flags,
-                target = self.info.target())
+                target = self.info.target(),
+                cpupool = pool_id,
+                )
         except Exception, e:
             # may get here if due to ACM the operation is not permitted
             if security.on() == xsconstants.XS_POLICY_ACM:
@@ -3226,6 +3242,13 @@ class XendDomainInfo:
 
         retval = xc.sched_credit_domain_get(self.getDomid())
         return retval
+
+    def get_cpu_pool(self):
+        if self.getDomid() is None:
+            return None
+        xeninfo = dom_get(self.domid)
+        return xeninfo['cpupool']
+
     def get_power_state(self):
         return XEN_API_VM_POWER_STATE[self._stateGet()]
     def get_platform(self):
diff -r 655dc3bc1d8e tools/python/xen/xend/XendError.py
--- a/tools/python/xen/xend/XendError.py        Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xend/XendError.py        Thu Apr 09 10:49:39 2009 +0200
@@ -18,6 +18,7 @@
 
 from xmlrpclib import Fault
 
+import types
 import XendClient
 
 class XendInvalidDomain(Fault):
@@ -186,6 +187,26 @@ class DirectPCIError(XendAPIError):
     def __str__(self):
         return 'DIRECT_PCI_ERROR: %s' % self.error
 
+class PoolError(XendAPIError):
+    def __init__(self, error, spec=None):
+        XendAPIError.__init__(self)
+        self.spec = []
+        if spec:
+            if isinstance(spec, types.ListType):
+                self.spec = spec
+            else:
+                self.spec = [spec]
+        self.error = error
+
+    def get_api_error(self):
+        return [self.error] + self.spec
+
+    def __str__(self):
+        if self.spec:
+            return '%s: %s' % (self.error, self.spec)
+        else:
+            return '%s' % self.error
+    
 from xen.util.xsconstants import xserr2string
 
 class SecurityError(XendAPIError):
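
PoolError pairs a Xen-API error code with an optional spec list; a small
sketch of the intended use (the error code strings are defined in
XendCPUPool.py below):

from xen.xend.XendError import PoolError

try:
    raise PoolError('POOL_BAD_STATE', 'activated')
except PoolError, ex:
    print str(ex)             # POOL_BAD_STATE: ['activated']
    print ex.get_api_error()  # ['POOL_BAD_STATE', 'activated']
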
diff -r 655dc3bc1d8e tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xend/XendNode.py Thu Apr 09 10:54:11 2009 +0200
@@ -41,6 +41,7 @@ from XendMonitor import XendMonitor
 from XendMonitor import XendMonitor
 from XendPPCI import XendPPCI
 from XendPSCSI import XendPSCSI
+from xen.xend.XendCPUPool import XendCPUPool
 
 class XendNode:
     """XendNode - Represents a Domain 0 Host."""
@@ -314,6 +315,16 @@ class XendNode:
                                                    uuid.createString())
                 XendPSCSI(pscsi_uuid, pscsi_record)
 
+        # Initialise cpu_pools
+        saved_cpu_pools = self.state_store.load_state(XendCPUPool.getClass())
+        if saved_cpu_pools:
+            for cpu_pool_uuid, cpu_pool in saved_cpu_pools.items():
+                try:
+                    XendCPUPool.recreate(cpu_pool, cpu_pool_uuid)
+                except CreateUnspecifiedAttributeError:
+                    log.warn("Error recreating %s %s",
+                             XendCPUPool.getClass(), cpu_pool_uuid)
+        XendCPUPool.recreate_active_pools()
 
     def add_network(self, interface):
         # TODO
@@ -486,6 +497,7 @@ class XendNode:
         self.save_SRs()
         self.save_PPCIs()
         self.save_PSCSIs()
+        self.save_cpu_pools()
 
     def save_PIFs(self):
         pif_records = dict([(pif_uuid, XendAPIStore.get(
@@ -521,6 +533,12 @@ class XendNode:
                                   pscsi_uuid, "PSCSI").get_record())
                             for pscsi_uuid in XendPSCSI.get_all()])
         self.state_store.save_state('pscsi', pscsi_records)
+
+    def save_cpu_pools(self):
+        cpu_pool_records = dict([(cpu_pool_uuid, XendAPIStore.get(
+                    cpu_pool_uuid, XendCPUPool.getClass()).get_record())
+                    for cpu_pool_uuid in XendCPUPool.get_all_managed()])
+        self.state_store.save_state(XendCPUPool.getClass(), cpu_pool_records)
 
     def shutdown(self):
         return 0
@@ -814,6 +832,7 @@ class XendNode:
         info['free_memory']  = info['free_memory'] / 1024
         info['node_to_cpu']  = self.format_node_to_cpu(info)
         info['node_to_memory'] = self.format_node_to_memory(info)
+        info['free_cpus'] = len(XendCPUPool.unbound_cpus())
 
         ITEM_ORDER = ['nr_cpus',
                       'nr_nodes',
@@ -822,6 +841,7 @@ class XendNode:
                       'cpu_mhz',
                       'hw_caps',
                       'virt_caps',
+                      'free_cpus',
                       'total_memory',
                       'free_memory',
                       'node_to_cpu',
diff -r 655dc3bc1d8e tools/python/xen/xend/server/SrvServer.py
--- a/tools/python/xen/xend/server/SrvServer.py Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xend/server/SrvServer.py Thu Apr 09 10:54:40 2009 +0200
@@ -52,6 +52,7 @@ from xen.xend.XendLogging import log
 from xen.xend.XendLogging import log
 from xen.xend.XendClient import XEN_API_SOCKET
 from xen.xend.XendDomain import instance as xenddomain
+from xen.xend.XendCPUPool import XendCPUPool
 from xen.web.SrvDir import SrvDir
 
 from SrvRoot import SrvRoot
@@ -146,6 +147,12 @@ class XendServers:
                 status.close()
                 status = None
 
+            # auto start pools before domains are started
+            try:
+                XendCPUPool.autostart_pools()
+            except Exception, e:
+                log.exception("Failed while autostarting pools")
+            
             # Reaching this point means we can auto start domains
             try:
                 xenddomain().autostart_domains()
diff -r 655dc3bc1d8e tools/python/xen/xend/server/XMLRPCServer.py
--- a/tools/python/xen/xend/server/XMLRPCServer.py      Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xend/server/XMLRPCServer.py      Thu Apr 09 10:57:04 2009 +0200
@@ -33,6 +33,7 @@ from xen.xend.XendConstants import DOM_S
 from xen.xend.XendConstants import DOM_STATE_RUNNING
 from xen.xend.XendLogging import log
 from xen.xend.XendError import XendInvalidDomain
+from xen.xend.XendCPUPool import XendCPUPool
 
 # vcpu_avail is a long and is not needed by the clients.  It's far easier
 # to just remove it then to try and marshal the long.
@@ -97,6 +98,10 @@ methods = ['device_create', 'device_conf
            'getRestartCount', 'getBlockDeviceClass']
 
 exclude = ['domain_create', 'domain_restore']
+
+POOL_FUNCS = ['pool_create', 'pool_new', 'pool_start', 'pool_list',
+              'pool_destroy', 'pool_delete', 'pool_cpu_add', 'pool_cpu_remove',
+              'pool_migrate']
 
 class XMLRPCServer:
     def __init__(self, auth, use_xenapi, use_tcp = False,
@@ -197,6 +202,11 @@ class XMLRPCServer:
                 if name not in exclude:
                     self.server.register_function(fn, "xend.domain.%s" % name[7:])
 
+        # Functions in XendPool
+        for name in POOL_FUNCS:
+            fn = getattr(XendCPUPool, name)
+            self.server.register_function(fn, "xend.cpu_pool.%s" % name[5:])
+
         # Functions in XendNode and XendDmesg
         for type, lst, n in [(XendNode, ['info', 'pciinfo', 'send_debug_keys'],
                              'node'),
diff -r 655dc3bc1d8e tools/python/xen/xm/create.dtd
--- a/tools/python/xen/xm/create.dtd    Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xm/create.dtd    Thu Apr 09 10:58:32 2009 +0200
@@ -50,6 +50,7 @@
                  s3_integrity           CDATA #REQUIRED
                  vcpus_max              CDATA #REQUIRED
                  vcpus_at_startup       CDATA #REQUIRED
+                 pool_name              CDATA #REQUIRED
                  actions_after_shutdown %NORMAL_EXIT; #REQUIRED 
                  actions_after_reboot   %NORMAL_EXIT; #REQUIRED
                  actions_after_crash    %CRASH_BEHAVIOUR; #REQUIRED
diff -r 655dc3bc1d8e tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py     Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xm/create.py     Thu Apr 09 11:00:11 2009 +0200
@@ -604,6 +604,10 @@ gopts.var('suppress_spurious_page_faults
 gopts.var('suppress_spurious_page_faults', val='yes|no',
           fn=set_bool, default=None,
           use="""Do not inject spurious page faults into this guest""")
+
+gopts.var('pool', val='POOL NAME',
+          fn=set_value, default=None,
+          use="""CPU pool to use for the VM""")
 
 gopts.var('pci_msitranslate', val='TRANSLATE',
           fn=set_int, default=1,
@@ -976,6 +980,8 @@ def make_config(vals):
         config.append(['backend', ['tpmif']])
     if vals.localtime:
         config.append(['localtime', vals.localtime])
+    if vals.pool:
+        config.append(['pool_name', vals.pool])
 
     config_image = configure_image(vals)
     if vals.bootloader:
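
With the new option a domain config file can select the pool at create time;
make_config() above maps it to the 'pool_name' entry of the domain config.
A hypothetical config excerpt:

# /etc/xen/mydomain (excerpt, hypothetical)
name   = "mydomain"
memory = 512
pool   = "Pool-1"   # CPU pool to use for the VM; Pool-0 if omitted
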
diff -r 655dc3bc1d8e tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xm/main.py       Wed Apr 15 11:39:13 2009 +0200
@@ -55,6 +55,7 @@ import xen.util.xsm.xsm as security
 import xen.util.xsm.xsm as security
 from xen.util.xsm.xsm import XSMError
 from xen.util.acmpolicy import ACM_LABEL_UNLABELED_DISPLAY
+from xen.util.sxputils import sxp2map, map2sxp as map_to_sxp
 
 import XenAPI
 
@@ -219,6 +220,23 @@ SUBCOMMAND_HELP = {
     'labels'        :  ('[policy] [type=dom|res|any]',
                         'List <type> labels for (active) policy.'),
     'serve'         :  ('', 'Proxy Xend XMLRPC over stdio.'),
+
+    #
+    # pool commands
+    #
+    'pool-create'   :  ('<ConfigFile> [vars]',
+                        'Create a CPU pool based on a ConfigFile.'),
+    'pool-new'      :  ('<ConfigFile> [vars]',
+                        'Adds a CPU pool to Xend CPU pool management'),
+    'pool-start'    :  ('<CPU Pool>', 'Starts a Xend CPU pool'),
+    'pool-list'     :  ('[<CPU Pool>] [-l|--long] [-c|--cpus]', 'List CPU pools on host'),
+    'pool-destroy'  :  ('<CPU Pool>', 'Deactivates a CPU pool'),
+    'pool-delete'   :  ('<CPU Pool>',
+                        'Removes a CPU pool from Xend management'),
+    'pool-cpu-add'  :  ('<CPU Pool> <CPU nr>', 'Adds a CPU to a CPU pool'),
+    'pool-cpu-remove': ('<CPU Pool> <CPU nr>', 'Removes a CPU from a CPU pool'),
+    'pool-migrate'  :  ('<Domain> <CPU Pool>',
+                        'Moves a domain into a CPU pool'),
 }
 
 SUBCOMMAND_OPTIONS = {
@@ -242,6 +260,7 @@ SUBCOMMAND_OPTIONS = {
        ('-l', '--long',         'Output all VM details in SXP'),
        ('', '--label',          'Include security labels'),
        ('', '--state=<state>',  'Select only VMs with the specified state'),
+       ('', '--pool=<pool>',    'Select only VMs in specified cpu pool'),
     ),
     'console': (
       ('-q', '--quiet', 'Do not print an error message if the domain does not exist'),
@@ -282,6 +301,10 @@ SUBCOMMAND_OPTIONS = {
     'info': (
        ('-c', '--config', 'List Xend configuration parameters'),
     ),
+    'pool-list': (
+       ('-l', '--long', 'Output all CPU pool details in SXP format'),
+       ('-c', '--cpus', 'Output list of CPUs used by a pool'),
+    ),
 }
 
 common_commands = [
@@ -396,9 +419,21 @@ acm_commands = [
     "getpolicy",
     ]
 
+pool_commands = [
+    "pool-create",
+    "pool-new",
+    "pool-start",
+    "pool-list",
+    "pool-destroy",
+    "pool-delete",
+    "pool-cpu-add",
+    "pool-cpu-remove",
+    "pool-migrate",
+    ]
+
 all_commands = (domain_commands + host_commands + scheduler_commands +
                 device_commands + vnet_commands + acm_commands +
-                ['shell', 'event-monitor'])
+                ['shell', 'event-monitor'] + pool_commands)
 
 
 ##
@@ -791,7 +826,7 @@ def datetime_to_secs(v):
         v = str(v).replace(c, "")
     return time.mktime(time.strptime(v[0:14], '%Y%m%dT%H%M%S'))
 
-def getDomains(domain_names, state, full = 0):
+def getDomains(domain_names, state, full = 0, pool = None):
     if serverType == SERVER_XEN_API:
         doms_sxp = []
         doms_dict = []
@@ -800,6 +835,9 @@ def getDomains(domain_names, state, full
         dom_metrics_recs = server.xenapi.VM_metrics.get_all_records()
 
         for dom_ref, dom_rec in dom_recs.items():
+            if pool and pool != dom_rec['pool_name']:
+                continue
+
             dom_metrics_rec = dom_metrics_recs[dom_rec['metrics']]
 
             states = ('running', 'blocked', 'paused', 'shutdown',
@@ -840,7 +878,15 @@ def getDomains(domain_names, state, full
         if domain_names:
             return [server.xend.domain(dom, full) for dom in domain_names]
         else:
-            return server.xend.domains_with_state(True, state, full)
+            doms = server.xend.domains_with_state(True, state, full)
+            if not pool:
+                return doms
+            else:
+                doms_in_pool = []
+                for dom in doms:
+                    if sxp.child_value(dom, 'pool_name', '') == pool:
+                        doms_in_pool.append(dom)
+                return doms_in_pool
 
 
 def xm_list(args):
@@ -848,10 +894,11 @@ def xm_list(args):
     show_vcpus = 0
     show_labels = 0
     state = 'all'
+    pool = None
     try:
         (options, params) = getopt.gnu_getopt(args, 'lv',
                                               ['long','vcpus','label',
-                                               'state='])
+                                               'state=','pool='])
     except getopt.GetoptError, opterr:
         err(opterr)
         usage('list')
@@ -865,10 +912,16 @@ def xm_list(args):
             show_labels = 1
         if k in ['--state']:
             state = v
+        if k in ['--pool']:
+            pool = v
 
     if state != 'all' and len(params) > 0:
         raise OptionError(
             "You may specify either a state or a particular VM, but not both")
+
+    if pool and len(params) > 0:
+        raise OptionError(
+            "You may specify either a pool or a particular VM, but not both")
 
     if show_vcpus:
         print >>sys.stderr, (
@@ -876,7 +929,7 @@ def xm_list(args):
         xm_vcpu_list(params)
         return
 
-    doms = getDomains(params, state, use_long)
+    doms = getDomains(params, state, use_long, pool)
 
     if use_long:
         map(PrettyPrint.prettyprint, doms)
@@ -1704,7 +1757,14 @@ def xm_info(args):
                 return host_cpu_records[0].get("features", "")
             else:
                 return ""
-                
+
+        def getFreeCpuCount():
+            cnt = 0
+            for host_cpu_record in host_cpu_records:
+                if len(host_cpu_record.get("cpu_pool", [])) == 0:
+                    cnt += 1
+            return cnt
+
         info = {
             "host":              getVal(["name_label"]),
             "release":           getVal(["software_version", "release"]),
@@ -1716,6 +1776,7 @@ def xm_info(args):
             "threads_per_core":  getVal(["cpu_configuration", 
"threads_per_core"]),
             "cpu_mhz":           getCpuMhz(),
             "hw_caps":           getCpuFeatures(),
+            "free_cpus":         getFreeCpuCount(),
             "total_memory":      
int(host_metrics_record["memory_total"])/1024/1024,
             "free_memory":       
int(host_metrics_record["memory_free"])/1024/1024,
             "xen_major":         getVal(["software_version", "xen_major"]),
@@ -2829,7 +2890,170 @@ def xm_network_show(args):
 
             print format2 % r
 
-            
+def get_pool_ref(name): 
+    refs = server.xenapi.cpu_pool.get_by_name_label(name)
+    if len(refs) > 0:
+        return refs[0]
+    else:
+        err('unknown pool name')
+        sys.exit(1)
+    
+def xm_pool_start(args):
+    arg_check(args, "pool-start", 1)
+    if serverType == SERVER_XEN_API:
+        ref = get_pool_ref(args[0])
+        server.xenapi.cpu_pool.activate(ref)
+    else:
+        server.xend.cpu_pool.start(args[0])
+
+def brief_pool_list(sxprs):
+    format_str = "%-16s   %3s  %8s       %s          %s"
+    for sxpr in sxprs:
+        if sxpr == sxprs[0]:
+            print "Name               CPUs   Sched     Active   Domain count"
+        record = sxp2map(sxpr)
+        name = record['name_label']
+        sched_policy = record['sched_policy']
+        if record['activated']:
+            cpus = record.get('host_CPU_numbers', [])
+            vms = record.get('started_VM_names', [])
+            if not isinstance(cpus, types.ListType):
+                cpus = [cpus]
+            if not isinstance(vms, types.ListType):
+                vms = [vms]
+            cpu_count = len(cpus)
+            vm_count  = len(vms)
+            active = 'y'
+        else:
+            cpu_count = record['ncpu']
+            vm_count  = 0
+            active = 'n'
+        print format_str % (name, cpu_count, sched_policy, active, vm_count)
+    
+def brief_pool_list_cpus(sxprs):
+    format_str = "%-16s %s"
+    for sxpr in sxprs: 
+        if sxpr == sxprs[0]:
+            print format_str % ("Name", "CPU list")
+        record = sxp2map(sxpr)
+        name = record['name_label']
+        cpus = ""
+        if record['activated']:
+            cpus = record.get('host_CPU_numbers', [])
+            if isinstance(cpus, types.ListType):
+                cpus.sort()
+                cpus = reduce(lambda x,y: x + "%s," % y, cpus, "")
+                cpus = cpus[0:len(cpus)-1]
+            else:
+                cpus = str(cpus)
+        if len(cpus) == 0:
+            cpus = "-"
+        print format_str % (name, cpus)
+
+def xm_pool_list(args):
+    arg_check(args, "pool-list", 0, 2)
+    try:
+        (options, params) = getopt.gnu_getopt(args, 'lc', ['long','cpus'])
+    except getopt.GetoptError, opterr:
+        err(opterr)
+        usage('pool-list')
+    if len(params) > 1:
+        err("Only one pool name for selection allowed")
+        usage('pool-list')
+    
+    use_long = False
+    show_cpus = False
+    for (k, _) in options:
+        if k in ['-l', '--long']:
+            use_long = True
+        if k in ['-c', '--cpus']:
+            show_cpus = True
+
+    if serverType == SERVER_XEN_API:
+        pools = server.xenapi.cpu_pool.get_all_records()
+        cpu_recs = server.xenapi.host_cpu.get_all_records()
+        sxprs = [] 
+        for pool in pools.values():
+            if pool['name_label'] in params or len(params) == 0:
+                started_VM_names = [['started_VM_names'] + [
+                    server.xenapi.VM.get_name_label(started_VM)
+                    for started_VM in pool['started_VMs'] ] ]
+                host_CPU_numbers = [['host_CPU_numbers'] + [
+                    cpu_recs[cpu_ref]['number']
+                    for cpu_ref in pool['host_CPUs'] ] ]
+                sxpr = [ pool['uuid'] ] + map_to_sxp(pool) + \
+                    host_CPU_numbers + started_VM_names
+                sxprs.append(sxpr)
+    else:
+        sxprs = server.xend.cpu_pool.list(params)
+
+    if len(params) > 0 and len(sxprs) == 0:
+        # pool not found
+        err("Pool '%s' does not exist." % params[0])
+
+    if use_long:
+        for sxpr in sxprs:
+            PrettyPrint.prettyprint(sxpr)
+    elif show_cpus:
+        brief_pool_list_cpus(sxprs)
+    else:
+        brief_pool_list(sxprs)
+
+def xm_pool_destroy(args):
+    arg_check(args, "pool-destroy", 1)
+    if serverType == SERVER_XEN_API:
+        ref = get_pool_ref(args[0])
+        server.xenapi.cpu_pool.deactivate(ref)
+    else:
+        server.xend.cpu_pool.destroy(args[0])
+
+def xm_pool_delete(args):
+    arg_check(args, "pool-delete", 1)
+    if serverType == SERVER_XEN_API:
+        ref = get_pool_ref(args[0])
+        server.xenapi.cpu_pool.destroy(ref)
+    else:
+        server.xend.cpu_pool.delete(args[0])
+
+def xm_pool_cpu_add(args):
+    arg_check(args, "pool-cpu-add", 2)
+    if serverType == SERVER_XEN_API:
+        ref = get_pool_ref(args[0])
+        cpu_ref_list = server.xenapi.host_cpu.get_all_records()
+        cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
+                                  if c_rec['number'] == args[1] ]
+        if len(cpu_ref) == 0:
+            err('cpu number unknown')
+        else:
+            server.xenapi.cpu_pool.add_host_CPU_live(ref, cpu_ref[0])
+    else:
+        server.xend.cpu_pool.cpu_add(args[0], args[1])
+
+def xm_pool_cpu_remove(args):
+    arg_check(args, "pool-cpu-remove", 2)
+    if serverType == SERVER_XEN_API:
+        ref = get_pool_ref(args[0])
+        cpu_ref_list = server.xenapi.host_cpu.get_all_records()
+        cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
+                                  if c_rec['number'] == args[1] ]
+        if len(cpu_ref) == 0:
+            err('cpu number unknown')
+        else:
+            server.xenapi.cpu_pool.remove_host_CPU_live(ref, cpu_ref[0])
+    else:
+        server.xend.cpu_pool.cpu_remove(args[0], args[1])
+
+def xm_pool_migrate(args):
+    arg_check(args, "pool-migrate", 2)
+    domname = args[0]
+    poolname = args[1]
+    if serverType == SERVER_XEN_API:
+        pool_ref = get_pool_ref(poolname)
+        server.xenapi.VM.cpu_pool_migrate(get_single_vm(domname), pool_ref)
+    else:
+        server.xend.cpu_pool.migrate(domname, poolname)
+
+ 
 commands = {
     "shell": xm_shell,
     "event-monitor": xm_event_monitor,
@@ -2904,6 +3128,14 @@ commands = {
     "scsi-attach": xm_scsi_attach,
     "scsi-detach": xm_scsi_detach,
     "scsi-list": xm_scsi_list,
+    # pool
+    "pool-start": xm_pool_start,
+    "pool-list": xm_pool_list,
+    "pool-destroy": xm_pool_destroy,
+    "pool-delete": xm_pool_delete,
+    "pool-cpu-add": xm_pool_cpu_add,
+    "pool-cpu-remove": xm_pool_cpu_remove,
+    "pool-migrate": xm_pool_migrate,
     }
 
 ## The commands supported by a separate argument parser in xend.xm.
@@ -2921,6 +3153,8 @@ IMPORTED_COMMANDS = [
     'getpolicy',
     'setpolicy',
     'resetpolicy',
+    'pool-create',
+    'pool-new',
     ]
 
 for c in IMPORTED_COMMANDS:
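
For reference, a sketch of the SXP record format brief_pool_list() consumes,
as returned by server.xend.cpu_pool.list() on the legacy path (uuid first,
then name/value pairs; the values here are invented):

sxpr = ['pool-uuid',
        ['name_label', 'Pool-0'],
        ['sched_policy', 'credit'],
        ['activated', True],
        ['host_CPU_numbers', 0, 1],
        ['started_VM_names', 'Domain-0']]
brief_pool_list([sxpr])
# prints the header line plus a row like: Pool-0  2  credit  y  1
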
diff -r 655dc3bc1d8e tools/python/xen/xm/xenapi_create.py
--- a/tools/python/xen/xm/xenapi_create.py      Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/python/xen/xm/xenapi_create.py      Thu Apr 09 11:14:43 2009 +0200
@@ -310,7 +310,9 @@ class xenapi_create:
             "HVM_boot_params":
                 {},
             "PCI_bus":
-               ""
+               "",
+            "pool_name":
+               vm.attributes["pool_name"].value,
             }
 
         if vm.attributes.has_key("security_label"):
@@ -654,6 +656,8 @@ class sxp2xml:
             = str(get_child_by_name(config, "vcpus", 1))
         vm.attributes["s3_integrity"] \
             = str(get_child_by_name(config, "s3_integrity", 0))
+        vm.attributes["pool_name"] \
+            = str(get_child_by_name(config, "pool_name", "Pool-0"))
 
         sec_data = get_child_by_name(config, "security")
         if sec_data:
diff -r 655dc3bc1d8e tools/python/xen/util/sxputils.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/util/sxputils.py Fri Apr 17 10:51:15 2009 +0200
@@ -0,0 +1,64 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (c) 2009 Fujitsu Technology Solutions
+#============================================================================
+
+""" convert sxp to map / map to sxp.
+"""
+
+import types
+from xen.xend import sxp
+
+def map2sxp(map_val):
+    """ conversion of all key-value pairs of a map (recursively) to sxp.
+        @param map_val: map; if a value contains a list or dict it is also
+                    converted to sxp
+        @type map_val: dict
+        @return sxp expr
+        @rtype: list
+    """
+    sxp_vals = []
+    for (k, v) in map_val.items():
+        if isinstance(v, types.DictionaryType):
+            sxp_vals += [[k] + map2sxp(v)]
+        elif isinstance(v, types.ListType):
+            sxp_vals += [[k] + v]
+        else:
+            sxp_vals += [[k, v]]
+    return sxp_vals                          
+
+def sxp2map( s ):
+    """ conversion of sxp to map.
+        @param s: sxp expr
+        @type s:  list
+        @return: map
+        @rtype: dict
+    """
+    sxphash = {}
+        
+    for child in sxp.children( s ):
+        if isinstance( child, types.ListType ) and len( child ) > 1:
+            if isinstance( child[1], types.ListType ) and len( child[1] ) > 1:
+                sxphash[ child[0] ] = sxp2map( child )
+            else:
+                childs = sxp.children(child)
+                if len(childs) > 1:
+                    sxphash[ child[0] ] = childs
+                else:
+                    sxphash[ child[0] ] = childs[0]
+        
+    return sxphash  
+    
+    
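
A short round trip through these helpers (illustrative values). Note that a
single-element list value comes back from sxp2map() as a scalar, which is
why callers such as brief_pool_list() in xm re-wrap the values:

from xen.util.sxputils import map2sxp, sxp2map

d = {'name_label': 'Pool-0',
     'proposed_CPUs': [0, 1],
     'other_config': {'foo': 'bar'}}
s = map2sxp(d)
# e.g. [['name_label', 'Pool-0'], ['proposed_CPUs', 0, 1],
#       ['other_config', ['foo', 'bar']]]

# sxp2map() expects a complete sxp expression whose first element is the
# tag, so one has to be prepended for the way back:
print sxp2map(['pool'] + s)
# {'name_label': 'Pool-0', 'proposed_CPUs': [0, 1],
#  'other_config': {'foo': 'bar'}}
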
diff -r 655dc3bc1d8e tools/python/xen/xend/XendCPUPool.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xend/XendCPUPool.py      Fri Apr 17 10:51:38 2009 +0200
@@ -0,0 +1,872 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (c) 2009 Fujitsu Technology Solutions.
+#============================================================================
+
+""" CPU Pool support including XEN-API and Legacy API.
+"""
+
+import types
+import threading
+import re
+import xen.lowlevel.xc
+import XendNode
+import XendDomain
+from xen.xend.XendLogging import log
+from xen.xend.XendBase import XendBase
+from xen.xend import XendAPIStore
+from xen.xend.XendConstants import XS_POOLROOT
+from xen.xend import uuid as genuuid
+from xen.xend.XendError import VmError, XendAPIError, PoolError
+from xen.xend.xenstore.xstransact import xstransact
+from xen.util.sxputils import sxp2map, map2sxp
+
+
+XEND_ERROR_INTERNAL             = 'INTERNAL_ERROR'
+XEND_ERROR_UNKOWN_SCHED_POLICY  = 'UNKOWN_SCHED_POLICY'
+XEND_ERROR_BAD_POOL_STATE       = 'POOL_BAD_STATE'
+XEND_ERROR_POOL_PARAM           = 'PARAMETER_ERROR'
+XEND_ERROR_INSUFFICIENT_CPUS    = 'INSUFFICIENT_CPUS'
+XEND_ERROR_POOL_RECONF          = 'POOL_RECONF'
+XEND_ERROR_INVALID_CPU          = 'INVALID_CPU'
+XEND_ERROR_LAST_CPU_NOT_REM     = 'LAST_CPU_NOT_REMOVABLE'
+
+
+XEN_SCHEDULER_TO_ID = {
+    'credit' : xen.lowlevel.xc.XEN_SCHEDULER_CREDIT,
+    'sedf'   : xen.lowlevel.xc.XEN_SCHEDULER_SEDF,
+    }
+
+xc = xen.lowlevel.xc.xc()
+
+class XendCPUPool(XendBase):
+    """ CPU Pool management.
+        @ivar pool_lock: Lock to secure modification of pool data
+        @type pool_lock: Rlock
+    """
+
+    pool_lock = threading.RLock()
+    
+    def getClass(cls):
+        return "cpu_pool"
+
+    def getAttrRO(cls):
+        attrRO = ['resident_on',
+                  'started_VMs',
+                  'host_CPUs',
+                  'activated',
+                 ]
+        return XendBase.getAttrRO() + attrRO
+    
+    def getAttrRW(cls):
+        attrRW = ['name_label',
+                  'name_description',
+                  'auto_power_on',
+                  'ncpu',
+                  'sched_policy',
+                  'proposed_CPUs',
+                  'other_config',
+                 ]
+        return XendBase.getAttrRW() + attrRW
+
+    def getMethods(cls):
+        methods = ['destroy',
+                   'activate',
+                   'deactivate',
+                   'add_host_CPU_live',
+                   'remove_host_CPU_live',
+                   'add_to_proposed_CPUs',
+                   'remove_from_proposed_CPUs',
+                   'add_to_other_config',
+                   'remove_from_other_config',
+                  ]
+        return XendBase.getMethods() + methods
+
+    def getFuncs(cls):
+        funcs = ['create',
+                 'get_by_name_label',
+                ]
+        return XendBase.getFuncs() + funcs
+    
+    getClass    = classmethod(getClass)
+    getAttrRO   = classmethod(getAttrRO)
+    getAttrRW   = classmethod(getAttrRW)
+    getMethods  = classmethod(getMethods)
+    getFuncs    = classmethod(getFuncs)
+
+
+    #
+    # XenAPI function calls
+    #
+    
+    def create(cls, record):
+        """ Create a new managed pool instance.
+            @param record: attributes of pool
+            @type record:  dict
+            @return: uuid of created pool
+            @rtype:  str
+        """
+        new_uuid = genuuid.createString()
+        XendCPUPool(record, new_uuid)
+        XendNode.instance().save_cpu_pools()
+        return new_uuid
+
+    create = classmethod(create)
+
+
+    def get_by_name_label(cls, name_label):
+        """ Query a Pool(ref) by its name.
+            @return: ref of pool
+            @rtype:  str
+        """
+        cls.pool_lock.acquire()
+        try:
+            return [ inst.get_uuid()
+                     for inst in XendAPIStore.get_all(cls.getClass())
+                     if inst.name_label == name_label
+                   ]
+        finally:
+            cls.pool_lock.release()
+
+    get_by_name_label = classmethod(get_by_name_label)
+
+    
+    def get_cpu_pool_by_cpu_ref(cls, host_cpu):
+        """ Query cpu_pool ref the given cpu belongs to.
+            @param host_cpu: ref of host_cpu to lookup
+            @type host_cpu:  str
+            @return: list cpu_pool refs (list contains not more than one 
element)
+            @rtype:  list of str
+        """
+        node = XendNode.instance()
+        cpu_nr = node.get_host_cpu_field(host_cpu, 'number')
+        for pool_rec in xc.cpupool_getinfo():
+            if cpu_nr in pool_rec['cpulist']:
+                # pool found; return the ref
+                return cls.query_pool_ref(pool_rec['cpupool'])
+        return []
+
+    get_cpu_pool_by_cpu_ref = classmethod(get_cpu_pool_by_cpu_ref)
+
+
+    def get_all_managed(cls):
+        """ Query all managed pools.
+            @return: uuids of all managed pools
+            @rtype:  list of str
+        """
+        cls.pool_lock.acquire()
+        try:
+            managed_pools = [ inst.get_uuid()
+                              for inst in XendAPIStore.get_all(cls.getClass())
+                              if inst.is_managed() ]
+        finally:
+            cls.pool_lock.release()
+        return managed_pools
+    
+    get_all_managed = classmethod(get_all_managed)
+
+    
+    #
+    # XenAPI methods calls
+    #
+    
+    def __init__(self, record, new_uuid, managed_pool=True):
+        XendBase.__init__(self, new_uuid, record)
+        try:
+            self._managed = managed_pool
+            self.name_label = None
+            
+            name = record.get('name_label', 'Pool-Unnamed')
+            self._checkName(name)
+            self.name_label = name
+            self.name_description = record.get('name_description',
+                                               self.name_label)
+            self.proposed_cpus = [ int(cpu)
+                                   for cpu in record.get('proposed_CPUs', []) ]
+            self.auto_power_on = bool(record.get('auto_power_on', False))
+            self.ncpu = int(record.get('ncpu', 1))
+            self.sched_policy = record.get('sched_policy', '')
+            self.other_config = record.get('other_config', {})
+        except Exception, ex:
+            XendBase.destroy(self)
+            raise ex
+
+
+    def get_resident_on(self):
+        """ Always return uuid of own node.
+            @return: uuid of this node
+            @rtype:  str
+        """
+        return XendNode.instance().uuid
+
+    def get_started_VMs(self):
+        """ Query all VMs currently assigned to pool.
+            @return: refs of all VMs assigned to the pool; if the pool is
+                     not active, an empty list will be returned
+            @rtype:  list of str
+        """
+        if self.get_activated():
+            # search VMs related to this pool
+            pool_id = self.query_pool_id()
+            started_VMs = [ vm.get_uuid()
+                            for vm in XendDomain.instance().list('all')
+                            if vm.get_cpu_pool() == pool_id ]
+        else:
+            # pool not active, so it couldn't have any started VMs
+            started_VMs = []
+            
+        return started_VMs
+
+    def get_host_CPUs(self):
+        """ Query all cpu refs of this pool currently asisgned .
+            - Read pool id of this pool from xenstore
+            - Read cpu configuration from hypervisor
+            - lookup cpu number -> cpu ref
+            @return: host_cpu refs
+            @rtype:  list of str
+        """
+        if self.get_activated():
+            node = XendNode.instance()
+            pool_id = self.query_pool_id()
+            if pool_id == None:
+                raise PoolError(XEND_ERROR_INTERNAL,
+                                [self.getClass(), 'get_host_CPUs'])
+            cpus = []
+            for pool_rec in xc.cpupool_getinfo():
+                if pool_rec['cpupool'] == pool_id:
+                    cpus = pool_rec['cpulist'] 
+             
+            # query the host_cpu ref for every cpu of the pool
+            host_CPUs = [ cpu_ref
+                          for cpu_ref in node.get_host_cpu_refs()
+                          if node.get_host_cpu_field(cpu_ref, 'number')
+                              in cpus ]
+        else:
+            # pool not active, so it couldn't have any assigned cpus
+            host_CPUs = []
+            
+        return host_CPUs
+    
+    def get_activated(self):
+        """ Query if the pool is registered in XendStore.
+            If pool uuid is not in XenStore, the pool is not activated.
+            @return: True, if activated
+            @rtype:  bool
+        """
+        return self.query_pool_id() != None
+        
+    def get_name_label(self):
+        return self.name_label
+
+    def get_name_description(self):
+        return self.name_description
+
+    def get_auto_power_on(self):
+        return self.auto_power_on
+
+    def get_ncpu(self):
+        return self.ncpu
+
+    def get_sched_policy(self):
+        if len(self.sched_policy) == 0:
+            # default scheduler selected
+            return XendNode.instance().get_vcpus_policy()
+        else:
+            return self.sched_policy
+
+    def get_proposed_CPUs(self):
+        return [ str(cpu) for cpu in self.proposed_cpus ]
+
+    def get_other_config(self):
+        return self.other_config
+
+    def set_name_label(self, name_label):
+        self._checkName(name_label)
+        self.name_label = name_label
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_name_description(self, name_descr):
+        self.name_description = name_descr
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_auto_power_on(self, auto_power_on):
+        self.auto_power_on = bool(int(auto_power_on))
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_ncpu(self, ncpu):
+        _ncpu = int(ncpu)
+        if _ncpu < 1:
+            raise PoolError(XEND_ERROR_POOL_PARAM, 'ncpu')
+        self.ncpu = _ncpu 
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_sched_policy(self, sched_policy):
+        if self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+        self.sched_policy = sched_policy
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_proposed_CPUs(self, proposed_cpus):
+        if self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+        self.proposed_cpus = [ int(cpu) for cpu in proposed_cpus ]
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_other_config(self, other_config):
+        self.other_config = other_config
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def destroy(self):
+        """ In order to destroy a cpu pool, it must be deactivated """
+        self.pool_lock.acquire()
+        try:
+            if self.get_activated():
+                raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+            XendBase.destroy(self)
+        finally:
+            self.pool_lock.release()
+        XendNode.instance().save_cpu_pools()
+
+    def activate(self):
+        """ Create pool in hypervisor and add cpus.
+            Preconditions:
+            - pool not already active
+            - enough unbound cpus available
+            Actions:
+            - create pool in hypervisor
+            - select free cpus (preferably from the proposed_CPUs list) and
+              bind them to the pool
+            - create entries in Xenstore
+        """
+        self.pool_lock.acquire()
+        try:
+            if self.get_activated():
+                raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+            if self.sched_policy != XendNode.instance().get_vcpus_policy():
+                raise PoolError(XEND_ERROR_UNKOWN_SCHED_POLICY)           
+            unbound_cpus = set(self.unbound_cpus())
+            if len(unbound_cpus) < self.ncpu:
+                raise PoolError(XEND_ERROR_INSUFFICIENT_CPUS,
+                                [str(self.ncpu), str(len(unbound_cpus))])
+                
+            # build list of cpu numbers to bind to pool
+            cpu_set = set(self.proposed_cpus).intersection(unbound_cpus)
+            if len(cpu_set) < self.ncpu:
+                pool_cpus = (list(cpu_set) +
+                             list(unbound_cpus.difference(cpu_set)))
+            else:
+                pool_cpus = list(cpu_set)
+            pool_cpus = pool_cpus[0:self.ncpu]
+
+            # create pool in hypervisor
+            pool_id = xc.cpupool_create(
+                sched = XEN_SCHEDULER_TO_ID.get(self.sched_policy, 0))
+            
+            self.update_XS(pool_id)
+            # add cpus
+            for cpu in pool_cpus:
+                xc.cpupool_addcpu(pool_id, cpu)
+                
+        finally:
+            self.pool_lock.release()
+
+    def deactivate(self):
+        """ Delete pool in hypervisor
+            Preconditions:
+            - pool is activated
+            - no running VMs in pool
+            Actions:
+            - call hypervisor for deletion
+            - remove path of pool in xenstore
+        """
+        self.pool_lock.acquire()
+        try:
+            if not self.get_activated():
+                raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
+            if len(self.get_started_VMs()) != 0:
+                raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'in use')
+
+            pool_id = self.query_pool_id()
+            # remove cpus from pool
+            cpus = []
+            for pool_rec in xc.cpupool_getinfo():
+                if pool_rec['cpupool'] == pool_id:
+                    cpus = pool_rec['cpulist'] 
+            for cpu_number in cpus:
+                xc.cpupool_removecpu(pool_id, cpu_number)
+            xc.cpupool_destroy(pool_id)
+
+            # update XenStore
+            xs_path = XS_POOLROOT + "%s/" % pool_id
+            xstransact.Remove(xs_path)
+        finally:
+            self.pool_lock.release()
+        
+    def add_host_CPU_live(self, cpu_ref):
+        """ Add cpu to pool, if it is currently not assigned to a pool.
+            @param cpu_ref: reference of host_cpu instance to add
+            @type  cpu_ref: str
+        """
+        if not self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
+        node = XendNode.instance()
+        number = node.get_host_cpu_field(cpu_ref, 'number')
+
+        self.pool_lock.acquire()
+        try:
+            pool_id = self.query_pool_id()
+            other_pool_ref = self.get_cpu_pool_by_cpu_ref(cpu_ref)
+            if len(other_pool_ref) != 0:
+                raise PoolError(XEND_ERROR_INVALID_CPU,
+                            "cpu already assigned to pool '%s'" % 
other_pool_ref[0])
+            xc.cpupool_addcpu(pool_id, number)
+        finally:
+            self.pool_lock.release()                              
+                              
+    def remove_host_CPU_live(self, cpu_ref):
+        """ Remove cpu from pool.
+            After a successful call, the cpu is free.
+            Removing the last cpu of a pool that still runs domains is
+            rejected.
+            @param cpu_ref: reference of host_cpu instance to remove
+            @type  cpu_ref: str
+        """
+        if not self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
+        node = XendNode.instance()
+        number = node.get_host_cpu_field(cpu_ref, 'number')
+
+        self.pool_lock.acquire()
+        try:
+            pool_id = self.query_pool_id()
+            pool_rec = {}
+            for pool in xc.cpupool_getinfo():
+                if pool['cpupool'] == pool_id:
+                    pool_rec = pool
+                    break
+
+            if number in pool_rec['cpulist']:
+                if len(pool_rec['cpulist']) < 2 and pool_rec['n_dom'] > 0:
+                    raise PoolError(XEND_ERROR_LAST_CPU_NOT_REM,
+                                    'could not remove last cpu')
+                xc.cpupool_removecpu(pool_id, number)
+            else:
+                raise PoolError(XEND_ERROR_INVALID_CPU,
+                                'CPU not assigned to pool')
+        finally:
+            self.pool_lock.release()
+                              
+    def add_to_proposed_CPUs(self, cpu):
+        if self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+
+        _cpu = int(cpu)
+        if _cpu not in self.proposed_cpus:
+            self.proposed_cpus.append(_cpu) 
+            self.proposed_cpus.sort()
+            if self._managed:
+                XendNode.instance().save_cpu_pools()
+                          
+    def remove_from_proposed_CPUs(self, cpu):
+        if self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+        _cpu = int(cpu)
+        if _cpu in self.proposed_cpus:
+            self.proposed_cpus.remove(_cpu) 
+            if self._managed:
+                XendNode.instance().save_cpu_pools()
+                              
+    def add_to_other_config(self, key, value):
+        self.other_config[key] = value
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+                              
+    def remove_from_other_config(self, key):
+        if key in self.other_config:
+            del self.other_config[key]
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+
+    #
+    # Legacy RPC calls
+    #
+    def pool_new(cls, config):
+        try:
+            record = sxp2map(config)
+            if record.has_key('proposed_CPUs') and \
+               not isinstance(record['proposed_CPUs'], types.ListType):
+                record['proposed_CPUs'] = [record['proposed_CPUs']]
+            new_uuid = cls.create(record)
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+        return new_uuid
+    
+    def pool_create(cls, config):
+        try:
+            record = sxp2map(config)
+            if record.has_key('proposed_CPUs') and \
+               not isinstance(record['proposed_CPUs'], types.ListType):
+                record['proposed_CPUs'] = [record['proposed_CPUs']]
+            new_uuid = genuuid.createString()
+            pool = XendCPUPool(record, new_uuid, False)
+            pool.activate()
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+    
+    def pool_start(cls, poolname):
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        try:
+            pool.activate()
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+
+    def pool_list(cls, names):
+        sxprs = []
+        try:
+            node = XendNode.instance()
+            xd = XendDomain.instance()
+            pools = cls.get_all_records()
+            for (pool_uuid, pool_vals) in pools.items():
+                if pool_vals['name_label'] in names or len(names) == 0:
+                    # convert host_cpu refs to cpu numbers
+                    cpus = [ node.get_host_cpu_field(cpu_ref, 'number')
+                             for cpu_ref in pool_vals['host_CPUs'] ]
+                    cpus.sort()
+                    pool_vals['host_CPU_numbers'] = cpus
+                    vm_names = [ xd.get_vm_by_uuid(uuid).getName()
+                                 for uuid in pool_vals['started_VMs'] ]
+                    pool_vals['started_VM_names'] = vm_names
+                    pool_vals['auto_power_on'] = int(
+                        pool_vals['auto_power_on'])
+                    sxprs += [[pool_uuid] + map2sxp(pool_vals)]
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+        return sxprs
+    
+    def pool_destroy(cls, poolname):
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        try:
+            pool.deactivate()
+            if not pool.is_managed():
+                pool.destroy()
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+        
+    def pool_delete(cls, poolname):
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        try:
+            pool.destroy()
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+
+    def pool_cpu_add(cls, poolname, cpu):
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        try:
+            cpu_ref = cls._cpu_number_to_ref(int(cpu))
+            if cpu_ref:
+                pool.add_host_CPU_live(cpu_ref)
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+
+    def pool_cpu_remove(cls, poolname, cpu):
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        try:
+            cpu_ref = cls._cpu_number_to_ref(int(cpu))
+            if cpu_ref:
+                pool.remove_host_CPU_live(cpu_ref)
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+
+    def pool_migrate(cls, domname, poolname):
+        dom = XendDomain.instance() 
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        dominfo = dom.domain_lookup_nr(domname)
+        if not dominfo:
+            raise VmError('unknown domain %s' % domname)
+        domid = dominfo.getDomid()
+        if domid is not None:
+            if domid == 0:
+                raise VmError('could not move Domain-0')
+            try:
+                cls.move_domain(pool.get_uuid(), domid)
+            except Exception, ex:
+                raise VmError('could not move domain: %s' % str(ex))
+        dominfo.info['pool_name'] = poolname
+        dom.managed_config_save(dominfo)
+
+    pool_new        = classmethod(pool_new)
+    pool_create     = classmethod(pool_create)
+    pool_start      = classmethod(pool_start)
+    pool_list       = classmethod(pool_list)
+    pool_destroy    = classmethod(pool_destroy)
+    pool_delete     = classmethod(pool_delete)
+    pool_cpu_add    = classmethod(pool_cpu_add)
+    pool_cpu_remove = classmethod(pool_cpu_remove)
+    pool_migrate    = classmethod(pool_migrate)
+
+
+    #
+    # methods
+    #
+
+    def is_managed(self):
+        """ Check, if pool is managed.
+            @return: True, if managed
+            @rtype: bool
+        """
+        return self._managed
+    
+    def query_pool_id(self):
+        """ Get corresponding pool-id of pool instance from XenStore.
+            @return: pool id or None
+            @rtype:  int
+        """
+        self.pool_lock.acquire()
+        try:
+            for pool_id in xstransact.List(XS_POOLROOT):
+                uuid = xstransact.Read(XS_POOLROOT + "%s/" % pool_id, 'uuid')
+                if uuid == self.get_uuid():
+                    return int(pool_id)
+        finally:
+            self.pool_lock.release()
+            
+        return None
+
+    def update_XS(self, pool_id):
+        """ Write (or update) data in xenstore taken from instance.
+            @param pool_id: Pool id to build path to pool data in xenstore
+            @type  pool_id: int
+        """
+        self.pool_lock.acquire()
+        try:
+            xs_path = XS_POOLROOT + "%s/" % pool_id
+            xs_entries = { 'uuid' : self.get_uuid(),
+                           'name' : self.name_label,
+                           'description' : self.name_description
+                         }
+            xstransact.Mkdir(xs_path)
+            xstransact.Mkdir(xs_path, 'other_config')
+            xstransact.Write(xs_path, xs_entries)
+            xstransact.Write('%s%s' % (xs_path, 'other_config'),
+                             self.other_config)
+        finally:
+            self.pool_lock.release()
+    
+    def _checkName(self, name):
+        """ Check if a pool name is valid. Valid names contain alphabetic
+            characters, digits, or characters in '_-.:/+'.
+            The same name cannot be used for more than one pool at the same 
+            time.
+            @param name: name
+            @type name:  str
+            @raise: PoolError if invalid
+        """
+        if name is None or name == '':
+            raise PoolError(XEND_ERROR_POOL_PARAM, 'Missing Pool Name')
+        if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
+            raise PoolError(XEND_ERROR_POOL_PARAM, 'Invalid Pool Name')
+        
+        pool = self.lookup_pool(name)
+        if pool and pool.get_uuid() != self.get_uuid():
+            raise PoolError(XEND_ERROR_POOL_PARAM,
+                "Pool name '%s' already exists" % name)
+
+
+    #
+    # class methods
+    #
+
+    def recreate_active_pools(cls):
+        """ Read active pool config from hypervisor and create pool instances.
+            - Query pool ids and assigned CPUs from hypervisor.
+            - Query additional information for every pool from xenstore.
+              If an entry for a pool id is missing in xenstore, it will be
+              recreated with a new uuid and a generic name (this is an
+              error case).
+            - Create an XendCPUPool instance for every pool id.
+            This function has to be called after the managed pools have
+            been recreated.
+        """
+        log.debug('recreate_active_pools')
+
+        pool_ids = [ pool_info['cpupool'] 
+                     for pool_info in xc.cpupool_getinfo() ]
+
+        for pool in pool_ids:
+            # read pool data from xenstore
+            path = XS_POOLROOT + "%s/" % pool
+            uuid = xstransact.Read(path, 'uuid')
+            if not uuid:
+                # xenstore entry missing / invalid; create entry with new uuid
+                uuid = genuuid.createString()
+                name = "Pool-%s" % pool
+                try:
+                    inst = XendCPUPool( { 'name_label' : name }, uuid, False )
+                    inst.update_XS(pool)
+                except PoolError, ex:
+                    # log error and skip domain
+                    log.error('cannot recreate pool %s; skipping (reason: %s)'
+                              % (name, ex))
+            else:
+                (name, descr) = xstransact.Read(path, 'name', 'description')
+                other_config = {}
+                for key in xstransact.List(path + 'other_config'):
+                    other_config[key] = xstransact.Read(
+                        path + 'other_config/%s' % key)
+
+                # check existence of pool instance
+                inst = XendAPIStore.get(uuid, cls.getClass())
+                if inst:
+                    # update attributes of existing instance
+                    inst.name_label = name
+                    inst.name_description = descr
+                    inst.other_config = other_config
+                else:
+                    # recreate instance
+                    try:
+                        inst = XendCPUPool( 
+                            { 'name_label' : name,
+                              'name_description' : descr,
+                              'other_config' : other_config,
+                            }, 
+                            uuid, False )
+                    except PoolError, ex:
+                        # log error and skip domain
+                        log.error(
+                            'cannot recreate pool %s; skipping (reason: %s)'
+                            % (name, ex))
+            
+    recreate_active_pools = classmethod(recreate_active_pools)
+
+        
+    def recreate(cls, record, current_uuid):
+        """ Recreate a pool instance while xend restart.
+            @param record: attributes of pool
+            @type record:  dict
+            @param current_uuid: uuid of pool to create
+            @type current_uuid:  str
+        """
+        XendCPUPool(record, current_uuid)
+
+    recreate = classmethod(recreate)
+
+
+    def autostart_pools(cls):
+        """ Start managed pools that are marked as autostart pools.
+            The function is called after recreation of managed domains
+            during xend restart.
+        """
+        cls.pool_lock.acquire()
+        try:
+            for inst in XendAPIStore.get_all(cls.getClass()):
+                if inst.is_managed() and inst.auto_power_on and \
+                   inst.query_pool_id() == None:
+                    inst.activate()
+        finally:
+            cls.pool_lock.release()
+
+    autostart_pools = classmethod(autostart_pools)
+
+    
+    def move_domain(cls, pool_ref, domid):
+        cls.pool_lock.acquire()
+        try:
+            pool = XendAPIStore.get(pool_ref, cls.getClass())
+            pool_id = pool.query_pool_id()
+            
+            xc.cpupool_movedomain(pool_id, domid)
+        finally:
+            cls.pool_lock.release()
+
+    move_domain = classmethod(move_domain)
+
+    
+    def query_pool_ref(cls, pool_id):
+        """ Get pool ref by pool id.
+            Take the ref from xenstore.
+            @param pool_id: id of the pool to look up
+            @type  pool_id: int
+            @return: ref
+            @rtype:  str
+        """
+        uuid = xstransact.Read(XS_POOLROOT + "%s/" % pool_id, 'uuid')
+        if uuid:
+            return [uuid]
+        else:
+            return []
+
+    query_pool_ref = classmethod(query_pool_ref)
+
+
+    def lookup_pool(cls, id_or_name):
+        """ Search XendCPUPool instance with given id_or_name.
+            @param id_or_name: pool id or pool nameto search
+            @type id_or_name:  [int, str]
+            @return: instane or None if not found
+            @rtype:  XendCPUPool
+        """
+        pool_uuid = None
+        try:
+            pool_id = int(id_or_name)
+            # pool id given
+            pool_uuid = cls.query_pool_ref(pool_id)
+        except ValueError:
+            # pool name given
+            pool_uuid = cls.get_by_name_label(id_or_name)
+
+        if len(pool_uuid) > 0:
+            return XendAPIStore.get(pool_uuid[0], cls.getClass())
+        else:
+            return None
+        
+    lookup_pool = classmethod(lookup_pool)
+
+
+    def _cpu_number_to_ref(cls, number):
+        node = XendNode.instance()
+        for cpu_ref in node.get_host_cpu_refs():
+            if node.get_host_cpu_field(cpu_ref, 'number') == number:
+                return cpu_ref
+        return None
+
+    _cpu_number_to_ref = classmethod(_cpu_number_to_ref)
+
+    
+    def unbound_cpus(cls):
+        """ Build list containing the numbers of all cpus not bound to a pool.
+            Info is taken from Hypervisor.
+            @return: list of cpu numbers
+            @rytpe:  list of int
+        """
+        return xc.cpupool_freeinfo()
+    
+    unbound_cpus = classmethod(unbound_cpus)
+
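
As a reading aid for activate() above: the cpu selection reduces to the
following standalone sketch. The helper name select_pool_cpus and its
arguments are illustrative only, not part of the patch:

    def select_pool_cpus(proposed, unbound, ncpu):
        # prefer proposed cpus that are actually unbound
        cpu_set = set(proposed).intersection(set(unbound))
        if len(cpu_set) < ncpu:
            # pad with the remaining unbound cpus
            pool_cpus = list(cpu_set) + list(set(unbound).difference(cpu_set))
        else:
            pool_cpus = list(cpu_set)
        # never bind more than ncpu cpus
        return pool_cpus[0:ncpu]

    # e.g. select_pool_cpus([2, 3], [1, 2, 4], 2) takes cpu 2 from the
    # proposed list and pads with one of the remaining unbound cpus.
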
diff -r 655dc3bc1d8e tools/python/xen/xm/pool-create.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xm/pool-create.py        Fri Apr 17 10:52:08 2009 +0200
@@ -0,0 +1,51 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (C) 2009 Fujitsu Technology Solutions
+#============================================================================
+
+""" Create a new unmanaged pool.
+"""
+
+import sys
+from xen.xm.main import serverType, SERVER_XEN_API, server
+from xen.xm.pool import parseCommandLine, err, help as help_options
+from xen.util.sxputils import sxp2map
+
+def help():
+    return help_options()
+
+
+def main(argv):
+    try:
+        (opts, config) = parseCommandLine(argv)
+    except StandardError, ex:
+        err(str(ex))
+
+    if not opts:
+        return
+
+    if serverType == SERVER_XEN_API:
+        record = sxp2map(config)
+        if type(record.get('proposed_CPUs', [])) != list:
+            record['proposed_CPUs'] = [record['proposed_CPUs']]
+        ref = server.xenapi.cpu_pool.create(record)
+        if ref:
+            server.xenapi.cpu_pool.activate(ref)
+    else:
+        server.xend.cpu_pool.create(config)
+
+if __name__ == '__main__':
+    main(sys.argv)
+    
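
For orientation, a record as consumed by cpu_pool.create might look like
the following sketch; the keys are the ones read by XendCPUPool.__init__
above, all values are purely illustrative:

    record = {
        'name_label'       : 'Pool-1',
        'name_description' : 'example pool',  # defaults to name_label
        'proposed_CPUs'    : ['2', '3'],      # converted to ints internally
        'auto_power_on'    : False,
        'ncpu'             : 2,
        'sched_policy'     : 'credit',        # '' selects the default scheduler
        'other_config'     : {},
    }
    ref = server.xenapi.cpu_pool.create(record)
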
diff -r 655dc3bc1d8e tools/python/xen/xm/pool-new.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xm/pool-new.py   Fri Apr 17 10:51:56 2009 +0200
@@ -0,0 +1,50 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (C) 2009 Fujitsu Technology Solutions
+#============================================================================
+
+""" Create a new managed pool.
+"""
+
+import sys
+from xen.xm.main import serverType, SERVER_XEN_API, server
+from xen.xm.pool import parseCommandLine, err, help as help_options
+from xen.util.sxputils import sxp2map
+
+
+def help():
+    return help_options()
+
+
+def main(argv):
+    try:
+        (opts, config) = parseCommandLine(argv)
+    except StandardError, ex:
+        err(str(ex))
+        
+    if not opts:
+        return
+
+    if serverType == SERVER_XEN_API:
+        record = sxp2map(config)
+        if type(record.get('proposed_CPUs', [])) != list:
+            record['proposed_CPUs'] = [record['proposed_CPUs']]
+        server.xenapi.cpu_pool.create(record)
+    else:
+        server.xend.cpu_pool.new(config)
+
+if __name__ == '__main__':
+    main(sys.argv)
+    
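
Both pool-new and pool-create normalize proposed_CPUs the same way,
apparently because sxp2map() yields a scalar instead of a list when only
a single cpu is configured. A minimal illustration with an assumed
single-cpu record:

    record = {'proposed_CPUs': '3'}   # assumed sxp2map() result for one cpu
    if type(record.get('proposed_CPUs', [])) != list:
        record['proposed_CPUs'] = [record['proposed_CPUs']]
    assert record['proposed_CPUs'] == ['3']
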
diff -r 655dc3bc1d8e tools/python/xen/xm/pool.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xm/pool.py       Fri Apr 17 10:52:22 2009 +0200
@@ -0,0 +1,236 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (C) 2009 Fujitsu Technology Solutions
+#============================================================================
+
+""" Common function of cmds pool-new / pool-create.
+"""
+
+import sys
+import types
+import os
+
+from xen.xend import PrettyPrint
+from xen.xend import sxp
+
+from xen.xm.opts import Opts, set_value, set_true, append_value, OptionError
+
+GOPTS = Opts(use="""[options] [vars]
+
+Create a pool.
+
+Pool creation parameters can be set by command-line switches, from
+a python configuration script or an SXP config file. See documentation
+for --defconfig, --config. Configuration variables can be set using
+VAR=VAL on the command line. For example name=Pool-1 sets name to Pool-1.
+
+""")
+
+GOPTS.opt('help', short='h',
+          fn=set_true, default=0,
+          use="Print this help.")
+
+GOPTS.opt('help_config',
+          fn=set_true, default=0,
+          use="Print the available configuration variables (vars) for the "
+          "configuration script.")
+
+GOPTS.opt('path', val='PATH',
+          fn=set_value, default='.:/etc/xen/pool',
+          use="Search path for configuration scripts. "
+          "The value of PATH is a colon-separated directory list.")
+
+GOPTS.opt('defconfig', short='f', val='FILE',
+          fn=set_value, default='xmdefconfig',
+          use="Use the given Python configuration script."
+          "The configuration script is loaded after arguments have been "
+          "processed. Each command-line option sets a configuration "
+          "variable named after its long option name, and these "
+          "variables are placed in the environment of the script before "
+          "it is loaded. Variables for options that may be repeated have "
+          "list values. Other variables can be set using VAR=VAL on the "
+          "command line. "     
+          "After the script is loaded, option values that were not set "
+          "on the command line are replaced by the values set in the script.")
+
+GOPTS.default('defconfig')
+
+GOPTS.opt('config', short='F', val='FILE',
+          fn=set_value, default=None,
+          use="CPU pool configuration to use (SXP).\n"
+          "SXP is the underlying configuration format used by Xen.\n"
+          "SXP configurations can be hand-written or generated from Python "
+          "configuration scripts, using the -n (dryrun) option to print "
+          "the configuration.")
+
+GOPTS.opt('dryrun', short='n',
+          fn=set_true, default=0,
+          use="Dry run - prints the resulting configuration in SXP but "
+          "does not create the CPU pool.")
+
+GOPTS.var('name', val='NAME', fn=set_value, default=None,
+          use="CPU pool name.")
+
+GOPTS.var('sched', val='SCHED', fn=set_value, default='credit',
+          use="Scheduler to use for the CPU pool.")
+
+GOPTS.var('cpus', val='CPUS', fn=set_value, default=1,
+          use="CPUS to assign to the CPU pool.")
+
+GOPTS.var('other_config', val='OTHER_CONFIG', fn=append_value, default=[],
+          use="Additional info for CPU pool")
+
+
+def sxp2map(sxp_val):
+    record = {}
+    for x in sxp_val:
+        if isinstance(x, (types.ListType, types.TupleType)) \
+           and len(x) > 1:
+            if isinstance(x[1], (types.ListType, types.TupleType)):
+                record[x[0]] = sxp2map(x[1])
+            else:                    
+                record[x[0]] = x[1]
+    return record
+
+def err(msg):
+    print >> sys.stderr, "Error: %s" % msg
+    sys.exit(-1)
+
+def make_cpus_config(cfg_cpus):
+    """ Taken from XendConfig. """
+    # Convert 'cpus' to list of list of ints
+    
+    cpus_list = []
+    # Convert the following string to list of ints.
+    # The string supports a list of ranges (0-3),
+    # separated by commas, and negation (^1).
+    # Precedence is settled by order of the string:
+    #    "0-3,^1"      -> [0,2,3]
+    #    "0-3,^1,1"    -> [0,1,2,3]
+    def cnv(s):
+        l = []
+        for c in s.split(','):
+            if c.find('-') != -1:
+                (x, y) = c.split('-')
+                for i in range(int(x), int(y)+1):
+                    l.append(int(i))
+            else:
+                # a '^' prefix removes the cpu from the list
+                if len(c) > 0:
+                    if c[0] == '^':
+                        l = [x for x in l if x != int(c[1:])]
+                    else:
+                        l.append(int(c))
+        return l
+    
+    if type(cfg_cpus) == list:
+        if len(cfg_cpus) > 0 and type(cfg_cpus[0]) == list:
+            # If sxp_cfg was created from config.sxp,
+            # the form of 'cpus' is list of list of string.
+            # Convert 'cpus' to list of list of ints.
+            # Conversion examples:
+            #    [['1']]               -> [[1]]
+            #    [['0','2'],['1','3']] -> [[0,2],[1,3]]
+            try:
+                for c1 in cfg_cpus:
+                    cpus = []
+                    for c2 in c1:
+                        cpus.append(int(c2))
+                    cpus_list.append(cpus)
+            except ValueError, e:
+                err('cpus = %s: %s' % (cfg_cpus, e))
+        else:
+            # Conversion examples:
+            #    ["1"]               -> [[1]]
+            #    ["0,2","1,3"]       -> [[0,2],[1,3]]
+            #    ["0-3,^1","1-4,^2"] -> [[0,2,3],[1,3,4]]
+            try:
+                for c in cfg_cpus:
+                    cpus = cnv(c)
+                    cpus_list.append(cpus)
+            except ValueError, e:
+                err('cpus = %s: %s' % (cfg_cpus, e))
+    else:
+        # Conversion examples:
+        #  cpus=1:
+        #    "1"      -> [[1]]
+        #    "0-3,^1" -> [[0,2,3]]
+        #  cpus=2:
+        #    "1"      -> [[1],[1]]
+        #    "0-3,^1" -> [[0,2,3],[0,2,3]]
+        try:
+            cpus_list = cnv(cfg_cpus)
+        except ValueError, e:
+            err('cpus = %s: %s' % (cfg_cpus, e))
+    return cpus_list
+
+def make_config(vals):
+    config  = ['pool']
+    config += [['name_label', vals.name]]
+    config += [['sched_policy', vals.sched]]
+    if type(vals.cpus) == int:            
+        config +=  [['ncpu', vals.cpus], ['proposed_CPUs' , []]]
+    elif type(vals.cpus) == str and len(vals.cpus) > 1 and vals.cpus[0] == '#':
+        try:
+            config +=  [['ncpu', int(vals.cpus[1:])], ['proposed_CPUs' , []]]
+        except ValueError, ex:
+            err('Illegal value for parameter "cpus"')
+    else:
+        prop_cpus = make_cpus_config(vals.cpus)
+        config +=  [['ncpu', len(prop_cpus)],
+                    ['proposed_CPUs'] + prop_cpus]
+    other_config = []
+    for entry in vals.other_config:
+        if '=' in entry:
+            (var, val) = entry.strip().split('=', 1)
+            other_config.append([var, val])
+    config +=  [['other_config'] + other_config]
+    return config
+
+def parseCommandLine(argv):
+    GOPTS.reset()
+    args = GOPTS.parse(argv)
+
+    if GOPTS.vals.help or GOPTS.vals.help_config:
+        if GOPTS.vals.help_config:
+            print GOPTS.val_usage()
+        return (None, None)
+    
+    # Process remaining args as config variables.
+    for arg in args:
+        if '=' in arg:
+            (var, val) = arg.strip().split('=', 1)
+            GOPTS.setvar(var.strip(), val.strip())
+    if GOPTS.vals.config:
+        try:
+            config = sxp.parse(file(GOPTS.vals.config))[0]
+        except IOError, ex:
+            raise OptionError("Cannot read file %s: %s" % (config, ex[1]))
+    else:
+        GOPTS.load_defconfig()
+        if not GOPTS.getopt('name') and GOPTS.getopt('defconfig'):
+            GOPTS.setopt('name', os.path.basename(
+                GOPTS.getopt('defconfig')))
+        config = make_config(GOPTS.vals)
+
+    if GOPTS.vals.dryrun:
+        PrettyPrint.prettyprint(config)
+        return (None, None)
+    
+    return (GOPTS, config)
+
+def help():
+    return str(GOPTS)
+
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-devel] [Patch 4/6] xen: cpupool support - python stuff (xm, xend), Juergen Gross <=