[Xen-changelog] [xen-unstable] Merge with xen-ia64-unstable.hg

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Merge with xen-ia64-unstable.hg
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 23 Mar 2007 05:30:46 -0700
Delivery-date: Fri, 23 Mar 2007 06:22:00 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1174644718 0
# Node ID 2cecfa20ffa9928f8c18d1e24123fb587406154a
# Parent  be1017157768e8d6d5d5552cf0008d81d611b1bb
# Parent  a234dcbd8357d70ee0c6e6d71969ea0d7584d376
Merge with xen-ia64-unstable.hg
---
 tools/libxen/test/test_bindings.c            |    2 
 tools/python/xen/util/xmlrpclib2.py          |    5 
 tools/python/xen/xend/XendAPI.py             |   96 ++++++++++++--
 tools/python/xen/xend/XendConfig.py          |   52 +++-----
 tools/python/xen/xend/XendDomain.py          |   60 +++++++--
 tools/python/xen/xend/XendDomainInfo.py      |   52 +++-----
 tools/python/xen/xend/XendNode.py            |    4 
 tools/python/xen/xend/XendQCoWStorageRepo.py |   23 ---
 tools/python/xen/xend/XendVMMetrics.py       |   92 +++++++++++---
 tools/python/xen/xm/main.py                  |  175 ++++++++++++++++++++++-----
 tools/python/xen/xm/xenapi_create.py         |   14 --
 11 files changed, 406 insertions(+), 169 deletions(-)

diff -r be1017157768 -r 2cecfa20ffa9 tools/libxen/test/test_bindings.c
--- a/tools/libxen/test/test_bindings.c Thu Mar 22 09:30:54 2007 -0600
+++ b/tools/libxen/test/test_bindings.c Fri Mar 23 10:11:58 2007 +0000
@@ -403,7 +403,7 @@ static xen_vm create_new_vm(xen_session 
      * Create a new disk for the new VM.
      */
     xen_sr_set *srs;
-    if (!xen_sr_get_by_name_label(session, &srs, "Local") ||
+    if (!xen_sr_get_by_name_label(session, &srs, "QCoW") ||
         srs->size < 1)
     {
         fprintf(stderr, "SR lookup failed.\n");
diff -r be1017157768 -r 2cecfa20ffa9 tools/python/xen/util/xmlrpclib2.py
--- a/tools/python/xen/util/xmlrpclib2.py       Thu Mar 22 09:30:54 2007 -0600
+++ b/tools/python/xen/util/xmlrpclib2.py       Fri Mar 23 10:11:58 2007 +0000
@@ -54,9 +54,10 @@ def stringify(value):
        (isinstance(value, int) and not isinstance(value, bool)):
         return str(value)
     elif isinstance(value, dict):
+        new_value = {}
         for k, v in value.items():
-            value[k] = stringify(v)
-        return value
+            new_value[stringify(k)] = stringify(v)
+        return new_value
     elif isinstance(value, (tuple, list)):
         return [stringify(v) for v in value]
     else:
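
The stringify() change above stops mutating the dictionary while iterating over it and converts the keys as well as the values, so non-string keys survive XML-RPC marshalling. A minimal standalone sketch of the same idea, not part of the patch (Python 2 style to match the tree; the helper name is made up):

    def stringify_sketch(value):
        # convert ints/longs to strings, but leave booleans alone
        if isinstance(value, (int, long)) and not isinstance(value, bool):
            return str(value)
        elif isinstance(value, dict):
            # build a fresh dict instead of mutating the one being iterated
            new_value = {}
            for k, v in value.items():
                new_value[stringify_sketch(k)] = stringify_sketch(v)
            return new_value
        elif isinstance(value, (tuple, list)):
            return [stringify_sketch(v) for v in value]
        else:
            return value

    # e.g. stringify_sketch({1: 2L}) == {'1': '2'}
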
diff -r be1017157768 -r 2cecfa20ffa9 tools/python/xen/xend/XendAPI.py
--- a/tools/python/xen/xend/XendAPI.py  Thu Mar 22 09:30:54 2007 -0600
+++ b/tools/python/xen/xend/XendAPI.py  Fri Mar 23 10:11:58 2007 +0000
@@ -643,6 +643,7 @@ class XendAPI(object):
 
     host_attr_ro = ['software_version',
                     'resident_VMs',
+                    'PIFs',
                     'host_CPUs',
                     'cpu_configuration',
                     'metrics',
@@ -712,6 +713,8 @@ class XendAPI(object):
         return xen_api_success(XendNode.instance().xen_version())
     def host_get_resident_VMs(self, session, host_ref):
         return xen_api_success(XendDomain.instance().get_domain_refs())
+    def host_get_PIFs(self, session, ref):
+        return xen_api_success(XendNode.instance().get_PIF_refs())
     def host_get_host_CPUs(self, session, host_ref):
         return xen_api_success(XendNode.instance().get_host_cpu_refs())
     def host_get_metrics(self, _, ref):
@@ -1034,9 +1037,8 @@ class XendAPI(object):
 
     VM_attr_ro = ['power_state',
                   'resident_on',
-                  'memory_static_max',                  
+                  'memory_static_max',
                   'memory_static_min',
-                  'VCPUs_number',
                   'consoles',
                   'VIFs',
                   'VBDs',
@@ -1054,6 +1056,8 @@ class XendAPI(object):
                   'auto_power_on',
                   'memory_dynamic_max',
                   'memory_dynamic_min',
+                  'VCPUs_max',
+                  'VCPUs_at_startup',
                   'VCPUs_params',
                   'actions_after_shutdown',
                   'actions_after_reboot',
@@ -1081,9 +1085,11 @@ class XendAPI(object):
                   ('suspend', None),
                   ('resume', None),
                   ('send_sysrq', None),
+                  ('set_VCPUs_number_live', None),
                   ('add_to_HVM_boot_params', None),
                   ('remove_from_HVM_boot_params', None),
                   ('add_to_VCPUs_params', None),
+                  ('add_to_VCPUs_params_live', None),
                   ('remove_from_VCPUs_params', None),
                   ('add_to_platform', None),
                   ('remove_from_platform', None),
@@ -1104,6 +1110,8 @@ class XendAPI(object):
         'memory_dynamic_max',
         'memory_dynamic_min',
         'memory_static_min',
+        'VCPUs_max',
+        'VCPUs_at_startup',
         'VCPUs_params',
         'actions_after_shutdown',
         'actions_after_reboot',
@@ -1150,10 +1158,6 @@ class XendAPI(object):
         dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
         return xen_api_success(dom.get_memory_static_min())
     
-    def VM_get_VCPUs_number(self, session, vm_ref):
-        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
-        return xen_api_success(dom.getVCpuCount())
-    
     def VM_get_VIFs(self, session, vm_ref):
         dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
         return xen_api_success(dom.get_vifs())
@@ -1177,6 +1181,14 @@ class XendAPI(object):
     def VM_get_metrics(self, _, vm_ref):
         dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
         return xen_api_success(dom.get_metrics())
+
+    def VM_get_VCPUs_max(self, _, vm_ref):
+        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
+        return xen_api_todo()
+
+    def VM_get_VCPUs_at_startup(self, _, vm_ref):
+        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
+        return xen_api_todo()
 
     # attributes (rw)
     def VM_get_name_label(self, session, vm_ref):
@@ -1191,9 +1203,8 @@ class XendAPI(object):
         dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
         return xen_api_todo()
     
-    def VM_get_is_a_template(self, session, vm_ref):
-        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
-        return xen_api_todo()
+    def VM_get_is_a_template(self, session, ref):
+        return self.VM_get('is_a_template', session, ref)
     
     def VM_get_memory_dynamic_max(self, session, vm_ref):
         dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
@@ -1302,6 +1313,36 @@ class XendAPI(object):
         dom.info['vcpus_params'][key] = value
         return self._VM_save(dom)
 
+    def VM_add_to_VCPUs_params_live(self, session, vm_ref, key, value):
+        self.VM_add_to_VCPUs_params(session, vm_ref, key, value)
+        self._VM_VCPUs_params_refresh(vm_ref)
+        return xen_api_success_void()
+
+    def _VM_VCPUs_params_refresh(self, vm_ref):
+        xendom  = XendDomain.instance()
+        xeninfo = xendom.get_vm_by_uuid(vm_ref)
+
+        #update the cpumaps
+        for key, value in xeninfo.info['vcpus_params'].items():
+            if key.startswith("cpumap"):
+                vcpu = int(key[6:])
+                try:
+                    xendom.domain_pincpu(xeninfo.getDomid(), vcpu, value)
+                except Exception, ex:
+                    log.exception(ex)
+
+        #need to update sched params aswell
+        if 'weight' in xeninfo.info['vcpus_params'] \
+           and 'cap' in xeninfo.info['vcpus_params']:
+            weight = xeninfo.info['vcpus_params']['weight']
+            cap = xeninfo.info['vcpus_params']['cap']
+            xendom.domain_sched_credit_set(xeninfo.getDomid(), weight, cap)
+
+    def VM_set_VCPUs_number_live(self, _, vm_ref, num):
+        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
+        dom.setVCpuCount(int(num))
+        return xen_api_success_void()
+     
     def VM_remove_from_VCPUs_params(self, session, vm_ref, key):
         dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
         if 'vcpus_params' in dom.info \
@@ -1442,7 +1483,7 @@ class XendAPI(object):
             'name_label': xeninfo.getName(),
             'name_description': xeninfo.getName(),
             'user_version': 1,
-            'is_a_template': False,
+            'is_a_template': xeninfo.info.get('is_a_template'),
             'auto_power_on': False,
             'resident_on': XendNode.instance().uuid,
             'memory_static_min': xeninfo.get_memory_static_min(),
@@ -1450,7 +1491,8 @@ class XendAPI(object):
             'memory_dynamic_min': xeninfo.get_memory_dynamic_min(),
             'memory_dynamic_max': xeninfo.get_memory_dynamic_max(),
             'VCPUs_params': xeninfo.get_vcpus_params(),
-            'VCPUs_number': xeninfo.getVCpuCount(),
+            'VCPUs_at_startup': xeninfo.getVCpuCount(),
+            'VCPUs_max': xeninfo.getVCpuCount(),
             'actions_after_shutdown': xeninfo.get_on_shutdown(),
             'actions_after_reboot': xeninfo.get_on_reboot(),
             'actions_after_suspend': xeninfo.get_on_suspend(),
@@ -1472,6 +1514,7 @@ class XendAPI(object):
             'other_config': xeninfo.info.get('other_config', {}),
             'domid': domid is None and -1 or domid,
             'is_control_domain': xeninfo == xendom.privilegedDomain(),
+            'metrics': xeninfo.get_metrics()
         }
         return xen_api_success(record)
 
@@ -1547,8 +1590,12 @@ class XendAPI(object):
     # ----------------------------------------------------------------
 
     VM_metrics_attr_ro = ['memory_actual',
-                           'vcpus_number',
-                           'vcpus_utilisation']
+                          'VCPUs_number',
+                          'VCPUs_utilisation',
+                          'VCPUs_CPU',
+                          'VCPUs_flags',
+                          'VCPUs_params',
+                          'start_time']
     VM_metrics_attr_rw = []
     VM_metrics_methods = []
 
@@ -1564,11 +1611,24 @@ class XendAPI(object):
     def VM_metrics_get_memory_actual(self, _, ref):
         return xen_api_success(self._VM_metrics_get(ref).get_memory_actual())
 
-    def VM_metrics_get_vcpus_number(self, _, ref):
-        return xen_api_success(self._VM_metrics_get(ref).get_vcpus_number())
-
-    def VM_metrics_get_vcpus_utilisation(self, _, ref):
-        return xen_api_success(self._VM_metrics_get(ref).get_vcpus_utilisation())
+    def VM_metrics_get_VCPUs_number(self, _, ref):
+        return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_number())
+
+    def VM_metrics_get_VCPUs_utilisation(self, _, ref):
+        return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_utilisation())
+
+    def VM_metrics_get_VCPUs_CPU(self, _, ref):
+        return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_CPU())
+    
+    def VM_metrics_get_VCPUs_flags(self, _, ref):
+        return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_flags())
+
+    def VM_metrics_get_VCPUs_params(self, _, ref):
+        return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_params())
+
+    def VM_metrics_get_start_time(self, _, ref):
+        return xen_api_success(self._VM_metrics_get(ref).get_start_time())
+
 
     # Xen API: Class VBD
     # ----------------------------------------------------------------
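
VM_add_to_VCPUs_params_live() above writes the parameter into the VM record and then pushes it to the running domain: 'cpumapN' keys pin VCPU N, while a weight/cap pair is handed to the credit scheduler. A minimal sketch of that dispatch logic, with made-up callback names standing in for the XendDomain calls:

    def apply_vcpus_params_sketch(vcpus_params, pin_vcpu, sched_credit_set):
        # per-VCPU pinning: 'cpumap2' -> pin VCPU 2 to the given cpumap
        for key, value in vcpus_params.items():
            if key.startswith("cpumap"):
                vcpu = int(key[len("cpumap"):])
                pin_vcpu(vcpu, value)
        # scheduler parameters are only applied as a complete weight/cap pair
        if 'weight' in vcpus_params and 'cap' in vcpus_params:
            sched_credit_set(vcpus_params['weight'], vcpus_params['cap'])
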
diff -r be1017157768 -r 2cecfa20ffa9 tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py       Thu Mar 22 09:30:54 2007 -0600
+++ b/tools/python/xen/xend/XendConfig.py       Fri Mar 23 10:11:58 2007 +0000
@@ -44,7 +44,7 @@ def reverse_dict(adict):
     return dict([(v, k) for k, v in adict.items()])
 
 def bool0(v):
-    return v != '0' and bool(v)
+    return v != '0' and v != 'False' and bool(v)
 
 # Recursively copy a data struct, scrubbing out VNC passwords.
 # Will scrub any dict entry with a key of 'vncpasswd' or any
@@ -81,18 +81,18 @@ def scrub_password(data):
 #
 # CPU fields:
 #
-# vcpus_number -- the maximum number of vcpus that this domain may ever have.
+# VCPUs_max    -- the maximum number of vcpus that this domain may ever have.
 #                 aka XendDomainInfo.getVCpuCount().
 # vcpus        -- the legacy configuration name for above.
 # max_vcpu_id  -- vcpus_number - 1.  This is given to us by Xen.
 #
 # cpus         -- the list of pCPUs available to each vCPU.
 #
-#   vcpu_avail:  a bitmap telling the guest domain whether it may use each of
-#                its VCPUs.  This is translated to
-#                <dompath>/cpu/<id>/availability = {online,offline} for use
-#                by the guest domain.
-# online_vpcus -- the number of VCPUs currently up, as reported by Xen.  This
+# vcpu_avail   -- a bitmap telling the guest domain whether it may use each of
+#                 its VCPUs.  This is translated to
+#                 <dompath>/cpu/<id>/availability = {online,offline} for use
+#                 by the guest domain.
+# VCPUs_live   -- the number of VCPUs currently up, as reported by Xen.  This
 #                 is changed by changing vcpu_avail, and waiting for the
 #                 domain to respond.
 #
@@ -103,7 +103,7 @@ def scrub_password(data):
 
 XENAPI_CFG_TO_LEGACY_CFG = {
     'uuid': 'uuid',
-    'vcpus_number': 'vcpus',
+    'VCPUs_max': 'vcpus',
     'cpus': 'cpus',
     'name_label': 'name',
     'actions_after_shutdown': 'on_poweroff',
@@ -128,7 +128,6 @@ XENAPI_PLATFORM_CFG = [ 'acpi', 'apic', 
 
 XENAPI_CFG_TYPES = {
     'uuid': str,
-    'power_state': str,
     'name_label': str,
     'name_description': str,
     'user_version': str,
@@ -139,9 +138,10 @@ XENAPI_CFG_TYPES = {
     'memory_dynamic_min': int,
     'memory_dynamic_max': int,
     'cpus': list,
-    'vcpus_policy': str,
     'vcpus_params': dict,
-    'vcpus_number': int,
+    'VCPUs_max': int,
+    'VCPUs_at_startup': int,
+    'VCPUs_live': int,
     'actions_after_shutdown': str,
     'actions_after_reboot': str,
     'actions_after_crash': str,
@@ -318,7 +318,9 @@ class XendConfig(dict):
             'cpus': [],
             'cpu_weight': 256,
             'cpu_cap': 0,
-            'vcpus_number': 1,
+            'VCPUs_max': 1,
+            'VCPUs_live': 1,
+            'VCPUs_at_startup': 1,
             'vcpus_params': {},
             'console_refs': [],
             'vif_refs': [],
@@ -371,8 +373,8 @@ class XendConfig(dict):
                                       event)
 
     def _vcpus_sanity_check(self):
-        if 'vcpus_number' in self and 'vcpu_avail' not in self:
-            self['vcpu_avail'] = (1 << self['vcpus_number']) - 1
+        if 'VCPUs_max' in self and 'vcpu_avail' not in self:
+            self['vcpu_avail'] = (1 << self['VCPUs_max']) - 1
 
     def _uuid_sanity_check(self):
         """Make sure UUID is in proper string format with hyphens."""
@@ -406,7 +408,7 @@ class XendConfig(dict):
     def _dominfo_to_xapi(self, dominfo):
         self['domid'] = dominfo['domid']
         self['online_vcpus'] = dominfo['online_vcpus']
-        self['vcpus_number'] = dominfo['max_vcpu_id'] + 1
+        self['VCPUs_max'] = dominfo['max_vcpu_id'] + 1
 
         self['memory_dynamic_min'] = dominfo['mem_kb'] * 1024
         self['memory_dynamic_max'] = dominfo['mem_kb'] * 1024
@@ -562,12 +564,12 @@ class XendConfig(dict):
             image_vcpus = sxp.child_value(image_sxp, 'vcpus')
             if image_vcpus != None:
                 try:
-                    if 'vcpus_number' not in cfg:
-                        cfg['vcpus_number'] = int(image_vcpus)
-                    elif cfg['vcpus_number'] != int(image_vcpus):
-                        cfg['vcpus_number'] = int(image_vcpus)
+                    if 'VCPUs_max' not in cfg:
+                        cfg['VCPUs_max'] = int(image_vcpus)
+                    elif cfg['VCPUs_max'] != int(image_vcpus):
+                        cfg['VCPUs_max'] = int(image_vcpus)
                         log.warn('Overriding vcpus from %d to %d using image'
-                                 'vcpus value.', cfg['vcpus_number'])
+                                 'vcpus value.', cfg['VCPUs_max'])
                 except ValueError, e:
                     raise XendConfigError('integer expeceted: %s: %s' %
                                           image_sxp, e)
@@ -850,16 +852,6 @@ class XendConfig(dict):
 
         sxpr.append(["maxmem", int(self["memory_static_max"])/MiB])
         sxpr.append(["memory", int(self["memory_dynamic_max"])/MiB])
-
-        if not legacy_only:
-            sxpr.append(['memory_dynamic_min',
-                     int(self.get('memory_dynamic_min'))])
-            sxpr.append(['memory_dynamic_max',
-                     int(self.get('memory_dynamic_max'))])
-            sxpr.append(['memory_static_max',
-                     int(self.get('memory_static_max'))])
-            sxpr.append(['memory_static_min',
-                     int(self.get('memory_static_min'))])
 
         for legacy in LEGACY_UNSUPPORTED_BY_XENAPI_CFG:
             if legacy in ('domid', 'uuid'): # skip these
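
Two small behaviours from the XendConfig.py hunks above, restated as a standalone sketch (helper names are made up): bool0() now treats the strings '0' and 'False' as false, and vcpu_avail defaults to a full bitmap covering VCPUs_max VCPUs.

    def bool0_sketch(v):
        return v != '0' and v != 'False' and bool(v)

    def default_vcpu_avail_sketch(vcpus_max):
        # one bit per VCPU, all initially available
        return (1 << vcpus_max) - 1

    assert not bool0_sketch('0')
    assert not bool0_sketch('False')
    assert default_vcpu_avail_sketch(4) == 0xf   # VCPUs 0-3 online
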
diff -r be1017157768 -r 2cecfa20ffa9 tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py       Thu Mar 22 09:30:54 2007 -0600
+++ b/tools/python/xen/xend/XendDomain.py       Fri Mar 23 10:11:58 2007 +0000
@@ -34,7 +34,7 @@ import xen.lowlevel.xc
 
 from xen.xend import XendOptions, XendCheckpoint, XendDomainInfo
 from xen.xend.PrettyPrint import prettyprint
-from xen.xend.XendConfig import XendConfig
+from xen.xend import XendConfig
 from xen.xend.XendError import XendError, XendInvalidDomain, VmError
 from xen.xend.XendError import VMBadState
 from xen.xend.XendLogging import log
@@ -191,6 +191,10 @@ class XendDomain:
                         self._managed_domain_register(new_dom)
                     else:
                         self._managed_domain_register(running_dom)
+                        for key in XendConfig.XENAPI_CFG_TYPES.keys():
+                            if key not in XendConfig.LEGACY_XENSTORE_VM_PARAMS and \
+                                   key in dom:
+                                running_dom.info[key] = dom[key]
                 except Exception:
                     log.exception("Failed to create reference to managed "
                                   "domain: %s" % dom_name)
@@ -316,7 +320,7 @@ class XendDomain:
         for dom_uuid in dom_uuids:
             try:
                 cfg_file = self._managed_config_path(dom_uuid)
-                cfg = XendConfig(filename = cfg_file)
+                cfg = XendConfig.XendConfig(filename = cfg_file)
                 if cfg.get('uuid') != dom_uuid:
                     # something is wrong with the SXP
                     log.error("UUID mismatch in stored configuration: %s" %
@@ -414,7 +418,7 @@ class XendDomain:
         running_domids = [d['domid'] for d in running if d['dying'] != 1]
         for domid, dom in self.domains.items():
             if domid not in running_domids and domid != DOM0_ID:
-                self.remove_domain(dom, domid)
+                self._remove_domain(dom, domid)
 
 
     def add_domain(self, info):
@@ -433,6 +437,16 @@ class XendDomain:
             self._managed_domain_register(info)
 
     def remove_domain(self, info, domid = None):
+        """Remove the domain from the list of running domains, taking the
+        domains_lock first.
+        """
+        self.domains_lock.acquire()
+        try:
+            self._remove_domain(info, domid)
+        finally:
+            self.domains_lock.release()
+
+    def _remove_domain(self, info, domid = None):
         """Remove the domain from the list of running domains
         
         @requires: Expects to be protected by the domains_lock.
@@ -683,7 +697,7 @@ class XendDomain:
         self.domains_lock.acquire()
         try:
             try:
-                xeninfo = XendConfig(xapi = xenapi_vm)
+                xeninfo = XendConfig.XendConfig(xapi = xenapi_vm)
                 dominfo = XendDomainInfo.createDormant(xeninfo)
                 log.debug("Creating new managed domain: %s: %s" %
                           (dominfo.getName(), dominfo.get_uuid()))
@@ -906,7 +920,7 @@ class XendDomain:
         self.domains_lock.acquire()
         try:
             try:
-                domconfig = XendConfig(sxp_obj = config)
+                domconfig = XendConfig.XendConfig(sxp_obj = config)
                 dominfo = XendDomainInfo.createDormant(domconfig)
                 log.debug("Creating new managed domain: %s" %
                           dominfo.getName())
@@ -971,18 +985,34 @@ class XendDomain:
                     raise VMBadState("Domain is still running",
                                      POWER_STATE_NAMES[DOM_STATE_HALTED],
                                      POWER_STATE_NAMES[dominfo.state])
-
-                log.info("Domain %s (%s) deleted." %
-                         (dominfo.getName(), dominfo.info.get('uuid')))
-
-                self._managed_domain_unregister(dominfo)
-                self.remove_domain(dominfo)
-                XendDevices.destroy_device_state(dominfo)
+                
+                self._domain_delete_by_info(dominfo)
             except Exception, ex:
                 raise XendError(str(ex))
         finally:
             self.domains_lock.release()
-        
+
+
+    def domain_delete_by_dominfo(self, dominfo):
+        """Only for use by XendDomainInfo.
+        """
+        self.domains_lock.acquire()
+        try:
+            self._domain_delete_by_info(dominfo)
+        finally:
+            self.domains_lock.release()
+
+
+    def _domain_delete_by_info(self, dominfo):
+        """Expects to be protected by domains_lock.
+        """
+        log.info("Domain %s (%s) deleted." %
+                 (dominfo.getName(), dominfo.info.get('uuid')))
+                
+        self._managed_domain_unregister(dominfo)
+        self._remove_domain(dominfo)
+        XendDevices.destroy_device_state(dominfo)
+
 
     def domain_configure(self, config):
         """Configure an existing domain.
@@ -1230,7 +1260,9 @@ class XendDomain:
             try:
                 rc = xc.vcpu_setaffinity(dominfo.getDomid(), int(v), cpumap)
             except Exception, ex:
-                raise XendError(str(ex))
+                log.exception(ex)
+                raise XendError("Cannot pin vcpu: %s to cpu: %s - %s" % \
+                                (v, cpumap, str(ex)))
         return rc
 
     def domain_cpu_sedf_set(self, domid, period, slice_, latency, extratime,
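
The remove_domain()/_remove_domain() split above follows the pattern used throughout this file: a public method takes domains_lock, and a private underscore-prefixed method assumes the caller already holds it. A generic sketch of that pattern (class and names are illustrative only):

    import threading

    class LockedRegistrySketch:
        def __init__(self):
            self.lock = threading.Lock()
            self.items = {}

        def remove(self, key):
            # public entry point: takes the lock
            self.lock.acquire()
            try:
                self._remove(key)
            finally:
                self.lock.release()

        def _remove(self, key):
            # expects self.lock to already be held by the caller
            self.items.pop(key, None)
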
diff -r be1017157768 -r 2cecfa20ffa9 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Thu Mar 22 09:30:54 2007 -0600
+++ b/tools/python/xen/xend/XendDomainInfo.py   Fri Mar 23 10:11:58 2007 +0000
@@ -614,9 +614,9 @@ class XendDomainInfo:
             sxpr = ['domain',
                     ['domid',      self.domid],
                     ['name',       self.info['name_label']],
-                    ['vcpu_count', self.info['vcpus_number']]]
-
-            for i in range(0, self.info['vcpus_number']):
+                    ['vcpu_count', self.info['VCPUs_max']]]
+
+            for i in range(0, self.info['VCPUs_max']):
                 info = xc.vcpu_getinfo(self.domid, i)
 
                 sxpr.append(['vcpu',
@@ -660,7 +660,6 @@ class XendDomainInfo:
         vm_config = dict(zip(augment_entries, vm_config))
         
         for arg in augment_entries:
-            xapicfg = arg
             val = vm_config[arg]
             if val != None:
                 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
@@ -678,7 +677,7 @@ class XendDomainInfo:
         # settings to take precedence over any entries in the store.
         if priv:
             xeninfo = dom_get(self.domid)
-            self.info['vcpus_number'] = xeninfo['online_vcpus']
+            self.info['VCPUs_max'] = xeninfo['online_vcpus']
             self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1
 
         # read image value
@@ -831,7 +830,7 @@ class XendDomainInfo:
                 return 'offline'
 
         result = {}
-        for v in range(0, self.info['vcpus_number']):
+        for v in range(0, self.info['VCPUs_max']):
             result["cpu/%d/availability" % v] = availability(v)
         return result
 
@@ -952,7 +951,7 @@ class XendDomainInfo:
         return self.info['features']
 
     def getVCpuCount(self):
-        return self.info['vcpus_number']
+        return self.info['VCPUs_max']
 
     def setVCpuCount(self, vcpus):
         if vcpus <= 0:
@@ -964,16 +963,16 @@ class XendDomainInfo:
             # update dom differently depending on whether we are adjusting
             # vcpu number up or down, otherwise _vcpuDomDetails does not
             # disable the vcpus
-            if self.info['vcpus_number'] > vcpus:
+            if self.info['VCPUs_max'] > vcpus:
                 # decreasing
                 self._writeDom(self._vcpuDomDetails())
-                self.info['vcpus_number'] = vcpus
+                self.info['VCPUs_live'] = vcpus
             else:
                 # same or increasing
-                self.info['vcpus_number'] = vcpus
+                self.info['VCPUs_live'] = vcpus
                 self._writeDom(self._vcpuDomDetails())
         else:
-            self.info['vcpus_number'] = vcpus
+            self.info['VCPUs_live'] = vcpus
             xen.xend.XendDomain.instance().managed_config_save(self)
         log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
                  vcpus)
@@ -1427,7 +1426,7 @@ class XendDomainInfo:
         self._recreateDom()
 
         # Set maximum number of vcpus in domain
-        xc.domain_max_vcpus(self.domid, int(self.info['vcpus_number']))
+        xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
 
         # register the domain in the list 
         from xen.xend import XendDomain
@@ -1464,7 +1463,7 @@ class XendDomainInfo:
             # this is done prior to memory allocation to aide in memory
             # distribution for NUMA systems.
             if self.info['cpus'] is not None and len(self.info['cpus']) > 0:
-                for v in range(0, self.info['vcpus_number']):
+                for v in range(0, self.info['VCPUs_max']):
                     xc.vcpu_setaffinity(self.domid, v, self.info['cpus'])
 
             # Use architecture- and image-specific calculations to determine
@@ -1651,6 +1650,12 @@ class XendDomainInfo:
 
         self._cleanup_phantom_devs(paths)
 
+        if "transient" in self.info["other_config"] \
+           and bool(self.info["other_config"]["transient"]):
+            from xen.xend import XendDomain
+            XendDomain.instance().domain_delete_by_dominfo(self)
+
+
     def destroyDomain(self):
         log.debug("XendDomainInfo.destroyDomain(%s)", str(self.domid))
 
@@ -1662,25 +1667,16 @@ class XendDomainInfo:
                 self.domid = None
                 for state in DOM_STATES_OLD:
                     self.info[state] = 0
+                self._stateSet(DOM_STATE_HALTED)
         except:
             log.exception("XendDomainInfo.destroy: xc.domain_destroy failed.")
 
         from xen.xend import XendDomain
-
-        if "transient" in self.info["other_config"]\
-           and bool(self.info["other_config"]["transient"]):
-            xendDomainInstance = XendDomain.instance()
-            
-            xendDomainInstance.domains_lock.acquire()
-            xendDomainInstance._refresh(refresh_shutdown = False)
-            xendDomainInstance.domains_lock.release()
-            
-            xendDomainInstance.domain_delete(self.info["name_label"])
-        else:
-            XendDomain.instance().remove_domain(self)
+        XendDomain.instance().remove_domain(self)
 
         self.cleanupDomain()
         self._cleanup_phantom_devs(paths)
+
 
     def resumeDomain(self):
         log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
@@ -1860,7 +1856,7 @@ class XendDomainInfo:
         if arch.type == "x86":
             # 1MB per vcpu plus 4Kib/Mib of RAM.  This is higher than 
             # the minimum that Xen would allocate if no value were given.
-            overhead_kb = self.info['vcpus_number'] * 1024 + \
+            overhead_kb = self.info['VCPUs_max'] * 1024 + \
                           (self.info['memory_static_max'] / 1024 / 1024) * 4
             overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
             # The domain might already have some shadow memory
@@ -2258,8 +2254,8 @@ class XendDomainInfo:
     def get_vcpus_util(self):
         vcpu_util = {}
         xennode = XendNode.instance()
-        if 'vcpus_number' in self.info and self.domid != None:
-            for i in range(0, self.info['vcpus_number']):
+        if 'VCPUs_max' in self.info and self.domid != None:
+            for i in range(0, self.info['VCPUs_max']):
                 util = xennode.get_vcpu_util(self.domid, i)
                 vcpu_util[str(i)] = util
                 
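
The x86 shadow-memory calculation in one of the XendDomainInfo.py hunks above reserves roughly 1MB per VCPU plus 4KiB for each MiB of guest RAM, rounded up to a whole number of MiB. Restated as a standalone sketch (function name is made up; // keeps the integer division explicit):

    def shadow_overhead_kb_sketch(vcpus_max, memory_static_max_bytes):
        overhead_kb = vcpus_max * 1024 + \
                      (memory_static_max_bytes // 1024 // 1024) * 4
        # round up to a 1MB multiple
        return ((overhead_kb + 1023) // 1024) * 1024

    # e.g. 2 VCPUs and 512MB of RAM -> 4096 kB of shadow overhead
    assert shadow_overhead_kb_sketch(2, 512 * 1024 * 1024) == 4096
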
diff -r be1017157768 -r 2cecfa20ffa9 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Thu Mar 22 09:30:54 2007 -0600
+++ b/tools/python/xen/xend/XendNode.py Fri Mar 23 10:11:58 2007 +0000
@@ -215,6 +215,10 @@ class XendNode:
         self.save_networks()
 
 
+    def get_PIF_refs(self):
+        return self.pifs.keys()
+
+
     def _PIF_create(self, name, mtu, vlan, mac, network, persist = True,
                     pif_uuid = None, metrics_uuid = None):
         for pif in self.pifs.values():
diff -r be1017157768 -r 2cecfa20ffa9 tools/python/xen/xend/XendQCoWStorageRepo.py
--- a/tools/python/xen/xend/XendQCoWStorageRepo.py      Thu Mar 22 09:30:54 2007 -0600
+++ b/tools/python/xen/xend/XendQCoWStorageRepo.py      Fri Mar 23 10:11:58 2007 +0000
@@ -208,7 +208,8 @@ class XendQCoWStorageRepo(XendStorageRep
         self.lock.acquire()
         try:
             if not self._has_space_available_for(desired_size_bytes):
-                raise XendError("Not enough space")
+                raise XendError("Not enough space (need %d)" %
+                                desired_size_bytes)
 
             image_uuid = uuid.createString()
             qcow_path = os.path.join(self.location,
@@ -251,6 +252,7 @@ class XendQCoWStorageRepo(XendStorageRep
                 except OSError:
                     log.exception("Failed to destroy image")
                 del self.images[image_uuid]
+                self._refresh()
                 return True
         finally:
             self.lock.release()
@@ -317,15 +319,12 @@ class XendQCoWStorageRepo(XendStorageRep
     def create_vdi(self, vdi_struct):
         image_uuid = None
         try:
-            sector_count = int(vdi_struct.get('virtual_size', 0))
-            sector_size = int(vdi_struct.get('sector_size', 1024))
-            size_bytes = (sector_count * sector_size)
+            size_bytes = int(vdi_struct.get('virtual_size', 0))
 
             image_uuid = self._create_image_files(size_bytes)
             
             image = self.images[image_uuid]
             image_cfg = {
-                'sector_size': sector_size,
                 'virtual_size': size_bytes,
                 'type': vdi_struct.get('type', 'system'),
                 'name_label': vdi_struct.get('name_label', ''),
@@ -350,17 +349,3 @@ class XendQCoWStorageRepo(XendStorageRep
             raise
 
         return image_uuid
-
-
-# remove everything below this line!! for testing only
-if __name__ == "__main__":
-    xsr = XendStorageRepository()
-    print 'Free Space: %d MB' % (xsr.free_space_bytes()/MB)
-    print "Create Image:",
-    print xsr._create_image_files(10 * MB)
-    print 'Delete all images:'
-    for image_uuid in xsr.list_images():
-        print image_uuid,
-        xsr._destroy_image_files(image_uuid)
-
-    print
diff -r be1017157768 -r 2cecfa20ffa9 tools/python/xen/xend/XendVMMetrics.py
--- a/tools/python/xen/xend/XendVMMetrics.py    Thu Mar 22 09:30:54 2007 -0600
+++ b/tools/python/xen/xend/XendVMMetrics.py    Fri Mar 23 10:11:58 2007 +0000
@@ -13,9 +13,13 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 #============================================================================
 # Copyright (c) 2006-2007 Xensource Inc.
+# Copyright (c) 2007 Tom Wilkie
 #============================================================================
 
 from xen.xend.XendLogging import log
+import xen.lowlevel.xc
+
+xc = xen.lowlevel.xc.xc()
 
 instances = {}
 
@@ -46,25 +50,79 @@ class XendVMMetrics:
         return self.uuid
 
     def get_memory_actual(self):
-        return self.get_record()["memory_actual"]
+        domInfo = self.xend_domain_instance.getDomInfo()
+        if domInfo:
+            return domInfo["mem_kb"] * 1024
+        else:
+            return 0
 
-    def get_vcpus_number(self):
-        return self.get_record()["vcpus_number"]
+    def get_VCPUs_number(self):
+        domInfo = self.xend_domain_instance.getDomInfo()
+        if domInfo:
+            return domInfo["online_vcpus"]
+        else:
+            return 0
     
-    def get_vcpus_utilisation(self):
+    def get_VCPUs_utilisation(self):
         return self.xend_domain_instance.get_vcpus_util()
 
+    def get_VCPUs_CPU(self):
+        domid = self.xend_domain_instance.getDomid()
+        if domid is not None:
+            vcpus_cpu = {}
+            vcpus_max = self.xend_domain_instance.info['VCPUs_max']
+            for i in range(0, vcpus_max):
+                info = xc.vcpu_getinfo(domid, i)
+                vcpus_cpu[i] = info['cpu']
+            return vcpus_cpu
+        else:
+            return {}
+
+    def get_VCPUs_flags(self):
+        domid = self.xend_domain_instance.getDomid()
+        if domid is not None:
+            vcpus_flags = {}
+            vcpus_max = self.xend_domain_instance.info['VCPUs_max']
+            for i in range(0, vcpus_max):
+                info = xc.vcpu_getinfo(domid, i)
+                flags = []
+                def set_flag(flag):
+                    if info[flag] == 1:
+                        flags.append(flag)
+                set_flag('blocked')
+                set_flag('online')
+                set_flag('running')
+                vcpus_flags[i] = ",".join(flags)
+            return vcpus_flags
+        else:
+            return {}
+
+    def get_VCPUs_params(self):
+        domid = self.xend_domain_instance.getDomid()
+        if domid is not None:
+            params_live = {}
+            vcpus_max = self.xend_domain_instance.info['VCPUs_max']
+            for i in range(0, vcpus_max):
+                info = xc.vcpu_getinfo(domid, i)
+                params_live['cpumap%i' % i] = \
+                    ",".join(map(str, info['cpumap']))
+
+            params_live.update(xc.sched_credit_domain_get(domid))
+            
+            return params_live
+        else:
+            return {}
+
+    def get_start_time(self):
+        return self.xend_domain_instance.info.get("start_time", -1)
+    
     def get_record(self):
-        domInfo = self.xend_domain_instance.getDomInfo()
-        if domInfo:
-            return { 'uuid'              : self.uuid,
-                     'memory_actual'     : domInfo["mem_kb"] * 1024,
-                     'vcpus_number'      : domInfo["online_vcpus"],
-                     'vcpus_utilisation' : self.get_vcpus_utilisation()
-                   }
-        else:
-            return { 'uuid'              : self.uuid,
-                     'memory_actual'     : 0,
-                     'vcpus_number'      : 0,
-                     'vcpus_utilisation' : {}
-                   }
+        return { 'uuid'              : self.uuid,
+                 'memory_actual'     : self.get_memory_actual(),
+                 'VCPUs_number'      : self.get_VCPUs_number(),
+                 'VCPUs_utilisation' : self.get_VCPUs_utilisation(),
+                 'VCPUs_CPU'         : self.get_VCPUs_CPU(),
+                 'VCPUs_flags'       : self.get_VCPUs_flags(),
+                 'VCPUs_params'      : self.get_VCPUs_params(),
+                 'start_time'        : self.get_start_time(),
+               }
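
get_VCPUs_CPU(), get_VCPUs_flags() and get_VCPUs_params() above derive their data from xc.vcpu_getinfo(); the per-VCPU flags in particular are encoded as a comma-separated string of whichever of blocked/online/running are set. A minimal sketch of that encoding, taking the info dict as input (helper name is made up):

    def vcpu_flags_string_sketch(vcpu_info):
        flags = []
        for flag in ('blocked', 'online', 'running'):
            if vcpu_info.get(flag) == 1:
                flags.append(flag)
        return ",".join(flags)

    # e.g. {'blocked': 0, 'online': 1, 'running': 1} -> 'online,running'
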
diff -r be1017157768 -r 2cecfa20ffa9 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Thu Mar 22 09:30:54 2007 -0600
+++ b/tools/python/xen/xm/main.py       Fri Mar 23 10:11:58 2007 +0000
@@ -502,6 +502,13 @@ def get_default_Network():
     return [network_ref
             for network_ref in server.xenapi.network.get_all()][0]
 
+class XenAPIUnsupportedException(Exception):
+    pass
+
+def xenapi_unsupported():
+    if serverType == SERVER_XEN_API:
+        raise XenAPIUnsupportedException, "This function is not supported by Xen-API"
+
 def map2sxp(m):
     return [[k, m[k]] for k in m.keys()]
 
@@ -550,10 +557,13 @@ def err(msg):
 
 
 def get_single_vm(dom):
-    uuids = server.xenapi.VM.get_by_name_label(dom)
-    n = len(uuids)
-    if n == 1:
-        return uuids[0]
+    if serverType == SERVER_XEN_API:
+        uuids = server.xenapi.VM.get_by_name_label(dom)
+        n = len(uuids)
+        if n > 0:
+            return uuids[0]
+        else:
+            raise OptionError("Domain '%s' not found." % dom)
     else:
         dominfo = server.xend.domain(dom, False)
         return dominfo['uuid']
@@ -697,9 +707,10 @@ def getDomains(domain_names, state, full
             dom_metrics = server.xenapi.VM_metrics.get_record(dom_metrics_ref)
             dom_rec.update({'name':     dom_rec['name_label'],
                             'memory_actual': int(dom_metrics['memory_actual'])/1024,
-                            'vcpus':    dom_metrics['vcpus_number'],
+                            'vcpus':    dom_metrics['VCPUs_number'],
                             'state':    '-----',
-                            'cpu_time': dom_metrics['vcpus_utilisation']})
+                            'cpu_time': dom_metrics['VCPUs_utilisation'],
+                            'start_time': dom_metrics['start_time']})
                        
             doms_sxp.append(['domain'] + map2sxp(dom_rec))
             doms_dict.append(dom_rec)
@@ -885,11 +896,71 @@ def xm_label_list(doms):
 
 
 def xm_vcpu_list(args):
-    if args:
-        dominfo = map(server.xend.domain.getVCPUInfo, args)
-    else:
-        doms = server.xend.domains(False)
-        dominfo = map(server.xend.domain.getVCPUInfo, doms)
+    if serverType == SERVER_XEN_API:
+        if args:
+            vm_refs = map(get_single_vm, args)
+        else:
+            vm_refs = server.xenapi.VM.get_all()
+            
+        vm_records = dict(map(lambda vm_ref:
+                                  (vm_ref, server.xenapi.VM.get_record(
+                                      vm_ref)),
+                              vm_refs))
+
+        vm_metrics = dict(map(lambda (ref, record):
+                                  (ref,
+                                   server.xenapi.VM_metrics.get_record(
+                                       record['metrics'])),
+                              vm_records.items()))
+
+        dominfo = []
+
+        # vcpu_list doesn't list 'managed' domains
+        # when they are not running, so filter them out
+
+        vm_refs = [vm_ref
+                  for vm_ref in vm_refs
+                  if vm_records[vm_ref]["power_state"] != "Halted"]
+
+        for vm_ref in vm_refs:
+            info = ['domain',
+                    ['domid',      vm_records[vm_ref]['domid']],
+                    ['name',       vm_records[vm_ref]['name_label']],
+                    ['vcpu_count', vm_records[vm_ref]['VCPUs_max']]]
+
+            
+
+            for i in range(int(vm_records[vm_ref]['VCPUs_max'])):
+                def chk_flag(flag):
+                    return vm_metrics[vm_ref]['VCPUs_flags'][str(i)] \
+                           .find(flag) > -1 and 1 or 0
+                
+                vcpu_info = ['vcpu',
+                             ['number',
+                                  i],
+                             ['online',
+                                  chk_flag("online")],
+                             ['blocked',
+                                  chk_flag("blocked")],
+                             ['running',
+                                  chk_flag("running")],
+                             ['cpu_time',
+                                  vm_metrics[vm_ref]['VCPUs_utilisation'][str(i)]],
+                             ['cpu',
+                                  vm_metrics[vm_ref]['VCPUs_CPU'][str(i)]],
+                             ['cpumap',
+                                  vm_metrics[vm_ref]['VCPUs_params']\
+                                  ['cpumap%i' % i].split(",")]]
+                
+                info.append(vcpu_info)
+
+            dominfo.append(info)
+    else:    
+        if args:
+            dominfo = map(server.xend.domain.getVCPUInfo, args)
+        else:
+            doms = server.xend.domains(False)
+            dominfo = map(server.xend.domain.getVCPUInfo, doms)
 
     print '%-32s %3s %5s %5s %5s %9s %s' % \
           ('Name', 'ID', 'VCPU', 'CPU', 'State', 'Time(s)', 'CPU Affinity')
@@ -947,16 +1018,20 @@ def xm_vcpu_list(args):
             cpumap = map(lambda x: int(x), cpumap)
             cpumap.sort()
 
-            for x in server.xend.node.info()[1:]:
-                if len(x) > 1 and x[0] == 'nr_cpus':
-                    nr_cpus = int(x[1])
-                    # normalize cpumap by modulus nr_cpus, and drop duplicates
-                    cpumap = dict.fromkeys(
-                                map(lambda x: x % nr_cpus, cpumap)).keys()
-                    if len(cpumap) == nr_cpus:
-                        return "any cpu"
-                    break
- 
+            if serverType == SERVER_XEN_API:
+                nr_cpus = len(server.xenapi.host.get_host_CPUs(
+                    server.xenapi.session.get_this_host()))
+            else:
+                for x in server.xend.node.info()[1:]:
+                    if len(x) > 1 and x[0] == 'nr_cpus':
+                        nr_cpus = int(x[1])
+
+            # normalize cpumap by modulus nr_cpus, and drop duplicates
+            cpumap = dict.fromkeys(
+                       map(lambda x: x % nr_cpus, cpumap)).keys()
+            if len(cpumap) == nr_cpus:
+                return "any cpu"
+
             return format_pairs(list_to_rangepairs(cpumap))
 
         name  =     get_info('name')
@@ -1154,13 +1229,17 @@ def xm_vcpu_pin(args):
         return cpus
 
     dom  = args[0]
-    vcpu = args[1]
+    vcpu = int(args[1])
     if args[2] == 'all':
         cpumap = cpu_make_map('0-63')
     else:
         cpumap = cpu_make_map(args[2])
-    
-    server.xend.domain.pincpu(dom, vcpu, cpumap)
+
+    if serverType == SERVER_XEN_API:
+        server.xenapi.VM.add_to_VCPUs_params_live(
+            get_single_vm(dom), "cpumap%i" % vcpu, ",".join(cpumap))
+    else:
+        server.xend.domain.pincpu(dom, vcpu, cpumap)
 
 def xm_mem_max(args):
     arg_check(args, "mem-max", 2)
@@ -1194,7 +1273,7 @@ def xm_vcpu_set(args):
     vcpus = int(args[1])
 
     if serverType == SERVER_XEN_API:
-        server.xenapi.VM.set_vcpus_live(get_single_vm(dom), vcpus)
+        server.xenapi.VM.set_VCPUs_number_live(get_single_vm(dom), vcpus)
     else:
         server.xend.domain.setVCpuCount(dom, vcpus)
 
@@ -1231,6 +1310,8 @@ def xm_domname(args):
         print sxp.child_value(dom, 'name')
 
 def xm_sched_sedf(args):
+    xenapi_unsupported()
+    
     def ns_to_ms(val):
         return float(val) * 0.000001
     
@@ -1355,10 +1436,21 @@ def xm_sched_credit(args):
         
         for d in doms:
             try:
-                info = server.xend.domain.sched_credit_get(d['domid'])
+                if serverType == SERVER_XEN_API:
+                    info = server.xenapi.VM_metrics.get_VCPUs_params(
+                        server.xenapi.VM.get_metrics(
+                            get_single_vm(d['name'])))
+                else:
+                    info = server.xend.domain.sched_credit_get(d['domid'])
             except xmlrpclib.Fault:
+                pass
+
+            if 'weight' not in info or 'cap' not in info:
                 # domain does not support sched-credit?
                 info = {'weight': -1, 'cap': -1}
+
+            info['weight'] = int(info['weight'])
+            info['cap']    = int(info['cap'])
             
             info['name']  = d['name']
             info['domid'] = int(d['domid'])
@@ -1368,10 +1460,20 @@ def xm_sched_credit(args):
             # place holder for system-wide scheduler parameters
             err("No domain given.")
             usage('sched-credit')
-        
-        result = server.xend.domain.sched_credit_set(domid, weight, cap)
-        if result != 0:
-            err(str(result))
+
+        if serverType == SERVER_XEN_API:
+            server.xenapi.VM.add_to_VCPUs_params_live(
+                get_single_vm(domid),
+                "weight",
+                weight)
+            server.xenapi.VM.add_to_VCPUs_params_live(
+                get_single_vm(domid),
+                "cap",
+                cap)            
+        else:
+            result = server.xend.domain.sched_credit_set(domid, weight, cap)
+            if result != 0:
+                err(str(result))
 
 def xm_info(args):
     arg_check(args, "info", 0)
@@ -1754,6 +1856,7 @@ def xm_block_list(args):
                    % ni)
 
 def xm_vtpm_list(args):
+    xenapi_unsupported()
     (use_long, params) = arg_check_for_resource_list(args, "vtpm-list")
 
     dom = params[0]
@@ -1883,7 +1986,10 @@ def xm_network_attach(args):
             record = vif_record
             for key in keys[:-1]:
                 record = record[key]
-            record[keys[-1]] = val 
+            record[keys[-1]] = val
+
+        def get_net_from_bridge(bridge):
+            raise "Not supported just yet"
          
         vif_conv = {
             'type':
@@ -1991,6 +2097,7 @@ def xm_network_detach(args):
 
 
 def xm_vnet_list(args):
+    xenapi_unsupported()
     try:
         (options, params) = getopt.gnu_getopt(args, 'l', ['long'])
     except getopt.GetoptError, opterr:
@@ -2019,6 +2126,7 @@ def xm_vnet_list(args):
             print vnet, ex
 
 def xm_vnet_create(args):
+    xenapi_unsupported()
     arg_check(args, "vnet-create", 1)
     conf = args[0]
     if not os.access(conf, os.R_OK):
@@ -2028,6 +2136,7 @@ def xm_vnet_create(args):
     server.xend_vnet_create(conf)
 
 def xm_vnet_delete(args):
+    xenapi_unsupported()
     arg_check(args, "vnet-delete", 1)
     vnet = args[0]
     server.xend_vnet_delete(vnet)
@@ -2044,7 +2153,7 @@ commands = {
     "domid": xm_domid,
     "domname": xm_domname,
     "dump-core": xm_dump_core,
-    "reboot": xm_reboot,    
+    "reboot": xm_reboot,
     "rename": xm_rename,
     "restore": xm_restore,
     "resume": xm_resume,
@@ -2228,6 +2337,8 @@ def _run_cmd(cmd, cmd_name, args):
         err(str(e))
         _usage(cmd_name)
         print e.usage
+    except XenAPIUnsupportedException, e:
+        err(str(e))
     except Exception, e:
         if serverType != SERVER_XEN_API:
            from xen.util import security
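
In xm_vcpu_list() above, the CPU affinity column is produced by normalising the cpumap modulo the number of physical CPUs, dropping duplicates, and printing "any cpu" when every CPU is allowed. A simplified sketch of that normalisation, returning a sorted list instead of the range-pair string the real code formats:

    def format_cpumap_sketch(cpumap, nr_cpus):
        cpumap = dict.fromkeys([x % nr_cpus for x in cpumap]).keys()
        if len(cpumap) == nr_cpus:
            return "any cpu"
        return sorted(cpumap)

    # e.g. format_cpumap_sketch([0, 1, 64, 65], 4) -> [0, 1]
    #      format_cpumap_sketch(range(64), 4)      -> 'any cpu'
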
diff -r be1017157768 -r 2cecfa20ffa9 tools/python/xen/xm/xenapi_create.py
--- a/tools/python/xen/xm/xenapi_create.py      Thu Mar 22 09:30:54 2007 -0600
+++ b/tools/python/xen/xm/xenapi_create.py      Fri Mar 23 10:11:58 2007 +0000
@@ -242,9 +242,9 @@ class xenapi_create:
             "user_version":
                 get_text_in_child_node(vm, "version"),
             "is_a_template":
-                vm.attributes["is_a_template"].value,
+                vm.attributes["is_a_template"].value == 'true',
             "auto_power_on":
-                vm.attributes["auto_power_on"].value,
+                vm.attributes["auto_power_on"].value == 'true',
             "memory_static_max":
                 get_child_node_attribute(vm, "memory", "static_max"),
             "memory_static_min":
@@ -253,11 +253,11 @@ class xenapi_create:
                 get_child_node_attribute(vm, "memory", "dynamic_max"),
             "memory_dynamic_min":
                 get_child_node_attribute(vm, "memory", "dynamic_min"),
-            "vcpus_params":
+            "VCPUs_params":
                 get_child_nodes_as_dict(vm, "vcpu_param", "key", "value"),
-            "vcpus_max":
+            "VCPUs_max":
                 vm.attributes["vcpus_max"].value,
-            "vcpus_at_startup":
+            "VCPUs_at_startup":
                 vm.attributes["vcpus_at_startup"].value,
             "actions_after_shutdown":
                 vm.attributes["actions_after_shutdown"].value,
@@ -591,7 +591,6 @@ class sxp2xml:
     def extract_vdi(self, vbd_sxp, document):
         src = get_child_by_name(vbd_sxp, "uname")
         name = "vdi" + str(src.__hash__())
-        path = src[src.find(":")+1:]
 
         vdi = document.createElement("vdi")
 
@@ -599,8 +598,7 @@ class sxp2xml:
         vdi.attributes["read_only"] \
             = (get_child_by_name(vbd_sxp, "mode") != "w") \
                and "true" or "false"
-        vdi.attributes["size"] \
-            = str(os.path.getsize(path))
+        vdi.attributes["size"] = '-1'
         vdi.attributes["type"] = "system"
         vdi.attributes["shareable"] = "false"
         vdi.attributes["name"] = name

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
