
[Xen-devel] [PATCH] tools: surface smp guest cpu allocation



The xen-unstable branch has support for SMP guests.  The current method
for allocating virtual CPUs to a guest is the following (a rough sketch
in commands appears after the list):

1) export XEN_VCPUS=8 # or some other value
2) restart xend with the above variable set.
3) update the guest config file to pass the maxcpus= kernel parameter,
limiting the number of CPUs the guest discovers.
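
In other words, the pre-patch workflow looks roughly like this (the
xend restart invocation and the extra= line are illustrative only, not
part of this patch):

    # on dom0, before (re)starting xend
    export XEN_VCPUS=8
    xend restart

    # in the guest config file, cap what the guest kernel will discover
    extra = "maxcpus=2"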

The attached patch adds an optional parameter (vcpus) to the
xc_linux_build function, replacing the getenv() call it previously used
and removing the need for the maxcpus= kernel parameter to limit the
number of virtual CPUs a guest uses.  The value can now be controlled
from the domain configuration file.
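
A minimal sketch of the new usage (the values and the exact xm
invocation are illustrative):

    # in the domain config file (see the xmexample changes below)
    vcpus = 4

    # or override at creation time via the new create.py option
    xm create myguest vcpus=4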

The default value of 1 is set in XendDomainInfo.py but is overridden
when the vcpus value is parsed from the config.

I tested the above patch on the 2005-01-10 unstable tarball with 2.6.10
kernels (dom0 and dom1) rebuilt with CONFIG_SMP=y.
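
A quick way to confirm the setting took effect (not part of the patch)
is to count processors inside the booted guest:

    # inside the SMP guest
    grep -c '^processor' /proc/cpuinfo    # should match the vcpus value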

Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@xxxxxxxxxx

diffstat output:
 examples/xmexample1               |    3 +++
 examples/xmexample2               |    4 ++++
 libxc/xc.h                        |    3 ++-
 libxc/xc_linux_build.c            |   19 +++++++++----------
 python/xen/lowlevel/xc/xc.c       |   11 ++++++-----
 python/xen/xend/XendDomainInfo.py |   11 ++++++++++-
 python/xen/xm/create.py           |    7 +++++++
 7 files changed, 41 insertions(+), 17 deletions(-)

Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
---

diff -urN a/tools/examples/xmexample1 b/tools/examples/xmexample1
--- a/tools/examples/xmexample1 2005-01-09 22:39:27.000000000 -0600
+++ b/tools/examples/xmexample1 2005-01-11 16:28:22.000000000 -0600
@@ -25,6 +25,9 @@
 # Which CPU to start domain on? 
 #cpu = -1   # leave to Xen to pick
 
+# Number of Virtual CPUS to use, default is 1
+#vcpus = 1
+
 #----------------------------------------------------------------------------
 # Define network interfaces.
 
diff -urN a/tools/examples/xmexample2 b/tools/examples/xmexample2
--- a/tools/examples/xmexample2 2005-01-09 22:39:33.000000000 -0600
+++ b/tools/examples/xmexample2 2005-01-11 16:29:12.000000000 -0600
@@ -55,6 +55,10 @@
 #cpu = -1   # leave to Xen to pick
 cpu = vmid  # set based on vmid (mod number of CPUs)
 
+# Number of Virtual CPUS to use, default is 1
+#vcpus = 1
+vcpus = 4 # make your domain a 4-way
+
 #----------------------------------------------------------------------------
 # Define network interfaces.
 
diff -urN a/tools/libxc/xc.h b/tools/libxc/xc.h
--- a/tools/libxc/xc.h  2005-01-09 22:39:30.000000000 -0600
+++ b/tools/libxc/xc.h  2005-01-11 15:57:52.000000000 -0600
@@ -97,7 +97,8 @@
                    const char *ramdisk_name,
                    const char *cmdline,
                    unsigned int control_evtchn,
-                   unsigned long flags);
+                   unsigned long flags,
+                   unsigned int vcpus);
 
 int
 xc_plan9_build (int xc_handle,
diff -urN a/tools/libxc/xc_linux_build.c b/tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c      2005-01-09 22:39:28.000000000 -0600
+++ b/tools/libxc/xc_linux_build.c      2005-01-11 15:59:36.000000000 -0600
@@ -97,7 +97,8 @@
                          const char *cmdline,
                          unsigned long shared_info_frame,
                          unsigned int control_evtchn,
-                         unsigned long flags)
+                         unsigned long flags,
+                         unsigned int vcpus)
 {
     l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
     l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
@@ -127,8 +128,6 @@
     unsigned long vpt_end;
     unsigned long v_end;
 
-    char *n_vcpus;
-
     memset(&dsi, 0, sizeof(struct domain_setup_info));
 
     rc = parseelfimage(image, image_size, &dsi);
@@ -337,11 +336,10 @@
     /* Mask all upcalls... */
     for ( i = 0; i < MAX_VIRT_CPUS; i++ )
         shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
-    n_vcpus = getenv("XEN_VCPUS");
-    if ( n_vcpus )
-       shared_info->n_vcpu = atoi(n_vcpus);
-    else
-       shared_info->n_vcpu = 1;
+
+    shared_info->n_vcpu = vcpus;
+    printf("VCPUS = %d\n",shared_info->n_vcpu);
+
     munmap(shared_info, PAGE_SIZE);
 
     /* Send the page update requests down to the hypervisor. */
@@ -433,7 +431,8 @@
                    const char *ramdisk_name,
                    const char *cmdline,
                    unsigned int control_evtchn,
-                   unsigned long flags)
+                   unsigned long flags,
+                   unsigned int vcpus)
 {
     dom0_op_t launch_op, op;
     int initrd_fd = -1;
@@ -498,7 +497,7 @@
                        &vstartinfo_start, &vkern_entry,
                        ctxt, cmdline,
                        op.u.getdomaininfo.shared_info_frame,
-                       control_evtchn, flags) < 0 )
+                       control_evtchn, flags, vcpus) < 0 )
     {
         ERROR("Error constructing guest OS");
         goto error_out;
diff -urN a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c 2005-01-09 22:39:26.000000000 -0600
+++ b/tools/python/xen/lowlevel/xc/xc.c 2005-01-11 16:00:40.000000000 -0600
@@ -348,19 +348,19 @@
 
     u32   dom;
     char *image, *ramdisk = NULL, *cmdline = "";
-    int   control_evtchn, flags = 0;
+    int   control_evtchn, flags = 0, vcpus = 1;
 
     static char *kwd_list[] = { "dom", "control_evtchn", 
-                                "image", "ramdisk", "cmdline", "flags",
+                                "image", "ramdisk", "cmdline", "flags", "vcpus",
                                 NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iis|ssi", kwd_list, 
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iis|ssii", kwd_list, 
                                       &dom, &control_evtchn, 
-                                      &image, &ramdisk, &cmdline, &flags) )
+                                      &image, &ramdisk, &cmdline, &flags, &vcpus) )
         return NULL;
 
     if ( xc_linux_build(xc->xc_handle, dom, image,
-                        ramdisk, cmdline, control_evtchn, flags) != 0 )
+                        ramdisk, cmdline, control_evtchn, flags, vcpus) != 0 )
         return PyErr_SetFromErrno(xc_error);
     
     Py_INCREF(zero);
@@ -1023,6 +1023,7 @@
       " image   [str]:      Name of kernel image file. May be gzipped.\n"
       " ramdisk [str, n/a]: Name of ramdisk file, if any.\n"
       " cmdline [str, n/a]: Kernel parameters, if any.\n\n"
+      " vcpus   [int, 1]:   Number of Virtual CPUS in domain.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
     { "vmx_build", 
diff -urN a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   2005-01-09 22:39:26.000000000 -0600
+++ b/tools/python/xen/xend/XendDomainInfo.py   2005-01-11 16:12:01.000000000 -0600
@@ -321,6 +321,7 @@
         self.console_port = None
         self.savedinfo = None
         self.is_vmx = 0
+        self.vcpus = 1
 
     def setdom(self, dom):
         """Set the domain id.
@@ -448,6 +449,11 @@
             cpu = sxp.child_value(config, 'cpu')
             if self.recreate and self.dom and cpu is not None:
                 xc.domain_pincpu(self.dom, int(cpu))
+            try:
+                image = sxp.child_value(self.config, 'image')
+                self.vcpus = int(sxp.child_value(image, 'vcpus'))
+            except:
+                raise VmError('invalid vcpus value')
 
             self.init_domain()
             self.configure_console()
@@ -746,12 +752,14 @@
                        ramdisk        = ramdisk,
                        flags          = flags)
        else:
+               log.warning('building dom with %d vcpus', self.vcpus)
                err = buildfn(dom            = dom,
                                image          = kernel,
                        control_evtchn = self.console.getRemotePort(),
                        cmdline        = cmdline,
                        ramdisk        = ramdisk,
-                       flags          = flags)
+                       flags          = flags,
+                       vcpus          = self.vcpus)
         if err != 0:
             raise VmError('Building domain failed: type=%s dom=%d err=%d'
                           % (ostype, dom, err))
@@ -1280,6 +1288,7 @@
 add_config_handler('image',      vm_field_ignore)
 add_config_handler('device',     vm_field_ignore)
 add_config_handler('backend',    vm_field_ignore)
+add_config_handler('vcpus',      vm_field_ignore)
 
 # Register other config handlers.
 add_config_handler('maxmem',     vm_field_maxmem)
diff -urN a/tools/python/xen/xm/create.py b/tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py     2005-01-09 22:39:24.000000000 -0600
+++ b/tools/python/xen/xm/create.py     2005-01-11 16:13:28.000000000 -0600
@@ -109,6 +109,10 @@
           fn=set_int, default=None,
           use="CPU to run the domain on.")
 
+gopts.var('vcpus', val='VCPUS',
+          fn=set_int, default=1,
+          use="# of Virtual CPUS in domain.")
+
 gopts.var('cpu_weight', val='WEIGHT',
           fn=set_float, default=None,
           use="""Set the new domain's cpu weight.
@@ -245,7 +249,10 @@
         config_image.append(['root', cmdline_root])
     if vals.extra:
         config_image.append(['args', vals.extra])
+    if vals.vcpus:
+        config_image.append(['vcpus', vals.vcpus])
     config.append(['image', config_image ])
+
     
 def configure_disks(config_devs, vals):
     """Create the config for disks (virtual block devices).

