
[Xen-devel] [PATCH v2] libxl: allow setting more than 31 vcpus



The current implementation uses an integer to record the currently
available vcpus, which limits a user to at most 31 vcpus.
This patch uses a cpumap instead of an integer, which is a better fit
for this data and also removes the limit on the maximum number of vcpus.

Signed-off-by: Yang Zhang <yang.z.zhang@xxxxxxxxx>
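
A standalone sketch of the new encoding (illustration only, not part of
the patch): bitmap_to_hex_string below mirrors what the new
libxl_cpumap_to_hex_string helper does. The old code built an int mask
as (1 << l) - 1, which overflows once l > 31; a per-byte bitmap rendered
as a hex string has no such limit.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Render a bitmap as a hex string, most significant byte first; this is
 * the layout the patch passes to qemu via "-vcpu_avail 0x%s". */
static char *bitmap_to_hex_string(const uint8_t *map, int size)
{
    char *buf = calloc(size * 2 + 1, 1);
    char *p = buf;
    int i = size;

    while (--i >= 0) {
        sprintf(p, "%02x", map[i]);
        p += 2;
    }
    return buf;
}

int main(void)
{
    int vcpus = 33;                  /* more than 31 now works */
    int sz = (vcpus + 7) / 8;        /* 5 bytes hold 33 bits */
    uint8_t *map = calloc(sz, 1);
    char *s;
    int i;

    for (i = 0; i < vcpus; i++)
        map[i / 8] |= 1 << (i & 7);  /* mark vcpu i as available */

    s = bitmap_to_hex_string(map, sz);
    printf("-vcpu_avail 0x%s\n", s); /* prints -vcpu_avail 0x01ffffffff */
    free(s);
    free(map);
    return 0;
}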

diff -r c8a01e966ec8 tools/libxl/libxl_create.c
--- a/tools/libxl/libxl_create.c        Thu May 17 12:59:49 2012 +0800
+++ b/tools/libxl/libxl_create.c        Thu May 17 13:01:26 2012 +0800
@@ -142,8 +142,11 @@ int libxl__domain_build_info_setdefault(

     if (!b_info->max_vcpus)
         b_info->max_vcpus = 1;
-    if (!b_info->cur_vcpus)
-        b_info->cur_vcpus = 1;
+    if (!b_info->avail_vcpus.size) {
+        if (libxl_cpumap_alloc_size(CTX, &b_info->avail_vcpus, 1))
+            return ERROR_NOMEM;
+        libxl_cpumap_set(&b_info->avail_vcpus, 0);
+    }

     if (!b_info->cpumap.size) {
         if (libxl_cpumap_alloc(CTX, &b_info->cpumap))
diff -r c8a01e966ec8 tools/libxl/libxl_dm.c
--- a/tools/libxl/libxl_dm.c    Thu May 17 12:59:49 2012 +0800
+++ b/tools/libxl/libxl_dm.c    Thu May 17 13:01:26 2012 +0800
@@ -200,10 +200,13 @@ static char ** libxl__build_device_model
                               libxl__sprintf(gc, "%d", b_info->max_vcpus),
                               NULL);
         }
-        if (b_info->cur_vcpus) {
+        if (b_info->avail_vcpus.size) {
+            char *s = libxl_cpumap_to_hex_string(&b_info->avail_vcpus);

             flexarray_vappend(dm_args, "-vcpu_avail",
-                              libxl__sprintf(gc, "0x%x", b_info->cur_vcpus),
+                              libxl__sprintf(gc, "0x%s", s),
                               NULL);
+            free(s);
         }
         for (i = 0; i < num_vifs; i++) {
             if (vifs[i].nictype == LIBXL_NIC_TYPE_IOEMU) {
@@ -441,11 +444,14 @@ static char ** libxl__build_device_model
         }
         if (b_info->max_vcpus > 1) {
             flexarray_append(dm_args, "-smp");
-            if (b_info->cur_vcpus)
+            if (b_info->avail_vcpus.size) {
+                int nr_set_cpus =
+                    libxl_cpumap_count_set(&b_info->avail_vcpus);
+
                 flexarray_append(dm_args, libxl__sprintf(gc, "%d,maxcpus=%d",
                                                          b_info->max_vcpus,
-                                                         b_info->cur_vcpus));
-            else
+                                                         nr_set_cpus));
+            } else
                 flexarray_append(dm_args, libxl__sprintf(gc, "%d",
                                                          b_info->max_vcpus));
         }
diff -r c8a01e966ec8 tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c   Thu May 17 12:59:49 2012 +0800
+++ b/tools/libxl/libxl_dom.c   Thu May 17 13:01:26 2012 +0800
@@ -195,7 +195,8 @@ int libxl__build_post(libxl__gc *gc, uin
     ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn);
     for (i = 0; i < info->max_vcpus; i++) {
         ents[12+(i*2)]   = libxl__sprintf(gc, "cpu/%d/availability", i);
-        ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 << i)))
+        ents[12+(i*2)+1] = (i && info->avail_vcpus.size
+                            && !libxl_cpumap_test(&info->avail_vcpus, i))
                             ? "offline" : "online";
     }

@@ -346,7 +347,7 @@ static int hvm_build_set_params(xc_inter
     va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
     va_hvm->apic_mode = libxl_defbool_val(info->u.hvm.apic);
     va_hvm->nr_vcpus = info->max_vcpus;
-    memcpy(va_hvm->vcpu_online, &info->cur_vcpus, sizeof(info->cur_vcpus));
+    memcpy(va_hvm->vcpu_online, info->avail_vcpus.map, info->avail_vcpus.size);
     for (i = 0, sum = 0; i < va_hvm->length; i++)
         sum += ((uint8_t *) va_hvm)[i];
     va_hvm->checksum -= sum;
diff -r c8a01e966ec8 tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl       Thu May 17 12:59:49 2012 +0800
+++ b/tools/libxl/libxl_types.idl       Thu May 17 13:01:26 2012 +0800
@@ -242,7 +242,7 @@ libxl_sched_params = Struct("sched_param
 # libxl_file_reference_unmap.
 libxl_domain_build_info = Struct("domain_build_info",[
     ("max_vcpus",       integer),
-    ("cur_vcpus",       integer),
+    ("avail_vcpus",     libxl_cpumap),
     ("cpumap",          libxl_cpumap),
     ("tsc_mode",        libxl_tsc_mode),
     ("max_memkb",       MemKB),
diff -r c8a01e966ec8 tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c Thu May 17 12:59:49 2012 +0800
+++ b/tools/libxl/libxl_utils.c Thu May 17 13:01:26 2012 +0800
@@ -503,6 +503,19 @@ int libxl_cpumap_alloc(libxl_ctx *ctx, l
     return 0;
 }

+int libxl_cpumap_alloc_size(libxl_ctx *ctx, libxl_cpumap *cpumap, int max_cpus)
+{
+    int sz;
+
+    if (max_cpus == 0)
+        return ERROR_FAIL;
+
+    sz = (max_cpus + 7) / 8;
+    cpumap->map = libxl__zalloc(NULL, sz * sizeof(*cpumap->map));
+    cpumap->size = sz;
+    return 0;
+}
+
 void libxl_cpumap_dispose(libxl_cpumap *map)
 {
     free(map->map);
@@ -529,6 +542,33 @@ void libxl_cpumap_reset(libxl_cpumap *cp
     cpumap->map[cpu / 8] &= ~(1 << (cpu & 7));
 }

+int libxl_cpumap_count_set(const libxl_cpumap *cpumap)
+{
+    int i, nr_set_cpus = 0;
+
+    /* Open-coded bit test: libxl_cpumap_test() takes a non-const map. */
+    for (i = 0; i < cpumap->size * 8; i++)
+        if (cpumap->map[i / 8] & (1 << (i & 7)))
+            nr_set_cpus++;
+
+    return nr_set_cpus;
+}
+
+char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap)
+{
+    int i = cpumap->size;
+    char *p = libxl__zalloc(NULL, cpumap->size * 2 + 1);
+    char *q = p;
+
+    /* Most significant byte first, so the string reads as one number. */
+    while (--i >= 0) {
+        sprintf(p, "%02x", cpumap->map[i]);
+        p += 2;
+    }
+    *p = '\0';
+    return q;
+}
+
 int libxl_get_max_cpus(libxl_ctx *ctx)
 {
     return xc_get_max_cpus(ctx->xch);
diff -r c8a01e966ec8 tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h Thu May 17 12:59:49 2012 +0800
+++ b/tools/libxl/libxl_utils.h Thu May 17 13:01:26 2012 +0800
@@ -64,9 +64,12 @@ int libxl_vdev_to_device_disk(libxl_ctx
                                libxl_device_disk *disk);

 int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap);
+int libxl_cpumap_alloc_size(libxl_ctx *ctx, libxl_cpumap *cpumap, int max_cpus);
 int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu);
+int libxl_cpumap_count_set(const libxl_cpumap *cpumap);
+char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap);
 static inline void libxl_cpumap_set_any(libxl_cpumap *cpumap)
 {
     memset(cpumap->map, -1, cpumap->size);
diff -r c8a01e966ec8 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c  Thu May 17 12:59:49 2012 +0800
+++ b/tools/libxl/xl_cmdimpl.c  Thu May 17 13:01:26 2012 +0800
@@ -647,7 +647,14 @@ static void parse_config_data(const char

     if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) {
         b_info->max_vcpus = l;
-        b_info->cur_vcpus = (1 << l) - 1;
+
+        if (libxl_cpumap_alloc_size(ctx, &b_info->avail_vcpus, l)) {
+            fprintf(stderr, "Unable to allocate cpumap\n");
+            exit(1);
+        }
+        libxl_cpumap_set_none(&b_info->avail_vcpus);
+        while (l-- > 0)
+            libxl_cpumap_set(&b_info->avail_vcpus, l);
     }

     if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0))

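For illustration only (the numbers below are made up, not taken from the
patch): with this change applied, a guest config can request more than
31 vcpus directly, e.g.

    vcpus = 40

parse_config_data then allocates a 5-byte cpumap, sets bits 0-39, and the
device model is handed "-vcpu_avail 0xffffffffff" instead of a 32-bit
integer mask.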