
[Xen-devel] [PATCH v10 08/11] xl: move away from the (deprecated) use of cpumap for hard affinity



and start using the vcpu_hard_affinity array instead. This comes
with a few bonuses:

 - allows us to unify the parsing of the two ways VCPU affinity
   can be specified in the domain config file (i.e., cpus="1,3,10-15"
   and cpus=[2, 4, 8]);

 - unifying the parsing makes it possible to do things like this:

      cpus = ["3-4", "2-6"]

   which was not possible before. It means that VCPU 0 must be
   pinned to PCPUs 3,4 and VCPU 1 to PCPUs 2,3,4,5,6. Before this
   change, in fact, the list variant (cpus=[xx, yy]) supported
   only single values. (Of course, the old [2, 3] syntax continues
   to work, although, without the '"' quotes, it is not possible
   to specify ranges.) A fuller example is sketched right below.
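
For instance, here is a minimal sketch of how the new syntax could look
in a guest config (the VCPU count and the PCPU numbers below are made
up, purely for illustration):

      vcpus = 4
      cpus  = ["3-4", "2-6", "0-3,^1"]

With that, VCPU 0 is pinned to PCPUs 3,4, VCPU 1 to PCPUs 2,3,4,5,6 and
VCPU 2 to PCPUs 0,2,3; VCPU 3, having no corresponding list entry, is
left free to run on any PCPU. As with any explicit "cpus=" setting,
automatic NUMA placement is disabled for the domain. The string variant
(e.g., cpus="1,3,10-15") keeps its old meaning: the same mask is applied
to all the VCPUs.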

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Changes from v9:
 * new patch, basically containing the xl bits of what was the
   cpumap deprecation patch in v9.
---
 docs/man/xl.cfg.pod.5    |    8 ++++----
 tools/libxl/xl_cmdimpl.c |   47 ++++++++++++++++++++++++----------------------
 2 files changed, 29 insertions(+), 26 deletions(-)

diff --git a/docs/man/xl.cfg.pod.5 b/docs/man/xl.cfg.pod.5
index c087cbc..af48622 100644
--- a/docs/man/xl.cfg.pod.5
+++ b/docs/man/xl.cfg.pod.5
@@ -143,11 +143,11 @@ Combining this with "all" is also possible, meaning "all,^nodes:1"
 results in all the vcpus of the guest running on all the cpus on the
 host, except for the cpus belonging to the host NUMA node 1.
 
-=item ["2", "3"] (or [2, 3])
+=item ["2", "3-8,^5"]
 
-To ask for specific vcpu mapping. That means (in this example), vcpu #0
-of the guest will run on cpu #2 of the host and vcpu #1 of the guest will
-run on cpu #3 of the host.
+To ask for specific vcpu mapping. That means (in this example), vcpu 0
+of the guest will run on cpu 2 of the host and vcpu 1 of the guest will
+run on cpus 3,4,6,7,8 of the host.
 
 =back
 
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index f2f5fb2..06478a8 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -656,14 +656,16 @@ static int update_cpumap_range(const char *str, libxl_bitmap *cpumap)
 static int vcpupin_parse(const char *cpu, libxl_bitmap *cpumap)
 {
     char *ptr, *saveptr = NULL;
+    char *buf = strdup(cpu);
     int rc = 0;
 
-    for (ptr = strtok_r(cpu, ",", &saveptr); ptr;
+    for (ptr = strtok_r(buf, ",", &saveptr); ptr;
          ptr = strtok_r(NULL, ",", &saveptr)) {
         rc = update_cpumap_range(ptr, cpumap);
         if (rc)
             break;
     }
+    free(buf);
 
     return rc;
 }
@@ -797,17 +799,31 @@ static void parse_config_data(const char *config_source,
     if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0))
         b_info->max_vcpus = l;
 
-    if (!xlu_cfg_get_list (config, "cpus", &cpus, &num_cpus, 1)) {
+    buf = NULL; num_cpus = 0;
+    if (!xlu_cfg_get_list (config, "cpus", &cpus, &num_cpus, 1) ||
+        !xlu_cfg_get_string (config, "cpus", &buf, 0)) {
+        const char *buf2 = NULL; //XXX Trick the compiler!!!
         int j = 0;
 
+        /*
+         * If we are here and buf is !NULL, we're dealing with a string. In
+         * that case, we parse it and put the result in _all_ the elements
+         * (up to b_info->max_vcpus) of the vcpu affinity array.
+         *
+         * If buf is NULL, we have a list, and we put the result of parsing
+         * the i-th entry of the list in the i-th element of the vcpu
+         * affinity array. If there are more vcpus than entries, it is fine
+         * to just not touch the last array elements.
+         */
+
         /* Silently ignore values corresponding to non existing vcpus */
-        if (num_cpus > b_info->max_vcpus)
+        if (num_cpus > b_info->max_vcpus || buf)
             num_cpus = b_info->max_vcpus;
 
         b_info->vcpu_hard_affinity = xmalloc(num_cpus * sizeof(libxl_bitmap));
 
-        while ((buf = xlu_cfg_get_listitem(cpus, j)) != NULL && j < num_cpus) {
-            i = atoi(buf);
+        while ((buf || (buf2 = xlu_cfg_get_listitem(cpus, j)) != NULL) &&
+               j < num_cpus) {
 
             libxl_bitmap_init(&b_info->vcpu_hard_affinity[j]);
             if (libxl_cpu_bitmap_alloc(ctx,
@@ -815,8 +831,10 @@ static void parse_config_data(const char *config_source,
                 fprintf(stderr, "Unable to allocate cpumap for vcpu %d\n", j);
                 exit(1);
             }
-            libxl_bitmap_set_none(&b_info->vcpu_hard_affinity[j]);
-            libxl_bitmap_set(&b_info->vcpu_hard_affinity[j], i);
+
+            if (vcpupin_parse(buf ? buf : buf2,
+                              &b_info->vcpu_hard_affinity[j]))
+                exit(1);
 
             j++;
         }
@@ -825,21 +843,6 @@ static void parse_config_data(const char *config_source,
         /* We have a list of cpumaps, disable automatic placement */
         libxl_defbool_set(&b_info->numa_placement, false);
     }
-    else if (!xlu_cfg_get_string (config, "cpus", &buf, 0)) {
-        char *buf2 = strdup(buf);
-
-        if (libxl_cpu_bitmap_alloc(ctx, &b_info->cpumap, 0)) {
-            fprintf(stderr, "Unable to allocate cpumap\n");
-            exit(1);
-        }
-
-        libxl_bitmap_set_none(&b_info->cpumap);
-        if (vcpupin_parse(buf2, &b_info->cpumap))
-            exit(1);
-        free(buf2);
-
-        libxl_defbool_set(&b_info->numa_placement, false);
-    }
 
     if (!xlu_cfg_get_long (config, "memory", &l, 0)) {
         b_info->max_memkb = l * 1024;
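
As a side note on the vcpupin_parse() change above: strtok_r() writes
NUL bytes into the buffer it tokenizes, so duplicating the string inside
the function (rather than at the call sites, as the removed hunk did)
lets the new unified path hand it the config-owned string directly, once
per VCPU. Below is a self-contained sketch of that pattern; parse_range()
is a made-up stand-in for libxl's update_cpumap_range(), not the real
helper.

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for update_cpumap_range(): just echoes the token. */
static int parse_range(const char *tok)
{
    printf("range token: %s\n", tok);
    return 0;
}

/* Same shape as the patched vcpupin_parse(): duplicate the input so that
 * strtok_r() can safely scribble '\0's over it, then free the copy. */
static int pin_parse(const char *cpu)
{
    char *ptr, *saveptr = NULL;
    char *buf = strdup(cpu);
    int rc = 0;

    if (!buf)
        return -1;

    for (ptr = strtok_r(buf, ",", &saveptr); ptr;
         ptr = strtok_r(NULL, ",", &saveptr)) {
        rc = parse_range(ptr);
        if (rc)
            break;
    }
    free(buf);

    return rc;
}

int main(void)
{
    /* The same (caller-owned) string can be parsed repeatedly, e.g. once
     * per VCPU, without the caller having to strdup() it first. */
    return pin_parse("3-8,^5");
}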

