[Xen-devel] [PATCH] xl: fix broken xl vcpu-list output



Hi,

# xl vcpu-list
hangs on my big box. The issue is an endless loop, where the algorithm
for printing the CPU affinity in a condensed way looks for a set bit in
a zero byte:

             for (i = 0; !(pcpumap & 1); ++i, pcpumap >>= 1)

Looking at the code I found that it is entirely broken if more than 8
CPUs are used. Besides the endless-loop issue, the output is totally
bogus except for the "any CPU" case, which is handled explicitly earlier.
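
To illustrate (a minimal standalone snippet, not part of the patch): with
a zero byte there is no set bit to find, and shifting zero right yields
zero again, so the loop condition never becomes false:

    uint8_t pcpumap = 0;  /* a byte with no CPU bit set */
    int i;
    /* !(0 & 1) is always true and 0 >> 1 stays 0, so this never ends */
    for (i = 0; !(pcpumap & 1); ++i, pcpumap >>= 1)
        ;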

I tried to fix it, but the whole approach does not work if the outer
loop actually iterates (executes more than once).

I could not copy the Linux version of that algorithm due to licensing
incompatibilities, and the Python version is not easily converted to C,
so I coded my own version from scratch. It is a bit verbose since it
iterates over bits instead of bytes, but it is cleaner and it survived
some unit testing. I didn't spend much time optimizing it, though.

I put it in a separate function as I plan to use it later for printing
cpupool affinity in a similar way (a post-4.1.0 patch living in one of
my branches).
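
As an example of the condensed format (a quick sketch calling the
print_bitmap() function from the patch below): a map byte of 0x3a has
bits 1, 3, 4 and 5 set, which should come out as "1,3-5":

    uint8_t map[1] = { 0x3a };     /* bits 1 and 3-5 set */
    print_bitmap(map, 8, stdout);  /* should print "1,3-5" */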

If you have a better implementation available, I can easily push it through my automated unit test.
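
Such a test could look like this (a hypothetical sketch; the check()
helper and its tmpfile() plumbing are made up for illustration, while
print_bitmap() is the function from the patch below):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Capture print_bitmap()'s output in a temporary file and compare
     * it against the expected condensed string. */
    static void check(uint8_t *map, int nbits, const char *expected)
    {
        char buf[256];
        size_t len;
        FILE *f = tmpfile();

        assert(f != NULL);
        print_bitmap(map, nbits, f);
        rewind(f);
        len = fread(buf, 1, sizeof(buf) - 1, f);
        buf[len] = '\0';
        fclose(f);
        assert(strcmp(buf, expected) == 0);
    }

    int main(void)
    {
        uint8_t none[2] = { 0x00, 0x00 };
        uint8_t runs[2] = { 0x3a, 0x80 };  /* bits 1, 3-5 and 15 set */

        check(none, 16, "none");
        check(runs, 16, "1,3-5,15");
        return 0;
    }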

Please review and apply to Xen 4.1.0-rc.

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>

Regards,
Andre.

--
Andre Przywara
AMD-Operating System Research Center (OSRC), Dresden, Germany

commit 3a002468a2ff1575d32a09353e976eb0312bda77
Author: Andre Przywara <andre.przywara@xxxxxxx>
Date:   Fri Feb 4 14:57:05 2011 +0100

    xl: fix broken xl vcpu-list output with more than 8 CPUs
    
    xl vcpu-list hangs in an endless loop, because the algorithm for
    printing the CPU affinity in a condensed way looks for a set bit in
    a zero byte:
                 for (i = 0; !(pcpumap & 1); ++i, pcpumap >>= 1)
    Looking at the code I found that it is entirely broken if more than 8
    CPUs are used. Besides the endless-loop issue, the output is totally
    bogus except for the "any CPU" case, which is handled explicitly earlier.
    I tried to fix it, but the whole approach does not work if the outer
    loop actually iterates (executes more than once).
    This fix reimplements the whole algorithm in a clean (though not
    heavily optimized) way. It survived some unit testing.
    
    Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>

diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index 53e7941..fa9fef4 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -3372,13 +3372,60 @@ int main_button_press(int argc, char **argv)
     return 0;
 }
 
+static void print_bitmap(uint8_t *map, int maplen, FILE *stream)
+{
+    int i;
+    uint8_t pmap, bitmask;
+    int firstset, state = 0;
+
+    for (i = 0; i < maplen; i++) {
+        if (i % 8 == 0) {
+            pmap = *map++;
+            bitmask = 1;
+        } else bitmask <<= 1;
+
+        switch (state) {
+        case 0:
+        case 2:
+            if ((pmap & bitmask) != 0) {
+                firstset = i;
+                state++;
+            }
+            continue;
+        case 1:
+        case 3:
+            if ((pmap & bitmask) == 0) {
+                fprintf(stream, "%s%d", state > 1 ? "," : "", firstset);
+                if (i - 1 > firstset)
+                    fprintf(stream, "-%d", i - 1);
+                state = 2;
+            }
+            continue;
+        }
+    }
+    switch (state) {
+        case 0:
+            fprintf(stream, "none");
+            break;
+        case 2:
+            break;
+        case 1:
+            if (firstset == 0) {
+                fprintf(stream, "any cpu");
+                break;
+            }
+        case 3:
+            fprintf(stream, "%s%d", state > 1 ? "," : "", firstset);
+            if (i - 1 > firstset)
+                fprintf(stream, "-%d", i - 1);
+            break;
+    }
+}
+
 static void print_vcpuinfo(uint32_t tdomid,
                            const libxl_vcpuinfo *vcpuinfo,
                            uint32_t nr_cpus)
 {
-    int i, l;
-    uint8_t *cpumap;
-    uint8_t pcpumap;
     char *domname;
 
     /*      NAME  ID  VCPU */
@@ -3398,47 +3445,8 @@ static void print_vcpuinfo(uint32_t tdomid,
     /*      TIM */
     printf("%9.1f  ", ((float)vcpuinfo->vcpu_time / 1e9));
     /* CPU AFFINITY */
-    pcpumap = nr_cpus > 8 ? (uint8_t)-1 : ((1 << nr_cpus) - 1);
-    for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
-        if (*cpumap < pcpumap) {
-            break;
-        }
-        if (nr_cpus > 8) {
-            pcpumap = -1;
-            nr_cpus -= 8;
-        } else {
-            pcpumap = ((1 << nr_cpus) - 1);
-            nr_cpus = 0;
-        }
-    }
-    if (!nr_cpus) {
-        printf("any cpu\n");
-    } else {
-        for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
-            pcpumap = *cpumap;
-            for (i = 0; !(pcpumap & 1); ++i, pcpumap >>= 1)
-                ;
-            printf("%u", i);
-            for (l = i, pcpumap = (pcpumap >> 1); (pcpumap & 1); ++i, pcpumap >>= 1)
-                ;
-            if (l < i) {
-                printf("-%u", i);
-            }
-            for (++i; pcpumap; ++i, pcpumap >>= 1) {
-                if (pcpumap & 1) {
-                    printf(",%u", i);
-                    for (l = i, pcpumap = (pcpumap >> 1); (pcpumap & 1); ++i, pcpumap >>= 1)
-                        ;
-                    if (l < i) {
-                        printf("-%u", i);
-                    }
-                    ++i;
-                }
-            }
-            printf("\n");
-            nr_cpus = nr_cpus > 8 ? nr_cpus - 8 : 0;
-        }
-    }
+    print_bitmap(vcpuinfo->cpumap.map, nr_cpus, stdout);
+    printf("\n");
 }
 
 static void print_domain_vcpuinfo(uint32_t domid, uint32_t nr_cpus)