
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] cpumask <=> xenctl_cpumap: allocate CPU masks and byte maps dynamically
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Tue, 25 Oct 2011 01:55:15 +0100
Delivery-date: Mon, 24 Oct 2011 17:56:24 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1319183124 -7200
# Node ID a7ccbc79fc17862c21e34c2f145a5a053711f917
# Parent  1c8789852eafea3a40664283ef19096e09a4cc43
cpumask <=> xenctl_cpumap: allocate CPU masks and byte maps dynamically

Previously these functions each kept an NR_CPUS-bit wide array on their
own stack, and their callers kept another (through a cpumask_t) on
theirs, which can get rather large for big NR_CPUS. As the functions
can fail anyway, do the allocation inside them instead.

For the x86/MCA case this requires a little code restructuring: by using
different CPU mask accessors it was possible to avoid allocating a mask
in the broadcast case. Also, this was the only caller that failed to
check the return value of the conversion function (which could have led
to undefined behavior).
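
Condensed sketch of that flow (the `broadcast' condition stands in for
the XEN_MC_INJECT_CPU_BROADCAST flag test; identifiers otherwise follow
the hunk below):

    const cpumask_t *cpumap;
    cpumask_var_t cmv;

    if ( broadcast )
        cpumap = &cpu_online_map;   /* borrow the global map, no allocation */
    else
    {
        ret = xenctl_cpumap_to_cpumask(&cmv, &op->u.mc_inject_v2.cpumap);
        if ( ret )                  /* conversion failure is now checked */
            break;
        cpumap = cmv;
    }
    /* ... operate on cpumap ... */
    if ( cpumap != &cpu_online_map )
        free_cpumask_var(cmv);      /* only free what was allocated here */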

Also constify the input parameters of the two functions.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---


diff -r 1c8789852eaf -r a7ccbc79fc17 xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c     Fri Oct 21 09:44:47 2011 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce.c     Fri Oct 21 09:45:24 2011 +0200
@@ -1531,25 +1531,28 @@
 
     case XEN_MC_inject_v2:
     {
-        cpumask_t cpumap;
+        const cpumask_t *cpumap;
+        cpumask_var_t cmv;
 
         if (nr_mce_banks == 0)
             return x86_mcerr("do_mca #MC", -ENODEV);
 
         if ( op->u.mc_inject_v2.flags & XEN_MC_INJECT_CPU_BROADCAST )
-            cpumask_copy(&cpumap, &cpu_online_map);
+            cpumap = &cpu_online_map;
         else
         {
-            int gcw;
-
-            xenctl_cpumap_to_cpumask(&cpumap,
-                                     &op->u.mc_inject_v2.cpumap);
-            gcw = cpumask_weight(&cpumap);
-            cpumask_and(&cpumap, &cpu_online_map, &cpumap);
-
-            if ( cpumask_empty(&cpumap) )
-                return x86_mcerr("No online CPU passed\n", -EINVAL);
-            else if ( gcw != cpumask_weight(&cpumap) )
+            ret = xenctl_cpumap_to_cpumask(&cmv,
+                                           &op->u.mc_inject_v2.cpumap);
+            if ( ret )
+                break;
+            cpumap = cmv;
+            if ( !cpumask_intersects(cpumap, &cpu_online_map) )
+            {
+                free_cpumask_var(cmv);
+                ret = x86_mcerr("No online CPU passed\n", -EINVAL);
+                break;
+            }
+            if ( !cpumask_subset(cpumap, &cpu_online_map) )
                 dprintk(XENLOG_INFO,
                         "Not all required CPUs are online\n");
         }
@@ -1558,19 +1561,25 @@
         {
         case XEN_MC_INJECT_TYPE_MCE:
             if ( mce_broadcast &&
-                 !cpumask_equal(&cpumap, &cpu_online_map) )
+                 !cpumask_equal(cpumap, &cpu_online_map) )
                 printk("Not trigger MCE on all CPUs, may HANG!\n");
-            on_selected_cpus(&cpumap, x86_mc_mceinject, NULL, 1);
+            on_selected_cpus(cpumap, x86_mc_mceinject, NULL, 1);
             break;
         case XEN_MC_INJECT_TYPE_CMCI:
             if ( !cmci_support )
-                return x86_mcerr(
+                ret = x86_mcerr(
                     "No CMCI supported in platform\n", -EINVAL);
-            on_selected_cpus(&cpumap, x86_cmci_inject, NULL, 1);
+            else
+                on_selected_cpus(cpumap, x86_cmci_inject, NULL, 1);
             break;
         default:
-            return x86_mcerr("Wrong mca type\n", -EINVAL);
+            ret = x86_mcerr("Wrong mca type\n", -EINVAL);
+            break;
         }
+
+        if (cpumap != &cpu_online_map)
+            free_cpumask_var(cmv);
+
         break;
     }
 
diff -r 1c8789852eaf -r a7ccbc79fc17 xen/arch/x86/platform_hypercall.c
--- a/xen/arch/x86/platform_hypercall.c Fri Oct 21 09:44:47 2011 +0200
+++ b/xen/arch/x86/platform_hypercall.c Fri Oct 21 09:45:24 2011 +0200
@@ -346,7 +346,7 @@
         uint32_t cpu;
         uint64_t idletime, now = NOW();
         struct xenctl_cpumap ctlmap;
-        cpumask_t cpumap;
+        cpumask_var_t cpumap;
         XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
         XEN_GUEST_HANDLE(uint64) idletimes;
 
@@ -366,22 +366,26 @@
             goto out;
         guest_from_compat_handle(idletimes, op->u.getidletime.idletime);
 
-        for_each_cpu_mask ( cpu, cpumap )
+        for_each_cpu_mask ( cpu, *cpumap )
         {
             if ( idle_vcpu[cpu] == NULL )
-                cpu_clear(cpu, cpumap);
+                cpumask_clear_cpu(cpu, cpumap);
             idletime = get_cpu_idle_time(cpu);
 
-            ret = -EFAULT;
             if ( copy_to_guest_offset(idletimes, cpu, &idletime, 1) )
-                goto out;
+            {
+                ret = -EFAULT;
+                break;
+            }
         }
 
         op->u.getidletime.now = now;
-        if ( (ret = cpumask_to_xenctl_cpumap(&ctlmap, &cpumap)) != 0 )
-            goto out;
+        if ( ret == 0 )
+            ret = cpumask_to_xenctl_cpumap(&ctlmap, cpumap);
+        free_cpumask_var(cpumap);
 
-        ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
+        if ( ret == 0 && copy_to_guest(u_xenpf_op, op, 1) )
+            ret = -EFAULT;
     }
     break;
 
diff -r 1c8789852eaf -r a7ccbc79fc17 xen/common/domctl.c
--- a/xen/common/domctl.c       Fri Oct 21 09:44:47 2011 +0200
+++ b/xen/common/domctl.c       Fri Oct 21 09:45:24 2011 +0200
@@ -30,11 +30,15 @@
 static DEFINE_SPINLOCK(domctl_lock);
 
 int cpumask_to_xenctl_cpumap(
-    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
+    struct xenctl_cpumap *xenctl_cpumap, const cpumask_t *cpumask)
 {
     unsigned int guest_bytes, copy_bytes, i;
     uint8_t zero = 0;
-    uint8_t bytemap[(NR_CPUS + 7) / 8];
+    int err = 0;
+    uint8_t *bytemap = xmalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
+
+    if ( !bytemap )
+        return -ENOMEM;
 
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
     copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
@@ -43,37 +47,48 @@
 
     if ( copy_bytes != 0 )
         if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
-            return -EFAULT;
+            err = -EFAULT;
 
-    for ( i = copy_bytes; i < guest_bytes; i++ )
+    for ( i = copy_bytes; !err && i < guest_bytes; i++ )
         if ( copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1) )
-            return -EFAULT;
+            err = -EFAULT;
 
-    return 0;
+    xfree(bytemap);
+
+    return err;
 }
 
 int xenctl_cpumap_to_cpumask(
-    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
+    cpumask_var_t *cpumask, const struct xenctl_cpumap *xenctl_cpumap)
 {
     unsigned int guest_bytes, copy_bytes;
-    uint8_t bytemap[(NR_CPUS + 7) / 8];
+    int err = 0;
+    uint8_t *bytemap = xzalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
+
+    if ( !bytemap )
+        return -ENOMEM;
 
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
     copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
 
-    memset(bytemap, 0, sizeof(bytemap));
-
     if ( copy_bytes != 0 )
     {
         if ( copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes) )
-            return -EFAULT;
+            err = -EFAULT;
         if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
             bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
     }
 
-    bitmap_byte_to_long(cpumask_bits(cpumask), bytemap, nr_cpu_ids);
+    if ( err )
+        /* nothing */;
+    else if ( alloc_cpumask_var(cpumask) )
+        bitmap_byte_to_long(cpumask_bits(*cpumask), bytemap, nr_cpu_ids);
+    else
+        err = -ENOMEM;
 
-    return 0;
+    xfree(bytemap);
+
+    return err;
 }
 
 static inline int is_free_domid(domid_t dom)
@@ -558,7 +573,6 @@
         domid_t dom = op->domain;
         struct domain *d = rcu_lock_domain_by_id(dom);
         struct vcpu *v;
-        cpumask_t new_affinity;
 
         ret = -ESRCH;
         if ( d == NULL )
@@ -578,10 +592,15 @@
 
         if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
         {
+            cpumask_var_t new_affinity;
+
             ret = xenctl_cpumap_to_cpumask(
                 &new_affinity, &op->u.vcpuaffinity.cpumap);
             if ( !ret )
-                ret = vcpu_set_affinity(v, &new_affinity);
+            {
+                ret = vcpu_set_affinity(v, new_affinity);
+                free_cpumask_var(new_affinity);
+            }
         }
         else
         {
diff -r 1c8789852eaf -r a7ccbc79fc17 xen/common/trace.c
--- a/xen/common/trace.c        Fri Oct 21 09:44:47 2011 +0200
+++ b/xen/common/trace.c        Fri Oct 21 09:45:24 2011 +0200
@@ -378,7 +378,16 @@
         tbc->size = t_info_pages * PAGE_SIZE;
         break;
     case XEN_SYSCTL_TBUFOP_set_cpu_mask:
-        rc = xenctl_cpumap_to_cpumask(&tb_cpu_mask, &tbc->cpu_mask);
+    {
+        cpumask_var_t mask;
+
+        rc = xenctl_cpumap_to_cpumask(&mask, &tbc->cpu_mask);
+        if ( !rc )
+        {
+            cpumask_copy(&tb_cpu_mask, mask);
+            free_cpumask_var(mask);
+        }
+    }
         break;
     case XEN_SYSCTL_TBUFOP_set_evt_mask:
         tb_event_mask = tbc->evt_mask;
diff -r 1c8789852eaf -r a7ccbc79fc17 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Fri Oct 21 09:44:47 2011 +0200
+++ b/xen/include/xen/cpumask.h Fri Oct 21 09:45:24 2011 +0200
@@ -488,9 +488,7 @@
 
 /* Copy to/from cpumap provided by control tools. */
 struct xenctl_cpumap;
-int cpumask_to_xenctl_cpumap(
-    struct xenctl_cpumap *enctl_cpumap, cpumask_t *cpumask);
-int xenctl_cpumap_to_cpumask(
-    cpumask_t *cpumask, struct xenctl_cpumap *enctl_cpumap);
+int cpumask_to_xenctl_cpumap(struct xenctl_cpumap *, const cpumask_t *);
+int xenctl_cpumap_to_cpumask(cpumask_var_t *, const struct xenctl_cpumap *);
 
 #endif /* __XEN_CPUMASK_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
