To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] fix misc issues related to allowing support of more CPUs
From: "Jan Beulich" <jbeulich@xxxxxxxxxx>
Date: Fri, 19 Sep 2008 14:01:59 +0100
This mainly means removing on-stack variables that are (or should be) sized
by NR_CPUS (other than cpumask_t ones), converting them to static or heap
storage, and adjusting certain array sizes.
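For illustration, a minimal sketch of the recurring conversion (the function
is hypothetical; xmalloc_array(), xfree(), last_cpu() and cpu_possible_map
are the same Xen interfaces the patch uses below):

    /* Hypothetical example: an on-stack array sized by NR_CPUS can
     * overflow Xen's small per-CPU stacks once NR_CPUS grows, so it
     * moves to the heap, sized by the CPUs actually possible. */
    static void example_per_cpu_counts(void)
    {
        unsigned int nr_cpus = last_cpu(cpu_possible_map) + 1;
        unsigned int *cnt = xmalloc_array(unsigned int, nr_cpus);

        if ( cnt == NULL )
            return;                     /* degrade gracefully */
        memset(cnt, 0, nr_cpus * sizeof(*cnt));
        /* ... accumulate into cnt[cpu] ... */
        xfree(cnt);
    }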

Includes a small improvement (I think) to last_cpu(), and a necessary
conversion from BUG_ON() to WARN_ON() in vsnprintf().
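The last_cpu() change relies on a simple invariant: a mask with k bits set
cannot have its highest set bit below index k - 1, so the scan can start
there instead of at the first set bit. A standalone (plain C, non-Xen)
demonstration of that invariant:

    #include <stdio.h>

    /* With k bits set, the highest set bit has index >= k - 1, so
     * starting the scan at k - 1 skips no candidate for "last". */
    static int last_set_bit(unsigned long mask, int nbits)
    {
        int k = __builtin_popcountl(mask);  /* cpus_weight() analogue */
        int bit, last = nbits;              /* nbits acts as "none" */

        if (k == 0)
            return last;
        for (bit = k - 1; bit < nbits; bit++)
            if (mask & (1UL << bit))
                last = bit;
        return last;
    }

    int main(void)
    {
        printf("%d\n", last_set_bit(0x29UL, 64));   /* bits 0,3,5 -> 5 */
        return 0;
    }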

There's at least one open tools issue: the 'xm vcpu-pin' path assumes
a maximum of 64 CPUs in many places.
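By way of illustration only (the names are invented, not actual tools code):
a CPU map carried as a single 64-bit word, which is effectively what parts
of that path do, cannot represent CPU 64 or above:

    #include <stdint.h>

    /* Hypothetical sketch of the tools-side limitation: one 64-bit
     * word as the CPU map caps the representable CPUs at 64. */
    typedef uint64_t legacy_cpumap_t;

    static inline int legacy_cpumap_set(legacy_cpumap_t *map,
                                        unsigned int cpu)
    {
        if (cpu >= 64)
            return -1;          /* CPU 64 and up are unrepresentable */
        *map |= (uint64_t)1 << cpu;
        return 0;
    }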

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

Index: 2008-09-19/xen/arch/x86/nmi.c
===================================================================
--- 2008-09-19.orig/xen/arch/x86/nmi.c  2008-09-19 13:56:32.000000000 +0200
+++ 2008-09-19/xen/arch/x86/nmi.c       2008-09-19 13:59:19.000000000 +0200
@@ -96,7 +96,7 @@ int nmi_active;
 
 int __init check_nmi_watchdog (void)
 {
-    unsigned int prev_nmi_count[NR_CPUS];
+    static unsigned int __initdata prev_nmi_count[NR_CPUS];
     int cpu;
     
     if ( !nmi_watchdog )
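For context: check_nmi_watchdog() is __init, so its scratch array can be
static __initdata; both then live in init sections that Xen discards once
boot completes, costing neither stack space nor run-time memory. A minimal
sketch of the idiom:

    /* Sketch of the __init/__initdata pairing: boot-only code and its
     * boot-only static data are both discarded after initialisation. */
    static unsigned int __initdata scratch[NR_CPUS];

    static int __init boot_time_probe(void)
    {
        scratch[0]++;           /* boot-time-only work */
        return 0;
    }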
Index: 2008-09-19/xen/arch/x86/smpboot.c
===================================================================
--- 2008-09-19.orig/xen/arch/x86/smpboot.c      2008-09-19 13:59:11.000000000 +0200
+++ 2008-09-19/xen/arch/x86/smpboot.c   2008-09-19 13:59:19.000000000 +0200
@@ -1121,7 +1121,7 @@ static void __init smp_boot_cpus(unsigne
        Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
 
        kicked = 1;
-       for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
+       for (bit = 0; kicked < NR_CPUS && bit < NR_CPUS; bit++) {
                apicid = cpu_present_to_apicid(bit);
                /*
                 * Don't even attempt to start the boot CPU!
Index: 2008-09-19/xen/arch/x86/x86_32/domain_page.c
===================================================================
--- 2008-09-19.orig/xen/arch/x86/x86_32/domain_page.c   2008-09-19 13:56:32.000000000 +0200
+++ 2008-09-19/xen/arch/x86/x86_32/domain_page.c        2008-09-19 13:59:19.000000000 +0200
@@ -201,6 +201,9 @@ void *map_domain_page_global(unsigned lo
 
     ASSERT(!in_irq() && local_irq_is_enabled());
 
+    /* At least half the ioremap space should be available to us. */
+    BUILD_BUG_ON(IOREMAP_VIRT_START + (IOREMAP_MBYTES << 19) >= FIXADDR_START);
+
     spin_lock(&globalmap_lock);
 
     idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
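A note on the added assertion: IOREMAP_MBYTES << 20 is the ioremap area's
size in bytes, so IOREMAP_MBYTES << 19 is half of it, and the BUILD_BUG_ON()
fails the build unless at least half of that space lies below FIXADDR_START.
The compile-time mechanism, sketched in generic (non-Xen) form:

    /* Generic sketch of a BUILD_BUG_ON()-style check: when cond is
     * true the array size goes negative and compilation fails, so the
     * violation is caught at build time rather than at run time. */
    #define COMPILE_TIME_ASSERT(cond) \
        ((void)sizeof(char[1 - 2 * !!(cond)]))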
Index: 2008-09-19/xen/common/domctl.c
===================================================================
--- 2008-09-19.orig/xen/common/domctl.c 2008-09-19 13:56:32.000000000 +0200
+++ 2008-09-19/xen/common/domctl.c      2008-09-19 13:59:19.000000000 +0200
@@ -145,16 +145,23 @@ static unsigned int default_vcpu0_locati
 {
     struct domain *d;
     struct vcpu   *v;
-    unsigned int   i, cpu, cnt[NR_CPUS] = { 0 };
+    unsigned int   i, cpu, nr_cpus, *cnt;
     cpumask_t      cpu_exclude_map;
 
     /* Do an initial CPU placement. Pick the least-populated CPU. */
-    rcu_read_lock(&domlist_read_lock);
-    for_each_domain ( d )
-        for_each_vcpu ( d, v )
-        if ( !test_bit(_VPF_down, &v->pause_flags) )
-            cnt[v->processor]++;
-    rcu_read_unlock(&domlist_read_lock);
+    nr_cpus = last_cpu(cpu_possible_map) + 1;
+    cnt = xmalloc_array(unsigned int, nr_cpus);
+    if ( cnt )
+    {
+        memset(cnt, 0, nr_cpus * sizeof(*cnt));
+
+        rcu_read_lock(&domlist_read_lock);
+        for_each_domain ( d )
+            for_each_vcpu ( d, v )
+                if ( !test_bit(_VPF_down, &v->pause_flags) )
+                    cnt[v->processor]++;
+        rcu_read_unlock(&domlist_read_lock);
+    }
 
     /*
      * If we're on a HT system, we only auto-allocate to a non-primary HT. We 
@@ -172,10 +179,12 @@ static unsigned int default_vcpu0_locati
              (cpus_weight(cpu_sibling_map[i]) > 1) )
             continue;
         cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
-        if ( cnt[i] <= cnt[cpu] )
+        if ( !cnt || cnt[i] <= cnt[cpu] )
             cpu = i;
     }
 
+    xfree(cnt);
+
     return cpu;
 }
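Note the graceful fallback above: if the allocation fails, cnt stays NULL
and the '!cnt ||' arm makes every candidate compare as acceptable, so vcpu0
still lands on some permitted CPU, just not necessarily the least loaded
one. The same logic in isolation (hypothetical helper):

    /* With no load counts available, every candidate passes the
     * comparison and the last acceptable CPU wins; placement still
     * succeeds, only less optimally. */
    static unsigned int pick_least_loaded(const unsigned int *cnt,
                                          unsigned int nr_cpus)
    {
        unsigned int i, cpu = 0;

        for ( i = 1; i < nr_cpus; i++ )
            if ( !cnt || cnt[i] <= cnt[cpu] )
                cpu = i;
        return cpu;
    }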
 
Index: 2008-09-19/xen/common/keyhandler.c
===================================================================
--- 2008-09-19.orig/xen/common/keyhandler.c     2008-09-19 13:56:32.000000000 +0200
+++ 2008-09-19/xen/common/keyhandler.c  2008-09-19 13:59:19.000000000 +0200
@@ -172,7 +172,8 @@ static void dump_domains(unsigned char k
     struct domain *d;
     struct vcpu   *v;
     s_time_t       now = NOW();
-    char           tmpstr[100];
+    static char    tmpstr[4 * NR_CPUS];
+    BUILD_BUG_ON(NR_CPUS > 10000);
 
     printk("'%c' pressed -> dumping domain info (now=0x%X:%08X)\n", key,
            (u32)(now>>32), (u32)now);
Index: 2008-09-19/xen/common/sched_credit.c
===================================================================
--- 2008-09-19.orig/xen/common/sched_credit.c   2008-09-19 13:56:32.000000000 +0200
+++ 2008-09-19/xen/common/sched_credit.c        2008-09-19 13:59:19.000000000 +0200
@@ -1258,14 +1258,17 @@ csched_dump_pcpu(int cpu)
     struct csched_pcpu *spc;
     struct csched_vcpu *svc;
     int loop;
+    static char sibling_buf[(NR_CPUS / 32 + 1) * 9];
+    static char core_buf[(NR_CPUS / 32 + 1) * 9];
 
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
 
-    printk(" sort=%d, sibling=0x%lx, core=0x%lx\n",
-            spc->runq_sort_last,
-            cpu_sibling_map[cpu].bits[0],
-            cpu_core_map[cpu].bits[0]);
+    cpumask_scnprintf(sibling_buf, ARRAY_SIZE(sibling_buf),
+                      cpu_sibling_map[cpu]);
+    cpumask_scnprintf(core_buf, ARRAY_SIZE(core_buf), cpu_core_map[cpu]);
+    printk(" sort=%d, sibling=%s, core=%s\n",
+           spc->runq_sort_last, sibling_buf, core_buf);
 
     /* current VCPU */
     svc = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
@@ -1292,6 +1295,7 @@ csched_dump(void)
 {
     struct list_head *iter_sdom, *iter_svc;
     int loop;
+    static char idlers_buf[(NR_CPUS / 32 + 1) * 9];
 
     printk("info:\n"
            "\tncpus              = %u\n"
@@ -1317,7 +1321,8 @@ csched_dump(void)
            CSCHED_TICKS_PER_TSLICE,
            CSCHED_TICKS_PER_ACCT);
 
-    printk("idlers: 0x%lx\n", csched_priv.idlers.bits[0]);
+    cpumask_scnprintf(idlers_buf, ARRAY_SIZE(idlers_buf), csched_priv.idlers);
+    printk("idlers: %s\n", idlers_buf);
 
     CSCHED_STATS_PRINTK();
 
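On the buffer sizing used above: cpumask_scnprintf() renders a mask as
comma-separated 32-bit hex words, so each 32 CPUs need at most 8 hex digits
plus one separator, i.e. 9 characters; the + 1 chunk absorbs a partial
trailing word and the NUL. Assuming that output format:

    /* Sizing sketch: 9 characters per 32-bit chunk ("xxxxxxxx,"),
     * with one spare chunk for a partial word and the terminator. */
    #define CPUMASK_BUF_LEN ((NR_CPUS / 32 + 1) * 9)
    static char mask_buf[CPUMASK_BUF_LEN];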
Index: 2008-09-19/xen/common/sched_sedf.c
===================================================================
--- 2008-09-19.orig/xen/common/sched_sedf.c     2008-09-19 13:56:32.000000000 +0200
+++ 2008-09-19/xen/common/sched_sedf.c  2008-09-19 13:59:19.000000000 +0200
@@ -1298,8 +1298,18 @@ static int sedf_adjust_weights(struct xe
 {
     struct vcpu *p;
     struct domain      *d;
-    int                 sumw[NR_CPUS] = { 0 };
-    s_time_t            sumt[NR_CPUS] = { 0 };
+    unsigned int        nr_cpus = last_cpu(cpu_possible_map) + 1;
+    int                *sumw = xmalloc_array(int, nr_cpus);
+    s_time_t           *sumt = xmalloc_array(s_time_t, nr_cpus);
+
+    if ( !sumw || !sumt )
+    {
+        xfree(sumt);
+        xfree(sumw);
+        return -ENOMEM;
+    }
+    memset(sumw, 0, nr_cpus * sizeof(*sumw));
+    memset(sumt, 0, nr_cpus * sizeof(*sumt));
 
     /* Sum across all weights. */
     rcu_read_lock(&domlist_read_lock);
@@ -1348,6 +1358,9 @@ static int sedf_adjust_weights(struct xe
     }
     rcu_read_unlock(&domlist_read_lock);
 
+    xfree(sumt);
+    xfree(sumw);
+
     return 0;
 }
 
@@ -1356,6 +1369,7 @@ static int sedf_adjust_weights(struct xe
 static int sedf_adjust(struct domain *p, struct xen_domctl_scheduler_op *op)
 {
     struct vcpu *v;
+    int rc;
 
     PRINT(2,"sedf_adjust was called, domain-id %i new period %"PRIu64" "
           "new slice %"PRIu64"\nlatency %"PRIu64" extra:%s\n",
@@ -1411,8 +1425,9 @@ static int sedf_adjust(struct domain *p,
             }
         }
 
-        if ( sedf_adjust_weights(op) )
-            return -EINVAL;
+        rc = sedf_adjust_weights(op);
+        if ( rc )
+            return rc;
 
         for_each_vcpu ( p, v )
         {
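Two details in the sedf changes: the joint failure path may xfree() both
pointers because xfree(NULL) is a no-op (as with standard free()), and
sedf_adjust() now propagates the callee's return value so the new -ENOMEM
reaches the caller instead of being flattened to -EINVAL. The propagation
idiom, in general form:

    /* Keep the callee's specific errno-style value instead of
     * overwriting it with a generic -EINVAL. */
    rc = sedf_adjust_weights(op);
    if ( rc )
        return rc;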
Index: 2008-09-19/xen/common/vsprintf.c
===================================================================
--- 2008-09-19.orig/xen/common/vsprintf.c       2008-09-19 13:56:32.000000000 +0200
+++ 2008-09-19/xen/common/vsprintf.c    2008-09-19 13:59:19.000000000 +0200
@@ -272,7 +272,11 @@ int vsnprintf(char *buf, size_t size, co
                                 /* 'z' changed to 'Z' --davidm 1/25/99 */
 
     /* Reject out-of-range values early */
-    BUG_ON((int)size < 0);
+    if ((int)size < 0 || (unsigned int)size != size) {
+        static int warn = 1;
+        WARN_ON(warn);
+        return warn = 0;
+    }
 
     str = buf;
     end = buf + size - 1;
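Why the extra (unsigned int)size != size test: size_t is 64 bits wide on
x86-64, so a huge size can carry an innocuous-looking low half that the old
BUG_ON((int)size < 0) never notices; the static flag then makes the warning
fire only once. A standalone (hosted C) illustration of the check:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t size = 0x100000001UL;    /* needs 64-bit size_t (LP64) */

        /* (int)size is 1, so the old check passes; the truncation
         * test catches the out-of-range value. */
        printf("old check trips: %d, new check trips: %d\n",
               (int)size < 0, (unsigned int)size != size);
        return 0;
    }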
Index: 2008-09-19/xen/include/xen/cpumask.h
===================================================================
--- 2008-09-19.orig/xen/include/xen/cpumask.h   2008-09-19 13:56:32.000000000 +0200
+++ 2008-09-19/xen/include/xen/cpumask.h        2008-09-19 13:59:19.000000000 +0200
@@ -225,8 +225,10 @@ static inline int __next_cpu(int n, cons
 #define last_cpu(src) __last_cpu(&(src), NR_CPUS)
 static inline int __last_cpu(const cpumask_t *srcp, int nbits)
 {
-       int cpu, pcpu = NR_CPUS;
-       for (cpu = first_cpu(*srcp); cpu < NR_CPUS; cpu = next_cpu(cpu, *srcp))
+       int cpu, pcpu = cpus_weight(*srcp) - 1;
+       if (pcpu < 0)
+               pcpu = NR_CPUS;
+       for (cpu = pcpu; cpu < NR_CPUS; cpu = next_cpu(cpu, *srcp))
                pcpu = cpu;
        return pcpu;
 }


