[Xen-devel] [PATCH 05/12] allocate CPU sibling and core maps dynamically



... thus reducing the per-CPU data area size back to one page even when
building for large NR_CPUS.

At the same time, eliminate the old __cpu{mask,list}_scnprintf() helpers.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
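
For reference, the conversion below relies on the cpumask_var_t idiom: small
NR_CPUS builds can keep the mask embedded, while large NR_CPUS builds make
cpumask_var_t a pointer whose bitmap is allocated on demand, which is what
shrinks the per-CPU data area back to one page. A minimal sketch of the
pointer case follows; it is simplified, and the authoritative definitions
live in xen/include/xen/cpumask.h:

    /* Sketch only, assuming the large-NR_CPUS (pointer) variant. */
    typedef cpumask_t *cpumask_var_t;

    static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
    {
        /* Zeroed allocation, so callers need no cpumask_clear(). */
        *mask = xzalloc(cpumask_t);
        return *mask != NULL;
    }

    static inline void free_cpumask_var(cpumask_var_t mask)
    {
        xfree(mask);
    }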

--- 2011-10-18.orig/xen/arch/ia64/linux-xen/setup.c     2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/ia64/linux-xen/setup.c  2011-10-14 15:53:37.000000000 +0200
@@ -577,8 +577,12 @@ late_setup_arch (char **cmdline_p)
 
        cpu_physical_id(0) = hard_smp_processor_id();
 
-       cpu_set(0, per_cpu(cpu_sibling_map, 0));
-       cpu_set(0, per_cpu(cpu_core_map, 0));
+       if (!zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, 0)) ||
+            !zalloc_cpumask_var(&per_cpu(cpu_core_mask, 0)))
+               panic("No memory for boot CPU sibling/core maps\n");
+
+       cpumask_set_cpu(0, per_cpu(cpu_sibling_mask, 0));
+       cpumask_set_cpu(0, per_cpu(cpu_core_mask, 0));
 
        check_for_logical_procs();
        if (smp_num_cpucores > 1)
--- 2011-10-18.orig/xen/arch/ia64/linux-xen/smpboot.c   2011-10-11 17:00:51.000000000 +0200
+++ 2011-10-18/xen/arch/ia64/linux-xen/smpboot.c        2011-10-14 16:00:49.000000000 +0200
@@ -144,8 +144,8 @@ EXPORT_SYMBOL(cpu_online_map);
 cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_possible_map);
 
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_t, cpu_core_map);
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_t, cpu_sibling_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_mask);
 int smp_num_siblings = 1;
 int smp_num_cpucores = 1;
 
@@ -687,13 +687,13 @@ clear_cpu_sibling_map(int cpu)
 {
        int i;
 
-       for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-               cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
-       for_each_cpu_mask(i, per_cpu(cpu_core_map, cpu))
-               cpumask_clear_cpu(cpu, &per_cpu(cpu_core_map, i));
+       for_each_cpu_mask(i, *per_cpu(cpu_sibling_mask, cpu))
+               cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, i));
+       for_each_cpu_mask(i, *per_cpu(cpu_core_mask, cpu))
+               cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, i));
 
-       cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
-       cpumask_clear(&per_cpu(cpu_core_map, cpu));
+       cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
+       cpumask_clear(per_cpu(cpu_core_mask, cpu));
 }
 
 static void
@@ -703,12 +703,12 @@ remove_siblinginfo(int cpu)
 
        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
-               cpu_clear(cpu, per_cpu(cpu_core_map, cpu));
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
+               cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, cpu));
+               cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
                return;
        }
 
-       last = (cpus_weight(per_cpu(cpu_core_map, cpu)) == 1);
+       last = (cpumask_weight(per_cpu(cpu_core_mask, cpu)) == 1);
 
        /* remove it from all sibling map's */
        clear_cpu_sibling_map(cpu);
@@ -794,11 +794,11 @@ set_cpu_sibling_map(int cpu)
 
        for_each_online_cpu(i) {
                if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
-                       cpu_set(i, per_cpu(cpu_core_map, cpu));
-                       cpu_set(cpu, per_cpu(cpu_core_map, i));
+                       cpumask_set_cpu(i, per_cpu(cpu_core_mask, cpu));
+                       cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, i));
                        if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
-                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+                               cpumask_set_cpu(i, per_cpu(cpu_sibling_mask, cpu));
+                               cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, i));
                        }
                }
        }
@@ -821,6 +821,14 @@ __cpu_up (unsigned int cpu)
        if (cpu_isset(cpu, cpu_callin_map))
                return -EINVAL;
 
+       if (!per_cpu(cpu_sibling_mask, cpu) &&
+            !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)))
+               return -ENOMEM;
+
+       if (!per_cpu(cpu_core_mask, cpu) &&
+            !zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)))
+               return -ENOMEM;
+
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        /* Processor goes to start_secondary(), sets online flag */
        ret = do_boot_cpu(sapicid, cpu);
@@ -829,8 +837,8 @@ __cpu_up (unsigned int cpu)
 
        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
-               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-               cpu_set(cpu, per_cpu(cpu_core_map, cpu));
+               cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
+               cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, cpu));
                return 0;
        }
 
--- 2011-10-18.orig/xen/arch/ia64/xen/dom0_ops.c        2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/ia64/xen/dom0_ops.c     2011-10-14 15:49:47.000000000 +0200
@@ -594,9 +594,9 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
         xen_sysctl_physinfo_t *pi = &op->u.physinfo;
 
         memset(pi, 0, sizeof(*pi));
-        pi->threads_per_core = cpus_weight(per_cpu(cpu_sibling_map, 0));
+        pi->threads_per_core = cpumask_weight(per_cpu(cpu_sibling_mask, 0));
         pi->cores_per_socket =
-            cpus_weight(per_cpu(cpu_core_map, 0)) / pi->threads_per_core;
+            cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
         pi->nr_nodes         = (u32)num_online_nodes();
         pi->nr_cpus          = (u32)num_online_cpus();
         pi->total_pages      = total_pages; 
--- 2011-10-18.orig/xen/arch/ia64/xen/tlb_track.c       2011-10-11 16:58:01.000000000 +0200
+++ 2011-10-18/xen/arch/ia64/xen/tlb_track.c    2011-10-14 16:04:14.000000000 +0200
@@ -504,7 +504,7 @@ __tlb_track_entry_printf(const char* fun
     char pcpumask_buf[NR_CPUS + 1];
     char vcpumask_buf[MAX_VIRT_CPUS + 1];
     cpumask_scnprintf(pcpumask_buf, sizeof(pcpumask_buf),
-                      entry->pcpu_dirty_mask);
+                      &entry->pcpu_dirty_mask);
     vcpumask_scnprintf(vcpumask_buf, sizeof(vcpumask_buf),
                        entry->vcpu_dirty_mask);
     printk("%s:%d\n"
--- 2011-10-18.orig/xen/arch/x86/cpu/mcheck/mce_intel.c 2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/x86/cpu/mcheck/mce_intel.c      2011-10-18 13:41:52.000000000 +0200
@@ -867,7 +867,7 @@ static void intel_machine_check(struct c
         {
             char *ebufp, ebuf[96] = "MCE: Fatal error happened on CPUs ";
             ebufp = ebuf + strlen(ebuf);
-            cpumask_scnprintf(ebufp, 95 - strlen(ebuf), mce_fatal_cpus);
+            cpumask_scnprintf(ebufp, 95 - strlen(ebuf), &mce_fatal_cpus);
             mc_panic(ebuf);
         }
         atomic_set(&found_error, 0);
--- 2011-10-18.orig/xen/arch/x86/irq.c  2011-10-18 13:40:56.000000000 +0200
+++ 2011-10-18/xen/arch/x86/irq.c       2011-10-18 13:41:41.000000000 +0200
@@ -1998,7 +1998,7 @@ static void dump_irqs(unsigned char key)
         spin_lock_irqsave(&desc->lock, flags);
 
         cpumask_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
-                          desc->affinity);
+                          &desc->affinity);
         printk("   IRQ:%4d affinity:%s vec:%02x type=%-15s"
                " status=%08x ",
                irq, keyhandler_scratch, desc->arch.vector,
--- 2011-10-18.orig/xen/arch/x86/mm.c   2011-10-20 14:47:26.000000000 +0200
+++ 2011-10-18/xen/arch/x86/mm.c        2011-10-20 14:47:37.000000000 +0200
@@ -3201,7 +3201,7 @@ int do_mmuext_op(
                 cpumask_clear(&mask);
                 for_each_online_cpu(cpu)
                     if ( !cpumask_intersects(&mask,
-                                             &per_cpu(cpu_sibling_map, cpu)) )
+                                             per_cpu(cpu_sibling_mask, cpu)) )
                         cpumask_set_cpu(cpu, &mask);
                 flush_mask(&mask, FLUSH_CACHE);
             }
--- 2011-10-18.orig/xen/arch/x86/oprofile/op_model_p4.c 2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/x86/oprofile/op_model_p4.c      2011-10-14 16:05:38.000000000 +0200
@@ -385,7 +385,7 @@ static unsigned int get_stagger(void)
 {
 #ifdef CONFIG_SMP
        int cpu = smp_processor_id();
-       return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
+       return (cpu != cpumask_first(per_cpu(cpu_sibling_mask, cpu)));
 #endif 
        return 0;
 }
--- 2011-10-18.orig/xen/arch/x86/smpboot.c      2011-10-14 15:08:45.000000000 +0200
+++ 2011-10-18/xen/arch/x86/smpboot.c   2011-10-14 16:05:11.000000000 +0200
@@ -51,9 +51,9 @@
 unsigned long __read_mostly trampoline_phys;
 
 /* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_t, cpu_sibling_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_mask);
 /* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_t, cpu_core_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);
 
 cpumask_t cpu_online_map __read_mostly;
 EXPORT_SYMBOL(cpu_online_map);
@@ -233,10 +233,10 @@ static cpumask_t cpu_sibling_setup_map;
 
 static void link_thread_siblings(int cpu1, int cpu2)
 {
-    cpu_set(cpu1, per_cpu(cpu_sibling_map, cpu2));
-    cpu_set(cpu2, per_cpu(cpu_sibling_map, cpu1));
-    cpu_set(cpu1, per_cpu(cpu_core_map, cpu2));
-    cpu_set(cpu2, per_cpu(cpu_core_map, cpu1));
+    cpumask_set_cpu(cpu1, per_cpu(cpu_sibling_mask, cpu2));
+    cpumask_set_cpu(cpu2, per_cpu(cpu_sibling_mask, cpu1));
+    cpumask_set_cpu(cpu1, per_cpu(cpu_core_mask, cpu2));
+    cpumask_set_cpu(cpu2, per_cpu(cpu_core_mask, cpu1));
 }
 
 static void set_cpu_sibling_map(int cpu)
@@ -262,13 +262,13 @@ static void set_cpu_sibling_map(int cpu)
     }
     else
     {
-        cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+        cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
     }
 
     if ( c[cpu].x86_max_cores == 1 )
     {
-        cpumask_copy(&per_cpu(cpu_core_map, cpu),
-                     &per_cpu(cpu_sibling_map, cpu));
+        cpumask_copy(per_cpu(cpu_core_mask, cpu),
+                     per_cpu(cpu_sibling_mask, cpu));
         c[cpu].booted_cores = 1;
         return;
     }
@@ -277,18 +277,18 @@ static void set_cpu_sibling_map(int cpu)
     {
         if ( c[cpu].phys_proc_id == c[i].phys_proc_id )
         {
-            cpu_set(i, per_cpu(cpu_core_map, cpu));
-            cpu_set(cpu, per_cpu(cpu_core_map, i));
+            cpumask_set_cpu(i, per_cpu(cpu_core_mask, cpu));
+            cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, i));
             /*
              *  Does this new cpu bringup a new core?
              */
-            if ( cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1 )
+            if ( cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) == 1 )
             {
                 /*
                  * for each core in package, increment
                  * the booted_cores for this new cpu
                  */
-                if ( first_cpu(per_cpu(cpu_sibling_map, i)) == i )
+                if ( cpumask_first(per_cpu(cpu_sibling_mask, i)) == i )
                     c[cpu].booted_cores++;
                 /*
                  * increment the core count for all
@@ -641,13 +641,14 @@ static void cpu_smpboot_free(unsigned in
 {
     unsigned int order;
 
+    free_cpumask_var(per_cpu(cpu_sibling_mask, cpu));
+    free_cpumask_var(per_cpu(cpu_core_mask, cpu));
+
     order = get_order_from_pages(NR_RESERVED_GDT_PAGES);
     free_xenheap_pages(per_cpu(gdt_table, cpu), order);
-    per_cpu(gdt_table, cpu) = NULL;
 
 #ifdef __x86_64__
     free_xenheap_pages(per_cpu(compat_gdt_table, cpu), order);
-    per_cpu(compat_gdt_table, cpu) = NULL;
 #endif
 
     order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
@@ -696,7 +697,9 @@ static int cpu_smpboot_alloc(unsigned in
         goto oom;
     memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES * sizeof(idt_entry_t));
 
-    return 0;
+    if ( zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) &&
+         zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
+        return 0;
 
  oom:
     cpu_smpboot_free(cpu);
@@ -744,6 +747,10 @@ void __init smp_prepare_cpus(unsigned in
 
     stack_base[0] = stack_start.esp;
 
+    if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, 0)) ||
+         !zalloc_cpumask_var(&per_cpu(cpu_core_mask, 0)) )
+        panic("No memory for boot CPU sibling/core maps\n");
+
     set_cpu_sibling_map(0);
 
     /*
@@ -760,8 +767,6 @@ void __init smp_prepare_cpus(unsigned in
             printk(KERN_NOTICE "Local APIC not detected."
                    " Using dummy APIC emulation.\n");
         map_cpu_to_logical_apicid();
-        cpu_set(0, per_cpu(cpu_sibling_map, 0));
-        cpu_set(0, per_cpu(cpu_core_map, 0));
         return;
     }
 
@@ -792,13 +797,6 @@ void __init smp_prepare_cpus(unsigned in
     setup_local_APIC();
     map_cpu_to_logical_apicid();
 
-    /*
-     * construct cpu_sibling_map, so that we can tell sibling CPUs
-     * efficiently.
-     */
-    cpu_set(0, per_cpu(cpu_sibling_map, 0));
-    cpu_set(0, per_cpu(cpu_core_map, 0));
-
     smpboot_setup_io_apic();
 
     setup_boot_APIC_clock();
@@ -816,18 +814,18 @@ remove_siblinginfo(int cpu)
     int sibling;
     struct cpuinfo_x86 *c = cpu_data;
 
-    for_each_cpu_mask ( sibling, per_cpu(cpu_core_map, cpu) )
+    for_each_cpu_mask ( sibling, *per_cpu(cpu_core_mask, cpu) )
     {
-        cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+        cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, sibling));
         /* Last thread sibling in this cpu core going down. */
-        if ( cpumask_weight(&per_cpu(cpu_sibling_map, cpu)) == 1 )
+        if ( cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) == 1 )
             c[sibling].booted_cores--;
     }
    
-    for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-        cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, sibling));
-    cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
-    cpumask_clear(&per_cpu(cpu_core_map, cpu));
+    for_each_cpu_mask(sibling, *per_cpu(cpu_sibling_mask, cpu))
+        cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, sibling));
+    cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
+    cpumask_clear(per_cpu(cpu_core_mask, cpu));
     c[cpu].phys_proc_id = BAD_APICID;
     c[cpu].cpu_core_id = BAD_APICID;
     c[cpu].compute_unit_id = BAD_APICID;
--- 2011-10-18.orig/xen/arch/x86/sysctl.c       2011-09-21 16:44:47.000000000 +0200
+++ 2011-10-18/xen/arch/x86/sysctl.c    2011-10-14 15:48:20.000000000 +0200
@@ -76,9 +76,9 @@ long arch_do_sysctl(
 
         memset(pi, 0, sizeof(*pi));
         pi->threads_per_core =
-            cpus_weight(per_cpu(cpu_sibling_map, 0));
+            cpumask_weight(per_cpu(cpu_sibling_mask, 0));
         pi->cores_per_socket =
-            cpus_weight(per_cpu(cpu_core_map, 0)) / pi->threads_per_core;
+            cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
         pi->nr_cpus = num_online_cpus();
         pi->nr_nodes = num_online_nodes();
         pi->max_node_id = MAX_NUMNODES-1;
--- 2011-10-18.orig/xen/common/domctl.c 2011-10-18 13:32:43.000000000 +0200
+++ 2011-10-18/xen/common/domctl.c      2011-10-18 13:41:58.000000000 +0200
@@ -171,7 +171,7 @@ static unsigned int default_vcpu0_locati
      * If we're on a HT system, we only auto-allocate to a non-primary HT. We 
      * favour high numbered CPUs in the event of a tie.
      */
-    cpumask_copy(&cpu_exclude_map, &per_cpu(cpu_sibling_map, 0));
+    cpumask_copy(&cpu_exclude_map, per_cpu(cpu_sibling_mask, 0));
     cpu = cpumask_first(&cpu_exclude_map);
     if ( cpumask_weight(&cpu_exclude_map) > 1 )
         cpu = cpumask_next(cpu, &cpu_exclude_map);
@@ -179,11 +179,11 @@ static unsigned int default_vcpu0_locati
     {
         if ( cpumask_test_cpu(i, &cpu_exclude_map) )
             continue;
-        if ( (i == cpumask_first(&per_cpu(cpu_sibling_map, i))) &&
-             (cpumask_weight(&per_cpu(cpu_sibling_map, i)) > 1) )
+        if ( (i == cpumask_first(per_cpu(cpu_sibling_mask, i))) &&
+             (cpumask_weight(per_cpu(cpu_sibling_mask, i)) > 1) )
             continue;
         cpumask_or(&cpu_exclude_map, &cpu_exclude_map,
-                   &per_cpu(cpu_sibling_map, i));
+                   per_cpu(cpu_sibling_mask, i));
         if ( !cnt || cnt[i] <= cnt[cpu] )
             cpu = i;
     }
--- 2011-10-18.orig/xen/common/keyhandler.c     2011-10-13 13:27:55.000000000 +0200
+++ 2011-10-18/xen/common/keyhandler.c  2011-10-14 16:04:31.000000000 +0200
@@ -210,7 +210,7 @@ static struct keyhandler reboot_machine_
 static void cpuset_print(char *set, int size, const cpumask_t *mask)
 {
     *set++ = '{';
-    set += cpulist_scnprintf(set, size-2, *mask);
+    set += cpulist_scnprintf(set, size-2, mask);
     *set++ = '}';
     *set++ = '\0';
 }
--- 2011-10-18.orig/xen/common/sched_credit.c   2011-10-12 08:38:35.000000000 +0200
+++ 2011-10-18/xen/common/sched_credit.c        2011-10-14 16:01:03.000000000 +0200
@@ -502,23 +502,23 @@ _csched_cpu_pick(const struct scheduler 
 
         nxt = cpumask_cycle(cpu, &cpus);
 
-        if ( cpumask_test_cpu(cpu, &per_cpu(cpu_core_map, nxt)) )
+        if ( cpumask_test_cpu(cpu, per_cpu(cpu_core_mask, nxt)) )
         {
             /* We're on the same socket, so check the busy-ness of threads.
              * Migrate if # of idlers is less at all */
-            ASSERT( cpumask_test_cpu(nxt, &per_cpu(cpu_core_map, cpu)) );
+            ASSERT( cpumask_test_cpu(nxt, per_cpu(cpu_core_mask, cpu)) );
             migrate_factor = 1;
-            cpumask_and(&cpu_idlers, &idlers, &per_cpu(cpu_sibling_map, cpu));
-            cpumask_and(&nxt_idlers, &idlers, &per_cpu(cpu_sibling_map, nxt));
+            cpumask_and(&cpu_idlers, &idlers, per_cpu(cpu_sibling_mask, cpu));
+            cpumask_and(&nxt_idlers, &idlers, per_cpu(cpu_sibling_mask, nxt));
         }
         else
         {
             /* We're on different sockets, so check the busy-ness of cores.
              * Migrate only if the other core is twice as idle */
-            ASSERT( !cpumask_test_cpu(nxt, &per_cpu(cpu_core_map, cpu)) );
+            ASSERT( !cpumask_test_cpu(nxt, per_cpu(cpu_core_mask, cpu)) );
             migrate_factor = 2;
-            cpumask_and(&cpu_idlers, &idlers, &per_cpu(cpu_core_map, cpu));
-            cpumask_and(&nxt_idlers, &idlers, &per_cpu(cpu_core_map, nxt));
+            cpumask_and(&cpu_idlers, &idlers, per_cpu(cpu_core_mask, cpu));
+            cpumask_and(&nxt_idlers, &idlers, per_cpu(cpu_core_mask, nxt));
         }
 
         weight_cpu = cpumask_weight(&cpu_idlers);
@@ -531,7 +531,7 @@ _csched_cpu_pick(const struct scheduler 
             cpumask_and(&nxt_idlers, &cpus, &nxt_idlers);
             spc = CSCHED_PCPU(nxt);
             cpu = cpumask_cycle(spc->idle_bias, &nxt_idlers);
-            cpumask_andnot(&cpus, &cpus, &per_cpu(cpu_sibling_map, cpu));
+            cpumask_andnot(&cpus, &cpus, per_cpu(cpu_sibling_mask, cpu));
         }
         else
         {
@@ -1419,9 +1419,9 @@ csched_dump_pcpu(const struct scheduler 
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_map, cpu));
+    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
     printk(" sort=%d, sibling=%s, ", spc->runq_sort_last, cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_map, cpu));
+    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
     printk("core=%s\n", cpustr);
 
     /* current VCPU */
@@ -1481,7 +1481,7 @@ csched_dump(const struct scheduler *ops)
            prv->ticks_per_tslice,
            vcpu_migration_delay);
 
-    cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), prv->idlers);
+    cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), &prv->idlers);
     printk("idlers: %s\n", idlers_buf);
 
     printk("active vcpus:\n");
--- 2011-10-18.orig/xen/common/sched_credit2.c  2011-10-11 18:03:39.000000000 +0200
+++ 2011-10-18/xen/common/sched_credit2.c       2011-10-14 15:46:58.000000000 +0200
@@ -1767,9 +1767,9 @@ csched_dump_pcpu(const struct scheduler 
 
     runq = &RQD(ops, cpu)->runq;
 
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_map,cpu));
+    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
     printk(" sibling=%s, ", cpustr);
-    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_map,cpu));
+    cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
     printk("core=%s\n", cpustr);
 
     /* current VCPU */
--- 2011-10-18.orig/xen/include/asm-ia64/linux-xen/asm/smp.h    2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/include/asm-ia64/linux-xen/asm/smp.h 2011-10-14 15:43:35.000000000 +0200
@@ -62,8 +62,8 @@ extern char no_int_routing __devinitdata
 extern cpumask_t cpu_online_map;
 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
 
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
 extern int smp_num_siblings;
 extern int smp_num_cpucores;
 extern void __iomem *ipi_base_addr;
--- 2011-10-18.orig/xen/include/asm-x86/smp.h   2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/include/asm-x86/smp.h        2011-10-14 15:43:54.000000000 +0200
@@ -25,8 +25,8 @@
  */
  
 extern void smp_alloc_memory(void);
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
 
 void smp_send_nmi_allbutself(void);
 
--- 2011-10-18.orig/xen/include/xen/cpumask.h   2011-10-19 17:30:16.000000000 +0200
+++ 2011-10-18/xen/include/xen/cpumask.h        2011-10-19 17:30:43.000000000 +0200
@@ -320,20 +320,16 @@ static inline const cpumask_t *cpumask_o
 #define cpus_addr(src) ((src).bits)
 #define cpumask_bits(maskp) ((maskp)->bits)
 
-#define cpumask_scnprintf(buf, len, src) \
-       __cpumask_scnprintf((buf), (len), &(src), nr_cpu_ids)
-static inline int __cpumask_scnprintf(char *buf, int len,
-                                       const cpumask_t *srcp, int nbits)
+static inline int cpumask_scnprintf(char *buf, int len,
+                                   const cpumask_t *srcp)
 {
-       return bitmap_scnprintf(buf, len, srcp->bits, nbits);
+       return bitmap_scnprintf(buf, len, srcp->bits, nr_cpu_ids);
 }
 
-#define cpulist_scnprintf(buf, len, src) \
-       __cpulist_scnprintf((buf), (len), &(src), nr_cpu_ids)
-static inline int __cpulist_scnprintf(char *buf, int len,
-                                       const cpumask_t *srcp, int nbits)
+static inline int cpulist_scnprintf(char *buf, int len,
+                                   const cpumask_t *srcp)
 {
-       return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
+       return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpu_ids);
 }
 
 /*
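
A note on the resulting calling convention of the print helpers: both now
take the mask by pointer, so a plain cpumask_t object is passed by address
while a cpumask_var_t, already being a pointer, is passed directly; this is
exactly the caller adjustment visible in the hunks above. For illustration
(buffer and mask names here are made up):

    char buf[NR_CPUS + 1];    /* sized as in the tlb_track.c hunk */
    cpumask_t obj_mask;       /* embedded mask: pass its address */
    cpumask_var_t dyn_mask;   /* dynamically allocated: already a pointer */

    cpumask_scnprintf(buf, sizeof(buf), &obj_mask);  /* cf. &mce_fatal_cpus */
    cpumask_scnprintf(buf, sizeof(buf), dyn_mask);   /* cf. per_cpu(cpu_core_mask, cpu) */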

