To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] cpupools: allocate CPU masks dynamically
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Tue, 25 Oct 2011 01:55:13 +0100
Delivery-date: Mon, 24 Oct 2011 17:57:05 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1319183015 -7200
# Node ID 53528bab2eb423c352d6d43963b9bb7ee16abf18
# Parent  2682094bc243f96d1187271595ecefee333ec11d
cpupools: allocate CPU masks dynamically

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---
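
For context, below is a minimal self-contained sketch of the allocation
pattern this changeset adopts. It is not Xen code: calloc()/free() stand in
for Xen's xzalloc()/xfree(), and the toy cpumask types stand in for the real
ones. The point it illustrates is that cpu_valid moves from an embedded
cpumask_t to a separately allocated, pointer-like cpumask_var_t, so
allocation can now fail and must be checked, and freeing must release the
mask as well as the containing structure.

    #include <stdlib.h>

    #define NR_CPUS 256
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Toy equivalents of Xen's types: cpumask_var_t is pointer-like, which
     * is why callers drop the '&' when passing cpu_valid after this patch. */
    typedef struct { unsigned long bits[NR_CPUS / BITS_PER_LONG]; } cpumask_t;
    typedef cpumask_t *cpumask_var_t;

    static int zalloc_cpumask_var(cpumask_var_t *mask)
    {
        *mask = calloc(1, sizeof(cpumask_t)); /* zeroed, like Xen's variant */
        return *mask != NULL;                 /* allocation can fail */
    }

    static void free_cpumask_var(cpumask_var_t mask)
    {
        free(mask);
    }

    struct cpupool {
        int           cpupool_id;
        cpumask_var_t cpu_valid;  /* was: cpumask_t cpu_valid */
    };

    static struct cpupool *alloc_cpupool_struct(void)
    {
        struct cpupool *c = calloc(1, sizeof(*c));

        /* Both allocations must succeed; on failure undo and return NULL. */
        if ( c && zalloc_cpumask_var(&c->cpu_valid) )
            return c;
        free(c);  /* free(NULL) is a no-op, matching Xen's xfree() */
        return NULL;
    }

    static void free_cpupool_struct(struct cpupool *c)
    {
        if ( c )
            free_cpumask_var(c->cpu_valid);
        free(c);
    }

    int main(void)
    {
        struct cpupool *pool = alloc_cpupool_struct();

        free_cpupool_struct(pool);  /* safe even if allocation failed */
        return 0;
    }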


diff -r 2682094bc243 -r 53528bab2eb4 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Fri Oct 21 09:42:47 2011 +0200
+++ b/xen/arch/x86/domain_build.c       Fri Oct 21 09:43:35 2011 +0200
@@ -885,10 +885,10 @@
 
     printk("Dom0 has maximum %u VCPUs\n", opt_dom0_max_vcpus);
 
-    cpu = first_cpu(cpupool0->cpu_valid);
+    cpu = cpumask_first(cpupool0->cpu_valid);
     for ( i = 1; i < opt_dom0_max_vcpus; i++ )
     {
-        cpu = cycle_cpu(cpu, cpupool0->cpu_valid);
+        cpu = cpumask_cycle(cpu, cpupool0->cpu_valid);
         (void)alloc_vcpu(d, i, cpu);
     }
 
diff -r 2682094bc243 -r 53528bab2eb4 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Fri Oct 21 09:42:47 2011 +0200
+++ b/xen/arch/x86/smpboot.c    Fri Oct 21 09:43:35 2011 +0200
@@ -850,8 +850,8 @@
     remove_siblinginfo(cpu);
 
     /* It's now safe to remove this processor from the online map */
-    cpu_clear(cpu, cpupool0->cpu_valid);
-    cpu_clear(cpu, cpu_online_map);
+    cpumask_clear_cpu(cpu, cpupool0->cpu_valid);
+    cpumask_clear_cpu(cpu, &cpu_online_map);
     fixup_irqs();
 
     if ( cpu_disable_scheduler(cpu) )
diff -r 2682094bc243 -r 53528bab2eb4 xen/common/cpupool.c
--- a/xen/common/cpupool.c      Fri Oct 21 09:42:47 2011 +0200
+++ b/xen/common/cpupool.c      Fri Oct 21 09:43:35 2011 +0200
@@ -39,11 +39,18 @@
 
 static struct cpupool *alloc_cpupool_struct(void)
 {
-    return xzalloc(struct cpupool);
+    struct cpupool *c = xzalloc(struct cpupool);
+
+    if ( c && zalloc_cpumask_var(&c->cpu_valid) )
+        return c;
+    xfree(c);
+    return NULL;
 }
 
 static void free_cpupool_struct(struct cpupool *c)
 {
+    if ( c )
+        free_cpumask_var(c->cpu_valid);
     xfree(c);
 }
 
@@ -191,7 +198,7 @@
         spin_unlock(&cpupool_lock);
         return -ENOENT;
     }
-    if ( (c->n_dom != 0) || cpus_weight(c->cpu_valid) )
+    if ( (c->n_dom != 0) || cpumask_weight(c->cpu_valid) )
     {
         spin_unlock(&cpupool_lock);
         return -EBUSY;
@@ -232,7 +239,7 @@
         cpupool_put(cpupool_cpu_moving);
         cpupool_cpu_moving = NULL;
     }
-    cpu_set(cpu, c->cpu_valid);
+    cpumask_set_cpu(cpu, c->cpu_valid);
     return 0;
 }
 
@@ -296,10 +303,10 @@
         goto out;
 
     ret = 0;
-    if ( !cpu_isset(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
+    if ( !cpumask_test_cpu(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
         goto out;
 
-    if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) &&
+    if ( (c->n_dom > 0) && (cpumask_weight(c->cpu_valid) == 1) &&
          (cpu != cpupool_moving_cpu) )
     {
         for_each_domain(d)
@@ -326,15 +333,15 @@
     cpupool_moving_cpu = cpu;
     atomic_inc(&c->refcnt);
     cpupool_cpu_moving = c;
-    cpu_clear(cpu, c->cpu_valid);
+    cpumask_clear_cpu(cpu, c->cpu_valid);
     spin_unlock(&cpupool_lock);
 
     work_cpu = smp_processor_id();
     if ( work_cpu == cpu )
     {
-        work_cpu = first_cpu(cpupool0->cpu_valid);
+        work_cpu = cpumask_first(cpupool0->cpu_valid);
         if ( work_cpu == cpu )
-            work_cpu = next_cpu(cpu, cpupool0->cpu_valid);
+            work_cpu = cpumask_next(cpu, cpupool0->cpu_valid);
     }
     return continue_hypercall_on_cpu(work_cpu, cpupool_unassign_cpu_helper, c);
 
@@ -361,7 +368,7 @@
         return 0;
     spin_lock(&cpupool_lock);
     c = cpupool_find_by_id(poolid);
-    if ( (c != NULL) && cpus_weight(c->cpu_valid) )
+    if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
     {
         c->n_dom++;
         n_dom = c->n_dom;
@@ -418,7 +425,7 @@
     int ret = 0;
        
     spin_lock(&cpupool_lock);
-    if ( !cpu_isset(cpu, cpupool0->cpu_valid))
+    if ( !cpumask_test_cpu(cpu, cpupool0->cpu_valid))
         ret = -EBUSY;
     else
         cpu_set(cpu, cpupool_locked_cpus);
@@ -473,7 +480,7 @@
         op->cpupool_id = c->cpupool_id;
         op->sched_id = c->sched->sched_id;
         op->n_dom = c->n_dom;
-        ret = cpumask_to_xenctl_cpumap(&op->cpumap, &c->cpu_valid);
+        ret = cpumask_to_xenctl_cpumap(&op->cpumap, c->cpu_valid);
         cpupool_put(c);
     }
     break;
@@ -516,7 +523,7 @@
             break;
         cpu = op->cpu;
         if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
-            cpu = last_cpu(c->cpu_valid);
+            cpu = cpumask_last(c->cpu_valid);
         ret = (cpu < nr_cpu_ids) ? cpupool_unassign_cpu(c, cpu) : -EINVAL;
         cpupool_put(c);
     }
@@ -550,7 +557,7 @@
         ret = -ENOENT;
         spin_lock(&cpupool_lock);
         c = cpupool_find_by_id(op->cpupool_id);
-        if ( (c != NULL) && cpus_weight(c->cpu_valid) )
+        if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
         {
             d->cpupool->n_dom--;
             ret = sched_move_domain(d, c);
diff -r 2682094bc243 -r 53528bab2eb4 xen/common/domctl.c
--- a/xen/common/domctl.c       Fri Oct 21 09:42:47 2011 +0200
+++ b/xen/common/domctl.c       Fri Oct 21 09:43:35 2011 +0200
@@ -502,7 +502,7 @@
             goto maxvcpu_out;
 
         ret = -ENOMEM;
-        online = (d->cpupool == NULL) ? &cpu_online_map : &d->cpupool->cpu_valid;
+        online = (d->cpupool == NULL) ? &cpu_online_map : d->cpupool->cpu_valid;
         if ( max > d->max_vcpus )
         {
             struct vcpu **vcpus;
diff -r 2682094bc243 -r 53528bab2eb4 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Fri Oct 21 09:42:47 2011 +0200
+++ b/xen/common/sched_credit.c Fri Oct 21 09:43:35 2011 +0200
@@ -73,7 +73,7 @@
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define RUNQ(_cpu)          (&(CSCHED_PCPU(_cpu)->runq))
 #define CSCHED_CPUONLINE(_pool)    \
-    (((_pool) == NULL) ? &cpupool_free_cpus : &(_pool)->cpu_valid)
+    (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
 
 
 /*
diff -r 2682094bc243 -r 53528bab2eb4 xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c        Fri Oct 21 09:42:47 2011 +0200
+++ b/xen/common/sched_credit2.c        Fri Oct 21 09:43:35 2011 +0200
@@ -176,7 +176,7 @@
 #define CSCHED_VCPU(_vcpu)  ((struct csched_vcpu *) (_vcpu)->sched_priv)
 #define CSCHED_DOM(_dom)    ((struct csched_dom *) (_dom)->sched_priv)
 #define CSCHED_CPUONLINE(_pool)    \
-    (((_pool) == NULL) ? &cpupool_free_cpus : &(_pool)->cpu_valid)
+    (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
 /* CPU to runq_id macro */
 #define c2r(_ops, _cpu)     (CSCHED_PRIV(_ops)->runq_map[(_cpu)])
 /* CPU to runqueue struct macro */
diff -r 2682094bc243 -r 53528bab2eb4 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Fri Oct 21 09:42:47 2011 +0200
+++ b/xen/common/sched_sedf.c   Fri Oct 21 09:43:35 2011 +0200
@@ -22,7 +22,7 @@
     } while ( 0 )
 
 #define SEDF_CPUONLINE(_pool)                                             \
-    (((_pool) == NULL) ? &cpupool_free_cpus : &(_pool)->cpu_valid)
+    (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
 
 #ifndef NDEBUG
 #define SEDF_STATS
diff -r 2682094bc243 -r 53528bab2eb4 xen/common/schedule.c
--- a/xen/common/schedule.c     Fri Oct 21 09:42:47 2011 +0200
+++ b/xen/common/schedule.c     Fri Oct 21 09:43:35 2011 +0200
@@ -74,7 +74,7 @@
 #define VCPU2OP(_v)   (DOM2OP((_v)->domain))
 #define VCPU2ONLINE(_v)                                                    \
          (((_v)->domain->cpupool == NULL) ? &cpu_online_map                \
-         : &(_v)->domain->cpupool->cpu_valid)
+         : (_v)->domain->cpupool->cpu_valid)
 
 static inline void trace_runstate_change(struct vcpu *v, int new_state)
 {
@@ -258,7 +258,7 @@
 
     domain_pause(d);
 
-    new_p = first_cpu(c->cpu_valid);
+    new_p = cpumask_first(c->cpu_valid);
     for_each_vcpu ( d, v )
     {
         migrate_timer(&v->periodic_timer, new_p);
@@ -273,7 +273,7 @@
         v->sched_priv = vcpu_priv[v->vcpu_id];
         evtchn_move_pirqs(v);
 
-        new_p = cycle_cpu(new_p, c->cpu_valid);
+        new_p = cpumask_cycle(new_p, c->cpu_valid);
 
         SCHED_OP(VCPU2OP(v), insert_vcpu, v);
     }
@@ -431,13 +431,13 @@
             if ( pick_called &&
                  (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
                  cpumask_test_cpu(new_cpu, v->cpu_affinity) &&
-                 cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
+                 cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
 
             /* Select a new CPU. */
             new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
             if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
-                 cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
+                 cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
             pick_called = 1;
         }
@@ -549,7 +549,7 @@
         {
             vcpu_schedule_lock_irq(v);
 
-            cpumask_and(&online_affinity, v->cpu_affinity, &c->cpu_valid);
+            cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
             if ( cpus_empty(online_affinity) &&
                  cpumask_test_cpu(cpu, v->cpu_affinity) )
             {
@@ -1446,7 +1446,7 @@
     cpumask_t        *cpus;
 
     sched = (c == NULL) ? &ops : c->sched;
-    cpus = (c == NULL) ? &cpupool_free_cpus : &c->cpu_valid;
+    cpus = (c == NULL) ? &cpupool_free_cpus : c->cpu_valid;
     printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
     SCHED_OP(sched, dump_settings);
 
diff -r 2682094bc243 -r 53528bab2eb4 xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h        Fri Oct 21 09:42:47 2011 +0200
+++ b/xen/include/xen/sched-if.h        Fri Oct 21 09:43:35 2011 +0200
@@ -192,7 +192,7 @@
 struct cpupool
 {
     int              cpupool_id;
-    cpumask_t        cpu_valid;      /* all cpus assigned to pool */
+    cpumask_var_t    cpu_valid;      /* all cpus assigned to pool */
     struct cpupool   *next;
     unsigned int     n_dom;
     struct scheduler *sched;
diff -r 2682094bc243 -r 53528bab2eb4 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Fri Oct 21 09:42:47 2011 +0200
+++ b/xen/include/xen/sched.h   Fri Oct 21 09:43:35 2011 +0200
@@ -664,7 +664,7 @@
 void schedule_dump(struct cpupool *c);
 extern void dump_runq(unsigned char key);
 
-#define num_cpupool_cpus(c) (cpus_weight((c)->cpu_valid))
+#define num_cpupool_cpus(c) cpumask_weight((c)->cpu_valid)
 
 #endif /* __SCHED_H__ */
 

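As a companion to the hunks above, here is a small standalone example of the
caller-side rewrite the patch performs mechanically. The helpers are
simplified stand-ins, not the real Xen implementations: value-style
operations such as cpu_set(), cpu_isset(), first_cpu() and cpus_weight()
give way to the pointer-taking cpumask_set_cpu(), cpumask_test_cpu(),
cpumask_first() and cpumask_weight(), and the leading '&' on c->cpu_valid
disappears because cpumask_var_t already behaves as a pointer.

    #include <stdio.h>

    /* Toy 64-CPU mask; stand-ins for the Xen helpers named in the patch. */
    typedef struct { unsigned long bits; } cpumask_t;
    typedef cpumask_t *cpumask_var_t;

    static void cpumask_set_cpu(unsigned int cpu, cpumask_t *m)
    {
        m->bits |= 1UL << cpu;
    }

    static int cpumask_test_cpu(unsigned int cpu, const cpumask_t *m)
    {
        return (m->bits >> cpu) & 1;
    }

    static unsigned int cpumask_weight(const cpumask_t *m)
    {
        return (unsigned int)__builtin_popcountl(m->bits);
    }

    static unsigned int cpumask_first(const cpumask_t *m)
    {
        return m->bits ? (unsigned int)__builtin_ctzl(m->bits) : 64;
    }

    int main(void)
    {
        cpumask_t mask = { 0 };
        cpumask_var_t cpu_valid = &mask;   /* stands in for c->cpu_valid */

        cpumask_set_cpu(3, cpu_valid);     /* was: cpu_set(3, c->cpu_valid) */
        cpumask_set_cpu(5, cpu_valid);

        printf("first=%u weight=%u test3=%d\n",
               cpumask_first(cpu_valid),   /* was: first_cpu(c->cpu_valid) */
               cpumask_weight(cpu_valid),  /* was: cpus_weight(...) */
               cpumask_test_cpu(3, cpu_valid) /* was: cpu_isset(...) */);
        return 0;
    }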
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
