[Xen-devel] [PATCH v3 09/10] xen/nodemask: Sanitise the remainder of the nodemask API



The nodemask API differs from the cpumask API in that each wrapper around a
bitmap operation is further wrapped by a macro which takes the address of the
nodemask objects.

This results in code which is confusing to read, as it doesn't follow C's
calling conventions, and which prohibits the use of more complicated
constructs for specifying parameters.
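
For example, with the wrapping macros a caller passes the nodemask objects
themselves and the macro takes their addresses internally, as in this call
site from the dom0_build.c hunk below:

    nodes_and(dom0_nodes, dom0_nodes, node_online_map);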

Drop all wrapping macros, rename the nodemask static inline functions to drop
the double underscores, and feed MAX_NUMNODES into appropriate locations.

Furthermore, the naming is inconsistent.  As we're changing all callers
anyway, rationalise all the naming to be of the form nodemask_*(), and update
the types per Xen's latest expectations.
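
With the macros dropped and the functions renamed, the same call site becomes
an ordinary C function call taking pointers, matching the cpumask API:

    nodemask_and(&dom0_nodes, &dom0_nodes, &node_online_map);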

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>

v3:
 * Split various bits out into earlier patches
 * Rename the APIs to be consistent.
v2:
 * New
---
 xen/arch/x86/dom0_build.c  |   8 +-
 xen/arch/x86/numa.c        |   4 +-
 xen/arch/x86/srat.c        |   9 ++-
 xen/common/domain.c        |   4 +-
 xen/common/page_alloc.c    |  26 +++----
 xen/common/sched_credit.c  |   2 +-
 xen/common/sysctl.c        |   2 +-
 xen/include/xen/nodemask.h | 186 ++++++++++++++++++++-------------------------
 8 files changed, 109 insertions(+), 132 deletions(-)

diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index c625e64d03..cb0f693d76 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -247,10 +247,10 @@ unsigned int __init dom0_max_vcpus(void)
     for ( i = 0; i < dom0_nr_pxms; ++i )
         if ( (node = pxm_to_node(dom0_pxms[i])) != NUMA_NO_NODE )
             __nodemask_set(node, &dom0_nodes);
-    nodes_and(dom0_nodes, dom0_nodes, node_online_map);
-    if ( nodes_empty(dom0_nodes) )
+    nodemask_and(&dom0_nodes, &dom0_nodes, &node_online_map);
+    if ( nodemask_empty(&dom0_nodes) )
         dom0_nodes = node_online_map;
-    for_each_node_mask ( node, dom0_nodes )
+    for_each_node_mask ( node, &dom0_nodes )
         cpumask_or(&dom0_cpus, &dom0_cpus, &node_to_cpumask(node));
     cpumask_and(&dom0_cpus, &dom0_cpus, cpupool0->cpu_valid);
     if ( cpumask_empty(&dom0_cpus) )
@@ -344,7 +344,7 @@ unsigned long __init dom0_compute_nr_pages(
     if ( !dom0_mem_set && CONFIG_DOM0_MEM[0] )
         parse_dom0_mem(CONFIG_DOM0_MEM);
 
-    for_each_node_mask ( node, dom0_nodes )
+    for_each_node_mask ( node, &dom0_nodes )
         avail += avail_domheap_pages_region(node, 0, 0) +
                  initial_images_nrpages(node);
 
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 7473f83b7b..d3db213432 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -186,13 +186,13 @@ void __init numa_init_array(void)
        mapping. To avoid this fill in the mapping for all possible
        CPUs, as the number of CPUs is not known yet.
        We round robin the existing nodes. */
-    rr = first_node(node_online_map);
+    rr = nodemask_first(&node_online_map);
     for ( i = 0; i < nr_cpu_ids; i++ )
     {
         if ( cpu_to_node[i] != NUMA_NO_NODE )
             continue;
         numa_set_node(i, rr);
-        rr = cycle_node(rr, node_online_map);
+        rr = nodemask_cycle(rr, &node_online_map);
     }
 }
 
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index 5f44ac27f1..9e37e24d70 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -332,7 +332,7 @@ acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
        if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) {
                struct node *nd = &nodes[node];
 
-               if (!node_test_and_set(node, memory_nodes_parsed)) {
+               if (!nodemask_test_and_set(node, &memory_nodes_parsed)) {
                        nd->start = start;
                        nd->end = end;
                } else {
@@ -376,7 +376,7 @@ static int __init nodes_cover_memory(void)
 
                do {
                        found = 0;
-                       for_each_node_mask(j, memory_nodes_parsed)
+                       for_each_node_mask( j, &memory_nodes_parsed )
                                if (start < nodes[j].end
                                    && end > nodes[j].start) {
                                        if (start >= nodes[j].start) {
@@ -480,10 +480,11 @@ int __init acpi_scan_nodes(u64 start, u64 end)
                return -1;
        }
 
-       nodes_or(all_nodes_parsed, memory_nodes_parsed, processor_nodes_parsed);
+       nodemask_or(&all_nodes_parsed, &memory_nodes_parsed,
+                   &processor_nodes_parsed);
 
        /* Finally register nodes */
-       for_each_node_mask(i, all_nodes_parsed)
+       for_each_node_mask( i, &all_nodes_parsed )
        {
                u64 size = nodes[i].end - nodes[i].start;
                if ( size == 0 )
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 5dbc68cbc3..3228b08b3a 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -630,7 +630,7 @@ void domain_update_node_affinity(struct domain *d)
 int domain_set_node_affinity(struct domain *d, const nodemask_t *affinity)
 {
     /* Being affine with no nodes is just wrong */
-    if ( nodes_empty(*affinity) )
+    if ( nodemask_empty(affinity) )
         return -EINVAL;
 
     spin_lock(&d->node_affinity_lock);
@@ -639,7 +639,7 @@ int domain_set_node_affinity(struct domain *d, const nodemask_t *affinity)
      * Being/becoming explicitly affine to all nodes is not particularly
      * useful. Let's take it as the `reset node affinity` command.
      */
-    if ( nodes_full(*affinity) )
+    if ( nodemask_full(affinity) )
     {
         d->auto_node_affinity = 1;
         goto out;
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 77e649d065..97539ffd69 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -821,12 +821,12 @@ static struct page_info *get_free_buddy(unsigned int zone_lo,
      * may have bit set outside of node_online_map.  Clamp it.
      */
     if ( d )
-        nodes_and(nodemask, nodemask, d->node_affinity);
+        nodemask_and(&nodemask, &nodemask, &d->node_affinity);
 
     if ( node == NUMA_NO_NODE )
     {
         if ( d != NULL )
-            node = cycle_node(d->last_alloc_node, nodemask);
+            node = nodemask_cycle(d->last_alloc_node, &nodemask);
 
         if ( node >= MAX_NUMNODES )
             node = cpu_to_node(smp_processor_id());
@@ -882,19 +882,19 @@ static struct page_info *get_free_buddy(unsigned int zone_lo,
         {
             /* Very first node may be caller-specified and outside nodemask. */
             ASSERT(!nodemask_retry);
-            first = node = first_node(nodemask);
+            first = node = nodemask_first(&nodemask);
             if ( node < MAX_NUMNODES )
                 continue;
         }
-        else if ( (node = next_node(node, nodemask)) >= MAX_NUMNODES )
-            node = first_node(nodemask);
+        else if ( (node = nodemask_next(node, &nodemask)) >= MAX_NUMNODES )
+            node = nodemask_first(&nodemask);
         if ( node == first )
         {
             /* When we have tried all in nodemask, we fall back to others. */
             if ( (memflags & MEMF_exact_node) || nodemask_retry++ )
                 return NULL;
-            nodes_andnot(nodemask, node_online_map, nodemask);
-            first = node = first_node(nodemask);
+            nodemask_andnot(&nodemask, &node_online_map, &nodemask);
+            first = node = nodemask_first(&nodemask);
             if ( node >= MAX_NUMNODES )
                 return NULL;
         }
@@ -1171,7 +1171,7 @@ static unsigned int node_to_scrub(bool get_node)
         node = 0;
 
     if ( node_need_scrub[node] &&
-         (!get_node || !node_test_and_set(node, node_scrubbing)) )
+         (!get_node || !nodemask_test_and_set(node, &node_scrubbing)) )
         return node;
 
     /*
@@ -1182,7 +1182,7 @@ static unsigned int node_to_scrub(bool get_node)
     for ( ; ; )
     {
         do {
-            node = cycle_node(node, node_online_map);
+            node = nodemask_cycle(node, &node_online_map);
         } while ( !cpumask_empty(&node_to_cpumask(node)) &&
                   (node != local_node) );
 
@@ -1205,10 +1205,10 @@ static unsigned int node_to_scrub(bool get_node)
              * then we'd need to take this lock every time we come in here.
              */
             if ( (dist < shortest || closest == NUMA_NO_NODE) &&
-                 !node_test_and_set(node, node_scrubbing) )
+                 !nodemask_test_and_set(node, &node_scrubbing) )
             {
                 if ( closest != NUMA_NO_NODE )
-                    node_clear(closest, node_scrubbing);
+                    nodemask_clear(closest, &node_scrubbing);
                 shortest = dist;
                 closest = node;
             }
@@ -1360,7 +1360,7 @@ bool scrub_free_pages(void)
     spin_unlock(&heap_lock);
 
  out_nolock:
-    node_clear(node, node_scrubbing);
+    nodemask_clear(node, &node_scrubbing);
     return node_to_scrub(false) != NUMA_NO_NODE;
 }
 
@@ -2010,7 +2010,7 @@ static void __init scrub_heap_pages(void)
             continue;
 
         last_distance = INT_MAX;
-        best_node = first_node(node_online_map);
+        best_node = nodemask_first(&node_online_map);
         /* Figure out which NODE CPUs are close. */
         for_each_online_node ( j )
         {
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 81dee5e472..3e569d6970 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1810,7 +1810,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
             } while( peer_cpu != first_cpu );
 
  next_node:
-            peer_node = cycle_node(peer_node, node_online_map);
+            peer_node = nodemask_cycle(peer_node, &node_online_map);
         } while( peer_node != node );
     }
 
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index 765effde8d..c8c6805bb4 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -280,7 +280,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
         bool_t do_meminfo = !guest_handle_is_null(ni->meminfo);
         bool_t do_distance = !guest_handle_is_null(ni->distance);
 
-        num_nodes = last_node(node_online_map) + 1;
+        num_nodes = nodemask_last(&node_online_map) + 1;
 
         if ( do_meminfo || do_distance )
         {
diff --git a/xen/include/xen/nodemask.h b/xen/include/xen/nodemask.h
index 1605c1bcc5..ed918e4a8d 100644
--- a/xen/include/xen/nodemask.h
+++ b/xen/include/xen/nodemask.h
@@ -10,30 +10,30 @@
  *
  * The available nodemask operations are:
  *
- * void node_set(node, mask)           turn on bit 'node' in mask
+ * void nodemask_set(node, mask)       turn on bit 'node' in mask
  * void __nodemask_set(node, mask)     turn on bit 'node' in mask (unlocked)
- * void node_clear(node, mask)         turn off bit 'node' in mask
+ * void nodemask_clear(node, mask)     turn off bit 'node' in mask
  * void __nodemask_clear(node, mask)   turn off bit 'node' in mask (unlocked)
  * bool nodemask_test(node, mask)      true iff bit 'node' set in mask
- * int node_test_and_set(node, mask)   test and set bit 'node' in mask
+ * bool nodemask_test_and_set(node, mask) test and set bit 'node' in mask
  *
- * void nodes_and(dst, src1, src2)     dst = src1 & src2  [intersection]
- * void nodes_or(dst, src1, src2)      dst = src1 | src2  [union]
- * void nodes_xor(dst, src1, src2)     dst = src1 ^ src2
- * void nodes_andnot(dst, src1, src2)  dst = src1 & ~src2
- * void nodes_complement(dst, src)     dst = ~src
+ * void nodemask_and(dst, src1, src2)  dst = src1 & src2  [intersection]
+ * void nodemask_or(dst, src1, src2)   dst = src1 | src2  [union]
+ * void nodemask_xor(dst, src1, src2)  dst = src1 ^ src2
+ * void nodemask_andnot(dst, src1, src2) dst = src1 & ~src2
+ * void nodemask_complement(dst, src)  dst = ~src
  *
- * int nodes_equal(mask1, mask2)       Does mask1 == mask2?
- * int nodes_intersects(mask1, mask2)  Do mask1 and mask2 intersect?
- * int nodes_subset(mask1, mask2)      Is mask1 a subset of mask2?
- * int nodes_empty(mask)               Is mask empty (no bits sets)?
- * int nodes_full(mask)                        Is mask full (all bits sets)?
- * int nodes_weight(mask)              Hamming weight - number of set bits
+ * bool nodemask_equal(mask1, mask2)   Does mask1 == mask2?
+ * bool nodemask_intersects(mask1, mask2) Do mask1 and mask2 intersect?
+ * bool nodemask_subset(mask1, mask2)  Is mask1 a subset of mask2?
+ * bool nodemask_empty(mask)           Is mask empty (no bits sets)?
+ * bool nodemask_full(mask)            Is mask full (all bits sets)?
+ * unsigned int nodemask_weight(mask)  Hamming weight - number of set bits
  *
- * int first_node(mask)                        Number lowest set bit, or MAX_NUMNODES
- * int next_node(node, mask)           Next node past 'node', or MAX_NUMNODES
- * int last_node(mask)                 Number highest set bit, or MAX_NUMNODES
- * int cycle_node(node, mask)          Next node cycling from 'node', or
+ * node nodemask_first(mask)           Number lowest set bit, or MAX_NUMNODES
+ * node nodemask_next(node, mask)      Next node past 'node', or MAX_NUMNODES
+ * node nodemask_last(mask)            Number highest set bit, or MAX_NUMNODES
+ * node nodemask_cycle(node, mask)     Next node cycling from 'node', or
  *                                     MAX_NUMNODES
  *
  * nodemask_t NODEMASK_OF(node)                Initializer - bit 'node' set
@@ -43,7 +43,7 @@
  *
  * for_each_node_mask(node, mask)      for-loop node over mask
  *
- * int num_online_nodes()              Number of online Nodes
+ * unsigned int num_online_nodes()     Number of online Nodes
  *
  * bool node_online(node)              Is this node online?
  *
@@ -96,10 +96,9 @@ typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
 
 #endif /* MAX_NUMNODES */
 
-#define node_set(node, dst) __node_set((node), &(dst))
-static inline void __node_set(int node, volatile nodemask_t *dstp)
+static inline void nodemask_set(unsigned int node, nodemask_t *dst)
 {
-       set_bit(node, dstp->bits);
+    set_bit(node, dst->bits);
 }
 
 static inline void __nodemask_set(unsigned int node, nodemask_t *dst)
@@ -107,10 +106,9 @@ static inline void __nodemask_set(unsigned int node, nodemask_t *dst)
     __set_bit(node, dst->bits);
 }
 
-#define node_clear(node, dst) __node_clear((node), &(dst))
-static inline void __node_clear(int node, volatile nodemask_t *dstp)
+static inline void nodemask_clear(unsigned int node, nodemask_t *dst)
 {
-       clear_bit(node, dstp->bits);
+    clear_bit(node, dst->bits);
 }
 
 static inline void __nodemask_clear(unsigned int node, nodemask_t *dst)
@@ -123,139 +121,117 @@ static inline bool nodemask_test(unsigned int node, const nodemask_t *dst)
     return test_bit(node, dst->bits);
 }
 
-#define node_test_and_set(node, nodemask) \
-                       __node_test_and_set((node), &(nodemask))
-static inline int __node_test_and_set(int node, nodemask_t *addr)
+static inline bool nodemask_test_and_set(unsigned int node, nodemask_t *dst)
 {
-       return test_and_set_bit(node, addr->bits);
+    return test_and_set_bit(node, dst->bits);
 }
 
-#define nodes_and(dst, src1, src2) \
-                       __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline void nodemask_and(nodemask_t *dst, const nodemask_t *src1,
+                                const nodemask_t *src2)
 {
-       bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+    bitmap_and(dst->bits, src1->bits, src2->bits, MAX_NUMNODES);
 }
 
-#define nodes_or(dst, src1, src2) \
-                       __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline void nodemask_or(nodemask_t *dst, const nodemask_t *src1,
+                               const nodemask_t *src2)
 {
-       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+    bitmap_or(dst->bits, src1->bits, src2->bits, MAX_NUMNODES);
 }
 
-#define nodes_xor(dst, src1, src2) \
-                       __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline void nodemask_xor(nodemask_t *dst, const nodemask_t *src1,
+                                const nodemask_t *src2)
 {
-       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+    bitmap_xor(dst->bits, src1->bits, src2->bits, MAX_NUMNODES);
 }
 
-#define nodes_andnot(dst, src1, src2) \
-                       __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline void nodemask_andnot(nodemask_t *dst, const nodemask_t *src1,
+                                   const nodemask_t *src2)
 {
-       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+    bitmap_andnot(dst->bits, src1->bits, src2->bits, MAX_NUMNODES);
 }
 
-#define nodes_complement(dst, src) \
-                       __nodes_complement(&(dst), &(src), MAX_NUMNODES)
-static inline void __nodes_complement(nodemask_t *dstp,
-                                       const nodemask_t *srcp, int nbits)
+static inline void nodemask_complement(nodemask_t *dst, const nodemask_t *src)
 {
-       bitmap_complement(dstp->bits, srcp->bits, nbits);
+    bitmap_complement(dst->bits, src->bits, MAX_NUMNODES);
 }
 
-#define nodes_equal(src1, src2) \
-                       __nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_equal(const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline bool nodemask_equal(const nodemask_t *src1,
+                                  const nodemask_t *src2)
 {
-       return bitmap_equal(src1p->bits, src2p->bits, nbits);
+    return bitmap_equal(src1->bits, src2->bits, MAX_NUMNODES);
 }
 
-#define nodes_intersects(src1, src2) \
-                       __nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_intersects(const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline bool nodemask_intersects(const nodemask_t *src1,
+                                       const nodemask_t *src2)
 {
-       return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+    return bitmap_intersects(src1->bits, src2->bits, MAX_NUMNODES);
 }
 
-#define nodes_subset(src1, src2) \
-                       __nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_subset(const nodemask_t *src1p,
-                                       const nodemask_t *src2p, int nbits)
+static inline bool nodemask_subset(const nodemask_t *src1,
+                                   const nodemask_t *src2)
 {
-       return bitmap_subset(src1p->bits, src2p->bits, nbits);
+    return bitmap_subset(src1->bits, src2->bits, MAX_NUMNODES);
 }
 
-#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline int __nodes_empty(const nodemask_t *srcp, int nbits)
+static inline bool nodemask_empty(const nodemask_t *src)
 {
-       return bitmap_empty(srcp->bits, nbits);
+    return bitmap_empty(src->bits, MAX_NUMNODES);
 }
 
-#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_full(const nodemask_t *srcp, int nbits)
+static inline bool nodemask_full(const nodemask_t *src)
 {
-       return bitmap_full(srcp->bits, nbits);
+    return bitmap_full(src->bits, MAX_NUMNODES);
 }
 
-#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_weight(const nodemask_t *srcp, int nbits)
+static inline unsigned int nodemask_weight(const nodemask_t *src)
 {
-       return bitmap_weight(srcp->bits, nbits);
+    return bitmap_weight(src->bits, MAX_NUMNODES);
 }
 
 /* FIXME: better would be to fix all architectures to never return
           > MAX_NUMNODES, then the silly min_ts could be dropped. */
 
-#define first_node(src) __first_node(&(src), MAX_NUMNODES)
-static inline int __first_node(const nodemask_t *srcp, int nbits)
+static inline unsigned int nodemask_first(const nodemask_t *src)
 {
-       return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+    return min_t(unsigned int, MAX_NUMNODES,
+                 find_first_bit(src->bits, MAX_NUMNODES));
 }
 
-#define next_node(n, src) __next_node((n), &(src), MAX_NUMNODES)
-static inline int __next_node(int n, const nodemask_t *srcp, int nbits)
+static inline unsigned int nodemask_next(unsigned int n, const nodemask_t *src)
 {
-       return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
+    return min_t(unsigned int, MAX_NUMNODES,
+                 find_next_bit(src->bits, MAX_NUMNODES, n + 1));
 }
 
-#define last_node(src) __last_node(&(src), MAX_NUMNODES)
-static inline int __last_node(const nodemask_t *srcp, int nbits)
+static inline unsigned int nodemask_last(const nodemask_t *src)
 {
-       int node, pnode = nbits;
-       for (node = __first_node(srcp, nbits);
-            node < nbits;
-            node = __next_node(node, srcp, nbits))
-               pnode = node;
-       return pnode;
+    unsigned int node, pnode = MAX_NUMNODES;
+
+    for ( node = nodemask_first(src);
+          node < MAX_NUMNODES; node = nodemask_next(node, src) )
+        pnode = node;
+
+    return pnode;
 }
 
-#define cycle_node(n, src) __cycle_node((n), &(src), MAX_NUMNODES)
-static inline int __cycle_node(int n, const nodemask_t *maskp, int nbits)
+static inline unsigned int nodemask_cycle(unsigned int n, const nodemask_t *src)
 {
-    int nxt = __next_node(n, maskp, nbits);
+    unsigned int nxt = nodemask_next(n, src);
+
+    if ( nxt == MAX_NUMNODES )
+        nxt = nodemask_first(src);
 
-    if (nxt == nbits)
-        nxt = __first_node(maskp, nbits);
     return nxt;
 }
 
 #if MAX_NUMNODES > 1
 #define for_each_node_mask(node, mask)                 \
-       for ((node) = first_node(mask);                 \
+       for ((node) = nodemask_first(mask);             \
                (node) < MAX_NUMNODES;                  \
-               (node) = next_node((node), (mask)))
+               (node) = nodemask_next(node, mask))
 #else /* MAX_NUMNODES == 1 */
 #define for_each_node_mask(node, mask)                 \
-       if (!nodes_empty(mask))                         \
+       if ( !nodemask_empty(mask) )                    \
                for ((node) = 0; (node) < 1; (node)++)
 #endif /* MAX_NUMNODES */
 
@@ -267,16 +243,16 @@ static inline int __cycle_node(int n, const nodemask_t *maskp, int nbits)
 extern nodemask_t node_online_map;
 
 #if MAX_NUMNODES > 1
-#define num_online_nodes()     nodes_weight(node_online_map)
+#define num_online_nodes()     nodemask_weight(&node_online_map)
 #define node_online(node)      nodemask_test(node, &node_online_map)
 #else
-#define num_online_nodes()     1
+#define num_online_nodes()     1U
 #define node_online(node)      ((node) == 0)
 #endif
 
-#define node_set_online(node)     set_bit((node), node_online_map.bits)
-#define node_set_offline(node)    clear_bit((node), node_online_map.bits)
+#define node_set_online(node)     set_bit(node, node_online_map.bits)
+#define node_set_offline(node)    clear_bit(node, node_online_map.bits)
 
-#define for_each_online_node(node) for_each_node_mask((node), node_online_map)
+#define for_each_online_node(node) for_each_node_mask(node, &node_online_map)
 
 #endif /* __LINUX_NODEMASK_H */
-- 
2.11.0

