[Xen-devel] [PATCH 1/6] eliminate cpus_xyz()

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 1/6] eliminate cpus_xyz()
From: "Jan Beulich" <JBeulich@xxxxxxxx>
Date: Mon, 07 Nov 2011 09:56:26 +0000
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
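
The conversion is mechanical: the cpus_xyz() forms being removed are
macros that take cpumask_t lvalues and take their address internally,
while the cpumask_xyz() replacements are plain inline functions
operating on pointers, so call sites pass the address explicitly. As a
hypothetical caller (illustration only, not taken from this patch):

    cpumask_t mask;
    unsigned int nr;

    cpumask_copy(&mask, &cpu_online_map);

    /* Old style: macro forms, operands named as objects. */
    if ( cpus_intersects(mask, cpu_online_map) )
        nr = cpus_weight(mask);

    /* New style: inline functions, operands passed by pointer. */
    if ( cpumask_intersects(&mask, &cpu_online_map) )
        nr = cpumask_weight(&mask);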

--- a/xen/arch/ia64/linux-xen/iosapic.c
+++ b/xen/arch/ia64/linux-xen/iosapic.c
@@ -709,7 +709,7 @@ get_target_cpu (unsigned int gsi, int ve
                                cpu_clear(numa_cpu, cpu_mask);
                }
 
-               num_cpus = cpus_weight(cpu_mask);
+               num_cpus = cpumask_weight(&cpu_mask);
 
                if (!num_cpus)
                        goto skip_numa_setup;
--- a/xen/arch/ia64/linux-xen/smp.c
+++ b/xen/arch/ia64/linux-xen/smp.c
@@ -62,7 +62,7 @@ void smp_send_event_check_mask(const cpu
     int cpu;
 
     /*  Not for me.  */
-    if (cpus_subset(*mask, *cpumask_of(smp_processor_id())))
+    if (cpumask_subset(mask, cpumask_of(smp_processor_id())))
         return;
 
     //printf("smp_send_event_check_mask called\n");
@@ -444,7 +444,7 @@ on_selected_cpus(const cpumask_t *select
                  void *info, int wait)
 {
        struct call_data_struct data;
-       unsigned int cpu, nr_cpus = cpus_weight(*selected);
+       unsigned int cpu, nr_cpus = cpumask_weight(selected);
 
        ASSERT(local_irq_is_enabled());
 
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -3196,7 +3196,7 @@ int get_page_type(struct page_info *page
                 cpumask_copy(&mask,
                              page_get_owner(page)->domain_dirty_cpumask);
                 tlbflush_filter(mask, page->tlbflush_timestamp);
 
-                if ( unlikely(!cpus_empty(mask)) )
+                if ( unlikely(!cpumask_empty(&mask)) )
                 {
                     perfc_incr(need_flush_tlb_flush);
                     flush_tlb_mask(&mask);
--- a/xen/arch/ia64/xen/vhpt.c
+++ b/xen/arch/ia64/xen/vhpt.c
@@ -556,7 +556,7 @@ void flush_tlb_mask(const cpumask_t *mas
     if (cpu_isset(cpu, *mask))
         flush_tlb_vhpt_all (NULL);
 
-    if (cpus_subset(*mask, *cpumask_of(cpu)))
+    if (cpumask_subset(mask, cpumask_of(cpu)))
         return;
 
     for_each_cpu_mask (cpu, *mask)
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -863,7 +863,7 @@ static void intel_machine_check(struct c
          */
         if (atomic_read(&found_error) == 0)
             mc_panic("MCE: No CPU found valid MCE, need reset\n");
-        if (!cpus_empty(mce_fatal_cpus))
+        if (!cpumask_empty(&mce_fatal_cpus))
         {
             char *ebufp, ebuf[96] = "MCE: Fatal error happened on CPUs ";
             ebufp = ebuf + strlen(ebuf);
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1611,8 +1611,9 @@ void context_switch(struct vcpu *prev, s
 
     cpumask_copy(&dirty_mask, next->vcpu_dirty_cpumask);
     /* Allow at most one CPU at a time to be dirty. */
-    ASSERT(cpus_weight(dirty_mask) <= 1);
-    if ( unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)) )
+    ASSERT(cpumask_weight(&dirty_mask) <= 1);
+    if ( unlikely(!cpumask_test_cpu(cpu, &dirty_mask) &&
+                  !cpumask_empty(&dirty_mask)) )
     {
         /* Other cpus call __sync_local_execstate from flush ipi handler. */
         flush_tlb_mask(&dirty_mask);
--- a/xen/arch/x86/genapic/delivery.c
+++ b/xen/arch/x86/genapic/delivery.c
@@ -38,7 +38,7 @@ const cpumask_t *vector_allocation_cpuma
 
 unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask)
 {
-       return cpus_addr(*cpumask)[0]&0xFF;
+       return cpumask_bits(cpumask)[0]&0xFF;
 }
 
 /*
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -711,7 +711,7 @@ unsigned int set_desc_affinity(struct ir
     unsigned long flags;
     cpumask_t dest_mask;
 
-    if (!cpus_intersects(*mask, cpu_online_map))
+    if (!cpumask_intersects(mask, &cpu_online_map))
         return BAD_APICID;
 
     irq = desc->irq;
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2436,7 +2436,7 @@ static int __get_page_type(struct page_i
                 /* Don't flush if the timestamp is old enough */
                 tlbflush_filter(mask, page->tlbflush_timestamp);
 
-                if ( unlikely(!cpus_empty(mask)) &&
+                if ( unlikely(!cpumask_empty(&mask)) &&
                      /* Shadow mode: track only writable pages. */
                      (!shadow_mode_enabled(page_get_owner(page)) ||
                       ((nx & PGT_type_mask) == PGT_writable_page)) )
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1524,7 +1524,7 @@ mfn_t shadow_alloc(struct domain *d,  
          * we need to be sure that no TLB holds a pointer to it. */
         cpumask_copy(&mask, d->domain_dirty_cpumask);
         tlbflush_filter(mask, sp->tlbflush_timestamp);
-        if ( unlikely(!cpus_empty(mask)) )
+        if ( unlikely(!cpumask_empty(&mask)) )
         {
             perfc_incr(shadow_alloc_tlbflush);
             flush_tlb_mask(&mask);
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -140,11 +140,11 @@ void send_IPI_self_x2apic(int vector)
 
 void send_IPI_mask_flat(const cpumask_t *cpumask, int vector)
 {
-    unsigned long mask = cpus_addr(*cpumask)[0];
+    unsigned long mask = cpumask_bits(cpumask)[0];
     unsigned long cfg;
     unsigned long flags;
 
-    mask &= cpus_addr(cpu_online_map)[0];
+    mask &= cpumask_bits(&cpu_online_map)[0];
     mask &= ~(1UL << smp_processor_id());
     if ( mask == 0 )
         return;
@@ -237,7 +237,7 @@ void flush_area_mask(const cpumask_t *ma
     if ( cpu_isset(smp_processor_id(), *mask) )
         flush_area_local(va, flags);
 
-    if ( !cpus_subset(*mask, *cpumask_of(smp_processor_id())) )
+    if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
     {
         spin_lock(&flush_lock);
         cpumask_and(&flush_cpumask, mask, &cpu_online_map);
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -371,7 +371,7 @@ void domain_update_node_affinity(struct 
         cpumask_or(&cpumask, &cpumask, v->cpu_affinity);
 
     for_each_online_node ( node )
-        if ( cpus_intersects(node_to_cpumask(node), cpumask) )
+        if ( cpumask_intersects(&node_to_cpumask(node), &cpumask) )
             node_set(node, nodemask);
 
     d->node_affinity = nodemask;
--- a/xen/common/perfc.c
+++ b/xen/common/perfc.c
@@ -163,11 +163,11 @@ static int perfc_copy_info(XEN_GUEST_HAN
     unsigned int i, j, v;
 
     /* We only copy the name and array-size information once. */
-    if ( !cpus_equal(cpu_online_map, perfc_cpumap) )
+    if ( !cpumask_equal(&cpu_online_map, &perfc_cpumap) )
     {
         unsigned int nr_cpus;
         perfc_cpumap = cpu_online_map;
-        nr_cpus = cpus_weight(perfc_cpumap);
+        nr_cpus = cpumask_weight(&perfc_cpumap);
 
         perfc_nbr_vals = 0;
 
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -121,7 +121,7 @@ static int rcu_barrier_action(void *_cpu
      */
     call_rcu(&data.head, rcu_barrier_callback);
 
-    while ( atomic_read(data.cpu_count) != cpus_weight(cpu_online_map) )
+    while ( atomic_read(data.cpu_count) != num_online_cpus() )
     {
         process_pending_softirqs();
         cpu_relax();
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1246,7 +1246,7 @@ csched_load_balance(struct csched_privat
     cpumask_clear_cpu(cpu, &workers);
     peer_cpu = cpu;
 
-    while ( !cpus_empty(workers) )
+    while ( !cpumask_empty(&workers) )
     {
         peer_cpu = cpumask_cycle(peer_cpu, &workers);
         cpumask_clear_cpu(peer_cpu, &workers);
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1009,7 +1009,7 @@ choose_cpu(const struct scheduler *ops, 
     struct csched_vcpu *svc = CSCHED_VCPU(vc);
     s_time_t min_avgload;
 
-    BUG_ON(cpus_empty(prv->active_queues));
+    BUG_ON(cpumask_empty(&prv->active_queues));
 
     /* Locking:
      * - vc->processor is already locked
@@ -1098,7 +1098,7 @@ choose_cpu(const struct scheduler *ops, 
         new_cpu = vc->processor;
     else
     {
-        BUG_ON(cpus_empty(prv->rqd[min_rqi].active));
+        BUG_ON(cpumask_empty(&prv->rqd[min_rqi].active));
         new_cpu = first_cpu(prv->rqd[min_rqi].active);
     }
 
@@ -1258,9 +1258,9 @@ retry:
         if ( st.orqd->b_avgload > load_max )
             load_max = st.orqd->b_avgload;
 
-        cpus_max=cpus_weight(st.lrqd->active);
-        if ( cpus_weight(st.orqd->active) > cpus_max )
-            cpus_max = cpus_weight(st.orqd->active);
+        cpus_max = cpumask_weight(&st.lrqd->active);
+        if ( cpumask_weight(&st.orqd->active) > cpus_max )
+            cpus_max = cpumask_weight(&st.orqd->active);
 
         /* If we're under 100% capacaty, only shift if load difference
          * is > 1.  otherwise, shift if under 12.5% */
@@ -1801,7 +1801,7 @@ csched_dump(const struct scheduler *ops)
 
     printk("Active queues: %d\n"
            "\tdefault-weight     = %d\n",
-           cpus_weight(prv->active_queues),
+           cpumask_weight(&prv->active_queues),
            CSCHED_DEFAULT_WEIGHT);
     for_each_cpu_mask(i, prv->active_queues)
     {
@@ -1815,7 +1815,7 @@ csched_dump(const struct scheduler *ops)
                "\tinstload           = %d\n"
                "\taveload            = %3"PRI_stime"\n",
                i,
-               cpus_weight(prv->rqd[i].active),
+               cpumask_weight(&prv->rqd[i].active),
                prv->rqd[i].max_weight,
                prv->rqd[i].load,
                fraction);
@@ -1852,7 +1852,7 @@ static void activate_runqueue(struct csc
 
     rqd = prv->rqd + rqi;
 
-    BUG_ON(!cpus_empty(rqd->active));
+    BUG_ON(!cpumask_empty(&rqd->active));
 
     rqd->max_weight = 1;
     rqd->id = rqi;
@@ -1869,7 +1869,7 @@ static void deactivate_runqueue(struct c
 
     rqd = prv->rqd + rqi;
 
-    BUG_ON(!cpus_empty(rqd->active));
+    BUG_ON(!cpumask_empty(&rqd->active));
     
     rqd->id = -1;
 
@@ -1980,7 +1980,7 @@ csched_free_pdata(const struct scheduler
     cpu_clear(cpu, rqd->idle);
     cpu_clear(cpu, rqd->active);
 
-    if ( cpus_empty(rqd->active) )
+    if ( cpumask_empty(&rqd->active) )
     {
         printk(" No cpus left on runqueue, disabling\n");
         deactivate_runqueue(prv, rqi);
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -550,7 +550,7 @@ int cpu_disable_scheduler(unsigned int c
             vcpu_schedule_lock_irq(v);
 
             cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
-            if ( cpus_empty(online_affinity) &&
+            if ( cpumask_empty(&online_affinity) &&
                  cpumask_test_cpu(cpu, v->cpu_affinity) )
             {
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
--- a/xen/include/asm-ia64/linux-xen/asm/acpi.h
+++ b/xen/include/asm-ia64/linux-xen/asm/acpi.h
@@ -147,7 +147,7 @@ static inline void per_cpu_scan_finalize
        int cpu;
        int next_nid = 0;
 
-       low_cpu = cpus_weight(early_cpu_possible_map);
+       low_cpu = cpumask_weight(&early_cpu_possible_map);
 
        high_cpu = max(low_cpu, min_cpus);
        high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
--- a/xen/include/asm-ia64/linux/topology.h
+++ b/xen/include/asm-ia64/linux/topology.h
@@ -42,7 +42,7 @@
        ({                                                              \
                cpumask_t __tmp__;                                      \
                __tmp__ = node_to_cpumask(node);                        \
-               cpus_weight(__tmp__);                                   \
+               cpumask_weight(&__tmp__);                               \
        })
 #endif
 
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -26,12 +26,12 @@
  * void cpumask_andnot(dst, src1, src2)        dst = src1 & ~src2
  * void cpumask_complement(dst, src)   dst = ~src
  *
- * int cpus_equal(mask1, mask2)                Does mask1 == mask2?
- * int cpus_intersects(mask1, mask2)   Do mask1 and mask2 intersect?
- * int cpus_subset(mask1, mask2)       Is mask1 a subset of mask2?
- * int cpus_empty(mask)                        Is mask empty (no bits sets)?
- * int cpus_full(mask)                 Is mask full (all bits sets)?
- * int cpus_weight(mask)               Hamming weigh - number of set bits
+ * int cpumask_equal(mask1, mask2)     Does mask1 == mask2?
+ * int cpumask_intersects(mask1, mask2)        Do mask1 and mask2 intersect?
+ * int cpumask_subset(mask1, mask2)    Is mask1 a subset of mask2?
+ * int cpumask_empty(mask)             Is mask empty (no bits set)?
+ * int cpumask_full(mask)              Is mask full (all bits set)?
+ * int cpumask_weight(mask)            Hamming weight - number of set bits
  *
  * void cpumask_shift_right(dst, src, n) Shift right
  * void cpumask_shift_left(dst, src, n)        Shift left
@@ -42,9 +42,7 @@
  * int cycle_cpu(cpu, mask)            Next cpu cycling from 'cpu', or NR_CPUS
  *
  * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
- * CPU_MASK_ALL                                Initializer - all bits set
- * CPU_MASK_NONE                       Initializer - no bits set
- * unsigned long *cpus_addr(mask)      Array of unsigned long's in mask
+ * unsigned long *cpumask_bits(mask)   Array of unsigned long's in mask
  *
  * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
  * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
@@ -168,51 +166,37 @@ static inline void cpumask_complement(cp
        bitmap_complement(dstp->bits, srcp->bits, nr_cpumask_bits);
 }
 
-#define cpumask_equal(src1, src2) __cpus_equal(src1, src2, nr_cpu_ids)
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), nr_cpu_ids)
-static inline int __cpus_equal(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
+static inline int cpumask_equal(const cpumask_t *src1p,
+                               const cpumask_t *src2p)
 {
-       return bitmap_equal(src1p->bits, src2p->bits, nbits);
+       return bitmap_equal(src1p->bits, src2p->bits, nr_cpu_ids);
 }
 
-#define cpumask_intersects(src1, src2) \
-       __cpus_intersects(src1, src2, nr_cpu_ids)
-#define cpus_intersects(src1, src2) \
-       __cpus_intersects(&(src1), &(src2), nr_cpu_ids)
-static inline int __cpus_intersects(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
+static inline int cpumask_intersects(const cpumask_t *src1p,
+                                    const cpumask_t *src2p)
 {
-       return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+       return bitmap_intersects(src1p->bits, src2p->bits, nr_cpu_ids);
 }
 
-#define cpumask_subset(src1, src2) __cpus_subset(src1, src2, nr_cpu_ids)
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), nr_cpu_ids)
-static inline int __cpus_subset(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
+static inline int cpumask_subset(const cpumask_t *src1p,
+                                const cpumask_t *src2p)
 {
-       return bitmap_subset(src1p->bits, src2p->bits, nbits);
+       return bitmap_subset(src1p->bits, src2p->bits, nr_cpu_ids);
 }
 
-#define cpumask_empty(src) __cpus_empty(src, nr_cpu_ids)
-#define cpus_empty(src) __cpus_empty(&(src), nr_cpu_ids)
-static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
+static inline int cpumask_empty(const cpumask_t *srcp)
 {
-       return bitmap_empty(srcp->bits, nbits);
+       return bitmap_empty(srcp->bits, nr_cpu_ids);
 }
 
-#define cpumask_full(cpumask) __cpus_full(cpumask, nr_cpu_ids)
-#define cpus_full(cpumask) __cpus_full(&(cpumask), nr_cpu_ids)
-static inline int __cpus_full(const cpumask_t *srcp, int nbits)
+static inline int cpumask_full(const cpumask_t *srcp)
 {
-       return bitmap_full(srcp->bits, nbits);
+       return bitmap_full(srcp->bits, nr_cpu_ids);
 }
 
-#define cpumask_weight(cpumask) __cpus_weight(cpumask, nr_cpu_ids)
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
-static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
+static inline int cpumask_weight(const cpumask_t *srcp)
 {
-       return bitmap_weight(srcp->bits, nbits);
+       return bitmap_weight(srcp->bits, nr_cpu_ids);
 }
 
 static inline void cpumask_copy(cpumask_t *dstp, const cpumask_t *srcp)
@@ -317,7 +301,6 @@ static inline const cpumask_t *cpumask_o
 } }
 #endif /* __ia64__ */
 
-#define cpus_addr(src) ((src).bits)
 #define cpumask_bits(maskp) ((maskp)->bits)
 
 static inline int cpumask_scnprintf(char *buf, int len,
@@ -458,9 +441,9 @@ extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_present_map;
 
 #if NR_CPUS > 1
-#define num_online_cpus()      cpus_weight(cpu_online_map)
-#define num_possible_cpus()    cpus_weight(cpu_possible_map)
-#define num_present_cpus()     cpus_weight(cpu_present_map)
+#define num_online_cpus()      cpumask_weight(&cpu_online_map)
+#define num_possible_cpus()    cpumask_weight(&cpu_possible_map)
+#define num_present_cpus()     cpumask_weight(&cpu_present_map)
 #define cpu_online(cpu)                cpu_isset((cpu), cpu_online_map)
 #define cpu_possible(cpu)      cpu_isset((cpu), cpu_possible_map)
 #define cpu_present(cpu)       cpu_isset((cpu), cpu_present_map)
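
For reference, a minimal self-contained model of the pointer-based
accessors (a sketch only, using GCC's __builtin_popcountl and a
fixed-size mask; the real header instead wraps Xen's bitmap_*()
helpers and operates on nr_cpu_ids bits, as in the hunks above):

    #include <limits.h>

    #define NR_CPUS 256
    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    typedef struct cpumask {
        unsigned long bits[BITS_TO_LONGS(NR_CPUS)];
    } cpumask_t;

    /* Model of cpumask_weight(): count set bits through the pointer. */
    static inline int cpumask_weight(const cpumask_t *srcp)
    {
        unsigned int i;
        int w = 0;

        for ( i = 0; i < BITS_TO_LONGS(NR_CPUS); i++ )
            w += __builtin_popcountl(srcp->bits[i]);
        return w;
    }

    /* Model of cpumask_subset(): is every bit of src1 also in src2? */
    static inline int cpumask_subset(const cpumask_t *src1p,
                                     const cpumask_t *src2p)
    {
        unsigned int i;

        for ( i = 0; i < BITS_TO_LONGS(NR_CPUS); i++ )
            if ( src1p->bits[i] & ~src2p->bits[i] )
                return 0;
        return 1;
    }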


Attachment: eliminate-cpus_xyz.patch
Description: Text document
