[Xen-devel] [PATCH 2/6] eliminate cpu_test_xyz()

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 2/6] eliminate cpu_test_xyz()
From: "Jan Beulich" <JBeulich@xxxxxxxx>
Date: Mon, 07 Nov 2011 09:56:57 +0000
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
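
This patch converts the remaining users of the value-based accessors
cpu_isset(), cpu_test_and_set(), and cpu_test_and_clear() to their
pointer-based counterparts cpumask_test_cpu(), cpumask_test_and_set_cpu(),
and cpumask_test_and_clear_cpu(), and removes the old macros from
xen/include/xen/cpumask.h. The conversion is mechanical; a minimal sketch
of the pattern (the mask and helper names below are hypothetical, not
taken from the patch):

    /* Old interface: the mask is passed by value ... */
    if ( cpu_isset(cpu, some_mask) )
        do_work(cpu);

    /* ... new interface: the mask is passed by pointer. */
    if ( cpumask_test_cpu(cpu, &some_mask) )
        do_work(cpu);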

--- a/xen/arch/ia64/linux-xen/smp.c
+++ b/xen/arch/ia64/linux-xen/smp.c
@@ -68,7 +68,7 @@ void smp_send_event_check_mask(const cpu
     //printf("smp_send_event_check_mask called\n");
 
     for (cpu = 0; cpu < NR_CPUS; ++cpu)
-        if (cpu_isset(cpu, *mask) && cpu != smp_processor_id())
+        if (cpumask_test_cpu(cpu, mask) && cpu != smp_processor_id())
            platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
 #endif
--- a/xen/arch/ia64/linux-xen/smpboot.c
+++ b/xen/arch/ia64/linux-xen/smpboot.c
@@ -548,13 +548,13 @@ do_rest:
         */
        Dprintk("Waiting on callin_map ...");
        for (timeout = 0; timeout < 100000; timeout++) {
-               if (cpu_isset(cpu, cpu_callin_map))
+               if (cpumask_test_cpu(cpu, &cpu_callin_map))
                        break;  /* It has booted */
                udelay(100);
        }
        Dprintk("\n");
 
-       if (!cpu_isset(cpu, cpu_callin_map)) {
+       if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
                printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
                ia64_cpu_to_sapicid[cpu] = -1;
                cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
@@ -818,7 +818,7 @@ __cpu_up (unsigned int cpu)
         * Already booted cpu? not valid anymore since we dont
         * do idle loop tightspin anymore.
         */
-       if (cpu_isset(cpu, cpu_callin_map))
+       if (cpumask_test_cpu(cpu, &cpu_callin_map))
                return -EINVAL;
 
        if (!per_cpu(cpu_sibling_mask, cpu) &&
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -140,7 +140,7 @@ static void flush_cache_for_context_swit
 
        if (is_idle_vcpu(next) ||
            __test_and_clear_bit(cpu, &next->arch.cache_coherent_map)) {
-               if (cpu_test_and_clear(cpu, cpu_cache_coherent_map)) {
+               if (cpumask_test_and_clear_cpu(cpu, &cpu_cache_coherent_map)) {
                        unsigned long flags;
                        u64 progress = 0;
                        s64 status;
--- a/xen/arch/ia64/xen/regionreg.c
+++ b/xen/arch/ia64/xen/regionreg.c
@@ -320,8 +320,8 @@ int set_one_rr_efi(unsigned long rr, uns
        else {
                if (current && VMX_DOMAIN(current))
                        vpd = __get_cpu_var(inserted_vpd);
-               ia64_new_rr7_efi(val, cpu_isset(smp_processor_id(),
-                                percpu_set), vpd);
+               ia64_new_rr7_efi(val, cpumask_test_cpu(smp_processor_id(),
+                                                      &percpu_set), vpd);
        }
 
        return 1;
@@ -342,8 +342,8 @@ set_one_rr_efi_restore(unsigned long rr,
                   and strcut domain are initialized. */
                if (unlikely(current == NULL || current->domain == NULL ||
                             is_idle_vcpu(current)))
-                       ia64_new_rr7_efi(val, cpu_isset(smp_processor_id(),
-                                                       percpu_set),
+                       ia64_new_rr7_efi(val, cpumask_test_cpu(smp_processor_id(),
+                                                              &percpu_set),
                                         0UL);
                else if (VMX_DOMAIN(current))
                        __vmx_switch_rr7_vcpu(current, val);
--- a/xen/arch/ia64/xen/vhpt.c
+++ b/xen/arch/ia64/xen/vhpt.c
@@ -553,7 +553,7 @@ void flush_tlb_mask(const cpumask_t *mas
     int cpu;
 
     cpu = smp_processor_id();
-    if (cpu_isset(cpu, *mask))
+    if (cpumask_test_cpu(cpu, mask))
         flush_tlb_vhpt_all (NULL);
 
     if (cpumask_subset(mask, cpumask_of(cpu)))
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -624,7 +624,7 @@ void __cpuinit cpu_init(void)
                .limit = LAST_RESERVED_GDT_BYTE
        };
 
-       if (cpu_test_and_set(cpu, cpu_initialized)) {
+       if (cpumask_test_and_set_cpu(cpu, &cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;) local_irq_enable();
        }
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -234,7 +234,7 @@ void flush_area_mask(const cpumask_t *ma
 {
     ASSERT(local_irq_is_enabled());
 
-    if ( cpu_isset(smp_processor_id(), *mask) )
+    if ( cpumask_test_cpu(smp_processor_id(), mask) )
         flush_area_local(va, flags);
 
     if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -138,7 +138,7 @@ static void synchronize_tsc_master(unsig
         return;
 
     if ( boot_cpu_has(X86_FEATURE_TSC_RELIABLE) &&
-         !cpu_isset(slave, tsc_sync_cpu_mask) )
+         !cpumask_test_cpu(slave, &tsc_sync_cpu_mask) )
         return;
 
     for ( i = 1; i <= 5; i++ )
@@ -162,7 +162,7 @@ static void synchronize_tsc_slave(unsign
         return;
 
     if ( boot_cpu_has(X86_FEATURE_TSC_RELIABLE) &&
-         !cpu_isset(slave, tsc_sync_cpu_mask) )
+         !cpumask_test_cpu(slave, &tsc_sync_cpu_mask) )
         return;
 
     for ( i = 1; i <= 5; i++ )
@@ -956,7 +956,7 @@ int __cpu_up(unsigned int cpu)
         return ret;
 
     set_cpu_state(CPU_STATE_ONLINE);
-    while ( !cpu_isset(cpu, cpu_online_map) )
+    while ( !cpu_online(cpu) )
     {
         cpu_relax();
         process_pending_softirqs();
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1580,7 +1580,7 @@ void pit_broadcast_exit(void)
 {
     int cpu = smp_processor_id();
 
-    if ( cpu_test_and_clear(cpu, pit_broadcast_mask) )
+    if ( cpumask_test_and_clear_cpu(cpu, &pit_broadcast_mask) )
         reprogram_timer(this_cpu(timer_deadline));
 }
 
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -299,7 +299,7 @@ int cpupool_unassign_cpu(struct cpupool 
     ret = -EBUSY;
     if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
         goto out;
-    if ( cpu_isset(cpu, cpupool_locked_cpus) )
+    if ( cpumask_test_cpu(cpu, &cpupool_locked_cpus) )
         goto out;
 
     ret = 0;
@@ -499,7 +499,7 @@ int cpupool_do_sysctl(struct xen_sysctl_
         if ( cpu >= nr_cpu_ids )
             goto addcpu_out;
         ret = -EBUSY;
-        if ( !cpu_isset(cpu, cpupool_free_cpus) )
+        if ( !cpumask_test_cpu(cpu, &cpupool_free_cpus) )
             goto addcpu_out;
         c = cpupool_find_by_id(op->cpupool_id);
         ret = -ENOENT;
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -169,7 +169,7 @@ void kexec_crash_save_cpu(void)
     ELF_Prstatus *prstatus;
     crash_xen_core_t *xencore;
 
-    if ( cpu_test_and_set(cpu, crash_saved_cpus) )
+    if ( cpumask_test_and_set_cpu(cpu, &crash_saved_cpus) )
         return;
 
     prstatus = (ELF_Prstatus *)ELFNOTE_DESC(note);
@@ -187,7 +187,7 @@ crash_xen_info_t *kexec_crash_save_info(
     crash_xen_info_t info;
     crash_xen_info_t *out = (crash_xen_info_t *)ELFNOTE_DESC(xen_crash_note);
 
-    BUG_ON(!cpu_test_and_set(cpu, crash_saved_cpus));
+    BUG_ON(!cpumask_test_and_set_cpu(cpu, &crash_saved_cpus));
 
     memset(&info, 0, sizeof(info));
     info.xen_major_version = xen_major_version();
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1366,7 +1366,7 @@ csched_vcpu_migrate(
     struct csched_runqueue_data *trqd;
 
     /* Check if new_cpu is valid */
-    BUG_ON(!cpu_isset(new_cpu, CSCHED_PRIV(ops)->initialized));
+    BUG_ON(!cpumask_test_cpu(new_cpu, &CSCHED_PRIV(ops)->initialized));
 
     trqd = RQD(ops, new_cpu);
 
@@ -1602,10 +1602,10 @@ csched_schedule(
              scurr->vcpu->vcpu_id,
              now);
 
-    BUG_ON(!cpu_isset(cpu, CSCHED_PRIV(ops)->initialized));
+    BUG_ON(!cpumask_test_cpu(cpu, &CSCHED_PRIV(ops)->initialized));
 
     rqd = RQD(ops, cpu);
-    BUG_ON(!cpu_isset(cpu, rqd->active));
+    BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
 
     /* Protected by runqueue lock */        
 
@@ -1637,7 +1637,7 @@ csched_schedule(
     BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
 
     /* Clear "tickled" bit now that we've been scheduled */
-    if ( cpu_isset(cpu, rqd->tickled) )
+    if ( cpumask_test_cpu(cpu, &rqd->tickled) )
         cpu_clear(cpu, rqd->tickled);
 
     /* Update credits */
@@ -1708,7 +1708,7 @@ csched_schedule(
         }
 
         /* Clear the idle mask if necessary */
-        if ( cpu_isset(cpu, rqd->idle) )
+        if ( cpumask_test_cpu(cpu, &rqd->idle) )
             cpu_clear(cpu, rqd->idle);
 
         snext->start_time = now;
@@ -1724,7 +1724,7 @@ csched_schedule(
     else
     {
         /* Update the idle mask if necessary */
-        if ( !cpu_isset(cpu, rqd->idle) )
+        if ( !cpumask_test_cpu(cpu, &rqd->idle) )
             cpu_set(cpu, rqd->idle);
         /* Make sure avgload gets updated periodically even
          * if there's no activity */
@@ -1885,7 +1885,7 @@ static void init_pcpu(const struct sched
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    if ( cpu_isset(cpu, prv->initialized) )
+    if ( cpumask_test_cpu(cpu, &prv->initialized) )
     {
         printk("%s: Strange, cpu %d already initialized!\n", __func__, cpu);
         spin_unlock_irqrestore(&prv->lock, flags);
@@ -1912,7 +1912,7 @@ static void init_pcpu(const struct sched
     rqd=prv->rqd + rqi;
 
     printk("Adding cpu %d to runqueue %d\n", cpu, rqi);
-    if ( ! cpu_isset(rqi, prv->active_queues) )
+    if ( ! cpumask_test_cpu(rqi, &prv->active_queues) )
     {
         printk(" First cpu on runqueue, activating\n");
         activate_runqueue(prv, rqi);
@@ -1963,7 +1963,7 @@ csched_free_pdata(const struct scheduler
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    BUG_ON( !cpu_isset(cpu, prv->initialized));
+    BUG_ON(!cpumask_test_cpu(cpu, &prv->initialized));
     
     /* Find the old runqueue and remove this cpu from it */
     rqi = prv->runq_map[cpu];
@@ -1973,7 +1973,7 @@ csched_free_pdata(const struct scheduler
     /* No need to save IRQs here, they're already disabled */
     spin_lock(&rqd->lock);
 
-    BUG_ON(!cpu_isset(cpu, rqd->idle));
+    BUG_ON(!cpumask_test_cpu(cpu, &rqd->idle));
 
     printk("Removing cpu %d from runqueue %d\n", cpu, rqi);
 
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -820,7 +820,7 @@ static struct task_slice sedf_do_schedul
     /* Tasklet work (which runs in idle VCPU context) overrides all else. */
     if ( tasklet_work_scheduled ||
          (list_empty(runq) && list_empty(waitq)) ||
-         unlikely(!cpu_isset(cpu, *SEDF_CPUONLINE(per_cpu(cpupool, cpu)))) )
+         unlikely(!cpumask_test_cpu(cpu, SEDF_CPUONLINE(per_cpu(cpupool, cpu)))) )
     {
         ret.task = IDLETASK(cpu);
         ret.time = SECONDS(1);
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -323,7 +323,7 @@ int trace_will_trace_event(u32 event)
                 & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
         return 0;
 
-    if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
+    if ( !cpumask_test_cpu(smp_processor_id(), &tb_cpu_mask) )
         return 0;
 
     return 1;
@@ -711,7 +711,7 @@ void __trace_var(u32 event, bool_t cycle
                 & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
         return;
 
-    if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
+    if ( !cpumask_test_cpu(smp_processor_id(), &tb_cpu_mask) )
         return;
 
     /* Read tb_init_done /before/ t_bufs. */
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -19,6 +19,7 @@
  * void cpumask_clear(mask)            clear all bits
  * int cpumask_test_cpu(cpu, mask)     true iff bit 'cpu' set in mask
  * int cpumask_test_and_set_cpu(cpu, mask) test and set bit 'cpu' in mask
+ * int cpumask_test_and_clear_cpu(cpu, mask) test and clear bit 'cpu' in mask
  *
  * void cpumask_and(dst, src1, src2)   dst = src1 & src2  [intersection]
  * void cpumask_or(dst, src1, src2)    dst = src1 | src2  [union]
@@ -64,12 +65,12 @@
  * for_each_present_cpu(cpu)           for-loop cpu over cpu_present_map
  *
  * Subtlety:
- * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
+ * 1) The 'type-checked' form of cpumask_test_cpu() causes gcc (3.3.2, anyway)
  *    to generate slightly worse code.  Note for example the additional
  *    40 lines of assembly code compiling the "for each possible cpu"
  *    loops buried in the disk_stat_read() macros calls when compiling
  *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
- *    one-line #define for cpu_isset(), instead of wrapping an inline
+ *    one-line #define for cpumask_test_cpu(), instead of wrapping an inline
  *    inside a macro, the way we do the other calls.
  */
 
@@ -121,17 +122,12 @@ static inline void cpumask_clear(cpumask
 /* No static inline type checking - see Subtlety (1) above. */
 #define cpumask_test_cpu(cpu, cpumask) \
        test_bit(cpumask_check(cpu), (cpumask)->bits)
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
 
-#define cpu_test_and_set(cpu, cpumask) \
-       cpumask_test_and_set_cpu(cpu, &(cpumask))
 static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
 {
        return test_and_set_bit(cpumask_check(cpu), addr->bits);
 }
 
-#define cpu_test_and_clear(cpu, cpumask) \
-       cpumask_test_and_clear_cpu(cpu, &(cpumask))
 static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
 {
        return test_and_clear_bit(cpumask_check(cpu), addr->bits);
@@ -444,9 +440,9 @@ extern cpumask_t cpu_present_map;
 #define num_online_cpus()      cpumask_weight(&cpu_online_map)
 #define num_possible_cpus()    cpumask_weight(&cpu_possible_map)
 #define num_present_cpus()     cpumask_weight(&cpu_present_map)
-#define cpu_online(cpu)                cpu_isset((cpu), cpu_online_map)
-#define cpu_possible(cpu)      cpu_isset((cpu), cpu_possible_map)
-#define cpu_present(cpu)       cpu_isset((cpu), cpu_present_map)
+#define cpu_online(cpu)                cpumask_test_cpu(cpu, &cpu_online_map)
+#define cpu_possible(cpu)      cpumask_test_cpu(cpu, &cpu_possible_map)
+#define cpu_present(cpu)       cpumask_test_cpu(cpu, &cpu_present_map)
 #else
 #define num_online_cpus()      1
 #define num_possible_cpus()    1
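
For reference, the test-and-set form is the usual once-only guard, as in
cpu_init() and kexec_crash_save_cpu() above. A minimal sketch of that
pattern (the mask and function names here are hypothetical):

    static cpumask_t init_done_mask;

    void per_cpu_init_once(unsigned int cpu)
    {
        /* Atomically set this CPU's bit; a nonzero return means it was
         * already set, i.e. this CPU has been initialized before. */
        if ( cpumask_test_and_set_cpu(cpu, &init_done_mask) )
            return;

        /* ... one-time per-CPU setup would go here ... */
    }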


Attachment: eliminate-cpu_test_xyz.patch
