[Xen-devel] [PATCH 5/6] eliminate first_cpu() etc

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 5/6] eliminate first_cpu() etc
From: "Jan Beulich" <JBeulich@xxxxxxxx>
Date: Mon, 07 Nov 2011 09:59:11 +0000
Delivery-date: Mon, 07 Nov 2011 02:13:46 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
This includes the conversion from for_each_cpu_mask() to for_each_cpu().
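For reference, a minimal sketch of the calling convention this converts to (illustrative only, not part of the patch; count_cpus_in() is a made-up helper): masks are now handed to the iterators and accessors by pointer, with the first_cpu()/next_cpu()/last_cpu()/cycle_cpu() wrappers replaced by cpumask_first()/cpumask_next()/cpumask_last()/cpumask_cycle().

#include <xen/cpumask.h>

/* Illustrative helper, not part of the patch: count the CPUs set in a
 * mask using the pointer-based iterator. */
static unsigned int count_cpus_in(const cpumask_t *mask)
{
    unsigned int cpu, n = 0;

    for_each_cpu ( cpu, mask )      /* was: for_each_cpu_mask(cpu, *mask) */
        n++;

    return n;
}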

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/ia64/linux-xen/iosapic.c
+++ b/xen/arch/ia64/linux-xen/iosapic.c
@@ -704,7 +704,7 @@ get_target_cpu (unsigned int gsi, int ve
 
                cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
 
-               for_each_cpu_mask(numa_cpu, cpu_mask) {
+               for_each_cpu(numa_cpu, &cpu_mask) {
                        if (!cpu_online(numa_cpu))
                                cpumask_clear_cpu(numa_cpu, &cpu_mask);
                }
@@ -717,8 +717,8 @@ get_target_cpu (unsigned int gsi, int ve
                /* Use vector assigment to distribute across cpus in node */
                cpu_index = vector % num_cpus;
 
-               for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-                       numa_cpu = next_cpu(numa_cpu, cpu_mask);
+               for (numa_cpu = cpumask_first(&cpu_mask) ; i < cpu_index ; i++)
+                       numa_cpu = cpumask_next(numa_cpu, &cpu_mask);
 
                if (numa_cpu != NR_CPUS)
                        return cpu_physical_id(numa_cpu);
--- a/xen/arch/ia64/linux-xen/mca.c
+++ b/xen/arch/ia64/linux-xen/mca.c
@@ -1415,7 +1415,7 @@ ia64_mca_cmc_poll (void *dummy)
 #endif
 {
        /* Trigger a CMC interrupt cascade  */
-       platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
+       platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
 /*
@@ -1505,7 +1505,7 @@ ia64_mca_cpe_poll (void *dummy)
 #endif
 {
        /* Trigger a CPE interrupt cascade  */
-       platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+       platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
 #endif /* CONFIG_ACPI */
--- a/xen/arch/ia64/linux-xen/smp.c
+++ b/xen/arch/ia64/linux-xen/smp.c
@@ -462,7 +462,7 @@ on_selected_cpus(const cpumask_t *select
        call_data = &data;
        wmb();
 
-       for_each_cpu_mask(cpu, *selected)
+       for_each_cpu(cpu, selected)
                send_IPI_single(cpu, IPI_CALL_FUNC);
 
        while (atomic_read(wait ? &data.finished : &data.started) != nr_cpus)
--- a/xen/arch/ia64/linux-xen/smpboot.c
+++ b/xen/arch/ia64/linux-xen/smpboot.c
@@ -687,9 +687,9 @@ clear_cpu_sibling_map(int cpu)
 {
        int i;
 
-       for_each_cpu_mask(i, *per_cpu(cpu_sibling_mask, cpu))
+       for_each_cpu(i, per_cpu(cpu_sibling_mask, cpu))
                cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, i));
-       for_each_cpu_mask(i, *per_cpu(cpu_core_mask, cpu))
+       for_each_cpu(i, per_cpu(cpu_core_mask, cpu))
                cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, i));
 
        cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
--- a/xen/arch/ia64/vmx/vacpi.c
+++ b/xen/arch/ia64/vmx/vacpi.c
@@ -191,7 +191,7 @@ void vacpi_init(struct domain *d)
        s->last_gtime = NOW();
 
        /* Set up callback to fire SCIs when the MSB of TMR_VAL changes */
-       init_timer(&s->timer, pmt_timer_callback, d, first_cpu(cpu_online_map));
+       init_timer(&s->timer, pmt_timer_callback, d, cpumask_first(&cpu_online_map));
        pmt_timer_callback(d);
 }
 
--- a/xen/arch/ia64/xen/dom0_ops.c
+++ b/xen/arch/ia64/xen/dom0_ops.c
@@ -618,7 +618,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
         XEN_GUEST_HANDLE_64(uint32) arr;
         uint32_t i, val, max_array_ent = ti->max_cpu_index;
 
-        ti->max_cpu_index = last_cpu(cpu_online_map);
+        ti->max_cpu_index = cpumask_last(&cpu_online_map);
         max_array_ent = min(max_array_ent, ti->max_cpu_index);
 
         arr = ti->cpu_to_core;
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -501,7 +501,7 @@ int vcpu_initialise(struct vcpu *v)
 
        if (!VMX_DOMAIN(v))
                init_timer(&v->arch.hlt_timer, hlt_timer_fn, v,
-                          first_cpu(cpu_online_map));
+                          cpumask_any(&cpu_online_map));
 
        return 0;
 }
--- a/xen/arch/ia64/xen/vhpt.c
+++ b/xen/arch/ia64/xen/vhpt.c
@@ -463,7 +463,7 @@ __domain_flush_vtlb_track_entry(struct d
                                local_purge = 0;
                }
        } else {
-               for_each_cpu_mask(cpu, entry->pcpu_dirty_mask) {
+               for_each_cpu(cpu, &entry->pcpu_dirty_mask) {
                        /* Invalidate VHPT entries.  */
                        cpu_flush_vhpt_range(cpu, vaddr, 1L << ps);
 
@@ -559,7 +559,7 @@ void flush_tlb_mask(const cpumask_t *mas
     if (cpumask_subset(mask, cpumask_of(cpu)))
         return;
 
-    for_each_cpu_mask (cpu, *mask)
+    for_each_cpu (cpu, mask)
         if (cpu != smp_processor_id())
             smp_call_function_single
                 (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1);
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -251,7 +251,7 @@ void cpuidle_wakeup_mwait(cpumask_t *mas
     cpumask_and(&target, mask, &cpuidle_mwait_flags);
 
     /* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
-    for_each_cpu_mask(cpu, target)
+    for_each_cpu(cpu, &target)
         mwait_wakeup(cpu) = 0;
 
     cpumask_andnot(mask, mask, &target);
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -487,7 +487,7 @@ static int acpi_cpufreq_target(struct cp
         return -EAGAIN;
     }
 
-    for_each_cpu_mask(j, online_policy_cpus)
+    for_each_cpu(j, &online_policy_cpus)
         cpufreq_statistic_update(j, perf->state, next_perf_state);
 
     perf->state = next_perf_state;
--- a/xen/arch/x86/acpi/cpufreq/powernow.c
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c
@@ -130,7 +130,7 @@ static int powernow_cpufreq_target(struc
 
     on_selected_cpus(cmd.mask, transition_pstate, &cmd, 1);
 
-    for_each_cpu_mask(j, online_policy_cpus)
+    for_each_cpu(j, &online_policy_cpus)
         cpufreq_statistic_update(j, perf->state, next_perf_state);
 
     perf->state = next_perf_state;
--- a/xen/arch/x86/genapic/x2apic.c
+++ b/xen/arch/x86/genapic/x2apic.c
@@ -72,7 +72,7 @@ static void __send_IPI_mask_x2apic(
 
     local_irq_save(flags);
 
-    for_each_cpu_mask ( cpu, *cpumask )
+    for_each_cpu ( cpu, cpumask )
     {
         if ( !cpu_online(cpu) || (cpu == smp_processor_id()) )
             continue;
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -182,7 +182,7 @@ again:
     now = NOW();
 
     /* find all expired events */
-    for_each_cpu_mask(cpu, *ch->cpumask)
+    for_each_cpu(cpu, ch->cpumask)
     {
         s_time_t deadline;
 
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -125,7 +125,7 @@ static int __init __bind_irq_vector(int 
     if ( desc->arch.vector != IRQ_VECTOR_UNASSIGNED )
         return -EBUSY;
     trace_irq_mask(TRC_HW_IRQ_BIND_VECTOR, irq, vector, &online_mask);
-    for_each_cpu_mask(cpu, online_mask)
+    for_each_cpu(cpu, &online_mask)
         per_cpu(vector_irq, cpu)[vector] = irq;
     desc->arch.vector = vector;
     cpumask_copy(desc->arch.cpu_mask, &online_mask);
@@ -223,7 +223,7 @@ static void __clear_irq_vector(int irq)
     vector = desc->arch.vector;
     cpumask_and(&tmp_mask, desc->arch.cpu_mask, &cpu_online_map);
 
-    for_each_cpu_mask(cpu, tmp_mask) {
+    for_each_cpu(cpu, &tmp_mask) {
         ASSERT( per_cpu(vector_irq, cpu)[vector] == irq );
         per_cpu(vector_irq, cpu)[vector] = -1;
     }
@@ -248,7 +248,7 @@ static void __clear_irq_vector(int irq)
     old_vector = desc->arch.old_vector;
     cpumask_and(&tmp_mask, desc->arch.old_cpu_mask, &cpu_online_map);
 
-    for_each_cpu_mask(cpu, tmp_mask) {
+    for_each_cpu(cpu, &tmp_mask) {
         ASSERT( per_cpu(vector_irq, cpu)[old_vector] == irq );
         TRACE_3D(TRC_HW_IRQ_MOVE_FINISH, irq, old_vector, cpu);
         per_cpu(vector_irq, cpu)[old_vector] = -1;
@@ -455,7 +455,7 @@ static int __assign_irq_vector(
     else
         irq_used_vectors = irq_get_used_vector_mask(irq);
 
-    for_each_cpu_mask(cpu, *mask) {
+    for_each_cpu(cpu, mask) {
         int new_cpu;
         int vector, offset;
 
@@ -485,7 +485,7 @@ next:
             && test_bit(vector, irq_used_vectors) )
             goto next;
 
-        for_each_cpu_mask(new_cpu, tmp_mask)
+        for_each_cpu(new_cpu, &tmp_mask)
             if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                 goto next;
         /* Found one! */
@@ -497,7 +497,7 @@ next:
             desc->arch.old_vector = desc->arch.vector;
         }
         trace_irq_mask(TRC_HW_IRQ_ASSIGN_VECTOR, irq, vector, &tmp_mask);
-        for_each_cpu_mask(new_cpu, tmp_mask)
+        for_each_cpu(new_cpu, &tmp_mask)
             per_cpu(vector_irq, new_cpu)[vector] = irq;
         desc->arch.vector = vector;
         cpumask_copy(desc->arch.cpu_mask, &tmp_mask);
--- a/xen/arch/x86/microcode.c
+++ b/xen/arch/x86/microcode.c
@@ -125,7 +125,7 @@ static long do_microcode_update(void *_i
     if ( error )
         info->error = error;
 
-    info->cpu = next_cpu(info->cpu, cpu_online_map);
+    info->cpu = cpumask_next(info->cpu, &cpu_online_map);
     if ( info->cpu < nr_cpu_ids )
         return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);
 
@@ -158,7 +158,7 @@ int microcode_update(XEN_GUEST_HANDLE(co
 
     info->buffer_size = len;
     info->error = 0;
-    info->cpu = first_cpu(cpu_online_map);
+    info->cpu = cpumask_first(&cpu_online_map);
 
     return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);
 }
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -366,7 +366,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
             goto out;
         guest_from_compat_handle(idletimes, op->u.getidletime.idletime);
 
-        for_each_cpu_mask ( cpu, *cpumap )
+        for_each_cpu ( cpu, cpumap )
         {
             if ( idle_vcpu[cpu] == NULL )
                 cpumask_clear_cpu(cpu, cpumap);
@@ -460,7 +460,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
                 g_info->flags |= XEN_PCPU_FLAGS_ONLINE;
         }
 
-        g_info->max_present = last_cpu(cpu_present_map);
+        g_info->max_present = cpumask_last(&cpu_present_map);
 
         put_cpu_maps();
 
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -229,9 +229,9 @@ static void __init normalise_cpu_order(v
          * Find remaining CPU with longest-prefix match on APIC ID.
          * Among identical longest-prefix matches, pick the smallest APIC ID.
          */
-        for ( j = next_cpu(i, cpu_present_map);
+        for ( j = cpumask_next(i, &cpu_present_map);
               j < nr_cpu_ids;
-              j = next_cpu(j, cpu_present_map) )
+              j = cpumask_next(j, &cpu_present_map) )
         {
             diff = x86_cpu_to_apicid[j] ^ apicid;
             while ( diff & (diff-1) )
@@ -248,12 +248,12 @@ static void __init normalise_cpu_order(v
         /* If no match then there must be no CPUs remaining to consider. */
         if ( min_cpu >= nr_cpu_ids )
         {
-            BUG_ON(next_cpu(i, cpu_present_map) < nr_cpu_ids);
+            BUG_ON(cpumask_next(i, &cpu_present_map) < nr_cpu_ids);
             break;
         }
 
         /* Switch the best-matching CPU with the next CPU in logical order. */
-        j = next_cpu(i, cpu_present_map);
+        j = cpumask_next(i, &cpu_present_map);
         apicid = x86_cpu_to_apicid[min_cpu];
         x86_cpu_to_apicid[min_cpu] = x86_cpu_to_apicid[j];
         x86_cpu_to_apicid[j] = apicid;
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -182,7 +182,7 @@ void send_IPI_mask_phys(const cpumask_t 
 
     local_irq_save(flags);
 
-    for_each_cpu_mask ( query_cpu, *mask )
+    for_each_cpu ( query_cpu, mask )
     {
         if ( !cpu_online(query_cpu) || (query_cpu == smp_processor_id()) )
             continue;
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -248,7 +248,7 @@ static void set_cpu_sibling_map(int cpu)
 
     if ( c[cpu].x86_num_siblings > 1 )
     {
-        for_each_cpu_mask ( i, cpu_sibling_setup_map )
+        for_each_cpu ( i, &cpu_sibling_setup_map )
         {
             if ( cpu_has(c, X86_FEATURE_TOPOEXT) ) {
                 if ( (c[cpu].phys_proc_id == c[i].phys_proc_id) &&
@@ -273,7 +273,7 @@ static void set_cpu_sibling_map(int cpu)
         return;
     }
 
-    for_each_cpu_mask ( i, cpu_sibling_setup_map )
+    for_each_cpu ( i, &cpu_sibling_setup_map )
     {
         if ( c[cpu].phys_proc_id == c[i].phys_proc_id )
         {
@@ -814,7 +814,7 @@ remove_siblinginfo(int cpu)
     int sibling;
     struct cpuinfo_x86 *c = cpu_data;
 
-    for_each_cpu_mask ( sibling, *per_cpu(cpu_core_mask, cpu) )
+    for_each_cpu ( sibling, per_cpu(cpu_core_mask, cpu) )
     {
         cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, sibling));
         /* Last thread sibling in this cpu core going down. */
@@ -822,7 +822,7 @@ remove_siblinginfo(int cpu)
             c[sibling].booted_cores--;
     }
    
-    for_each_cpu_mask(sibling, *per_cpu(cpu_sibling_mask, cpu))
+    for_each_cpu(sibling, per_cpu(cpu_sibling_mask, cpu))
         cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, sibling));
     cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
     cpumask_clear(per_cpu(cpu_core_mask, cpu));
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -103,7 +103,7 @@ long arch_do_sysctl(
         uint32_t i, max_cpu_index, last_online_cpu;
         xen_sysctl_topologyinfo_t *ti = &sysctl->u.topologyinfo;
 
-        last_online_cpu = last_cpu(cpu_online_map);
+        last_online_cpu = cpumask_last(&cpu_online_map);
         max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
         ti->max_cpu_index = last_online_cpu;
 
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -205,7 +205,7 @@ void enable_nonboot_cpus(void)
 
     printk("Enabling non-boot CPUs  ...\n");
 
-    for_each_cpu_mask ( cpu, frozen_cpus )
+    for_each_cpu ( cpu, &frozen_cpus )
     {
         if ( (error = cpu_up(cpu)) )
         {
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -494,7 +494,7 @@ int cpupool_do_sysctl(struct xen_sysctl_
                         op->cpupool_id, cpu);
         spin_lock(&cpupool_lock);
         if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
-            cpu = first_cpu(cpupool_free_cpus);
+            cpu = cpumask_first(&cpupool_free_cpus);
         ret = -EINVAL;
         if ( cpu >= nr_cpu_ids )
             goto addcpu_out;
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -190,7 +190,7 @@ static unsigned int default_vcpu0_locati
     cpu = cpumask_first(&cpu_exclude_map);
     if ( cpumask_weight(&cpu_exclude_map) > 1 )
         cpu = cpumask_next(cpu, &cpu_exclude_map);
-    for_each_cpu_mask(i, *online)
+    for_each_cpu(i, online)
     {
         if ( cpumask_test_cpu(i, &cpu_exclude_map) )
             continue;
@@ -541,7 +541,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
 
             cpu = (i == 0) ?
                 default_vcpu0_location(online) :
-                cycle_cpu(d->vcpu[i-1]->processor, *online);
+                cpumask_cycle(d->vcpu[i-1]->processor, online);
 
             if ( alloc_vcpu(d, i, cpu) == NULL )
                 goto maxvcpu_out;
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -128,7 +128,7 @@ static void dump_registers(unsigned char
         return;
 
     /* Normal handling: synchronously dump the remaining CPUs' states. */
-    for_each_cpu_mask ( cpu, dump_execstate_mask )
+    for_each_cpu ( cpu, &dump_execstate_mask )
     {
         smp_send_state_dump(cpu);
         while ( cpumask_test_cpu(cpu, &dump_execstate_mask) )
--- a/xen/common/perfc.c
+++ b/xen/common/perfc.c
@@ -211,14 +211,14 @@ static int perfc_copy_info(XEN_GUEST_HAN
         {
         case TYPE_SINGLE:
         case TYPE_S_SINGLE:
-            for_each_cpu_mask ( cpu, perfc_cpumap )
+            for_each_cpu ( cpu, &perfc_cpumap )
                 perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
             ++j;
             break;
         case TYPE_ARRAY:
         case TYPE_S_ARRAY:
             memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
-            for_each_cpu_mask ( cpu, perfc_cpumap )
+            for_each_cpu ( cpu, &perfc_cpumap )
             {
                 perfc_t *counters = per_cpu(perfcounters, cpu) + j;
                 unsigned int k;
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -521,7 +521,7 @@ runq_tickle(const struct scheduler *ops,
     cpumask_andnot(&mask, &rqd->active, &rqd->idle);
     cpumask_andnot(&mask, &mask, &rqd->tickled);
 
-    for_each_cpu_mask(i, mask)
+    for_each_cpu(i, &mask)
     {
         struct csched_vcpu * cur;
 
@@ -1051,7 +1051,7 @@ choose_cpu(const struct scheduler *ops, 
         else
         {
             d2printk("d%dv%d +\n", svc->vcpu->domain->domain_id, 
svc->vcpu->vcpu_id);
-            new_cpu = first_cpu(svc->migrate_rqd->active);
+            new_cpu = cpumask_first(&svc->migrate_rqd->active);
             goto out_up;
         }
     }
@@ -1061,7 +1061,7 @@ choose_cpu(const struct scheduler *ops, 
     min_avgload = MAX_LOAD;
 
     /* Find the runqueue with the lowest instantaneous load */
-    for_each_cpu_mask(i, prv->active_queues)
+    for_each_cpu(i, &prv->active_queues)
     {
         struct csched_runqueue_data *rqd;
         s_time_t rqd_avgload;
@@ -1099,7 +1099,7 @@ choose_cpu(const struct scheduler *ops, 
     else
     {
         BUG_ON(cpumask_empty(&prv->rqd[min_rqi].active));
-        new_cpu = first_cpu(prv->rqd[min_rqi].active);
+        new_cpu = cpumask_first(&prv->rqd[min_rqi].active);
     }
 
 out_up:
@@ -1179,7 +1179,7 @@ void migrate(const struct scheduler *ops
             on_runq=1;
         }
         __runq_deassign(svc);
-        svc->vcpu->processor = first_cpu(trqd->active);
+        svc->vcpu->processor = cpumask_first(&trqd->active);
         __runq_assign(svc, trqd);
         if ( on_runq )
         {
@@ -1219,7 +1219,7 @@ retry:
 
     st.load_delta = 0;
 
-    for_each_cpu_mask(i, prv->active_queues)
+    for_each_cpu(i, &prv->active_queues)
     {
         s_time_t delta;
         
@@ -1618,7 +1618,7 @@ csched_schedule(
         {
             int rq;
             other_rqi = -2;
-            for_each_cpu_mask ( rq, CSCHED_PRIV(ops)->active_queues )
+            for_each_cpu ( rq, &CSCHED_PRIV(ops)->active_queues )
             {
                 if ( scurr->rqd == &CSCHED_PRIV(ops)->rqd[rq] )
                 {
@@ -1803,7 +1803,7 @@ csched_dump(const struct scheduler *ops)
            "\tdefault-weight     = %d\n",
            cpumask_weight(&prv->active_queues),
            CSCHED_DEFAULT_WEIGHT);
-    for_each_cpu_mask(i, prv->active_queues)
+    for_each_cpu(i, &prv->active_queues)
     {
         s_time_t fraction;
         
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -442,7 +442,7 @@ static int sedf_pick_cpu(const struct sc
 
     online = SEDF_CPUONLINE(v->domain->cpupool);
     cpumask_and(&online_affinity, v->cpu_affinity, online);
-    return first_cpu(online_affinity);
+    return cpumask_first(&online_affinity);
 }
 
 /*
@@ -1322,7 +1322,7 @@ static int sedf_adjust_weights(struct cp
 {
     struct vcpu *p;
     struct domain      *d;
-    unsigned int        cpu, nr_cpus = last_cpu(cpu_online_map) + 1;
+    unsigned int        cpu, nr_cpus = cpumask_last(&cpu_online_map) + 1;
     int                *sumw = xzalloc_array(int, nr_cpus);
     s_time_t           *sumt = xzalloc_array(s_time_t, nr_cpus);
 
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1450,7 +1450,7 @@ void schedule_dump(struct cpupool *c)
     printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
     SCHED_OP(sched, dump_settings);
 
-    for_each_cpu_mask (i, *cpus)
+    for_each_cpu (i, cpus)
     {
         pcpu_schedule_lock(i);
         printk("CPU[%02d] ", i);
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -74,7 +74,7 @@ void cpumask_raise_softirq(const cpumask
     cpumask_t send_mask;
 
     cpumask_clear(&send_mask);
-    for_each_cpu_mask(cpu, *mask)
+    for_each_cpu(cpu, mask)
         if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
             cpumask_set_cpu(cpu, &send_mask);
 
--- a/xen/common/stop_machine.c
+++ b/xen/common/stop_machine.c
@@ -101,7 +101,7 @@ int stop_machine_run(int (*fn)(void *), 
 
     smp_wmb();
 
-    for_each_cpu_mask ( i, allbutself )
+    for_each_cpu ( i, &allbutself )
         tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i);
 
     stopmachine_set_state(STOPMACHINE_PREPARE);
--- a/xen/common/timer.c
+++ b/xen/common/timer.c
@@ -548,7 +548,7 @@ static struct keyhandler dump_timerq_key
 
 static void migrate_timers_from_cpu(unsigned int old_cpu)
 {
-    unsigned int new_cpu = first_cpu(cpu_online_map);
+    unsigned int new_cpu = cpumask_any(&cpu_online_map);
     struct timers *old_ts, *new_ts;
     struct timer *t;
     bool_t notify = 0;
--- a/xen/drivers/acpi/pmstat.c
+++ b/xen/drivers/acpi/pmstat.c
@@ -223,7 +223,7 @@ static int get_cpufreq_para(struct xen_s
 
     if ( !(affected_cpus = xzalloc_array(uint32_t, op->u.get_para.cpu_num)) )
         return -ENOMEM;
-    for_each_cpu_mask(cpu, *policy->cpus)
+    for_each_cpu(cpu, policy->cpus)
         affected_cpus[j++] = cpu;
     ret = copy_to_guest(op->u.get_para.affected_cpus,
                        affected_cpus, op->u.get_para.cpu_num);
--- a/xen/drivers/cpufreq/cpufreq_ondemand.c
+++ b/xen/drivers/cpufreq/cpufreq_ondemand.c
@@ -122,7 +122,7 @@ static void dbs_check_cpu(struct cpu_dbs
         return;
 
     /* Get Idle Time */
-    for_each_cpu_mask(j, *policy->cpus) {
+    for_each_cpu(j, policy->cpus) {
         uint64_t idle_ns, total_idle_ns;
         uint64_t load, load_freq, freq_avg;
         struct cpu_dbs_info_s *j_dbs_info;
@@ -233,7 +233,7 @@ int cpufreq_governor_dbs(struct cpufreq_
 
         dbs_enable++;
 
-        for_each_cpu_mask(j, *policy->cpus) {
+        for_each_cpu(j, policy->cpus) {
             struct cpu_dbs_info_s *j_dbs_info;
             j_dbs_info = &per_cpu(cpu_dbs_info, j);
             j_dbs_info->cur_policy = policy;
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1033,7 +1033,7 @@ static void dma_msi_set_affinity(struct 
     msg.address_lo = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
     msg.address_lo |= MSI_PHYSICAL_MODE << 2;
     msg.address_lo |= MSI_REDIRECTION_HINT_MODE << 3;
-    dest = cpu_physical_id(first_cpu(mask));
+    dest = cpu_physical_id(cpumask_first(mask));
     msg.address_lo |= dest << MSI_TARGET_CPU_SHIFT;
 #endif
 
--- a/xen/include/asm-ia64/linux-xen/asm/acpi.h
+++ b/xen/include/asm-ia64/linux-xen/asm/acpi.h
@@ -139,7 +139,7 @@ extern int __initdata nid_to_pxm_map[MAX
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)  \
-       for_each_cpu_mask((cpu), early_cpu_possible_map)
+       for_each_cpu(cpu, &early_cpu_possible_map)
 
 static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
 {
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -52,7 +52,7 @@ static inline int NEED_FLUSH(u32 cpu_sta
 #define tlbflush_filter(mask, page_timestamp)                           \
 do {                                                                    \
     unsigned int cpu;                                                   \
-    for_each_cpu_mask ( cpu, mask )                                     \
+    for_each_cpu ( cpu, &(mask) )                                       \
         if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) ) \
             cpumask_clear_cpu(cpu, &(mask));                            \
 } while ( 0 )
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -37,18 +37,19 @@
  * void cpumask_shift_right(dst, src, n) Shift right
  * void cpumask_shift_left(dst, src, n)        Shift left
  *
- * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
- * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
- * int last_cpu(mask)                  Number highest set bit, or NR_CPUS
- * int cycle_cpu(cpu, mask)            Next cpu cycling from 'cpu', or NR_CPUS
+ * int cpumask_first(mask)             Number lowest set bit, or NR_CPUS
+ * int cpumask_next(cpu, mask)         Next cpu past 'cpu', or NR_CPUS
+ * int cpumask_last(mask)              Number highest set bit, or NR_CPUS
+ * int cpumask_any(mask)               Any cpu in mask, or NR_CPUS
+ * int cpumask_cycle(cpu, mask)                Next cpu cycling from 'cpu', or NR_CPUS
  *
- * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
+ * const cpumask_t *cpumask_of(cpu)    Return cpumask with bit 'cpu' set
  * unsigned long *cpumask_bits(mask)   Array of unsigned long's in mask
  *
  * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
  * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
  *
- * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
+ * for_each_cpu(cpu, mask)             for-loop cpu over mask
  *
  * int num_online_cpus()               Number of online CPUs
  * int num_possible_cpus()             Number of all possible CPUs
@@ -210,42 +211,43 @@ static inline void cpumask_shift_left(cp
        bitmap_shift_left(dstp->bits, srcp->bits, n, nr_cpumask_bits);
 }
 
-#define cpumask_first(src) __first_cpu(src, nr_cpu_ids)
-#define first_cpu(src) __first_cpu(&(src), nr_cpu_ids)
-static inline int __first_cpu(const cpumask_t *srcp, int nbits)
+static inline int cpumask_first(const cpumask_t *srcp)
 {
-       return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+       return min_t(int, nr_cpu_ids, find_first_bit(srcp->bits, nr_cpu_ids));
 }
 
-#define cpumask_next(n, src) __next_cpu(n, src, nr_cpu_ids)
-#define next_cpu(n, src) __next_cpu((n), &(src), nr_cpu_ids)
-static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
+static inline int cpumask_next(int n, const cpumask_t *srcp)
 {
-       return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
+       /* -1 is a legal arg here. */
+       if (n != -1)
+               cpumask_check(n);
+
+       return min_t(int, nr_cpu_ids,
+                     find_next_bit(srcp->bits, nr_cpu_ids, n + 1));
 }
 
-#define cpumask_last(src) __last_cpu(src, nr_cpu_ids)
-#define last_cpu(src) __last_cpu(&(src), nr_cpu_ids)
-static inline int __last_cpu(const cpumask_t *srcp, int nbits)
+static inline int cpumask_last(const cpumask_t *srcp)
 {
-       int cpu, pcpu = nbits;
-       for (cpu = __first_cpu(srcp, nbits);
-            cpu < nbits;
-            cpu = __next_cpu(cpu, srcp, nbits))
+       int cpu, pcpu = nr_cpu_ids;
+
+       for (cpu = cpumask_first(srcp);
+            cpu < nr_cpu_ids;
+            cpu = cpumask_next(cpu, srcp))
                pcpu = cpu;
        return pcpu;
 }
 
-#define cpumask_cycle(n, src) __cycle_cpu(n, src, nr_cpu_ids)
-#define cycle_cpu(n, src) __cycle_cpu((n), &(src), nr_cpu_ids)
-static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
-{
-    int nxt = __next_cpu(n, srcp, nbits);
-    if (nxt == nbits)
-        nxt = __first_cpu(srcp, nbits);
+static inline int cpumask_cycle(int n, const cpumask_t *srcp)
+{
+    int nxt = cpumask_next(n, srcp);
+
+    if (nxt == nr_cpu_ids)
+        nxt = cpumask_first(srcp);
     return nxt;
 }
 
+#define cpumask_any(srcp) cpumask_first(srcp)
+
 /*
  * Special-case data structure for "single bit set only" constant CPU masks.
  *
@@ -262,8 +264,6 @@ static inline const cpumask_t *cpumask_o
        return (const cpumask_t *)(p - cpu / BITS_PER_LONG);
 }
 
-#define cpumask_of_cpu(cpu) (*cpumask_of(cpu))
-
 #if defined(__ia64__) /* XXX needs cleanup */
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
@@ -366,12 +366,13 @@ static inline void free_cpumask_var(cpum
 #endif
 
 #if NR_CPUS > 1
-#define for_each_cpu_mask(cpu, mask)           \
-       for ((cpu) = first_cpu(mask);           \
-               (cpu) < nr_cpu_ids;             \
-               (cpu) = next_cpu((cpu), (mask)))
+#define for_each_cpu(cpu, mask)                        \
+       for ((cpu) = cpumask_first(mask);       \
+            (cpu) < nr_cpu_ids;                \
+            (cpu) = cpumask_next(cpu, mask))
 #else /* NR_CPUS == 1 */
-#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_cpu(cpu, mask)                        \
+       for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)(mask))
 #endif /* NR_CPUS */
 
 /*
@@ -458,18 +459,9 @@ extern cpumask_t cpu_present_map;
     ((present) ? cpumask_set_cpu(cpu, &cpu_present_map) \
                : cpumask_clear_cpu(cpu, &cpu_present_map))
 
-#define any_online_cpu(mask)                   \
-({                                             \
-       int cpu;                                \
-       for_each_cpu_mask(cpu, (mask))          \
-               if (cpu_online(cpu))            \
-                       break;                  \
-       cpu;                                    \
-})
-
-#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)   for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu)  for_each_cpu_mask((cpu), cpu_present_map)
+#define for_each_possible_cpu(cpu) for_each_cpu(cpu, &cpu_possible_map)
+#define for_each_online_cpu(cpu)   for_each_cpu(cpu, &cpu_online_map)
+#define for_each_present_cpu(cpu)  for_each_cpu(cpu, &cpu_present_map)
 
 /* Copy to/from cpumap provided by control tools. */
 struct xenctl_cpumap;


Attachment: eliminate-first_cpu-etc.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel