[Xen-devel] [PATCH] remove unused parameter of on_selected_cpus() & Co

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] remove unused parameter of on_selected_cpus() & Co
From: "Jan Beulich" <JBeulich@xxxxxxxxxx>
Date: Mon, 25 May 2009 14:35:09 +0100
Delivery-date: Mon, 25 May 2009 06:35:42 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Remove the unused "retry" parameter of on_selected_cpus(),
on_each_cpu(), smp_call_function(), and smp_call_function_single().

IA64 changes compile-tested only.

This will apply cleanly only on top of the previously sent cpumask-passing
patch.
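
To illustrate the interface change (a sketch only, not part of the patch):
the declarations in xen/include/xen/smp.h lose their third argument, so
callers simply drop the dummy value they previously had to pass. The
placeholder name "func" below is hypothetical:

  /* Before: "retry" was accepted but never evaluated. */
  int smp_call_function(void (*func)(void *info), void *info,
                        int retry, int wait);

  /* After: */
  int smp_call_function(void (*func)(void *info), void *info, int wait);

  /* Typical caller update, e.g. run func on all online CPUs and wait: */
  on_each_cpu(func, NULL, 1);   /* was: on_each_cpu(func, NULL, 1, 1) */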

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

--- 2009-05-19.orig/xen/arch/ia64/linux-xen/mca.c       2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/ia64/linux-xen/mca.c    2009-05-25 08:57:39.000000000 +0200
@@ -956,7 +956,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
 static void
 ia64_mca_cmc_vector_disable_keventd(void *unused)
 {
-       on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+       on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
 }
 
 /*
@@ -968,7 +968,7 @@ ia64_mca_cmc_vector_disable_keventd(void
 static void
 ia64_mca_cmc_vector_enable_keventd(void *unused)
 {
-       on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+       on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
 }
 #endif /* !XEN */
 
--- 2009-05-19.orig/xen/arch/ia64/linux-xen/perfmon.c   2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/ia64/linux-xen/perfmon.c        2009-05-25 09:06:23.000000000 +0200
@@ -1895,7 +1895,7 @@ pfm_syswide_cleanup_other_cpu(pfm_contex
        int ret;
 
        DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-       ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+       ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
        DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
@@ -6895,7 +6895,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_h
        }
 
        /* save the current system wide pmu states */
-       ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+       ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
        if (ret) {
                DPRINT(("on_each_cpu() failed: %d\n", ret));
                goto cleanup_reserve;
@@ -6940,7 +6940,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_ha
 
        pfm_alt_intr_handler = NULL;
 
-       ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+       ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
        if (ret) {
                DPRINT(("on_each_cpu() failed: %d\n", ret));
        }
@@ -7499,7 +7499,7 @@ xenpfm_context_load(XEN_GUEST_HANDLE(pfa
 
        BUG_ON(in_irq());
        spin_lock(&xenpfm_context_lock);
-       smp_call_function(&xenpfm_context_load_cpu, &arg, 1, 1);
+       smp_call_function(&xenpfm_context_load_cpu, &arg, 1);
        xenpfm_context_load_cpu(&arg);
        spin_unlock(&xenpfm_context_lock);
        for_each_online_cpu(cpu) {
@@ -7553,7 +7553,7 @@ xenpfm_context_unload(void)
                return error;
        }
 
-       smp_call_function(&xenpfm_context_unload_cpu, &arg, 1, 1);
+       smp_call_function(&xenpfm_context_unload_cpu, &arg, 1);
        xenpfm_context_unload_cpu(&arg);
        spin_unlock(&xenpfm_context_lock);
        for_each_online_cpu(cpu) {
--- 2009-05-19.orig/xen/arch/ia64/linux-xen/smp.c       2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/ia64/linux-xen/smp.c    2009-05-25 09:06:01.000000000 +0200
@@ -274,7 +274,7 @@ smp_send_reschedule (int cpu)
 void
 smp_flush_tlb_all (void)
 {
-       on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+       on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 }
 
 void
@@ -297,7 +297,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
         * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
         * rather trivial.
         */
-       on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+       on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 #endif
 
@@ -314,7 +314,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
  */
 
 int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
+smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
                          int wait)
 {
        struct call_data_struct data;
@@ -372,7 +372,6 @@ EXPORT_SYMBOL(smp_call_function_single);
  *  [SUMMARY]  Run a function on all other CPUs.
  *  <func>     The function to run. This must be fast and non-blocking.
  *  <info>     An arbitrary pointer to pass to the function.
- *  <nonatomic>        currently unused.
 *  <wait>     If true, wait (atomically) until function has completed on other CPUs.
  *  [RETURNS]   0 on success, else a negative status code.
  *
@@ -383,7 +382,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  * hardware interrupt handler or from a bottom half handler.
  */
 int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+smp_call_function (void (*func) (void *info), void *info, int wait)
 {
        struct call_data_struct data;
        int cpus = num_online_cpus()-1;
@@ -438,7 +437,7 @@ EXPORT_SYMBOL(smp_call_function);
 #ifdef XEN
 int
 on_selected_cpus(const cpumask_t *selected, void (*func) (void *info),
-                 void *info, int retry, int wait)
+                 void *info, int wait)
 {
        struct call_data_struct data;
        unsigned int cpu, nr_cpus = cpus_weight(*selected);
--- 2009-05-19.orig/xen/arch/ia64/linux-xen/smpboot.c   2009-04-29 10:33:23.000000000 +0200
+++ 2009-05-19/xen/arch/ia64/linux-xen/smpboot.c        2009-05-25 09:06:15.000000000 +0200
@@ -307,7 +307,7 @@ ia64_sync_itc (unsigned int master)
 
        go[MASTER] = 1;
 
-       if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+       if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
                printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
                return;
        }
--- 2009-05-19.orig/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c 2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c      2009-05-25 08:57:39.000000000 +0200
@@ -240,7 +240,7 @@ sn2_global_tlb_purge(unsigned long start
                flush_data.end = end;
                flush_data.nbits = nbits;
                on_selected_cpus(&selected_cpus, sn_flush_ptcga_cpu,
-                                &flush_data, 1, 1);
+                                &flush_data, 1);
        }
        spin_unlock(&sn2_ptcg_lock2);
 }
--- 2009-05-19.orig/xen/arch/ia64/vmx/vmmu.c    2008-12-03 10:55:24.000000000 +0100
+++ 2009-05-19/xen/arch/ia64/vmx/vmmu.c 2009-05-25 09:06:45.000000000 +0200
@@ -448,8 +448,7 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u6
             if (cpu != current->processor) {
                 spin_barrier(&per_cpu(schedule_data, cpu).schedule_lock);
                 /* Flush VHPT on remote processors. */
-                smp_call_function_single(cpu, &ptc_ga_remote_func,
-                                         &args, 0, 1);
+                smp_call_function_single(cpu, &ptc_ga_remote_func, &args, 1);
             } else {
                 ptc_ga_remote_func(&args);
             }
--- 2009-05-19.orig/xen/arch/ia64/vmx/vtlb.c    2009-01-08 10:30:05.000000000 +0100
+++ 2009-05-19/xen/arch/ia64/vmx/vtlb.c 2009-05-25 09:06:50.000000000 +0200
@@ -643,7 +643,7 @@ void vmx_vcpu_flush_vtlb_all(VCPU *v)
     if (v->processor == smp_processor_id())
         __thash_purge_all(v);
     else
-        smp_call_function_single(v->processor, __thash_purge_all, v, 1, 1);
+        smp_call_function_single(v->processor, __thash_purge_all, v, 1);
     vcpu_unpause(v);
 }
 
--- 2009-05-19.orig/xen/arch/ia64/xen/cpufreq/cpufreq.c 2009-01-08 10:30:05.000000000 +0100
+++ 2009-05-19/xen/arch/ia64/xen/cpufreq/cpufreq.c      2009-05-25 09:08:06.000000000 +0200
@@ -95,8 +95,7 @@ acpi_cpufreq_get (unsigned int cpu)
        if (cpu == smp_processor_id())
                processor_get_freq((void*)&freq);
        else
-               smp_call_function_single(cpu, processor_get_freq,
-                                        (void *)&freq, 0, 1);
+               smp_call_function_single(cpu, processor_get_freq, &freq, 1);
 
        return freq;
 }
@@ -143,8 +142,7 @@ processor_set_freq (struct acpi_cpufreq_
        if (cpu == smp_processor_id())
                processor_set_pstate((void *)&value);
        else
-               smp_call_function_single(cpu, processor_set_pstate,
-                               (void *)&value, 0, 1);
+               smp_call_function_single(cpu, processor_set_pstate, &value, 1);
 
        if (value) {
                printk(KERN_WARNING "Transition failed\n");
--- 2009-05-19.orig/xen/arch/ia64/xen/flushtlb.c        2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/ia64/xen/flushtlb.c     2009-05-25 08:57:39.000000000 +0200
@@ -70,7 +70,7 @@ void
 new_tlbflush_clock_period(void)
 {
     /* flush all vhpt of physical cpu and mTLB */
-    on_each_cpu(tlbflush_clock_local_flush, NULL, 1, 1);
+    on_each_cpu(tlbflush_clock_local_flush, NULL, 1);
 
     /*
      * if global TLB shootdown is finished, increment tlbflush_time
--- 2009-05-19.orig/xen/arch/ia64/xen/fw_emul.c 2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/ia64/xen/fw_emul.c      2009-05-25 09:08:30.000000000 +0200
@@ -281,7 +281,7 @@ sal_emulator (long index, unsigned long 
                                IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
                                ret = smp_call_function_single(e->cpuid,
                                                               get_state_info_on,
-                                                              &arg, 0, 1);
+                                                              &arg, 1);
                                if (ret < 0) {
                                        printk("SAL_GET_STATE_INFO "
                                               "smp_call_function_single error:"
@@ -344,7 +344,7 @@ sal_emulator (long index, unsigned long 
                                int ret;
                                IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: 
remote\n");
                                ret = smp_call_function_single(e->cpuid,
-                                       clear_state_info_on, &arg, 0, 1);
+                                       clear_state_info_on, &arg, 1);
                                if (ret < 0) {
                                        printk("sal_emulator: "
                                               "SAL_CLEAR_STATE_INFO "
@@ -845,8 +845,7 @@ xen_pal_emulator(unsigned long index, u6
                                .progress = 0,
                                .status = 0
                        };
-                       smp_call_function(remote_pal_cache_flush,
-                                         (void *)&args, 1, 1);
+                       smp_call_function(remote_pal_cache_flush, &args, 1);
                        if (args.status != 0)
                                panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
                                             "remote status %lx", args.status);
@@ -945,7 +944,7 @@ xen_pal_emulator(unsigned long index, u6
                        /* must be performed on all remote processors 
                           in the coherence domain. */
                        smp_call_function(remote_pal_prefetch_visibility,
-                                         (void *)in1, 1, 1);
+                                         (void *)in1, 1);
                        status = 1; /* no more necessary on remote processor */
                }
                break;
@@ -953,7 +952,7 @@ xen_pal_emulator(unsigned long index, u6
                status = ia64_pal_mc_drain();
                /* FIXME: All vcpus likely call PAL_MC_DRAIN.
                   That causes the congestion. */
-               smp_call_function(remote_pal_mc_drain, NULL, 1, 1);
+               smp_call_function(remote_pal_mc_drain, NULL, 1);
                break;
            case PAL_BRAND_INFO:
                if (in1 == 0) {
--- 2009-05-19.orig/xen/arch/ia64/xen/vhpt.c    2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/ia64/xen/vhpt.c 2009-05-25 09:11:02.000000000 +0200
@@ -307,7 +307,7 @@ void domain_flush_vtlb_all(struct domain
                        // takes care of mTLB flush.
                        smp_call_function_single(v->processor,
                                                 __vcpu_flush_vtlb_all,
-                                                v, 1, 1);
+                                                v, 1);
        }
        perfc_incr(domain_flush_vtlb_all);
 }
@@ -513,9 +513,9 @@ void domain_flush_tlb_vhpt(struct domain
 {
        /* Very heavy...  */
        if (HAS_PERVCPU_VHPT(d) || is_hvm_domain(d))
-               on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+               on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
        else
-               on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
+               on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
        cpus_clear (d->domain_dirty_cpumask);
 }
 
@@ -532,7 +532,7 @@ void flush_tlb_for_log_dirty(struct doma
                        thash_purge_all(v);
                }
                smp_call_function((void (*)(void *))local_flush_tlb_all, 
-                                       NULL, 1, 1);
+                                       NULL, 1);
        } else if (HAS_PERVCPU_VHPT(d)) {
                for_each_vcpu (d, v) {
                        if (!v->is_initialised)
@@ -541,9 +541,9 @@ void flush_tlb_for_log_dirty(struct doma
                        vcpu_purge_tr_entry(&PSCBX(v,itlb));
                        vcpu_vhpt_flush(v);
                }
-               on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+               on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
        } else {
-               on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
+               on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
        }
        cpus_clear (d->domain_dirty_cpumask);
 }
@@ -562,7 +562,7 @@ void flush_tlb_mask(const cpumask_t *mas
     for_each_cpu_mask (cpu, *mask)
         if (cpu != smp_processor_id())
             smp_call_function_single
-                (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
+                (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1);
 }
 
 #ifdef PERF_COUNTERS
--- 2009-05-19.orig/xen/arch/x86/acpi/cpufreq/cpufreq.c 2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/acpi/cpufreq/cpufreq.c      2009-05-25 08:57:39.000000000 +0200
@@ -186,7 +186,7 @@ static void drv_read(struct drv_cmd *cmd
     if (likely(cpu_isset(smp_processor_id(), cmd->mask)))
         do_drv_read((void *)cmd);
     else
-        on_selected_cpus(&cmd->mask, do_drv_read, (void *)cmd, 0, 1);
+        on_selected_cpus(&cmd->mask, do_drv_read, cmd, 1);
 }
 
 static void drv_write(struct drv_cmd *cmd)
@@ -195,7 +195,7 @@ static void drv_write(struct drv_cmd *cm
         cpu_isset(smp_processor_id(), cmd->mask))
         do_drv_write((void *)cmd);
     else
-        on_selected_cpus(&cmd->mask, do_drv_write, (void *)cmd, 0, 0);
+        on_selected_cpus(&cmd->mask, do_drv_write, cmd, 0);
 }
 
 static u32 get_cur_val(cpumask_t mask)
@@ -303,7 +303,7 @@ static unsigned int get_measured_perf(un
         read_measured_perf_ctrs((void *)&readin);
     } else {
         on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs,
-                        (void *)&readin, 0, 1);
+                        &readin, 1);
     }
 
     cur.aperf.whole = readin.aperf.whole - saved->aperf.whole;
--- 2009-05-19.orig/xen/arch/x86/acpi/cpufreq/powernow.c        2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/acpi/cpufreq/powernow.c     2009-05-25 08:57:39.000000000 +0200
@@ -121,7 +121,7 @@ static int powernow_cpufreq_target(struc
 
     cmd.val = next_perf_state;
 
-    on_selected_cpus(&cmd.mask, transition_pstate, (void *) &cmd, 0, 0);
+    on_selected_cpus(&cmd.mask, transition_pstate, &cmd, 0);
 
     perf->state = next_perf_state;
     policy->cur = freqs.new;
--- 2009-05-19.orig/xen/arch/x86/cpu/amd.c      2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/cpu/amd.c   2009-05-25 08:57:39.000000000 +0200
@@ -246,7 +246,7 @@ static void check_disable_c1e(unsigned i
 {
        /* C1E is sometimes enabled during entry to ACPI mode. */
        if ((port == acpi_smi_cmd) && (value == acpi_enable_value))
-               on_each_cpu(disable_c1e, NULL, 1, 1);
+               on_each_cpu(disable_c1e, NULL, 1);
 }
 
 static void __devinit init_amd(struct cpuinfo_x86 *c)
--- 2009-05-19.orig/xen/arch/x86/cpu/mcheck/amd_nonfatal.c      2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/cpu/mcheck/amd_nonfatal.c   2009-05-25 08:57:39.000000000 +0200
@@ -133,7 +133,7 @@ void mce_amd_checkregs(void *info)
  */
 static void mce_amd_work_fn(void *data)
 {
-       on_each_cpu(mce_amd_checkregs, data, 1, 1);
+       on_each_cpu(mce_amd_checkregs, data, 1);
 
        if (adjust > 0) {
                if (!guest_enabled_event(dom0->vcpu[0], VIRQ_MCA) ) {
--- 2009-05-19.orig/xen/arch/x86/cpu/mcheck/mce.c       2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/cpu/mcheck/mce.c    2009-05-25 08:57:39.000000000 +0200
@@ -1162,8 +1162,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
                        if (log_cpus == NULL)
                                return x86_mcerr("do_mca cpuinfo", -ENOMEM);
 
-                       if (on_each_cpu(do_mc_get_cpu_info, log_cpus,
-                           1, 1) != 0) {
+                       if (on_each_cpu(do_mc_get_cpu_info, log_cpus, 1)) {
                                xfree(log_cpus);
                                return x86_mcerr("do_mca cpuinfo", -EIO);
                        }
@@ -1206,7 +1205,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
                add_taint(TAINT_ERROR_INJECT);
 
                on_selected_cpus(cpumask_of(target), x86_mc_msrinject,
-                                mc_msrinject, 1, 1);
+                                mc_msrinject, 1);
 
                break;
 
@@ -1226,7 +1225,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
                add_taint(TAINT_ERROR_INJECT);
 
                on_selected_cpus(cpumask_of(target), x86_mc_mceinject,
-                                mc_mceinject, 1, 1);
+                                mc_mceinject, 1);
                break;
 
        default:
--- 2009-05-19.orig/xen/arch/x86/cpu/mcheck/mce_intel.c 2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/cpu/mcheck/mce_intel.c      2009-05-25 08:57:39.000000000 +0200
@@ -632,7 +632,7 @@ static void __cpu_mcheck_distribute_cmci
 void cpu_mcheck_distribute_cmci(void)
 {
     if (cmci_support && !mce_disabled)
-        on_each_cpu(__cpu_mcheck_distribute_cmci, NULL, 0, 0);
+        on_each_cpu(__cpu_mcheck_distribute_cmci, NULL, 0);
 }
 
 static void clear_cmci(void)
--- 2009-05-19.orig/xen/arch/x86/cpu/mcheck/non-fatal.c 2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/cpu/mcheck/non-fatal.c      2009-05-25 08:57:39.000000000 +0200
@@ -69,7 +69,7 @@ static void mce_checkregs (void *info)
 
 static void mce_work_fn(void *data)
 { 
-       on_each_cpu(mce_checkregs, NULL, 1, 1);
+       on_each_cpu(mce_checkregs, NULL, 1);
 
        if (variable_period) {
                if (adjust)
--- 2009-05-19.orig/xen/arch/x86/cpu/mtrr/main.c        2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/cpu/mtrr/main.c     2009-05-25 08:57:39.000000000 +0200
@@ -229,7 +229,7 @@ static void set_mtrr(unsigned int reg, u
        atomic_set(&data.gate,0);
 
        /*  Start the ball rolling on other CPUs  */
-       if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
+       if (smp_call_function(ipi_handler, &data, 0) != 0)
                panic("mtrr: timed out waiting for other CPUs\n");
 
        local_irq_save(flags);
@@ -688,7 +688,7 @@ void mtrr_save_state(void)
        if (cpu == 0)
                mtrr_save_fixed_ranges(NULL);
        else
-               on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1, 1);
+               on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1);
        put_cpu();
 }
 
--- 2009-05-19.orig/xen/arch/x86/hvm/hvm.c      2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/hvm/hvm.c   2009-05-25 08:57:39.000000000 +0200
@@ -971,7 +971,7 @@ int hvm_set_cr0(unsigned long value)
             if ( !v->domain->arch.hvm_domain.is_in_uc_mode )
             {
                 /* Flush physical caches. */
-                on_each_cpu(local_flush_cache, NULL, 1, 1);
+                on_each_cpu(local_flush_cache, NULL, 1);
                 hvm_set_uc_mode(v, 1);
             }
             spin_unlock(&v->domain->arch.hvm_domain.uc_lock);
--- 2009-05-19.orig/xen/arch/x86/hvm/svm/svm.c  2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/hvm/svm/svm.c       2009-05-25 08:57:40.000000000 +0200
@@ -1173,7 +1173,7 @@ static void wbinvd_ipi(void *info)
 static void svm_wbinvd_intercept(void)
 {
     if ( has_arch_pdevs(current->domain) )
-        on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+        on_each_cpu(wbinvd_ipi, NULL, 1);
 }
 
 static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
--- 2009-05-19.orig/xen/arch/x86/hvm/vmx/vmcs.c 2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/hvm/vmx/vmcs.c      2009-05-25 08:57:40.000000000 +0200
@@ -264,7 +264,7 @@ static void vmx_clear_vmcs(struct vcpu *
     int cpu = v->arch.hvm_vmx.active_cpu;
 
     if ( cpu != -1 )
-        on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1, 1);
+        on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1);
 }
 
 static void vmx_load_vmcs(struct vcpu *v)
@@ -899,7 +899,7 @@ void vmx_do_resume(struct vcpu *v)
         {
             int cpu = v->arch.hvm_vmx.active_cpu;
             if ( cpu != -1 )
-                on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1, 1);
+                on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1);
         }
 
         vmx_clear_vmcs(v);
--- 2009-05-19.orig/xen/arch/x86/hvm/vmx/vmx.c  2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/hvm/vmx/vmx.c       2009-05-25 08:57:40.000000000 +0200
@@ -1219,7 +1219,7 @@ void ept_sync_domain(struct domain *d)
     if ( d->arch.hvm_domain.hap_enabled && d->vcpu[0] )
     {
         ASSERT(local_irq_is_enabled());
-        on_each_cpu(__ept_sync_domain, d, 1, 1);
+        on_each_cpu(__ept_sync_domain, d, 1);
     }
 }
 
@@ -2130,7 +2130,7 @@ static void vmx_wbinvd_intercept(void)
         return;
 
     if ( cpu_has_wbinvd_exiting )
-        on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+        on_each_cpu(wbinvd_ipi, NULL, 1);
     else
         wbinvd();
 }
--- 2009-05-19.orig/xen/arch/x86/irq.c  2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/irq.c       2009-05-25 08:57:40.000000000 +0200
@@ -516,7 +516,7 @@ static void __pirq_guest_eoi(struct doma
     }
 
     if ( !cpus_empty(cpu_eoi_map) )
-        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0);
+        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
 }
 
 int pirq_guest_eoi(struct domain *d, int irq)
@@ -755,7 +755,7 @@ static irq_guest_action_t *__pirq_guest_
         {
             cpu_eoi_map = action->cpu_eoi_map;
             spin_unlock_irq(&desc->lock);
-            on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0);
+            on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
             spin_lock_irq(&desc->lock);
         }
         break;
@@ -793,7 +793,7 @@ static irq_guest_action_t *__pirq_guest_
     {
         BUG_ON(action->ack_type != ACKTYPE_EOI);
         spin_unlock_irq(&desc->lock);
-        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 1);
+        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1);
         spin_lock_irq(&desc->lock);
     }
 
--- 2009-05-19.orig/xen/arch/x86/machine_kexec.c        2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/machine_kexec.c     2009-05-25 08:57:40.000000000 +0200
@@ -100,7 +100,7 @@ void machine_reboot_kexec(xen_kexec_imag
     if ( reboot_cpu_id != smp_processor_id() )
     {
         on_selected_cpus(cpumask_of(reboot_cpu_id), __machine_reboot_kexec,
-                         image, 1, 0);
+                         image, 0);
         for (;;)
                 ; /* nothing */
     }
--- 2009-05-19.orig/xen/arch/x86/oprofile/nmi_int.c     2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/oprofile/nmi_int.c  2009-05-25 08:57:40.000000000 +0200
@@ -186,7 +186,7 @@ static void nmi_cpu_setup(void * dummy)
 
 int nmi_setup_events(void)
 {
-       on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_setup, NULL, 1);
        return 0;
 }
 
@@ -207,7 +207,7 @@ int nmi_reserve_counters(void)
        /* We need to serialize save and setup for HT because the subset
         * of msrs are distinct for save and setup operations
         */
-       on_each_cpu(nmi_save_registers, NULL, 0, 1);
+       on_each_cpu(nmi_save_registers, NULL, 1);
        return 0;
 }
 
@@ -256,7 +256,7 @@ static void nmi_cpu_shutdown(void * dumm
  
 void nmi_release_counters(void)
 {
-       on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        release_lapic_nmi();
        free_msrs();
 }
@@ -274,7 +274,7 @@ static void nmi_cpu_start(void * dummy)
 
 int nmi_start(void)
 {
-       on_each_cpu(nmi_cpu_start, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_start, NULL, 1);
        return 0;
 }
  
@@ -306,7 +306,7 @@ static void nmi_cpu_stop(void * dummy)
  
 void nmi_stop(void)
 {
-       on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_stop, NULL, 1);
 }
 
 
--- 2009-05-19.orig/xen/arch/x86/shutdown.c     2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/shutdown.c  2009-05-25 08:57:40.000000000 +0200
@@ -91,7 +91,7 @@ void machine_halt(void)
     watchdog_disable();
     console_start_sync();
     local_irq_enable();
-    smp_call_function(__machine_halt, NULL, 1, 0);
+    smp_call_function(__machine_halt, NULL, 0);
     __machine_halt(NULL);
 }
 
@@ -311,7 +311,7 @@ void machine_restart(unsigned int delay_
     {
         /* Send IPI to the boot CPU (logical cpu 0). */
         on_selected_cpus(cpumask_of(0), __machine_restart,
-                         &delay_millisecs, 1, 0);
+                         &delay_millisecs, 0);
         for ( ; ; )
             halt();
     }
--- 2009-05-19.orig/xen/arch/x86/smp.c  2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/smp.c       2009-05-25 08:57:40.000000000 +0200
@@ -235,19 +235,17 @@ static struct call_data_struct *call_dat
 int smp_call_function(
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait)
 {
     cpumask_t allbutself = cpu_online_map;
     cpu_clear(smp_processor_id(), allbutself);
-    return on_selected_cpus(&allbutself, func, info, retry, wait);
+    return on_selected_cpus(&allbutself, func, info, wait);
 }
 
 int on_selected_cpus(
     const cpumask_t *selected,
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait)
 {
     struct call_data_struct data;
@@ -333,7 +331,7 @@ void smp_send_stop(void)
 {
     int timeout = 10;
 
-    smp_call_function(stop_this_cpu, NULL, 1, 0);
+    smp_call_function(stop_this_cpu, NULL, 0);
 
     /* Wait 10ms for all other CPUs to go offline. */
     while ( (num_online_cpus() > 1) && (timeout-- > 0) )
--- 2009-05-19.orig/xen/arch/x86/time.c 2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/time.c      2009-05-25 08:57:40.000000000 +0200
@@ -1193,7 +1193,7 @@ static void time_calibration(void *unuse
                      opt_consistent_tscs
                      ? time_calibration_tsc_rendezvous
                      : time_calibration_std_rendezvous,
-                     &r, 0, 1);
+                     &r, 1);
 }
 
 void init_percpu_time(void)
--- 2009-05-19.orig/xen/arch/x86/x86_32/traps.c 2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/arch/x86/x86_32/traps.c      2009-05-25 08:57:40.000000000 +0200
@@ -403,7 +403,7 @@ static long register_guest_callback(stru
     case CALLBACKTYPE_sysenter_deprecated:
         if ( !cpu_has_sep )
             ret = -EINVAL;
-        else if ( on_each_cpu(do_update_sysenter, &reg->address, 1, 1) != 0 )
+        else if ( on_each_cpu(do_update_sysenter, &reg->address, 1) != 0 )
             ret = -EIO;
         break;
 
--- 2009-05-19.orig/xen/common/gdbstub.c        2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/common/gdbstub.c     2009-05-25 08:57:40.000000000 +0200
@@ -672,7 +672,7 @@ static void gdb_smp_pause(void)
 
     atomic_set(&gdb_smp_paused_count, 0);
 
-    smp_call_function(gdb_pause_this_cpu, NULL, /* dont wait! */0, 0);
+    smp_call_function(gdb_pause_this_cpu, NULL, /* dont wait! */0);
 
     /* Wait 100ms for all other CPUs to enter pause loop */
     while ( (atomic_read(&gdb_smp_paused_count) < (num_online_cpus() - 1)) 
--- 2009-05-19.orig/xen/common/keyhandler.c     2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/common/keyhandler.c  2009-05-25 08:57:40.000000000 +0200
@@ -119,7 +119,7 @@ static void dump_registers(unsigned char
         if ( cpu == smp_processor_id() )
             continue;
         printk("\n*** Dumping CPU%d host state: ***\n", cpu);
-        on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1, 1);
+        on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1);
     }
 
     printk("\n");
@@ -263,7 +263,7 @@ static void read_clocks(unsigned char ke
 
     spin_lock(&lock);
 
-    smp_call_function(read_clocks_slave, NULL, 0, 0);
+    smp_call_function(read_clocks_slave, NULL, 0);
 
     local_irq_disable();
     read_clocks_cpumask = cpu_online_map;
--- 2009-05-19.orig/xen/include/asm-ia64/linux-xen/asm/smp.h    2009-02-04 
11:33:19.000000000 +0100
+++ 2009-05-19/xen/include/asm-ia64/linux-xen/asm/smp.h 2009-05-25 
09:05:19.000000000 +0200
@@ -127,8 +127,8 @@ extern void __init smp_build_cpu_map(voi
 extern void __init init_smp_config (void);
 extern void smp_do_timer (struct pt_regs *regs);
 
-extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
-                                    int retry, int wait);
+extern int smp_call_function_single (int cpuid, void (*func) (void *info),
+                                    void *info, int wait);
 extern void smp_send_reschedule (int cpu);
 #ifdef XEN
 extern void lock_ipi_calllock(unsigned long *flags);
--- 2009-05-19.orig/xen/include/xen/smp.h       2009-05-25 09:04:30.000000000 +0200
+++ 2009-05-19/xen/include/xen/smp.h    2009-05-25 08:59:54.000000000 +0200
@@ -34,7 +34,6 @@ extern void smp_cpus_done(unsigned int m
 extern int smp_call_function(
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait);
 
 /* 
@@ -44,7 +43,6 @@ extern int on_selected_cpus(
     const cpumask_t *selected,
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait);
 
 /*
@@ -59,10 +57,9 @@ void smp_prepare_boot_cpu(void);
 static inline int on_each_cpu(
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait)
 {
-    return on_selected_cpus(&cpu_online_map, func, info, retry, wait);
+    return on_selected_cpus(&cpu_online_map, func, info, wait);
 }
 
 #define smp_processor_id() raw_smp_processor_id()



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
