debuggers.hg
changeset 19690:7dfc0a20fa59
Remove unused 'retry' parameter from on_selected_cpus() etc.
Remove the unused "retry" parameter of on_selected_cpus(),
on_each_cpu(), smp_call_function(), and smp_call_function_single().
Signed-off-by: Jan Beulich <jbeulich@novell.com>
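
For reference, a short sketch of the API after this change. The prototypes are copied from the post-patch headers shown in the diff below (xen/include/xen/smp.h and xen/include/asm-ia64/linux-xen/asm/smp.h); the trailing caller and some_ipi_handler are hypothetical, added only to illustrate the mechanical call-site conversion of dropping the ignored retry argument while keeping the wait flag.

#include <xen/cpumask.h>   /* cpumask_t, cpu_online_map */

/* Post-patch declarations (xen/include/xen/smp.h). */
extern int smp_call_function(
    void (*func) (void *info),
    void *info,
    int wait);

extern int on_selected_cpus(
    const cpumask_t *selected,
    void (*func) (void *info),
    void *info,
    int wait);

static inline int on_each_cpu(
    void (*func) (void *info),
    void *info,
    int wait)
{
    return on_selected_cpus(&cpu_online_map, func, info, wait);
}

/* Post-patch declaration (xen/include/asm-ia64/linux-xen/asm/smp.h). */
extern int smp_call_function_single(int cpuid, void (*func) (void *info),
                                    void *info, int wait);

/* Hypothetical caller showing the conversion performed throughout the diff. */
static void some_ipi_handler(void *info);

static void example_flush_all(void)
{
    /* before: on_each_cpu(some_ipi_handler, NULL, 1, 1); */
    on_each_cpu(some_ipi_handler, NULL, 1);   /* wait == 1 */
}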
line diff
--- a/xen/arch/ia64/linux-xen/mca.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/linux-xen/mca.c	Wed May 27 11:16:27 2009 +0100
@@ -956,7 +956,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
 static void
 ia64_mca_cmc_vector_disable_keventd(void *unused)
 {
-    on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+    on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
 }

 /*
@@ -968,7 +968,7 @@ ia64_mca_cmc_vector_disable_keventd(void
 static void
 ia64_mca_cmc_vector_enable_keventd(void *unused)
 {
-    on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+    on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
 }
 #endif /* !XEN */
--- a/xen/arch/ia64/linux-xen/perfmon.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/linux-xen/perfmon.c	Wed May 27 11:16:27 2009 +0100
@@ -1895,7 +1895,7 @@ pfm_syswide_cleanup_other_cpu(pfm_contex
     int ret;

     DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-    ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+    ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
     DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
@@ -6895,7 +6895,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_h
     }

     /* save the current system wide pmu states */
-    ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+    ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
     if (ret) {
         DPRINT(("on_each_cpu() failed: %d\n", ret));
         goto cleanup_reserve;
@@ -6940,7 +6940,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_ha

     pfm_alt_intr_handler = NULL;

-    ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+    ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
     if (ret) {
         DPRINT(("on_each_cpu() failed: %d\n", ret));
     }
@@ -7499,7 +7499,7 @@ xenpfm_context_load(XEN_GUEST_HANDLE(pfa

     BUG_ON(in_irq());
     spin_lock(&xenpfm_context_lock);
-    smp_call_function(&xenpfm_context_load_cpu, &arg, 1, 1);
+    smp_call_function(&xenpfm_context_load_cpu, &arg, 1);
     xenpfm_context_load_cpu(&arg);
     spin_unlock(&xenpfm_context_lock);
     for_each_online_cpu(cpu) {
@@ -7553,7 +7553,7 @@ xenpfm_context_unload(void)
         return error;
     }

-    smp_call_function(&xenpfm_context_unload_cpu, &arg, 1, 1);
+    smp_call_function(&xenpfm_context_unload_cpu, &arg, 1);
     xenpfm_context_unload_cpu(&arg);
     spin_unlock(&xenpfm_context_lock);
     for_each_online_cpu(cpu) {
--- a/xen/arch/ia64/linux-xen/smp.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/linux-xen/smp.c	Wed May 27 11:16:27 2009 +0100
@@ -274,7 +274,7 @@ smp_send_reschedule (int cpu)
 void
 smp_flush_tlb_all (void)
 {
-    on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+    on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 }

 void
@@ -297,7 +297,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
  * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
  * rather trivial.
  */
-    on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+    on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 #endif

@@ -314,7 +314,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
  */

 int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
+smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
                           int wait)
 {
     struct call_data_struct data;
@@ -372,7 +372,6 @@ EXPORT_SYMBOL(smp_call_function_single);
  * [SUMMARY] Run a function on all other CPUs.
  * <func> The function to run. This must be fast and non-blocking.
  * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
  * <wait> If true, wait (atomically) until function has completed on other CPUs.
  * [RETURNS] 0 on success, else a negative status code.
  *
@@ -383,7 +382,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  * hardware interrupt handler or from a bottom half handler.
  */
 int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+smp_call_function (void (*func) (void *info), void *info, int wait)
 {
     struct call_data_struct data;
     int cpus = num_online_cpus()-1;
@@ -438,7 +437,7 @@ EXPORT_SYMBOL(smp_call_function);
 #ifdef XEN
 int
 on_selected_cpus(const cpumask_t *selected, void (*func) (void *info),
-                 void *info, int retry, int wait)
+                 void *info, int wait)
 {
     struct call_data_struct data;
     unsigned int cpu, nr_cpus = cpus_weight(*selected);
--- a/xen/arch/ia64/linux-xen/smpboot.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/linux-xen/smpboot.c	Wed May 27 11:16:27 2009 +0100
@@ -307,7 +307,7 @@ ia64_sync_itc (unsigned int master)

     go[MASTER] = 1;

-    if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+    if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
         printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
         return;
     }
--- a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c	Wed May 27 11:16:27 2009 +0100
@@ -240,7 +240,7 @@ sn2_global_tlb_purge(unsigned long start
         flush_data.end = end;
         flush_data.nbits = nbits;
         on_selected_cpus(&selected_cpus, sn_flush_ptcga_cpu,
-                         &flush_data, 1, 1);
+                         &flush_data, 1);
     }
     spin_unlock(&sn2_ptcg_lock2);
 }
--- a/xen/arch/ia64/vmx/vmmu.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/vmx/vmmu.c	Wed May 27 11:16:27 2009 +0100
@@ -448,8 +448,7 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u6
     if (cpu != current->processor) {
         spin_barrier(&per_cpu(schedule_data, cpu).schedule_lock);
         /* Flush VHPT on remote processors. */
-        smp_call_function_single(cpu, &ptc_ga_remote_func,
-                                 &args, 0, 1);
+        smp_call_function_single(cpu, &ptc_ga_remote_func, &args, 1);
     } else {
         ptc_ga_remote_func(&args);
     }
--- a/xen/arch/ia64/vmx/vtlb.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/vmx/vtlb.c	Wed May 27 11:16:27 2009 +0100
@@ -643,7 +643,7 @@ void vmx_vcpu_flush_vtlb_all(VCPU *v)
     if (v->processor == smp_processor_id())
         __thash_purge_all(v);
     else
-        smp_call_function_single(v->processor, __thash_purge_all, v, 1, 1);
+        smp_call_function_single(v->processor, __thash_purge_all, v, 1);
     vcpu_unpause(v);
 }
--- a/xen/arch/ia64/xen/cpufreq/cpufreq.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/xen/cpufreq/cpufreq.c	Wed May 27 11:16:27 2009 +0100
@@ -95,8 +95,7 @@ acpi_cpufreq_get (unsigned int cpu)
     if (cpu == smp_processor_id())
         processor_get_freq((void*)&freq);
     else
-        smp_call_function_single(cpu, processor_get_freq,
-                                 (void *)&freq, 0, 1);
+        smp_call_function_single(cpu, processor_get_freq, &freq, 1);

     return freq;
 }
@@ -143,8 +142,7 @@ processor_set_freq (struct acpi_cpufreq_
     if (cpu == smp_processor_id())
         processor_set_pstate((void *)&value);
     else
-        smp_call_function_single(cpu, processor_set_pstate,
-                                 (void *)&value, 0, 1);
+        smp_call_function_single(cpu, processor_set_pstate, &value, 1);

     if (value) {
         printk(KERN_WARNING "Transition failed\n");
--- a/xen/arch/ia64/xen/flushtlb.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/xen/flushtlb.c	Wed May 27 11:16:27 2009 +0100
@@ -70,7 +70,7 @@ void
 new_tlbflush_clock_period(void)
 {
     /* flush all vhpt of physical cpu and mTLB */
-    on_each_cpu(tlbflush_clock_local_flush, NULL, 1, 1);
+    on_each_cpu(tlbflush_clock_local_flush, NULL, 1);

     /*
      * if global TLB shootdown is finished, increment tlbflush_time
--- a/xen/arch/ia64/xen/fw_emul.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/xen/fw_emul.c	Wed May 27 11:16:27 2009 +0100
@@ -281,7 +281,7 @@ sal_emulator (long index, unsigned long
             IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
             ret = smp_call_function_single(e->cpuid,
                                            get_state_info_on,
-                                           &arg, 0, 1);
+                                           &arg, 1);
             if (ret < 0) {
                 printk("SAL_GET_STATE_INFO "
                        "smp_call_function_single error:"
@@ -344,7 +344,7 @@ sal_emulator (long index, unsigned long
             int ret;
             IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
             ret = smp_call_function_single(e->cpuid,
-                                           clear_state_info_on, &arg, 0, 1);
+                                           clear_state_info_on, &arg, 1);
             if (ret < 0) {
                 printk("sal_emulator: "
                        "SAL_CLEAR_STATE_INFO "
@@ -845,8 +845,7 @@ xen_pal_emulator(unsigned long index, u6
                 .progress = 0,
                 .status = 0
             };
-            smp_call_function(remote_pal_cache_flush,
-                              (void *)&args, 1, 1);
+            smp_call_function(remote_pal_cache_flush, &args, 1);
             if (args.status != 0)
                 panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
                              "remote status %lx", args.status);
@@ -945,7 +944,7 @@ xen_pal_emulator(unsigned long index, u6
             /* must be performed on all remote processors
                in the coherence domain. */
             smp_call_function(remote_pal_prefetch_visibility,
-                              (void *)in1, 1, 1);
+                              (void *)in1, 1);
             status = 1; /* no more necessary on remote processor */
         }
         break;
@@ -953,7 +952,7 @@ xen_pal_emulator(unsigned long index, u6
         status = ia64_pal_mc_drain();
         /* FIXME: All vcpus likely call PAL_MC_DRAIN.
            That causes the congestion. */
-        smp_call_function(remote_pal_mc_drain, NULL, 1, 1);
+        smp_call_function(remote_pal_mc_drain, NULL, 1);
         break;
     case PAL_BRAND_INFO:
         if (in1 == 0) {
--- a/xen/arch/ia64/xen/vhpt.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/xen/vhpt.c	Wed May 27 11:16:27 2009 +0100
@@ -307,7 +307,7 @@ void domain_flush_vtlb_all(struct domain
             // takes care of mTLB flush.
             smp_call_function_single(v->processor,
                                      __vcpu_flush_vtlb_all,
-                                     v, 1, 1);
+                                     v, 1);
     }
     perfc_incr(domain_flush_vtlb_all);
 }
@@ -513,9 +513,9 @@ void domain_flush_tlb_vhpt(struct domain
 {
     /* Very heavy... */
     if (HAS_PERVCPU_VHPT(d) || is_hvm_domain(d))
-        on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+        on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
     else
-        on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
+        on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
     cpus_clear (d->domain_dirty_cpumask);
 }

@@ -532,7 +532,7 @@ void flush_tlb_for_log_dirty(struct doma
             thash_purge_all(v);
         }
         smp_call_function((void (*)(void *))local_flush_tlb_all,
-                          NULL, 1, 1);
+                          NULL, 1);
     } else if (HAS_PERVCPU_VHPT(d)) {
         for_each_vcpu (d, v) {
             if (!v->is_initialised)
@@ -541,9 +541,9 @@ void flush_tlb_for_log_dirty(struct doma
             vcpu_purge_tr_entry(&PSCBX(v,itlb));
             vcpu_vhpt_flush(v);
         }
-        on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+        on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
     } else {
-        on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
+        on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
     }
     cpus_clear (d->domain_dirty_cpumask);
 }
@@ -562,7 +562,7 @@ void flush_tlb_mask(const cpumask_t *mas
     for_each_cpu_mask (cpu, *mask)
         if (cpu != smp_processor_id())
             smp_call_function_single
-                (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
+                (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1);
 }

 #ifdef PERF_COUNTERS
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c	Wed May 27 11:16:27 2009 +0100
@@ -186,7 +186,7 @@ static void drv_read(struct drv_cmd *cmd
     if (likely(cpu_isset(smp_processor_id(), cmd->mask)))
         do_drv_read((void *)cmd);
     else
-        on_selected_cpus(&cmd->mask, do_drv_read, (void *)cmd, 0, 1);
+        on_selected_cpus(&cmd->mask, do_drv_read, cmd, 1);
 }

 static void drv_write(struct drv_cmd *cmd)
@@ -195,7 +195,7 @@ static void drv_write(struct drv_cmd *cm
         cpu_isset(smp_processor_id(), cmd->mask))
         do_drv_write((void *)cmd);
     else
-        on_selected_cpus(&cmd->mask, do_drv_write, (void *)cmd, 0, 0);
+        on_selected_cpus(&cmd->mask, do_drv_write, cmd, 0);
 }

 static u32 get_cur_val(cpumask_t mask)
@@ -303,7 +303,7 @@ static unsigned int get_measured_perf(un
         read_measured_perf_ctrs((void *)&readin);
     } else {
         on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs,
-                         (void *)&readin, 0, 1);
+                         &readin, 1);
     }

     cur.aperf.whole = readin.aperf.whole - saved->aperf.whole;
--- a/xen/arch/x86/acpi/cpufreq/powernow.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c	Wed May 27 11:16:27 2009 +0100
@@ -121,7 +121,7 @@ static int powernow_cpufreq_target(struc

     cmd.val = next_perf_state;

-    on_selected_cpus(&cmd.mask, transition_pstate, (void *) &cmd, 0, 0);
+    on_selected_cpus(&cmd.mask, transition_pstate, &cmd, 0);

     perf->state = next_perf_state;
     policy->cur = freqs.new;
--- a/xen/arch/x86/cpu/amd.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/amd.c	Wed May 27 11:16:27 2009 +0100
@@ -246,7 +246,7 @@ static void check_disable_c1e(unsigned i
 {
     /* C1E is sometimes enabled during entry to ACPI mode. */
     if ((port == acpi_smi_cmd) && (value == acpi_enable_value))
-        on_each_cpu(disable_c1e, NULL, 1, 1);
+        on_each_cpu(disable_c1e, NULL, 1);
 }

 static void __devinit init_amd(struct cpuinfo_x86 *c)
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c	Wed May 27 11:16:27 2009 +0100
@@ -133,7 +133,7 @@ void mce_amd_checkregs(void *info)
  */
 static void mce_amd_work_fn(void *data)
 {
-    on_each_cpu(mce_amd_checkregs, data, 1, 1);
+    on_each_cpu(mce_amd_checkregs, data, 1);

     if (adjust > 0) {
         if (!guest_enabled_event(dom0->vcpu[0], VIRQ_MCA) ) {
--- a/xen/arch/x86/cpu/mcheck/mce.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce.c	Wed May 27 11:16:27 2009 +0100
@@ -1162,8 +1162,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
         if (log_cpus == NULL)
             return x86_mcerr("do_mca cpuinfo", -ENOMEM);

-        if (on_each_cpu(do_mc_get_cpu_info, log_cpus,
-                        1, 1) != 0) {
+        if (on_each_cpu(do_mc_get_cpu_info, log_cpus, 1)) {
             xfree(log_cpus);
             return x86_mcerr("do_mca cpuinfo", -EIO);
         }
@@ -1206,7 +1205,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
             add_taint(TAINT_ERROR_INJECT);

         on_selected_cpus(cpumask_of(target), x86_mc_msrinject,
-                         mc_msrinject, 1, 1);
+                         mc_msrinject, 1);

         break;

@@ -1226,7 +1225,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
             add_taint(TAINT_ERROR_INJECT);

         on_selected_cpus(cpumask_of(target), x86_mc_mceinject,
-                         mc_mceinject, 1, 1);
+                         mc_mceinject, 1);
         break;

     default:
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c	Wed May 27 11:16:27 2009 +0100
@@ -632,7 +632,7 @@ static void __cpu_mcheck_distribute_cmci
 void cpu_mcheck_distribute_cmci(void)
 {
     if (cmci_support && !mce_disabled)
-        on_each_cpu(__cpu_mcheck_distribute_cmci, NULL, 0, 0);
+        on_each_cpu(__cpu_mcheck_distribute_cmci, NULL, 0);
 }

 static void clear_cmci(void)
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c	Wed May 27 11:16:27 2009 +0100
@@ -69,7 +69,7 @@ static void mce_checkregs (void *info)

 static void mce_work_fn(void *data)
 {
-    on_each_cpu(mce_checkregs, NULL, 1, 1);
+    on_each_cpu(mce_checkregs, NULL, 1);

     if (variable_period) {
         if (adjust)
--- a/xen/arch/x86/cpu/mtrr/main.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/mtrr/main.c	Wed May 27 11:16:27 2009 +0100
@@ -229,7 +229,7 @@ static void set_mtrr(unsigned int reg, u
     atomic_set(&data.gate,0);

     /* Start the ball rolling on other CPUs */
-    if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
+    if (smp_call_function(ipi_handler, &data, 0) != 0)
         panic("mtrr: timed out waiting for other CPUs\n");

     local_irq_save(flags);
@@ -688,7 +688,7 @@ void mtrr_save_state(void)
     if (cpu == 0)
         mtrr_save_fixed_ranges(NULL);
     else
-        on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1, 1);
+        on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1);
     put_cpu();
 }
--- a/xen/arch/x86/hvm/hvm.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Wed May 27 11:16:27 2009 +0100
@@ -971,7 +971,7 @@ int hvm_set_cr0(unsigned long value)
             if ( !v->domain->arch.hvm_domain.is_in_uc_mode )
             {
                 /* Flush physical caches. */
-                on_each_cpu(local_flush_cache, NULL, 1, 1);
+                on_each_cpu(local_flush_cache, NULL, 1);
                 hvm_set_uc_mode(v, 1);
             }
             spin_unlock(&v->domain->arch.hvm_domain.uc_lock);
--- a/xen/arch/x86/hvm/svm/svm.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Wed May 27 11:16:27 2009 +0100
@@ -1254,7 +1254,7 @@ static void wbinvd_ipi(void *info)
 static void svm_wbinvd_intercept(void)
 {
     if ( has_arch_pdevs(current->domain) )
-        on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+        on_each_cpu(wbinvd_ipi, NULL, 1);
 }

 static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed May 27 11:16:27 2009 +0100
@@ -264,7 +264,7 @@ static void vmx_clear_vmcs(struct vcpu *
     int cpu = v->arch.hvm_vmx.active_cpu;

     if ( cpu != -1 )
-        on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1, 1);
+        on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1);
 }

 static void vmx_load_vmcs(struct vcpu *v)
@@ -900,7 +900,7 @@ void vmx_do_resume(struct vcpu *v)
         {
             int cpu = v->arch.hvm_vmx.active_cpu;
             if ( cpu != -1 )
-                on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1, 1);
+                on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1);
         }

         vmx_clear_vmcs(v);
--- a/xen/arch/x86/hvm/vmx/vmx.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed May 27 11:16:27 2009 +0100
@@ -1220,7 +1220,7 @@ void ept_sync_domain(struct domain *d)
     if ( d->arch.hvm_domain.hap_enabled && d->vcpu[0] )
     {
         ASSERT(local_irq_is_enabled());
-        on_each_cpu(__ept_sync_domain, d, 1, 1);
+        on_each_cpu(__ept_sync_domain, d, 1);
     }
 }

@@ -2131,7 +2131,7 @@ static void vmx_wbinvd_intercept(void)
         return;

     if ( cpu_has_wbinvd_exiting )
-        on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+        on_each_cpu(wbinvd_ipi, NULL, 1);
     else
         wbinvd();
 }
--- a/xen/arch/x86/irq.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/irq.c	Wed May 27 11:16:27 2009 +0100
@@ -522,7 +522,7 @@ static void __pirq_guest_eoi(struct doma
     }

     if ( !cpus_empty(cpu_eoi_map) )
-        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0);
+        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
 }

 int pirq_guest_eoi(struct domain *d, int irq)
@@ -761,7 +761,7 @@ static irq_guest_action_t *__pirq_guest_
         {
             cpu_eoi_map = action->cpu_eoi_map;
             spin_unlock_irq(&desc->lock);
-            on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0);
+            on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
             spin_lock_irq(&desc->lock);
         }
         break;
@@ -799,7 +799,7 @@ static irq_guest_action_t *__pirq_guest_
     {
         BUG_ON(action->ack_type != ACKTYPE_EOI);
         spin_unlock_irq(&desc->lock);
-        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 1);
+        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1);
         spin_lock_irq(&desc->lock);
     }
--- a/xen/arch/x86/machine_kexec.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/machine_kexec.c	Wed May 27 11:16:27 2009 +0100
@@ -100,7 +100,7 @@ void machine_reboot_kexec(xen_kexec_imag
     if ( reboot_cpu_id != smp_processor_id() )
     {
         on_selected_cpus(cpumask_of(reboot_cpu_id), __machine_reboot_kexec,
-                         image, 1, 0);
+                         image, 0);
         for (;;)
             ; /* nothing */
     }
--- a/xen/arch/x86/oprofile/nmi_int.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/oprofile/nmi_int.c	Wed May 27 11:16:27 2009 +0100
@@ -186,7 +186,7 @@ static void nmi_cpu_setup(void * dummy)

 int nmi_setup_events(void)
 {
-    on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
+    on_each_cpu(nmi_cpu_setup, NULL, 1);
     return 0;
 }

@@ -207,7 +207,7 @@ int nmi_reserve_counters(void)
     /* We need to serialize save and setup for HT because the subset
      * of msrs are distinct for save and setup operations
      */
-    on_each_cpu(nmi_save_registers, NULL, 0, 1);
+    on_each_cpu(nmi_save_registers, NULL, 1);
     return 0;
 }

@@ -256,7 +256,7 @@ static void nmi_cpu_shutdown(void * dumm

 void nmi_release_counters(void)
 {
-    on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
+    on_each_cpu(nmi_cpu_shutdown, NULL, 1);
     release_lapic_nmi();
     free_msrs();
 }
@@ -274,7 +274,7 @@ static void nmi_cpu_start(void * dummy)

 int nmi_start(void)
 {
-    on_each_cpu(nmi_cpu_start, NULL, 0, 1);
+    on_each_cpu(nmi_cpu_start, NULL, 1);
     return 0;
 }

@@ -306,7 +306,7 @@ static void nmi_cpu_stop(void * dummy)

 void nmi_stop(void)
 {
-    on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
+    on_each_cpu(nmi_cpu_stop, NULL, 1);
 }
--- a/xen/arch/x86/shutdown.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/shutdown.c	Wed May 27 11:16:27 2009 +0100
@@ -91,7 +91,7 @@ void machine_halt(void)
     watchdog_disable();
     console_start_sync();
     local_irq_enable();
-    smp_call_function(__machine_halt, NULL, 1, 0);
+    smp_call_function(__machine_halt, NULL, 0);
     __machine_halt(NULL);
 }

@@ -311,7 +311,7 @@ void machine_restart(unsigned int delay_
     {
         /* Send IPI to the boot CPU (logical cpu 0). */
        on_selected_cpus(cpumask_of(0), __machine_restart,
-                         &delay_millisecs, 1, 0);
+                         &delay_millisecs, 0);
         for ( ; ; )
             halt();
     }
--- a/xen/arch/x86/smp.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/smp.c	Wed May 27 11:16:27 2009 +0100
@@ -239,19 +239,17 @@ static void __smp_call_function_interrup
 int smp_call_function(
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait)
 {
     cpumask_t allbutself = cpu_online_map;
     cpu_clear(smp_processor_id(), allbutself);
-    return on_selected_cpus(&allbutself, func, info, retry, wait);
+    return on_selected_cpus(&allbutself, func, info, wait);
 }

 int on_selected_cpus(
     const cpumask_t *selected,
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait)
 {
     struct call_data_struct data;
@@ -322,7 +320,7 @@ void smp_send_stop(void)
 {
     int timeout = 10;

-    smp_call_function(stop_this_cpu, NULL, 1, 0);
+    smp_call_function(stop_this_cpu, NULL, 0);

     /* Wait 10ms for all other CPUs to go offline. */
     while ( (num_online_cpus() > 1) && (timeout-- > 0) )
--- a/xen/arch/x86/time.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/time.c	Wed May 27 11:16:27 2009 +0100
@@ -1193,7 +1193,7 @@ static void time_calibration(void *unuse
                      opt_consistent_tscs
                      ? time_calibration_tsc_rendezvous
                      : time_calibration_std_rendezvous,
-                     &r, 0, 1);
+                     &r, 1);
 }

 void init_percpu_time(void)
--- a/xen/arch/x86/x86_32/traps.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/x86_32/traps.c	Wed May 27 11:16:27 2009 +0100
@@ -403,7 +403,7 @@ static long register_guest_callback(stru
     case CALLBACKTYPE_sysenter_deprecated:
         if ( !cpu_has_sep )
             ret = -EINVAL;
-        else if ( on_each_cpu(do_update_sysenter, &reg->address, 1, 1) != 0 )
+        else if ( on_each_cpu(do_update_sysenter, &reg->address, 1) != 0 )
             ret = -EIO;
         break;
--- a/xen/common/gdbstub.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/common/gdbstub.c	Wed May 27 11:16:27 2009 +0100
@@ -672,7 +672,7 @@ static void gdb_smp_pause(void)

     atomic_set(&gdb_smp_paused_count, 0);

-    smp_call_function(gdb_pause_this_cpu, NULL, /* dont wait! */0, 0);
+    smp_call_function(gdb_pause_this_cpu, NULL, /* dont wait! */0);

     /* Wait 100ms for all other CPUs to enter pause loop */
     while ( (atomic_read(&gdb_smp_paused_count) < (num_online_cpus() - 1))
--- a/xen/common/keyhandler.c	Wed May 27 11:15:08 2009 +0100
+++ b/xen/common/keyhandler.c	Wed May 27 11:16:27 2009 +0100
@@ -119,7 +119,7 @@ static void dump_registers(unsigned char
         if ( cpu == smp_processor_id() )
             continue;
         printk("\n*** Dumping CPU%d host state: ***\n", cpu);
-        on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1, 1);
+        on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1);
     }

     printk("\n");
@@ -263,7 +263,7 @@ static void read_clocks(unsigned char ke

     spin_lock(&lock);

-    smp_call_function(read_clocks_slave, NULL, 0, 0);
+    smp_call_function(read_clocks_slave, NULL, 0);

     local_irq_disable();
     read_clocks_cpumask = cpu_online_map;
--- a/xen/include/asm-ia64/linux-xen/asm/smp.h	Wed May 27 11:15:08 2009 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/smp.h	Wed May 27 11:16:27 2009 +0100
@@ -127,8 +127,8 @@ extern void __init smp_build_cpu_map(voi
 extern void __init init_smp_config (void);
 extern void smp_do_timer (struct pt_regs *regs);

-extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
-                                     int retry, int wait);
+extern int smp_call_function_single (int cpuid, void (*func) (void *info),
+                                     void *info, int wait);
 extern void smp_send_reschedule (int cpu);
 #ifdef XEN
 extern void lock_ipi_calllock(unsigned long *flags);
--- a/xen/include/xen/smp.h	Wed May 27 11:15:08 2009 +0100
+++ b/xen/include/xen/smp.h	Wed May 27 11:16:27 2009 +0100
@@ -34,7 +34,6 @@ extern void smp_cpus_done(unsigned int m
 extern int smp_call_function(
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait);

 /*
@@ -44,7 +43,6 @@ extern int on_selected_cpus(
     const cpumask_t *selected,
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait);

 /*
@@ -59,10 +57,9 @@ void smp_prepare_boot_cpu(void);
 static inline int on_each_cpu(
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait)
 {
-    return on_selected_cpus(&cpu_online_map, func, info, retry, wait);
+    return on_selected_cpus(&cpu_online_map, func, info, wait);
 }

 #define smp_processor_id() raw_smp_processor_id()