smp_call_function: get rid of the unused nonatomic/retry argument
It's never used and the comments refer to nonatomic and retry interchangeably, so get rid of it.

Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Parent: 490f5de52a
Commit: 8691e5a8f6
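In short, the patch drops the unused third argument from the cross-CPU call API. A before/after sketch of the two prototypes, as they appear in the hunks below (the extra flag was named nonatomic, retry, or natomic at various sites):

    /* Before: the extra flag was accepted but never used. */
    int smp_call_function(void (*func)(void *info), void *info,
                          int nonatomic, int wait);
    int smp_call_function_single(int cpu, void (*func)(void *info),
                                 void *info, int nonatomic, int wait);

    /* After: only func, info and wait remain. */
    int smp_call_function(void (*func)(void *info), void *info, int wait);
    int smp_call_function_single(int cpu, void (*func)(void *info),
                                 void *info, int wait);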
@@ -662,7 +662,7 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write)
 	if (smp_processor_id() != boot_cpuid)
 		smp_call_function_single(boot_cpuid,
 					 __marvel_access_rtc,
-					 &rtc_access, 1, 1);
+					 &rtc_access, 1);
 	else
 		__marvel_access_rtc(&rtc_access);
 #else

@@ -710,7 +710,7 @@ flush_tlb_mm(struct mm_struct *mm)
 		}
 	}
 
-	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
+	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
 		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
 	}
 

@@ -763,7 +763,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 	data.mm = mm;
 	data.addr = addr;
 
-	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
+	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
 		printk(KERN_CRIT "flush_tlb_page: timed out\n");
 	}
 

@@ -815,7 +815,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 		}
 	}
 
-	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
+	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
 		printk(KERN_CRIT "flush_icache_page: timed out\n");
 	}
 

@@ -65,7 +65,7 @@ op_axp_setup(void)
 	model->reg_setup(&reg, ctr, &sys);
 
 	/* Configure the registers on all cpus. */
-	(void)smp_call_function(model->cpu_setup, &reg, 0, 1);
+	(void)smp_call_function(model->cpu_setup, &reg, 1);
 	model->cpu_setup(&reg);
 	return 0;
 }

@@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy)
 static int
 op_axp_start(void)
 {
-	(void)smp_call_function(op_axp_cpu_start, NULL, 0, 1);
+	(void)smp_call_function(op_axp_cpu_start, NULL, 1);
 	op_axp_cpu_start(NULL);
 	return 0;
 }

@@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy)
 static void
 op_axp_stop(void)
 {
-	(void)smp_call_function(op_axp_cpu_stop, NULL, 0, 1);
+	(void)smp_call_function(op_axp_cpu_stop, NULL, 1);
 	op_axp_cpu_stop(NULL);
 }
 

@@ -201,7 +201,7 @@ static int em_call_function(int (*fn)(void))
 	data.ret = 0;
 
 	preempt_disable();
-	smp_call_function(em_func, &data, 1, 1);
+	smp_call_function(em_func, &data, 1);
 	em_func(&data);
 	preempt_enable();
 

@@ -352,7 +352,7 @@ static int __init vfp_init(void)
 	else if (vfpsid & FPSID_NODOUBLE) {
 		printk("no double precision support\n");
 	} else {
-		smp_call_function(vfp_enable, NULL, 1, 1);
+		smp_call_function(vfp_enable, NULL, 1);
 
 		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
 		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",

@@ -194,7 +194,7 @@ void stop_this_cpu(void* dummy)
 /* Other calls */
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -316,8 +316,7 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *info), void *info,
-		      int nonatomic, int wait)
+int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
 	cpumask_t cpu_mask = CPU_MASK_ALL;
 	struct call_data_struct data;

@@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
-					 NULL, 1, 0);
+					 NULL, 0);
 		break;
 	}
 	return NOTIFY_OK;

@@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
 
 
 	/* will send IPI to other CPU and wait for completion of remote call */
-	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
+	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
 		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
 		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
 		return 0;

@@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
 	int ret;
 
 	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
 	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */

@@ -286,7 +286,7 @@ void cpu_idle_wait(void)
 {
 	smp_mb();
 	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
+	smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 

@@ -317,7 +317,7 @@ ia64_sync_itc (unsigned int master)
 
 	go[MASTER] = 1;
 
-	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
 		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
 		return;
 	}

@@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
 		atomic_set(&uc_pool->status, 0);
-		status = smp_call_function(uncached_ipi_visibility, uc_pool,
-					   0, 1);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
 		if (status || atomic_read(&uc_pool->status))
 			goto failed;
 	} else if (status != PAL_VISIBILITY_OK)

@@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	if (status != PAL_STATUS_SUCCESS)
 		goto failed;
 	atomic_set(&uc_pool->status, 0);
-	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
 	if (status || atomic_read(&uc_pool->status))
 		goto failed;
 

@@ -629,7 +629,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
 	if (use_ipi) {
 		/* use an interprocessor interrupt to call SAL */
 		smp_call_function_single(cpu, sn_hwperf_call_sal,
-					 op_info, 1, 1);
+					 op_info, 1);
 	}
 	else {
 		/* migrate the task before calling SAL */

@@ -212,7 +212,7 @@ void smp_flush_tlb_all(void)
 	local_irq_save(flags);
 	__flush_tlb_all();
 	local_irq_restore(flags);
-	smp_call_function(flush_tlb_all_ipi, NULL, 1, 1);
+	smp_call_function(flush_tlb_all_ipi, NULL, 1);
 	preempt_enable();
 }
 
@@ -505,7 +505,7 @@ void smp_invalidate_interrupt(void)
  *==========================================================================*/
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 /*==========================================================================*

@@ -167,7 +167,7 @@ static void stop_this_cpu(void *dummy)
 
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)

@@ -266,7 +266,7 @@ static void flush_tlb_mm_ipi(void *mm)
 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
 {
 #ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1, 1);
 #endif
 }
 

@@ -43,12 +43,12 @@
  * primary cache.
  */
 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-				   int retry, int wait)
+				   int wait)
 {
 	preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-	smp_call_function(func, info, retry, wait);
+	smp_call_function(func, info, wait);
 #endif
 	func(info);
 	preempt_enable();

@@ -350,7 +350,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)

@@ -397,7 +397,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	int exec = vma->vm_flags & VM_EXEC;
 
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)

@@ -429,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
 	if (!cpu_has_dc_aliases)
 		return;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
 }
 
 struct flush_cache_page_args {

@@ -521,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
 	args.addr = addr;
 	args.pfn = pfn;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)

@@ -535,7 +535,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
 		local_r4k_flush_data_cache_page((void *)addr);
 	else
 		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-				1, 1);
+				1);
 }
 
 struct flush_icache_range_args {

@@ -571,7 +571,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.start = start;
 	args.end = end;
 
-	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
 	instruction_hazard();
 }
 
@@ -672,7 +672,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
 }
 
 static void r4k_flush_icache_all(void)

@@ -64,7 +64,7 @@ static void prom_exit(void)
 #ifdef CONFIG_SMP
 	if (smp_processor_id())
 		/* CPU 1 */
-		smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+		smp_call_function(prom_cpu0_exit, NULL, 1);
 #endif
 	prom_cpu0_exit(NULL);
 }

@@ -74,7 +74,7 @@ static void __noreturn cfe_linux_exit(void *arg)
 		if (!reboot_smp) {
 			/* Get CPU 0 to do the cfe_exit */
 			reboot_smp = 1;
-			smp_call_function(cfe_linux_exit, arg, 1, 0);
+			smp_call_function(cfe_linux_exit, arg, 0);
 		}
 	} else {
 		printk("Passing control back to CFE...\n");

@@ -66,7 +66,7 @@ static void prom_linux_exit(void)
 {
 #ifdef CONFIG_SMP
 	if (smp_processor_id()) {
-		smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+		smp_call_function(prom_cpu0_exit, NULL, 1);
 	}
 #endif
 	while(1);

@@ -168,7 +168,7 @@ static void stop_this_cpu(void *dummy)
 
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 0, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 extern struct gettimeofday_struct do_gtod;

@@ -209,7 +209,7 @@ __appldata_vtimer_setup(int cmd)
 			per_cpu(appldata_timer, i).expires = per_cpu_interval;
 			smp_call_function_single(i, add_virt_timer_periodic,
 						 &per_cpu(appldata_timer, i),
-						 0, 1);
+						 1);
 		}
 		appldata_timer_active = 1;
 		P_INFO("Monitoring timer started.\n");

@@ -236,7 +236,7 @@ __appldata_vtimer_setup(int cmd)
 			args.timer = &per_cpu(appldata_timer, i);
 			args.expires = per_cpu_interval;
 			smp_call_function_single(i, __appldata_mod_vtimer_wrap,
-						 &args, 0, 1);
+						 &args, 1);
 		}
 	}
 }

@@ -109,7 +109,7 @@ static void do_call_function(void)
 }
 
 static void __smp_call_function_map(void (*func) (void *info), void *info,
-				    int nonatomic, int wait, cpumask_t map)
+				    int wait, cpumask_t map)
 {
 	struct call_data_struct data;
 	int cpu, local = 0;

@@ -162,7 +162,6 @@ out:
  * smp_call_function:
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
  *
  * Run a function on all other CPUs.

@@ -170,15 +169,14 @@ out:
  * You must not call this function with disabled interrupts, from a
  * hardware interrupt handler or from a bottom half.
 */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
+int smp_call_function(void (*func) (void *info), void *info, int wait)
 {
 	cpumask_t map;
 
 	spin_lock(&call_lock);
 	map = cpu_online_map;
 	cpu_clear(smp_processor_id(), map);
-	__smp_call_function_map(func, info, nonatomic, wait, map);
+	__smp_call_function_map(func, info, wait, map);
 	spin_unlock(&call_lock);
 	return 0;
 }
@@ -189,7 +187,6 @@ EXPORT_SYMBOL(smp_call_function);
  * @cpu: the CPU where func should run
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
  *
  * Run a function on one processor.

@@ -198,11 +195,10 @@ EXPORT_SYMBOL(smp_call_function);
  * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
+			     int wait)
 {
 	spin_lock(&call_lock);
-	__smp_call_function_map(func, info, nonatomic, wait,
-				cpumask_of_cpu(cpu));
+	__smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
 	spin_unlock(&call_lock);
 	return 0;
 }

@@ -228,7 +224,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 {
 	spin_lock(&call_lock);
 	cpu_clear(smp_processor_id(), mask);
-	__smp_call_function_map(func, info, 0, wait, mask);
+	__smp_call_function_map(func, info, wait, mask);
 	spin_unlock(&call_lock);
 	return 0;
 }

@@ -690,7 +690,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 	 */
 	memset(&etr_sync, 0, sizeof(etr_sync));
 	preempt_disable();
-	smp_call_function(etr_sync_cpu_start, NULL, 0, 0);
+	smp_call_function(etr_sync_cpu_start, NULL, 0);
 	local_irq_disable();
 	etr_enable_sync_clock();
 

@@ -729,7 +729,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 		rc = -EAGAIN;
 	}
 	local_irq_enable();
-	smp_call_function(etr_sync_cpu_end,NULL,0,0);
+	smp_call_function(etr_sync_cpu_end,NULL,0);
 	preempt_enable();
 	return rc;
 }

@@ -168,7 +168,7 @@ static void stop_this_cpu(void *unused)
 
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, 0, 1, 0);
+	smp_call_function(stop_this_cpu, 0, 0);
 }
 
 void arch_send_call_function_ipi(cpumask_t mask)

@@ -223,7 +223,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
-		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)

@@ -260,7 +260,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 		fd.vma = vma;
 		fd.addr1 = start;
 		fd.addr2 = end;
-		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)

@@ -303,7 +303,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 		fd.vma = vma;
 		fd.addr1 = page;
-		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)

@@ -327,6 +327,6 @@ void flush_tlb_one(unsigned long asid, unsigned long vaddr)
 	fd.addr1 = asid;
 	fd.addr2 = vaddr;
 
-	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
+	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
 	local_flush_tlb_one(asid, vaddr);
 }
@@ -807,7 +807,6 @@ extern unsigned long xcall_call_function;
  * smp_call_function(): Run a function on all other CPUs.
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: currently unused.
  * @wait: If true, wait (atomically) until function has completed on other CPUs.
  *
  * Returns 0 on success, else a negative status code. Does not return until

@@ -817,8 +816,7 @@ extern unsigned long xcall_call_function;
  * hardware interrupt handler or from a bottom half handler.
  */
 static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info,
-					  int nonatomic, int wait,
-					  cpumask_t mask)
+					  int wait, cpumask_t mask)
 {
 	struct call_data_struct data;
 	int cpus;

@@ -853,11 +851,9 @@ out_unlock:
 	return 0;
 }
 
-int smp_call_function(void (*func)(void *info), void *info,
-		      int nonatomic, int wait)
+int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
-	return sparc64_smp_call_function_mask(func, info, nonatomic, wait,
-					      cpu_online_map);
+	return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map);
 }
 
 void smp_call_function_client(int irq, struct pt_regs *regs)

@@ -894,7 +890,7 @@ static void tsb_sync(void *info)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-	sparc64_smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
+	sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask);
 }
 
 extern unsigned long xcall_flush_tlb_mm;

@@ -214,8 +214,7 @@ void smp_call_function_slave(int cpu)
 	atomic_inc(&scf_finished);
 }
 
-int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic,
-		      int wait)
+int smp_call_function(void (*_func)(void *info), void *_info, int wait)
 {
 	int cpus = num_online_cpus() - 1;
 	int i;

@@ -222,7 +222,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
 	atomic_set(&data.gate,0);
 
 	/* Start the ball rolling on other CPUs */
-	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
+	if (smp_call_function(ipi_handler, &data, 0) != 0)
 		panic("mtrr: timed out waiting for other CPUs\n");
 
 	local_irq_save(flags);

@@ -822,7 +822,7 @@ void mtrr_ap_init(void)
  */
 void mtrr_save_state(void)
 {
-	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
+	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
 }
 
 static int __init mtrr_init_finialize(void)

@@ -95,7 +95,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	for (; count; count -= 16) {
 		cmd.eax = pos;
 		cmd.ecx = pos >> 32;
-		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
+		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
 		if (copy_to_user(tmp, &cmd, 16))
 			return -EFAULT;
 		tmp += 16;

@@ -68,7 +68,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 		load_LDT(pc);
 		mask = cpumask_of_cpu(smp_processor_id());
 		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-			smp_call_function(flush_ldt, NULL, 1, 1);
+			smp_call_function(flush_ldt, NULL, 1);
 		preempt_enable();
 #else
 		load_LDT(pc);

@@ -87,7 +87,7 @@ int __init check_nmi_watchdog(void)
 
 #ifdef CONFIG_SMP
 	if (nmi_watchdog == NMI_LOCAL_APIC)
-		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 #endif
 
 	for_each_possible_cpu(cpu)
@@ -96,7 +96,7 @@ int __init check_nmi_watchdog(void)
 
 #ifdef CONFIG_SMP
 	if (nmi_watchdog == NMI_LOCAL_APIC)
-		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 #endif
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++)

@@ -164,7 +164,7 @@ static void native_smp_send_stop(void)
 	if (reboot_force)
 		return;
 
-	smp_call_function(stop_this_cpu, NULL, 0, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 	local_irq_save(flags);
 	disable_local_APIC();
 	local_irq_restore(flags);

@@ -278,7 +278,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
 	long cpu = (long)arg;
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
 	return NOTIFY_DONE;
 }
 

@@ -335,7 +335,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
 {
 	if (vmx->vcpu.cpu == -1)
 		return;
-	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
+	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
 	vmx->launched = 0;
 }
 

@@ -4044,6 +4044,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 	 * So need not to call smp_call_function_single() in that case.
 	 */
 	if (vcpu->guest_mode && vcpu->cpu != cpu)
-		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
 	put_cpu();
 }

@@ -30,10 +30,10 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
 
 	rv.msr_no = msr_no;
 	if (safe) {
-		smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
 		err = rv.err;
 	} else {
-		smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
 	}
 	*l = rv.l;
 	*h = rv.h;

@@ -64,10 +64,10 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
 	rv.l = l;
 	rv.h = h;
 	if (safe) {
-		smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
 		err = rv.err;
 	} else {
-		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
 	}
 
 	return err;

@@ -1113,7 +1113,7 @@ int safe_smp_processor_id(void)
 /* broadcast a halt to all other CPUs */
 static void voyager_smp_send_stop(void)
 {
-	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
+	smp_call_function(smp_stop_cpu_function, NULL, 1);
 }
 
 /* this function is triggered in time.c when a clock tick fires

@@ -331,7 +331,7 @@ static void stop_self(void *v)
 
 void xen_smp_send_stop(void)
 {
-	smp_call_function(stop_self, NULL, 0, 0);
+	smp_call_function(stop_self, NULL, 0);
 }
 
 void xen_smp_send_reschedule(int cpu)

@@ -1339,7 +1339,7 @@ static void smp_callback(void *v)
 static int acpi_processor_latency_notify(struct notifier_block *b,
 		unsigned long l, void *v)
 {
-	smp_call_function(smp_callback, NULL, 0, 1);
+	smp_call_function(smp_callback, NULL, 1);
 	return NOTIFY_OK;
 }
 
@@ -340,7 +340,7 @@ static void smp_callback(void *v)
 static int cpuidle_latency_notify(struct notifier_block *b,
 		unsigned long l, void *v)
 {
-	smp_call_function(smp_callback, NULL, 0, 1);
+	smp_call_function(smp_callback, NULL, 1);
 	return NOTIFY_OK;
 }
 

@@ -53,7 +53,7 @@ extern void arch_send_call_function_ipi(cpumask_t mask);
 #else /* CONFIG_SMP */
 
 #define hard_smp_processor_id()	0
-#define smp_call_function_on_cpu(func,info,retry,wait,cpu)	({ 0; })
+#define smp_call_function_on_cpu(func,info,wait,cpu)	({ 0; })
 
 #endif /* CONFIG_SMP */
 

@@ -72,7 +72,7 @@ static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 		       unsigned long arg3, unsigned long arg4, unsigned long arg5)
 { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
 
-static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
+static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
 	xc1((smpfunc_t)func, (unsigned long)info);
 	return 0;

@@ -62,11 +62,11 @@ extern void smp_cpus_done(unsigned int max_cpus);
 /*
  * Call a function on all other processors
  */
-int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
+int smp_call_function(void(*func)(void *info), void *info, int wait);
 int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
 				int wait);
 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-				int retry, int wait);
+				int wait);
 void __smp_call_function_single(int cpuid, struct call_single_data *data);
 
 /*

@@ -119,7 +119,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
 {
 	return 0;
 }
-#define smp_call_function(func, info, retry, wait) \
+#define smp_call_function(func, info, wait) \
 			(up_smp_call_function(func, info))
 #define on_each_cpu(func,info,retry,wait)	\
 	({					\

@@ -131,7 +131,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()	1
 #define smp_prepare_boot_cpu()	do {} while (0)
-#define smp_call_function_single(cpuid, func, info, retry, wait) \
+#define smp_call_function_single(cpuid, func, info, wait) \
 	({ \
 		WARN_ON(cpuid != 0);	\
 		local_irq_disable();	\

@@ -195,7 +195,6 @@ void generic_smp_call_function_single_interrupt(void)
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
-* @retry: Unused
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Note that @wait

@@ -203,7 +202,7 @@ void generic_smp_call_function_single_interrupt(void)
 * we fall back to on-stack allocation.
 */
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int retry, int wait)
+			     int wait)
 {
 	struct call_single_data d;
 	unsigned long flags;

@@ -339,7 +338,6 @@ EXPORT_SYMBOL(smp_call_function_mask);
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
-* @natomic: Unused
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.

@@ -351,7 +349,7 @@ EXPORT_SYMBOL(smp_call_function_mask);
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
-int smp_call_function(void (*func)(void *), void *info, int natomic, int wait)
+int smp_call_function(void (*func)(void *), void *info, int wait)
 {
 	int ret;
 
@@ -679,7 +679,7 @@ int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
 	int ret = 0;
 
 	preempt_disable();
-	ret = smp_call_function(func, info, retry, wait);
+	ret = smp_call_function(func, info, wait);
 	local_irq_disable();
 	func(info);
 	local_irq_enable();

@@ -266,7 +266,7 @@ void tick_broadcast_on_off(unsigned long reason, int *oncpu)
 		       "offline CPU #%d\n", *oncpu);
 	else
 		smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
-					 &reason, 1, 1);
+					 &reason, 1);
 }
 
 /*

@@ -298,7 +298,7 @@ void flow_cache_flush(void)
 	init_completion(&info.completion);
 
 	local_bh_disable();
-	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
+	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
 	flow_cache_flush_tasklet((unsigned long)&info);
 	local_bh_enable();
 

@@ -480,7 +480,7 @@ static void iucv_setmask_mp(void)
 		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
 		    !cpu_isset(cpu, iucv_irq_cpumask))
 			smp_call_function_single(cpu, iucv_allow_cpu,
-						 NULL, 0, 1);
+						 NULL, 1);
 	preempt_enable();
 }
 

@@ -498,7 +498,7 @@ static void iucv_setmask_up(void)
 	cpumask = iucv_irq_cpumask;
 	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
 	for_each_cpu_mask(cpu, cpumask)
-		smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
 /**

@@ -523,7 +523,7 @@ static int iucv_enable(void)
 	rc = -EIO;
 	preempt_disable();
 	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 	preempt_enable();
 	if (cpus_empty(iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */

@@ -580,7 +580,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 	case CPU_ONLINE_FROZEN:
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:

@@ -589,10 +589,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		if (cpus_empty(cpumask))
 			/* Can't offline last IUCV enabled cpu. */
 			return NOTIFY_BAD;
-		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
 		if (cpus_empty(iucv_irq_cpumask))
 			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
-						 iucv_allow_cpu, NULL, 0, 1);
+						 iucv_allow_cpu, NULL, 1);
 		break;
 	}
 	return NOTIFY_OK;

@@ -652,7 +652,7 @@ static void iucv_cleanup_queue(void)
 	 * pending interrupts force them to the work queue by calling
 	 * an empty function on all cpus.
 	 */
-	smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+	smp_call_function(__iucv_cleanup_queue, NULL, 1);
 	spin_lock_irq(&iucv_queue_lock);
 	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
 		/* Remove stale work items from the task queue. */

@@ -1266,12 +1266,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 	case CPU_UP_CANCELED:
 		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
 		       cpu);
-		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
+		smp_call_function_single(cpu, hardware_disable, NULL, 1);
 		break;
 	case CPU_ONLINE:
 		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
 		       cpu);
-		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
+		smp_call_function_single(cpu, hardware_enable, NULL, 1);
 		break;
 	}
 	return NOTIFY_OK;

@@ -1474,7 +1474,7 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu,
 				kvm_arch_check_processor_compat,
-				&r, 0, 1);
+				&r, 1);
 		if (r < 0)
 			goto out_free_1;
 	}
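For any out-of-tree caller the conversion is mechanical: delete the argument before wait. A hypothetical example (handler and data names invented for illustration):

    /* before */ smp_call_function(my_ipi_handler, &my_data, 1, 1);
    /* after  */ smp_call_function(my_ipi_handler, &my_data, 1);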