[PATCH] for_each_possible_cpu: powerpc
for_each_cpu() actually iterates across all possible CPUs.  We've had
mistakes in the past where people were using for_each_cpu() where they
should have been iterating across only online or present CPUs.  This is
inefficient and possibly buggy.

We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this
in the future.

This patch replaces for_each_cpu with for_each_possible_cpu.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent: bab70a4af7
Commit: 0e5519548f
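For context, the renamed iterator differs from its siblings only in which
CPU mask it walks. A minimal sketch of how the three were defined in kernels
of this era (modeled on include/linux/cpumask.h; exact definitions may vary
by version):

	/* Walk every CPU that could ever exist on this boot: the right
	 * choice for one-time setup of per-CPU state, as in this patch. */
	#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)

	/* Walk only CPUs that are currently up: the right choice when
	 * touching live state (IPIs, timers, stats of running CPUs). */
	#define for_each_online_cpu(cpu)   for_each_cpu_mask((cpu), cpu_online_map)

	/* Walk CPUs that are physically present, whether or not online. */
	#define for_each_present_cpu(cpu)  for_each_cpu_mask((cpu), cpu_present_map)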
@@ -379,7 +379,7 @@ void irq_ctx_init(void)
 	struct thread_info *tp;
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
@@ -56,7 +56,7 @@ static unsigned long get_purr(void)
 	unsigned long sum_purr = 0;
 	int cpu;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		sum_purr += lppaca[cpu].emulated_time_base;
 
 #ifdef PURR_DEBUG
@@ -222,7 +222,7 @@ static unsigned long get_purr(void)
 	int cpu;
 	struct cpu_usage *cu;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		cu = &per_cpu(cpu_usage_array, cpu);
 		sum_purr += cu->current_tb;
 	}
@@ -593,7 +593,7 @@ static void rtas_percpu_suspend_me(void *info)
 		data->waiting = 0;
 		data->args->args[data->args->nargs] =
 			rtas_call(ibm_suspend_me_token, 0, 1, NULL);
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			plpar_hcall_norets(H_PROD,i);
 	} else {
 		data->waiting = -EBUSY;
@@ -626,7 +626,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
 	/* Prod each CPU. This won't hurt, and will wake
 	 * anyone we successfully put to sleep with H_Join
 	 */
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		plpar_hcall_norets(H_PROD, i);
 
 	return data.waiting;
@@ -431,7 +431,7 @@ void __init smp_setup_cpu_maps(void)
 	/*
 	 * Do the sibling map; assume only two threads per processor.
 	 */
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		cpu_set(cpu, cpu_sibling_map[cpu]);
 		if (cpu_has_feature(CPU_FTR_SMT))
 			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
@@ -226,7 +226,7 @@ int __init ppc_init(void)
 	if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
 
 	/* register CPU devices */
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		register_cpu(&cpu_devices[i], i, NULL);
 
 	/* call platform init */
@@ -474,7 +474,7 @@ static void __init irqstack_early_init(void)
 	 * interrupt stacks must be under 256MB, we cannot afford to take
 	 * SLB misses on them.
 	 */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
 			__va(lmb_alloc_base(THREAD_SIZE,
 					    THREAD_SIZE, 0x10000000));
@@ -507,7 +507,7 @@ static void __init emergency_stack_init(void)
 	 */
 	limit = min(0x10000000UL, lmb.rmo_size);
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		paca[i].emergency_sp =
 		__va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
 }
@@ -624,7 +624,7 @@ void __init setup_per_cpu_areas(void)
 	size = PERCPU_ENOUGH_ROOM;
 #endif
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
 		if (!ptr)
 			panic("Cannot allocate cpu data for CPU %d\n", i);
@@ -362,7 +362,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	smp_space_timers(max_cpus);
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		if (cpu != boot_cpuid)
 			smp_create_idle(cpu);
 }
@@ -74,7 +74,7 @@ static int __init smt_setup(void)
 	val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay",
 					   NULL);
 	if (!smt_snooze_cmdline && val) {
-		for_each_cpu(cpu)
+		for_each_possible_cpu(cpu)
 			per_cpu(smt_snooze_delay, cpu) = *val;
 	}
 
@@ -93,7 +93,7 @@ static int __init setup_smt_snooze_delay(char *str)
 	smt_snooze_cmdline = 1;
 
 	if (get_option(&str, &snooze)) {
-		for_each_cpu(cpu)
+		for_each_possible_cpu(cpu)
 			per_cpu(smt_snooze_delay, cpu) = snooze;
 	}
 
@@ -347,7 +347,7 @@ static int __init topology_init(void)
 
 	register_cpu_notifier(&sysfs_cpu_nb);
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct cpu *c = &per_cpu(cpu_devices, cpu);
 
 #ifdef CONFIG_NUMA
@@ -261,7 +261,7 @@ void snapshot_timebases(void)
 
 	if (!cpu_has_feature(CPU_FTR_PURR))
 		return;
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
 	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
 }
@@ -751,7 +751,7 @@ void __init smp_space_timers(unsigned int max_cpus)
 	 * systems works better if the two threads' timebase interrupts
 	 * are staggered by half a jiffy with respect to each other.
 	 */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (i == boot_cpuid)
 			continue;
 		if (i == (boot_cpuid ^ 1))
@@ -239,7 +239,7 @@ void stabs_alloc(void)
 	if (cpu_has_feature(CPU_FTR_SLB))
 		return;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		unsigned long newstab;
 
 		if (cpu == 0)
@@ -364,7 +364,7 @@ void iic_init_IRQ(void)
 	setup_iic_hardcoded();
 
 	irq_offset = 0;
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		iic = &per_cpu(iic, cpu);
 		if (iic->regs)
 			out_be64(&iic->regs->prio, 0xff);
@@ -217,7 +217,7 @@ void __init cell_pervasive_init(void)
 	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
 		return;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		p = &cbe_pervasive[cpu];
 		ret = cbe_find_pmd_mmio(cpu, p);
 		if (ret)
@@ -541,7 +541,7 @@ nextnode:
 		ops = &pSeriesLP_ops;
 	else {
 #ifdef CONFIG_SMP
-		for_each_cpu(i) {
+		for_each_possible_cpu(i) {
 			int hard_id;
 
 			/* FIXME: Do this dynamically! --RR */
@@ -27,7 +27,7 @@
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for_each_cpu(__i)					\
+	for_each_possible_cpu(__i)				\
 		memcpy((pcpudst)+__per_cpu_offset(__i),		\
 		       (src), (size));				\
 } while (0)
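The percpu_modcopy() hunk above is the per-CPU copy helper used when loading
modules; it must walk possible rather than online CPUs so that a CPU brought
online later still finds its per-CPU data initialized. A hypothetical caller,
for illustration only (the name and signature below are not the actual
kernel/module.c code):

	/* Replicate a per-CPU template into every possible CPU's area.
	 * Covering only online CPUs would leave a CPU hotplugged later
	 * reading uninitialized per-CPU data. */
	static void replicate_percpu_template(void *pcpudst, const void *src,
					      unsigned long size)
	{
		percpu_modcopy(pcpudst, src, size);
	}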