arch/ia64: remove references to cpu_*_map
This was marked as obsolete for quite a while now. Now it is time to remove it altogether. And while doing this, get rid of first_cpu() as well. Also, remove the redundant setting of cpu_online_mask in smp_prepare_cpus() because the generic code would have already set cpu 0 in cpu_online_mask. Reported-by: Tony Luck <tony.luck@intel.com> Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Родитель
38b93780a5
Коммит
7d7f98488b
|
@ -844,7 +844,7 @@ early_param("additional_cpus", setup_additional_cpus);
|
||||||
* are onlined, or offlined. The reason is per-cpu data-structures
|
* are onlined, or offlined. The reason is per-cpu data-structures
|
||||||
* are allocated by some modules at init time, and dont expect to
|
* are allocated by some modules at init time, and dont expect to
|
||||||
* do this dynamically on cpu arrival/departure.
|
* do this dynamically on cpu arrival/departure.
|
||||||
* cpu_present_map on the other hand can change dynamically.
|
* cpu_present_mask on the other hand can change dynamically.
|
||||||
* In case when cpu_hotplug is not compiled, then we resort to current
|
* In case when cpu_hotplug is not compiled, then we resort to current
|
||||||
* behaviour, which is cpu_possible == cpu_present.
|
* behaviour, which is cpu_possible == cpu_present.
|
||||||
* - Ashok Raj
|
* - Ashok Raj
|
||||||
|
@ -922,7 +922,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
|
||||||
|
|
||||||
acpi_map_cpu2node(handle, cpu, physid);
|
acpi_map_cpu2node(handle, cpu, physid);
|
||||||
|
|
||||||
cpu_set(cpu, cpu_present_map);
|
set_cpu_present(cpu, true);
|
||||||
ia64_cpu_to_sapicid[cpu] = physid;
|
ia64_cpu_to_sapicid[cpu] = physid;
|
||||||
|
|
||||||
acpi_processor_set_pdc(handle);
|
acpi_processor_set_pdc(handle);
|
||||||
|
@ -941,7 +941,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
|
||||||
int acpi_unmap_lsapic(int cpu)
|
int acpi_unmap_lsapic(int cpu)
|
||||||
{
|
{
|
||||||
ia64_cpu_to_sapicid[cpu] = -1;
|
ia64_cpu_to_sapicid[cpu] = -1;
|
||||||
cpu_clear(cpu, cpu_present_map);
|
set_cpu_present(cpu, false);
|
||||||
|
|
||||||
#ifdef CONFIG_ACPI_NUMA
|
#ifdef CONFIG_ACPI_NUMA
|
||||||
/* NUMA specific cleanup's */
|
/* NUMA specific cleanup's */
|
||||||
|
|
|
@ -118,7 +118,7 @@ static inline int find_unassigned_vector(cpumask_t domain)
|
||||||
cpumask_t mask;
|
cpumask_t mask;
|
||||||
int pos, vector;
|
int pos, vector;
|
||||||
|
|
||||||
cpus_and(mask, domain, cpu_online_map);
|
cpumask_and(&mask, &domain, cpu_online_mask);
|
||||||
if (cpus_empty(mask))
|
if (cpus_empty(mask))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
@ -141,7 +141,7 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
|
||||||
BUG_ON((unsigned)irq >= NR_IRQS);
|
BUG_ON((unsigned)irq >= NR_IRQS);
|
||||||
BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
|
BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
|
||||||
|
|
||||||
cpus_and(mask, domain, cpu_online_map);
|
cpumask_and(&mask, &domain, cpu_online_mask);
|
||||||
if (cpus_empty(mask))
|
if (cpus_empty(mask))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
|
if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
|
||||||
|
@ -179,7 +179,7 @@ static void __clear_irq_vector(int irq)
|
||||||
BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
|
BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
|
||||||
vector = cfg->vector;
|
vector = cfg->vector;
|
||||||
domain = cfg->domain;
|
domain = cfg->domain;
|
||||||
cpus_and(mask, cfg->domain, cpu_online_map);
|
cpumask_and(&mask, &cfg->domain, cpu_online_mask);
|
||||||
for_each_cpu_mask(cpu, mask)
|
for_each_cpu_mask(cpu, mask)
|
||||||
per_cpu(vector_irq, cpu)[vector] = -1;
|
per_cpu(vector_irq, cpu)[vector] = -1;
|
||||||
cfg->vector = IRQ_VECTOR_UNASSIGNED;
|
cfg->vector = IRQ_VECTOR_UNASSIGNED;
|
||||||
|
@ -322,7 +322,7 @@ void irq_complete_move(unsigned irq)
|
||||||
if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
|
if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
|
cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
|
||||||
cfg->move_cleanup_count = cpus_weight(cleanup_mask);
|
cfg->move_cleanup_count = cpus_weight(cleanup_mask);
|
||||||
for_each_cpu_mask(i, cleanup_mask)
|
for_each_cpu_mask(i, cleanup_mask)
|
||||||
platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
|
platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
|
||||||
|
|
|
@ -1515,7 +1515,8 @@ static void
|
||||||
ia64_mca_cmc_poll (unsigned long dummy)
|
ia64_mca_cmc_poll (unsigned long dummy)
|
||||||
{
|
{
|
||||||
/* Trigger a CMC interrupt cascade */
|
/* Trigger a CMC interrupt cascade */
|
||||||
platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
|
platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
|
||||||
|
IA64_IPI_DM_INT, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1591,7 +1592,8 @@ static void
|
||||||
ia64_mca_cpe_poll (unsigned long dummy)
|
ia64_mca_cpe_poll (unsigned long dummy)
|
||||||
{
|
{
|
||||||
/* Trigger a CPE interrupt cascade */
|
/* Trigger a CPE interrupt cascade */
|
||||||
platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
|
platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
|
||||||
|
IA64_IPI_DM_INT, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif /* CONFIG_ACPI */
|
#endif /* CONFIG_ACPI */
|
||||||
|
|
|
@ -57,7 +57,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
|
||||||
return irq;
|
return irq;
|
||||||
|
|
||||||
irq_set_msi_desc(irq, desc);
|
irq_set_msi_desc(irq, desc);
|
||||||
cpus_and(mask, irq_to_domain(irq), cpu_online_map);
|
cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
|
||||||
dest_phys_id = cpu_physical_id(first_cpu(mask));
|
dest_phys_id = cpu_physical_id(first_cpu(mask));
|
||||||
vector = irq_to_vector(irq);
|
vector = irq_to_vector(irq);
|
||||||
|
|
||||||
|
@ -179,7 +179,7 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
|
||||||
unsigned dest;
|
unsigned dest;
|
||||||
cpumask_t mask;
|
cpumask_t mask;
|
||||||
|
|
||||||
cpus_and(mask, irq_to_domain(irq), cpu_online_map);
|
cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
|
||||||
dest = cpu_physical_id(first_cpu(mask));
|
dest = cpu_physical_id(first_cpu(mask));
|
||||||
|
|
||||||
msg->address_hi = 0;
|
msg->address_hi = 0;
|
||||||
|
|
|
@ -486,7 +486,7 @@ mark_bsp_online (void)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
/* If we register an early console, allow CPU 0 to printk */
|
/* If we register an early console, allow CPU 0 to printk */
|
||||||
cpu_set(smp_processor_id(), cpu_online_map);
|
set_cpu_online(smp_processor_id(), true);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -77,7 +77,7 @@ stop_this_cpu(void)
|
||||||
/*
|
/*
|
||||||
* Remove this CPU:
|
* Remove this CPU:
|
||||||
*/
|
*/
|
||||||
cpu_clear(smp_processor_id(), cpu_online_map);
|
set_cpu_online(smp_processor_id(), false);
|
||||||
max_xtp();
|
max_xtp();
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
cpu_halt();
|
cpu_halt();
|
||||||
|
|
|
@ -401,7 +401,7 @@ smp_callin (void)
|
||||||
/* Setup the per cpu irq handling data structures */
|
/* Setup the per cpu irq handling data structures */
|
||||||
__setup_vector_irq(cpuid);
|
__setup_vector_irq(cpuid);
|
||||||
notify_cpu_starting(cpuid);
|
notify_cpu_starting(cpuid);
|
||||||
cpu_set(cpuid, cpu_online_map);
|
set_cpu_online(cpuid, true);
|
||||||
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
|
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
|
||||||
spin_unlock(&vector_lock);
|
spin_unlock(&vector_lock);
|
||||||
ipi_call_unlock_irq();
|
ipi_call_unlock_irq();
|
||||||
|
@ -548,7 +548,7 @@ do_rest:
|
||||||
if (!cpu_isset(cpu, cpu_callin_map)) {
|
if (!cpu_isset(cpu, cpu_callin_map)) {
|
||||||
printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
|
printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
|
||||||
ia64_cpu_to_sapicid[cpu] = -1;
|
ia64_cpu_to_sapicid[cpu] = -1;
|
||||||
cpu_clear(cpu, cpu_online_map); /* was set in smp_callin() */
|
set_cpu_online(cpu, false); /* was set in smp_callin() */
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -578,8 +578,7 @@ smp_build_cpu_map (void)
|
||||||
}
|
}
|
||||||
|
|
||||||
ia64_cpu_to_sapicid[0] = boot_cpu_id;
|
ia64_cpu_to_sapicid[0] = boot_cpu_id;
|
||||||
cpus_clear(cpu_present_map);
|
init_cpu_present(cpumask_of(0));
|
||||||
set_cpu_present(0, true);
|
|
||||||
set_cpu_possible(0, true);
|
set_cpu_possible(0, true);
|
||||||
for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
|
for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
|
||||||
sapicid = smp_boot_data.cpu_phys_id[i];
|
sapicid = smp_boot_data.cpu_phys_id[i];
|
||||||
|
@ -606,10 +605,6 @@ smp_prepare_cpus (unsigned int max_cpus)
|
||||||
|
|
||||||
smp_setup_percpu_timer();
|
smp_setup_percpu_timer();
|
||||||
|
|
||||||
/*
|
|
||||||
* We have the boot CPU online for sure.
|
|
||||||
*/
|
|
||||||
cpu_set(0, cpu_online_map);
|
|
||||||
cpu_set(0, cpu_callin_map);
|
cpu_set(0, cpu_callin_map);
|
||||||
|
|
||||||
local_cpu_data->loops_per_jiffy = loops_per_jiffy;
|
local_cpu_data->loops_per_jiffy = loops_per_jiffy;
|
||||||
|
@ -633,7 +628,7 @@ smp_prepare_cpus (unsigned int max_cpus)
|
||||||
|
|
||||||
void __devinit smp_prepare_boot_cpu(void)
|
void __devinit smp_prepare_boot_cpu(void)
|
||||||
{
|
{
|
||||||
cpu_set(smp_processor_id(), cpu_online_map);
|
set_cpu_online(smp_processor_id(), true);
|
||||||
cpu_set(smp_processor_id(), cpu_callin_map);
|
cpu_set(smp_processor_id(), cpu_callin_map);
|
||||||
set_numa_node(cpu_to_node_map[smp_processor_id()]);
|
set_numa_node(cpu_to_node_map[smp_processor_id()]);
|
||||||
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
|
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
|
||||||
|
@ -690,7 +685,7 @@ int migrate_platform_irqs(unsigned int cpu)
|
||||||
/*
|
/*
|
||||||
* Now re-target the CPEI to a different processor
|
* Now re-target the CPEI to a different processor
|
||||||
*/
|
*/
|
||||||
new_cpei_cpu = any_online_cpu(cpu_online_map);
|
new_cpei_cpu = cpumask_any(cpu_online_mask);
|
||||||
mask = cpumask_of(new_cpei_cpu);
|
mask = cpumask_of(new_cpei_cpu);
|
||||||
set_cpei_target_cpu(new_cpei_cpu);
|
set_cpei_target_cpu(new_cpei_cpu);
|
||||||
data = irq_get_irq_data(ia64_cpe_irq);
|
data = irq_get_irq_data(ia64_cpe_irq);
|
||||||
|
@ -732,10 +727,10 @@ int __cpu_disable(void)
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
|
|
||||||
cpu_clear(cpu, cpu_online_map);
|
set_cpu_online(cpu, false);
|
||||||
|
|
||||||
if (migrate_platform_irqs(cpu)) {
|
if (migrate_platform_irqs(cpu)) {
|
||||||
cpu_set(cpu, cpu_online_map);
|
set_cpu_online(cpu, true);
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -220,7 +220,8 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
|
||||||
ssize_t len;
|
ssize_t len;
|
||||||
cpumask_t shared_cpu_map;
|
cpumask_t shared_cpu_map;
|
||||||
|
|
||||||
cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
|
cpumask_and(&shared_cpu_map,
|
||||||
|
&this_leaf->shared_cpu_map, cpu_online_mask);
|
||||||
len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
|
len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
|
||||||
len += sprintf(buf+len, "\n");
|
len += sprintf(buf+len, "\n");
|
||||||
return len;
|
return len;
|
||||||
|
|
Загрузка…
Ссылка в новой задаче