/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/timer.h>
#include <asm/leon.h>

#include "kernel.h"
#include "irq.h"

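/* Bring-up handshake state shared with the platform-specific boot code:
 * a secondary CPU marks its slot in cpu_callin_map once it has reached
 * smp_callin(), and it is only allowed to continue once the boot CPU has
 * added it to smp_commenced_mask (see __cpu_up() below).
 */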
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

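/* Per-platform (sun4m/sun4d/LEON) hooks for raising IPIs; installed by
 * the platform's SMP initialization code.
 */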
const struct sparc32_ipi_ops *sparc32_ipi_ops;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards. Pretty lame locking primitive
 * compared to the Alpha and the Intel no? Most Sparcs have 'swap'
 * instruction which is much better...
 */

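/* Record this CPU's details in cpu_data(): delay-loop calibration value,
 * clock frequency and PROM node from the firmware device tree, and the
 * hardware module ID (MID).
 */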
void smp_store_cpu_info(int id)
{
	int cpu_node;
	int mid;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						"clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;
	mid = cpu_get_hwmid(cpu_node);

	if (mid < 0) {
		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x", id, cpu_node);
		mid = 0;
	}
	cpu_data(id).mid = mid;
}

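/* Called by the generic SMP code once all secondary CPUs have been
 * brought up: report the aggregate BogoMIPS and let the platform code
 * finish its SMP setup.
 */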
void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num, bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	switch(sparc_cpu_model) {
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	case sparc_leon:
		leon_smp_done();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

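/* A CPU that falls out of its idle loop ends up here; there is no sane
 * way to recover, so panic.
 */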
void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

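/* Context-table register descriptor filled in by the platform boot
 * helpers and consumed by the secondary-CPU trampoline code.
 */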
struct linux_prom_registers smp_penguin_ctable = { 0 };

void smp_send_reschedule(int cpu)
{
	/*
	 * CPU model dependent way of implementing IPI generation targeting
	 * a single CPU. The trap handler needs only to do trap entry/return
	 * to call schedule.
	 */
	sparc32_ipi_ops->resched(cpu);
}

void smp_send_stop(void)
{
}

void arch_send_call_function_single_ipi(int cpu)
{
	/* trigger one IPI single call on one CPU */
	sparc32_ipi_ops->single(cpu);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* trigger IPI mask call on each CPU */
	for_each_cpu(cpu, mask)
		sparc32_ipi_ops->mask_one(cpu);
}

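/* IPI handler run on the target CPU of smp_send_reschedule(). */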
void smp_resched_interrupt(void)
{
	irq_enter();
	scheduler_ipi();
	local_cpu_data().irq_resched_count++;
	irq_exit();
	/* re-schedule routine called by interrupt return code. */
}

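/* IPI handlers for the generic smp_call_function_single() and
 * smp_call_function() paths; both are accounted as call IPIs.
 */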
void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

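/* Changing the profiling timer multiplier is not supported here. */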
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

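/* Count the CPUs described by the PROM, warn if NR_CPUS is too small to
 * use them all, then hand over to the platform-specific boot code.
 */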
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4m_boot_cpus(void);
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch(sparc_cpu_model) {
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

/* Set this up early so that things like the scheduler can init
 * properly. We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}

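/* Mark the boot CPU online/possible and sanity-check its id. */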
void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}

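/* Boot one secondary CPU via the platform-specific helper, then wait
 * for it to show up in the online mask.
 */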
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	extern int smp4m_boot_one_cpu(int, struct task_struct *);
	extern int smp4d_boot_one_cpu(int, struct task_struct *);
	int ret = 0;

	switch(sparc_cpu_model) {
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu, tidle);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu, tidle);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu, tidle);
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}

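/* First stage of secondary-CPU bring-up, run before the CPU_STARTING
 * notifiers: flush the local caches/TLB and do the per-platform setup.
 */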
void arch_cpu_pre_starting(void *arg)
{
	local_ops->cache_all();
	local_ops->tlb_all();

	switch(sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_starting(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_starting(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_starting(arg);
		break;
	default:
		BUG();
	}
}

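/* Second stage of bring-up: calibrate the delay loop, record cpu_data,
 * and perform the per-platform handshake before the CPU is marked
 * online.
 */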
void arch_cpu_pre_online(void *arg)
{
	unsigned int cpuid = hard_smp_processor_id();

	register_percpu_ce(cpuid);

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_ops->cache_all();
	local_ops->tlb_all();

	switch(sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_online(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_online(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_online(arg);
		break;
	default:
		BUG();
	}
}

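/* Common entry for a freshly started secondary CPU: run the two
 * bring-up stages, mark the CPU online, enable interrupts and enter the
 * idle loop.
 */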
void sparc_start_secondary(void *arg)
{
	unsigned int cpu;

	/*
	 * SMP booting is extremely fragile in some architectures. So run
	 * the cpu initialization code first before anything else.
	 */
	arch_cpu_pre_starting(arg);

	preempt_disable();
	cpu = smp_processor_id();

	/* Invoke the CPU_STARTING notifier callbacks */
	notify_cpu_starting(cpu);

	arch_cpu_pre_online(arg);

	/* Set the CPU in the cpu_online_mask */
	set_cpu_online(cpu, true);

	/* Enable local interrupts now */
	local_irq_enable();

	wmb();
	cpu_startup_entry(CPUHP_ONLINE);

	/* We should never reach here! */
	BUG();
}

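/* Entry point reached from the per-platform trampoline code. */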
void smp_callin(void)
{
	sparc_start_secondary(NULL);
}

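/* /proc/cpuinfo helpers: per-CPU BogoMIPS and online state. */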
void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}