2005-12-04 10:39:43 +03:00
|
|
|
/*
|
|
|
|
* Architecture specific (PPC64) functions for kexec based crash dumps.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2005, IBM Corp.
|
|
|
|
*
|
|
|
|
* Created by: Haren Myneni
|
|
|
|
*
|
|
|
|
* This source code is licensed under the GNU General Public License,
|
|
|
|
* Version 2. See the file COPYING for more details.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#undef DEBUG
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/smp.h>
|
|
|
|
#include <linux/reboot.h>
|
|
|
|
#include <linux/kexec.h>
|
|
|
|
#include <linux/bootmem.h>
|
|
|
|
#include <linux/crash_dump.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/elf.h>
|
|
|
|
#include <linux/elfcore.h>
|
|
|
|
#include <linux/init.h>
|
2006-04-04 15:43:01 +04:00
|
|
|
#include <linux/irq.h>
|
2005-12-04 10:39:43 +03:00
|
|
|
#include <linux/types.h>
|
2008-02-14 03:56:49 +03:00
|
|
|
#include <linux/lmb.h>
|
2005-12-04 10:39:43 +03:00
|
|
|
|
|
|
|
#include <asm/processor.h>
|
|
|
|
#include <asm/machdep.h>
|
2006-06-24 02:29:34 +04:00
|
|
|
#include <asm/kexec.h>
|
2005-12-04 10:39:43 +03:00
|
|
|
#include <asm/kdump.h>
|
2008-02-14 03:56:49 +03:00
|
|
|
#include <asm/prom.h>
|
2005-12-04 10:39:43 +03:00
|
|
|
#include <asm/firmware.h>
|
2006-01-11 06:25:25 +03:00
|
|
|
#include <asm/smp.h>
|
2008-01-18 07:50:30 +03:00
|
|
|
#include <asm/system.h>
|
|
|
|
#include <asm/setjmp.h>
|
2005-12-04 10:39:43 +03:00
|
|
|
|
|
|
|
#ifdef DEBUG
|
|
|
|
#include <asm/udbg.h>
|
|
|
|
#define DBG(fmt...) udbg_printf(fmt)
|
|
|
|
#else
|
|
|
|
#define DBG(fmt...)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* This keeps a track of which one is crashing cpu. */
|
|
|
|
int crashing_cpu = -1;
|
2006-06-24 02:29:34 +04:00
|
|
|
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
|
2006-07-05 08:39:43 +04:00
|
|
|
cpumask_t cpus_in_sr = CPU_MASK_NONE;
|
2005-12-04 10:39:43 +03:00
|
|
|
|
2008-06-12 13:34:39 +04:00
|
|
|
#define CRASH_HANDLER_MAX 2
|
2008-01-18 07:50:30 +03:00
|
|
|
/* NULL terminated list of shutdown handles */
|
|
|
|
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
|
|
|
|
static DEFINE_SPINLOCK(crash_handlers_lock);
|
|
|
|
|
2005-12-04 10:39:43 +03:00
|
|
|
#ifdef CONFIG_SMP
|
2006-06-24 02:29:34 +04:00
|
|
|
static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
|
2005-12-04 10:39:43 +03:00
|
|
|
|
|
|
|
/*
 * Per-CPU crash rendezvous, invoked on secondary CPUs via crash IPI
 * (and possibly a second time via soft-reset).  Saves the CPU's
 * register state, signals arrival in cpus_in_crash, then waits for
 * the crashing CPU before spinning in kexec wait.
 */
void crash_ipi_callback(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	/* Offline CPUs take no part in the crash shutdown. */
	if (!cpu_online(cpu))
		return;

	/* Hard-disable: lazy/soft irq disabling is not sufficient here. */
	hard_irq_disable();
	/*
	 * Save regs only on the first entry; a soft-reset re-entry must
	 * not overwrite the state already captured by the earlier IPI.
	 */
	if (!cpu_isset(cpu, cpus_in_crash))
		crash_save_cpu(regs, cpu);
	cpu_set(cpu, cpus_in_crash);

	/*
	 * Entered via soft-reset - could be the kdump
	 * process is invoked using soft-reset or user activated
	 * it if some CPU did not respond to an IPI.
	 * For soft-reset, the secondary CPU can enter this func
	 * twice. 1 - using IPI, and 2. soft-reset.
	 * Tell the kexec CPU that entered via soft-reset and ready
	 * to go down.
	 */
	if (cpu_isset(cpu, cpus_in_sr)) {
		cpu_clear(cpu, cpus_in_sr);
		atomic_inc(&enter_on_soft_reset);
	}

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped.
	 * If not, soft-reset will be invoked to bring other CPUs.
	 */
	while (!cpu_isset(crashing_cpu, cpus_in_crash))
		cpu_relax();

	/* Give the platform a chance to quiesce this CPU (second arg: 1 = secondary). */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}
|
|
|
|
|
2006-06-24 02:29:34 +04:00
|
|
|
/*
 * Wait until all CPUs are entered via soft-reset.
 *
 * @cpu: the calling CPU; it is removed from cpus_in_sr since it has
 *       already arrived, then spins until every other online CPU has
 *       bumped enter_on_soft_reset in crash_ipi_callback().
 */
static void crash_soft_reset_check(int cpu)
{
	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */

	cpu_clear(cpu, cpus_in_sr);
	while (atomic_read(&enter_on_soft_reset) != ncpus)
		cpu_relax();
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Stop all other online CPUs before the kexec reboot: send them the
 * crash IPI, wait up to ~10s for them to check in via cpus_in_crash,
 * and fall back to asking the user for a soft-reset if some do not
 * respond.
 *
 * @cpu: the crashing CPU (excluded from the wait).
 */
static void crash_kexec_prepare_cpus(int cpu)
{
	unsigned int msecs;

	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */

	crash_send_ipi(crash_ipi_callback);
	smp_wmb();

	/*
	 * FIXME: Until we will have the way to stop other CPUSs reliabally,
	 * the crash CPU will send an IPI and wait for other CPUs to
	 * respond.
	 * Delay of at least 10 seconds.
	 */
	printk(KERN_EMERG "Sending IPI to other cpus...\n");
	msecs = 10000;
	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
		cpu_relax();
		mdelay(1);
	}

	/* Would it be better to replace the trap vector here? */

	/*
	 * FIXME: In case if we do not get all CPUs, one possibility: ask the
	 * user to do soft reset such that we get all.
	 * Soft-reset will be used until better mechanism is implemented.
	 */
	if (cpus_weight(cpus_in_crash) < ncpus) {
		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
			ncpus - cpus_weight(cpus_in_crash));
		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
		/* Reset soft-reset bookkeeping; stragglers enter via crash_kexec_secondary(). */
		cpus_in_sr = CPU_MASK_NONE;
		atomic_set(&enter_on_soft_reset, 0);
		while (cpus_weight(cpus_in_crash) < ncpus)
			cpu_relax();
	}
	/*
	 * Make sure all CPUs are entered via soft-reset if the kdump is
	 * invoked using soft-reset.
	 */
	if (cpu_isset(cpu, cpus_in_sr))
		crash_soft_reset_check(cpu);
	/* Leave the IPI callback set */
}
|
2006-06-24 02:29:34 +04:00
|
|
|
|
|
|
|
/*
 * This function will be called by secondary cpus or by kexec cpu
 * if soft-reset is activated to stop some CPUs.
 *
 * Secondaries end up in crash_ipi_callback(); the crashing CPU itself
 * (which only gets here via soft-reset) waits for the others and then
 * boots the crash kernel.
 */
void crash_kexec_secondary(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int msecs = 5;

	local_irq_save(flags);
	/* Wait 5ms if the kexec CPU is not entered yet. */
	while (crashing_cpu < 0) {
		if (--msecs < 0) {
			/*
			 * Either kdump image is not loaded or
			 * kdump process is not started - Probably xmon
			 * exited using 'x'(exit and recover) or
			 * kexec_should_crash() failed for all running tasks.
			 */
			cpu_clear(cpu, cpus_in_sr);
			local_irq_restore(flags);
			return;
		}
		mdelay(1);
		cpu_relax();
	}
	if (cpu == crashing_cpu) {
		/*
		 * Panic CPU will enter this func only via soft-reset.
		 * Wait until all secondary CPUs entered and
		 * then start kexec boot.
		 */
		crash_soft_reset_check(cpu);
		cpu_set(crashing_cpu, cpus_in_crash);
		if (ppc_md.kexec_cpu_down)
			ppc_md.kexec_cpu_down(1, 0);
		machine_kexec(kexec_crash_image);
		/* NOTREACHED */
	}
	crash_ipi_callback(regs);
}
|
|
|
|
|
2005-12-04 10:39:43 +03:00
|
|
|
#else
|
2006-06-24 02:29:34 +04:00
|
|
|
/* UP (!CONFIG_SMP) variant: no IPIs needed; just release held secondaries. */
static void crash_kexec_prepare_cpus(int cpu)
{
	/*
	 * move the secondarys to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 */
#ifdef CONFIG_PPC64
	smp_release_cpus();
#else
	/* FIXME */
#endif
}
|
|
|
|
|
2006-06-24 02:29:34 +04:00
|
|
|
/* UP (!CONFIG_SMP) variant: nothing to rendezvous; clear the soft-reset mask. */
void crash_kexec_secondary(struct pt_regs *regs)
{
	cpus_in_sr = CPU_MASK_NONE;
}
|
2005-12-04 10:39:43 +03:00
|
|
|
#endif
|
2007-07-20 23:39:27 +04:00
|
|
|
#ifdef CONFIG_SPU_BASE
|
|
|
|
|
|
|
|
#include <asm/spu.h>
|
|
|
|
#include <asm/spu_priv1.h>
|
|
|
|
|
|
|
|
/*
 * Per-SPU register snapshot taken by crash_kexec_stop_spus() at crash
 * time, before the SPU is stopped.  Field names mirror the hardware
 * register each value comes from.
 */
struct crash_spu_info {
	struct spu *spu;		/* registered SPU, or NULL if slot unused */
	u32 saved_spu_runcntl_RW;	/* problem-state spu_runcntl_RW */
	u32 saved_spu_status_R;		/* problem-state spu_status_R */
	u32 saved_spu_npc_RW;		/* problem-state spu_npc_RW */
	u64 saved_mfc_sr1_RW;		/* MFC SR1, saved before run control is masked */
	u64 saved_mfc_dar;		/* MFC data address register */
	u64 saved_mfc_dsisr;		/* MFC DSISR */
};
|
|
|
|
|
|
|
|
#define CRASH_NUM_SPUS 16 /* Enough for current hardware */
|
|
|
|
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
|
|
|
|
|
|
|
|
/*
 * Snapshot the registered SPUs' state into crash_spu_info[] and stop
 * them by clearing the MFC SR1 master run control bit.  Called from
 * default_machine_crash_shutdown() with the system already crashed.
 */
static void crash_kexec_stop_spus(void)
{
	struct spu *spu;
	int i;
	u64 tmp;

	for (i = 0; i < CRASH_NUM_SPUS; i++) {
		/* Slot never registered via crash_register_spus(). */
		if (!crash_spu_info[i].spu)
			continue;

		spu = crash_spu_info[i].spu;

		/* Save problem-state registers before stopping the SPU. */
		crash_spu_info[i].saved_spu_runcntl_RW =
			in_be32(&spu->problem->spu_runcntl_RW);
		crash_spu_info[i].saved_spu_status_R =
			in_be32(&spu->problem->spu_status_R);
		crash_spu_info[i].saved_spu_npc_RW =
			in_be32(&spu->problem->spu_npc_RW);

		crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
		crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
		tmp = spu_mfc_sr1_get(spu);
		crash_spu_info[i].saved_mfc_sr1_RW = tmp;

		/* Clear the master run control bit to halt the SPU. */
		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
		spu_mfc_sr1_set(spu, tmp);

		/* Brief delay to let the stop take effect before the next SPU. */
		__delay(200);
	}
}
|
|
|
|
|
|
|
|
void crash_register_spus(struct list_head *list)
|
|
|
|
{
|
|
|
|
struct spu *spu;
|
|
|
|
|
|
|
|
list_for_each_entry(spu, list, full_list) {
|
|
|
|
if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
crash_spu_info[spu->number].spu = spu;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
/* No-op stub when CONFIG_SPU_BASE is not set. */
static inline void crash_kexec_stop_spus(void)
{
}
|
|
|
|
#endif /* CONFIG_SPU_BASE */
|
2005-12-04 10:39:43 +03:00
|
|
|
|
2008-01-18 07:50:30 +03:00
|
|
|
/*
 * Register a function to be called on shutdown.  Only use this if you
 * can't reset your device in the second kernel.
 *
 * @handler: callback stored in the first free slot of the
 *           NULL-terminated crash_shutdown_handles[] table.
 *
 * Returns 0 on success, 1 if all CRASH_HANDLER_MAX slots are full.
 */
int crash_shutdown_register(crash_shutdown_t handler)
{
	unsigned int i, rc;

	spin_lock(&crash_handlers_lock);
	for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
		if (!crash_shutdown_handles[i]) {
			/* Insert handle at first empty entry */
			crash_shutdown_handles[i] = handler;
			rc = 0;
			break;
		}

	/* Loop ran to completion without finding a free slot. */
	if (i == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handles full, "
		       "not registered.\n");
		rc = 1;
	}

	spin_unlock(&crash_handlers_lock);
	return rc;
}
EXPORT_SYMBOL(crash_shutdown_register);
|
|
|
|
|
|
|
|
/*
 * Remove a previously registered crash shutdown handler, compacting
 * the NULL-terminated crash_shutdown_handles[] table so no gaps
 * remain.
 *
 * Returns 0 on success, 1 if @handler was not found.
 */
int crash_shutdown_unregister(crash_shutdown_t handler)
{
	unsigned int i, rc;

	spin_lock(&crash_handlers_lock);
	for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
		if (crash_shutdown_handles[i] == handler)
			break;

	if (i == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handle not found\n");
		rc = 1;
	} else {
		/* Shift handles down */
		/* Safe: the array has CRASH_HANDLER_MAX+1 entries, so [i+1]
		 * is always in bounds and the NUL terminator is preserved. */
		for (; crash_shutdown_handles[i]; i++)
			crash_shutdown_handles[i] =
				crash_shutdown_handles[i+1];
		rc = 0;
	}

	spin_unlock(&crash_handlers_lock);
	return rc;
}
EXPORT_SYMBOL(crash_shutdown_unregister);
|
|
|
|
|
|
|
|
static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
|
|
|
|
|
|
|
|
/*
 * Debugger fault handler installed around crash shutdown callbacks:
 * if a handler faults, longjmp back to the setjmp in
 * default_machine_crash_shutdown() instead of oopsing.
 */
static int handle_fault(struct pt_regs *regs)
{
	longjmp(crash_shutdown_buf, 1);
	return 0;	/* not reached; longjmp does not return */
}
|
|
|
|
|
2005-12-04 10:39:43 +03:00
|
|
|
/*
 * Machine-level crash shutdown: quiesce interrupts, run registered
 * crash shutdown handlers (fault-protected via setjmp/longjmp), save
 * this CPU's state, stop the other CPUs and SPUs, and notify the
 * platform.  Called on the crashing CPU just before machine_kexec().
 */
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int i;
	int (*old_handler)(struct pt_regs *regs);


	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	hard_irq_disable();

	/* EOI any in-progress interrupts and mask everything else. */
	for_each_irq(i) {
		struct irq_desc *desc = irq_to_desc(i);

		if (desc->status & IRQ_INPROGRESS)
			desc->chip->eoi(i);

		if (!(desc->status & IRQ_DISABLED))
			desc->chip->disable(i);
	}

	/*
	 * Call registered shutdown routines savely. Swap out
	 * __debugger_fault_handler, and replace on exit.
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
	/* crash_shutdown_handles[] is NULL-terminated, hence no bound here. */
	for (i = 0; crash_shutdown_handles[i]; i++) {
		/* setjmp returns non-zero when a handler faulted; skip to next. */
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
			 * Insert syncs and delay to ensure
			 * instructions in the dangerous region don't
			 * leak away from this protected region.
			 */
			asm volatile("sync; isync");
			/* dangerous region */
			crash_shutdown_handles[i]();
			asm volatile("sync; isync");
		}
	}
	__debugger_fault_handler = old_handler;

	/*
	 * Make a note of crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();
	crash_save_cpu(regs, crashing_cpu);
	crash_kexec_prepare_cpus(crashing_cpu);
	cpu_set(crashing_cpu, cpus_in_crash);
	crash_kexec_stop_spus();
	/* Second arg 0: this is the crashing (primary) CPU. */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}
|