arm64: kdump: implement machine_crash_shutdown()

The primary kernel calls machine_crash_shutdown() to shut down non-boot CPUs
and save the register state in per-cpu ELF notes before starting the crash
dump kernel. See kernel_kexec().
Even if not all secondary CPUs have shut down, kdump is carried out anyway.

As the non-boot (crashed) CPUs need not, and should not, be taken offline
before they are stopped (their state at the time of the crash must be
preserved for the dump), this patch also adds a variant of smp_send_stop()
that stops them without touching the online mask.
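
For context, the generic kexec core drives this new hook from the crash path
(as opposed to the ordinary kernel_kexec() reboot path) roughly as follows; a
condensed sketch of __crash_kexec() in kernel/kexec_core.c, with locking and
bookkeeping omitted:

	/* Condensed sketch of the generic crash path (kernel/kexec_core.c). */
	void __crash_kexec(struct pt_regs *regs)
	{
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);	/* snapshot the crashing CPU */
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);	/* implemented for arm64 by this patch */
			machine_kexec(kexec_crash_image);	/* branch into the dump kernel */
		}
	}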

Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Reviewed-by: James Morse <james.morse@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Authored by AKASHI Takahiro on 2017-04-03 11:24:36 +09:00; committed by Catalin Marinas
Parent 254a41c0ba
Commit 78fd584cde
5 changed files, 167 additions and 6 deletions

arch/arm64/include/asm/hardirq.h

@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
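
NR_IPI sizes the per-CPU IPI statistics; the hunk above is trimmed, but the
surrounding structure in arch/arm64/include/asm/hardirq.h is approximately:

	typedef struct {
		unsigned int __softirq_pending;
		unsigned int ipi_irqs[NR_IPI];	/* one counter per IPI type, hence 6 -> 7 */
	} ____cacheline_aligned irq_cpustat_t;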

arch/arm64/include/asm/kexec.h

@@ -40,7 +40,47 @@
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
-	/* Empty routine needed to avoid build errors. */
if (oldregs) {
memcpy(newregs, oldregs, sizeof(*newregs));
} else {
u64 tmp1, tmp2;
__asm__ __volatile__ (
"stp x0, x1, [%2, #16 * 0]\n"
"stp x2, x3, [%2, #16 * 1]\n"
"stp x4, x5, [%2, #16 * 2]\n"
"stp x6, x7, [%2, #16 * 3]\n"
"stp x8, x9, [%2, #16 * 4]\n"
"stp x10, x11, [%2, #16 * 5]\n"
"stp x12, x13, [%2, #16 * 6]\n"
"stp x14, x15, [%2, #16 * 7]\n"
"stp x16, x17, [%2, #16 * 8]\n"
"stp x18, x19, [%2, #16 * 9]\n"
"stp x20, x21, [%2, #16 * 10]\n"
"stp x22, x23, [%2, #16 * 11]\n"
"stp x24, x25, [%2, #16 * 12]\n"
"stp x26, x27, [%2, #16 * 13]\n"
"stp x28, x29, [%2, #16 * 14]\n"
"mov %0, sp\n"
"stp x30, %0, [%2, #16 * 15]\n"
"/* faked current PSTATE */\n"
"mrs %0, CurrentEL\n"
"mrs %1, SPSEL\n"
"orr %0, %0, %1\n"
"mrs %1, DAIF\n"
"orr %0, %0, %1\n"
"mrs %1, NZCV\n"
"orr %0, %0, %1\n"
/* pc */
"adr %1, 1f\n"
"1:\n"
"stp %1, %0, [%2, #16 * 16]\n"
: "=&r" (tmp1), "=&r" (tmp2)
: "r" (newregs)
: "memory"
);
}
}
#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_HIBERNATION)
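
The pt_regs assembled here (and, on the other CPUs, by the new crash-stop IPI
handler) are consumed by the generic crash_save_cpu(), which emits them as an
NT_PRSTATUS note into the per-cpu crash_notes buffer that the dump kernel later
exposes through /proc/vmcore. A condensed sketch of that helper, simplified
from kernel/kexec_core.c:

	void crash_save_cpu(struct pt_regs *regs, int cpu)
	{
		struct elf_prstatus prstatus;
		u32 *buf;

		if ((cpu < 0) || (cpu >= nr_cpu_ids))
			return;

		buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
		if (!buf)
			return;

		memset(&prstatus, 0, sizeof(prstatus));
		prstatus.pr_pid = current->pid;
		/* copy the captured registers into the ELF note payload */
		elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
		buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
				      &prstatus, sizeof(prstatus));
		final_note(buf);
	}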

arch/arm64/include/asm/smp.h

@@ -148,6 +148,9 @@ static inline void cpu_panic_kernel(void)
*/
bool cpus_are_stuck_in_kernel(void);
extern void smp_send_crash_stop(void);
extern bool smp_crash_stop_failed(void);
#endif /* ifndef __ASSEMBLY__ */
#endif /* ifndef __ASM_SMP_H */

arch/arm64/kernel/machine_kexec.c

@@ -9,6 +9,9 @@
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/page-flags.h>
#include <linux/smp.h>
@@ -143,11 +146,15 @@ void machine_kexec(struct kimage *kimage)
{
phys_addr_t reboot_code_buffer_phys;
void *reboot_code_buffer;
bool in_kexec_crash = (kimage == kexec_crash_image);
bool stuck_cpus = cpus_are_stuck_in_kernel();
/*
* New cpus may have become stuck_in_kernel after we loaded the image.
*/
-BUG_ON(cpus_are_stuck_in_kernel() || (num_online_cpus() > 1));
+BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1)));
WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
"Some CPUs may be stale, kdump will be unreliable.\n");
reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
@@ -199,15 +206,58 @@ void machine_kexec(struct kimage *kimage)
* relocation is complete.
*/
-cpu_soft_restart(1, reboot_code_buffer_phys, kimage->head,
-		 kimage->start, 0);
+cpu_soft_restart(kimage != kexec_crash_image,
+		 reboot_code_buffer_phys, kimage->head, kimage->start, 0);
BUG(); /* Should never get here. */
}
static void machine_kexec_mask_interrupts(void)
{
unsigned int i;
struct irq_desc *desc;
for_each_irq_desc(i, desc) {
struct irq_chip *chip;
int ret;
chip = irq_desc_get_chip(desc);
if (!chip)
continue;
/*
* First try to remove the active state. If this
* fails, try to EOI the interrupt.
*/
ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
if (ret && irqd_irq_inprogress(&desc->irq_data) &&
chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
if (chip->irq_mask)
chip->irq_mask(&desc->irq_data);
if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
chip->irq_disable(&desc->irq_data);
}
}
/**
* machine_crash_shutdown - shutdown non-crashing cpus and save registers
*/
void machine_crash_shutdown(struct pt_regs *regs)
{
-	/* Empty routine needed to avoid build errors. */
local_irq_disable();
/* shutdown non-crashing cpus */
smp_send_crash_stop();
/* for crashing cpu */
crash_save_cpu(regs, smp_processor_id());
machine_kexec_mask_interrupts();
pr_info("Starting crashdump kernel...\n");
}
void arch_kexec_protect_crashkres(void)
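
For comparison, the ordinary (non-crash) kexec path goes through
machine_shutdown(), which hot-unplugs the secondary CPUs and so updates the
online mask, which is exactly what machine_crash_shutdown() above must avoid.
On arm64 at this point that routine is essentially (arch/arm64/kernel/process.c,
approximate):

	void machine_shutdown(void)
	{
		/* cleanly offline every CPU except the one performing the kexec */
		disable_nonboot_cpus();
	}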

arch/arm64/kernel/smp.c

@@ -39,6 +39,7 @@
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kexec.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
@@ -76,6 +77,7 @@ enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_CPU_STOP,
IPI_CPU_CRASH_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
IPI_WAKEUP
@@ -756,6 +758,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
S(IPI_TIMER, "Timer broadcast interrupts"),
S(IPI_IRQ_WORK, "IRQ work interrupts"),
S(IPI_WAKEUP, "CPU wake-up interrupts"),
@@ -830,6 +833,29 @@ static void ipi_cpu_stop(unsigned int cpu)
cpu_relax();
}
#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif
static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
crash_save_cpu(regs, cpu);
atomic_dec(&waiting_for_crash_ipi);
local_irq_disable();
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_ops[cpu]->cpu_die)
cpu_ops[cpu]->cpu_die(cpu);
#endif
/* just in case */
cpu_park_loop();
#endif
}
/*
* Main handler for inter-processor interrupts
*/
@@ -860,6 +886,15 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
case IPI_CPU_CRASH_STOP:
if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
irq_enter();
ipi_cpu_crash_stop(cpu, regs);
unreachable();
}
break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
irq_enter();
@@ -932,6 +967,39 @@ void smp_send_stop(void)
cpumask_pr_args(cpu_online_mask));
}
#ifdef CONFIG_KEXEC_CORE
void smp_send_crash_stop(void)
{
cpumask_t mask;
unsigned long timeout;
if (num_online_cpus() == 1)
return;
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
pr_crit("SMP: stopping secondary CPUs\n");
smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
udelay(1);
if (atomic_read(&waiting_for_crash_ipi) > 0)
pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(&mask));
}
bool smp_crash_stop_failed(void)
{
return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif
/*
* not supported here
*/
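
The contrast with the pre-existing stop IPI is the point of the new variant:
ipi_cpu_stop() takes the CPU out of the online mask before spinning, while
ipi_cpu_crash_stop() above saves the register state, leaves the online mask
untouched so the dump reflects the true CPU state at the time of the crash,
and then tries cpu_ops->cpu_die() before falling back to the parking loop.
For reference, the existing handler and the parking loop in
arch/arm64/kernel/smp.c look approximately like this:

	/* Pre-existing stop handler: marks the CPU offline, then spins. */
	static void ipi_cpu_stop(unsigned int cpu)
	{
		set_cpu_online(cpu, false);

		local_irq_disable();

		while (1)
			cpu_relax();
	}

	/* Low-power parking loop used as the crash-stop fallback. */
	void cpu_park_loop(void)
	{
		for (;;) {
			wfe();
			wfi();
		}
	}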