s390/nmi: allocation of the extended save area
The machine check extended save area is needed to store the vector
registers and the guarded storage control block when a CPU is
interrupted by a machine check. Move the slab cache allocation of the
full save area to nmi.c; for early boot, use a static __initdata block.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent: cc65450c83
Commit: 6c81511ca1
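Worth spelling out, since every hunk below manipulates it: the save area origin is at least 1 KB aligned, so the low bits of the single lowcore field mcesad are reused to carry log2 of the area size (11 when the guarded storage facility demands a 2 KB area). A minimal userspace sketch of that packing and unpacking, illustration only: the origin value is made up, and the MCESA_* masks are copied from the nmi.h hunk below.

#include <stdio.h>

#define MCESA_ORIGIN_MASK	(~0x3ffUL)	/* origin is at least 1K-aligned */
#define MCESA_LC_MASK		(0xfUL)		/* low bits: log2 of the area size */

int main(void)
{
	/* made-up, 2K-aligned save area origin */
	unsigned long origin = 0x12345800UL;
	unsigned long mcesad;

	/* pack: origin | ilog2(2048), as nmi_init() does when MACHINE_HAS_GS */
	mcesad = origin | 11;
	/* unpack: mask the size bits off again, as nmi_free_per_cpu() does */
	printf("origin=%#lx, size=2^%lu bytes\n",
	       mcesad & MCESA_ORIGIN_MASK, mcesad & MCESA_LC_MASK);
	return 0;
}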
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -80,6 +80,8 @@ union mci {
 
 #define MCESA_ORIGIN_MASK	(~0x3ffUL)
 #define MCESA_LC_MASK		(0xfUL)
+#define MCESA_MIN_SIZE		(1024)
+#define MCESA_MAX_SIZE		(2048)
 
 struct mcesa {
 	u8 vector_save_area[1024];
@@ -88,8 +90,12 @@ struct mcesa {
 
 struct pt_regs;
 
-extern void s390_handle_mcck(void);
-extern void s390_do_machine_check(struct pt_regs *regs);
+void nmi_alloc_boot_cpu(struct lowcore *lc);
+int nmi_alloc_per_cpu(struct lowcore *lc);
+void nmi_free_per_cpu(struct lowcore *lc);
+
+void s390_handle_mcck(void);
+void s390_do_machine_check(struct pt_regs *regs);
 
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_NMI_H */
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -12,7 +12,9 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/hardirq.h>
+#include <linux/log2.h>
 #include <linux/kprobes.h>
+#include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/module.h>
 #include <linux/sched/signal.h>
@@ -38,6 +40,86 @@ struct mcck_struct {
 };
 
 static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
+static struct kmem_cache *mcesa_cache;
+static unsigned long mcesa_origin_lc;
+
+static inline int nmi_needs_mcesa(void)
+{
+	return MACHINE_HAS_VX || MACHINE_HAS_GS;
+}
+
+static inline unsigned long nmi_get_mcesa_size(void)
+{
+	if (MACHINE_HAS_GS)
+		return MCESA_MAX_SIZE;
+	return MCESA_MIN_SIZE;
+}
+
+/*
+ * The initial machine check extended save area for the boot CPU.
+ * It will be replaced by nmi_init() with an allocated structure.
+ * The structure is required for machine check happening early in
+ * the boot process.
+ */
+static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
+
+void __init nmi_alloc_boot_cpu(struct lowcore *lc)
+{
+	if (!nmi_needs_mcesa())
+		return;
+	lc->mcesad = (unsigned long) &boot_mcesa;
+	if (MACHINE_HAS_GS)
+		lc->mcesad |= ilog2(MCESA_MAX_SIZE);
+}
+
+static int __init nmi_init(void)
+{
+	unsigned long origin, cr0, size;
+
+	if (!nmi_needs_mcesa())
+		return 0;
+	size = nmi_get_mcesa_size();
+	if (size > MCESA_MIN_SIZE)
+		mcesa_origin_lc = ilog2(size);
+	/* create slab cache for the machine-check-extended-save-areas */
+	mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
+	if (!mcesa_cache)
+		panic("Couldn't create nmi save area cache");
+	origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
+	if (!origin)
+		panic("Couldn't allocate nmi save area");
+	/* The pointer is stored with mcesa_bits ORed in */
+	kmemleak_not_leak((void *) origin);
+	__ctl_store(cr0, 0, 0);
+	__ctl_clear_bit(0, 28); /* disable lowcore protection */
+	/* Replace boot_mcesa on the boot CPU */
+	S390_lowcore.mcesad = origin | mcesa_origin_lc;
+	__ctl_load(cr0, 0, 0);
+	return 0;
+}
+early_initcall(nmi_init);
+
+int nmi_alloc_per_cpu(struct lowcore *lc)
+{
+	unsigned long origin;
+
+	if (!nmi_needs_mcesa())
+		return 0;
+	origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
+	if (!origin)
+		return -ENOMEM;
+	/* The pointer is stored with mcesa_bits ORed in */
+	kmemleak_not_leak((void *) origin);
+	lc->mcesad = origin | mcesa_origin_lc;
+	return 0;
+}
+
+void nmi_free_per_cpu(struct lowcore *lc)
+{
+	if (!nmi_needs_mcesa())
+		return;
+	kmem_cache_free(mcesa_cache, (void *)(lc->mcesad & MCESA_ORIGIN_MASK));
+}
+
 static notrace void s390_handle_damage(void)
 {
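The hunk above follows a common bootstrap pattern: a static __initdata area covers machine checks that happen before the slab allocator is ready, and nmi_init(), registered as an early_initcall, swaps in a slab-allocated replacement. A stripped-down userspace sketch of that handoff, illustration only; all names here are invented stand-ins for the kernel symbols.

#include <stdio.h>
#include <stdlib.h>

static char boot_area[2048];			/* stands in for boot_mcesa __initdata */
static char *current_area = boot_area;		/* stands in for S390_lowcore.mcesad */

static void late_init(void)			/* stands in for nmi_init() */
{
	/* stands in for kmem_cache_alloc() from mcesa_cache */
	char *area = aligned_alloc(2048, 2048);

	if (!area)
		abort();
	current_area = area;			/* the static area is no longer used */
}

int main(void)
{
	printf("before: %p (static bootstrap area)\n", (void *) current_area);
	late_init();
	printf("after:  %p (dynamically allocated)\n", (void *) current_area);
	return 0;
}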
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -55,6 +55,7 @@
 #include <asm/mmu_context.h>
 #include <asm/cpcmd.h>
 #include <asm/lowcore.h>
+#include <asm/nmi.h>
 #include <asm/irq.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
@@ -340,15 +341,7 @@ static void __init setup_lowcore(void)
 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
 	       MAX_FACILITY_BIT/8);
-	if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
-		unsigned long bits, size;
-
-		bits = MACHINE_HAS_GS ? 11 : 10;
-		size = 1UL << bits;
-		lc->mcesad = (__u64) memblock_virt_alloc(size, size);
-		if (MACHINE_HAS_GS)
-			lc->mcesad |= bits;
-	}
+	nmi_alloc_boot_cpu(lc);
 	vdso_alloc_boot_cpu(lc);
 	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
 	lc->async_enter_timer = S390_lowcore.async_enter_timer;
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -81,8 +81,6 @@ struct pcpu {
 static u8 boot_core_type;
 static struct pcpu pcpu_devices[NR_CPUS];
 
-static struct kmem_cache *pcpu_mcesa_cache;
-
 unsigned int smp_cpu_mt_shift;
 EXPORT_SYMBOL(smp_cpu_mt_shift);
 
@@ -193,10 +191,8 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 {
 	unsigned long async_stack, panic_stack;
-	unsigned long mcesa_origin, mcesa_bits;
 	struct lowcore *lc;
 
-	mcesa_origin = mcesa_bits = 0;
 	if (pcpu != &pcpu_devices[0]) {
 		pcpu->lowcore = (struct lowcore *)
 			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
@@ -204,40 +200,30 @@
 		panic_stack = __get_free_page(GFP_KERNEL);
 		if (!pcpu->lowcore || !panic_stack || !async_stack)
 			goto out;
-		if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
-			mcesa_origin = (unsigned long)
-				kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL);
-			if (!mcesa_origin)
-				goto out;
-			/* The pointer is stored with mcesa_bits ORed in */
-			kmemleak_not_leak((void *) mcesa_origin);
-			mcesa_bits = MACHINE_HAS_GS ? 11 : 0;
-		}
 	} else {
 		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
 		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
-		mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
-		mcesa_bits = pcpu->lowcore->mcesad & MCESA_LC_MASK;
 	}
 	lc = pcpu->lowcore;
 	memcpy(lc, &S390_lowcore, 512);
 	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
 	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
 	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
-	lc->mcesad = mcesa_origin | mcesa_bits;
 	lc->cpu_nr = cpu;
 	lc->spinlock_lockval = arch_spin_lockval(cpu);
 	lc->spinlock_index = 0;
-	if (vdso_alloc_per_cpu(lc))
+	if (nmi_alloc_per_cpu(lc))
 		goto out;
+	if (vdso_alloc_per_cpu(lc))
+		goto out_mcesa;
 	lowcore_ptr[cpu] = lc;
 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
 	return 0;
+
+out_mcesa:
+	nmi_free_per_cpu(lc);
 out:
 	if (pcpu != &pcpu_devices[0]) {
-		if (mcesa_origin)
-			kmem_cache_free(pcpu_mcesa_cache,
-					(void *) mcesa_origin);
 		free_page(panic_stack);
 		free_pages(async_stack, ASYNC_ORDER);
 		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
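One detail of the reworked pcpu_alloc_lowcore() error path worth spelling out: a failure of nmi_alloc_per_cpu() jumps straight to out (it left nothing behind), while a later vdso_alloc_per_cpu() failure jumps to the new out_mcesa label so the already-allocated save area is released first. A generic sketch of this staged goto unwind, in plain userspace C with invented names:

#include <stdio.h>
#include <stdlib.h>

static void *save_area;			/* stands in for the per-cpu mcesa */
static void *vdso_data;			/* stands in for the per-cpu vdso data */

static int setup_cpu(void)
{
	save_area = malloc(2048);	/* like nmi_alloc_per_cpu() */
	if (!save_area)
		goto out;		/* nothing allocated yet, plain exit */
	vdso_data = malloc(4096);	/* like vdso_alloc_per_cpu() */
	if (!vdso_data)
		goto out_save;		/* must undo the earlier step first */
	return 0;

out_save:				/* mirrors the new out_mcesa: label */
	free(save_area);
out:
	return -1;
}

int main(void)
{
	printf("setup_cpu() -> %d\n", setup_cpu());
	return 0;
}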
@@ -249,17 +235,12 @@ out:
 
 static void pcpu_free_lowcore(struct pcpu *pcpu)
 {
-	unsigned long mcesa_origin;
-
 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
 	lowcore_ptr[pcpu - pcpu_devices] = NULL;
 	vdso_free_per_cpu(pcpu->lowcore);
+	nmi_free_per_cpu(pcpu->lowcore);
 	if (pcpu == &pcpu_devices[0])
 		return;
-	if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
-		mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
-		kmem_cache_free(pcpu_mcesa_cache, (void *) mcesa_origin);
-	}
 	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
 	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
 	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@@ -936,22 +917,12 @@ void __init smp_fill_possible_mask(void)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	unsigned long size;
-
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
 		panic("Couldn't request external interrupt 0x1201");
 	/* request the 0x1202 external call external interrupt */
 	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
 		panic("Couldn't request external interrupt 0x1202");
-	/* create slab cache for the machine-check-extended-save-areas */
-	if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
-		size = 1UL << (MACHINE_HAS_GS ? 11 : 10);
-		pcpu_mcesa_cache = kmem_cache_create("nmi_save_areas",
-						     size, size, 0, NULL);
-		if (!pcpu_mcesa_cache)
-			panic("Couldn't create nmi save area cache");
-	}
 }
 
 void __init smp_prepare_boot_cpu(void)