/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>

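/*
 * Hooks for a kernel debugger (typically xmon or kgdb); they remain NULL
 * unless a debugger registers itself.
 */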
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
        mutex_lock(&pmac_backlight_mutex);
        if (pmac_backlight) {
                struct backlight_properties *props;

                props = &pmac_backlight->props;
                props->brightness = props->max_brightness;
                props->power = FB_BLANK_UNBLANK;
                backlight_update_status(pmac_backlight);
        }
        mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

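/*
 * Oops output from multiple CPUs is serialised by die_lock; die_owner and
 * die_nest_count allow a CPU that faults again while already holding the
 * lock (a nested oops) to continue instead of deadlocking.
 */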
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned long oops_begin(struct pt_regs *regs)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        if (machine_is(powermac))
                pmac_backlight_unblank();
        return flags;
}
NOKPROBE_SYMBOL(oops_begin);

static void oops_end(unsigned long flags, struct pt_regs *regs,
                     int signr)
{
        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        oops_exit();
        printk("\n");
        if (!die_nest_count) {
                /* Nest count reaches zero, release the lock. */
                die_owner = -1;
                arch_spin_unlock(&die_lock);
        }
        raw_local_irq_restore(flags);

        crash_fadump(regs, "die oops");

        /*
         * A system reset (0x100) is a request to dump, so we always send
         * it through the crashdump code.
         */
        if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
                crash_kexec(regs);

                /*
                 * We aren't the primary crash CPU. We need to send it
                 * to a holding pattern to avoid it ending up in the panic
                 * code.
                 */
                crash_kexec_secondary(regs);
        }

        if (!signr)
                return;

        /*
         * While our oops output is serialised by a spinlock, output
         * from panic() called below can race and corrupt it. If we
         * know we are going to panic, delay for 1 second so we have a
         * chance to get clean backtraces from all CPUs that are oopsing.
         */
        if (in_interrupt() || panic_on_oops || !current->pid ||
            is_global_init(current)) {
                mdelay(MSEC_PER_SEC);
        }

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

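/*
 * __die() prints the oops banner; the PREEMPT/SMP/NUMA/DEBUG_PAGEALLOC tags
 * record the kernel configuration so the register dump can be interpreted.
 */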
static int __die(const char *str, struct pt_regs *regs, long err)
{
        printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
        if (debug_pagealloc_enabled())
                printk("DEBUG_PAGEALLOC ");
#ifdef CONFIG_NUMA
        printk("NUMA ");
#endif
        printk("%s\n", ppc_md.name ? ppc_md.name : "");

        if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
                return 1;

        print_modules();
        show_regs(regs);

        return 0;
}
NOKPROBE_SYMBOL(__die);

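/*
 * die() is the top-level fatal-trap report: give any attached debugger
 * first claim on the trap, then print the oops and terminate the task.
 */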
void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags;

        if (debugger(regs))
                return;

        flags = oops_begin(regs);
        if (__die(str, regs, err))
                err = 0;
        oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);

void user_single_step_siginfo(struct task_struct *tsk,
                              struct pt_regs *regs, siginfo_t *info)
{
        memset(info, 0, sizeof(*info));
        info->si_signo = SIGTRAP;
        info->si_code = TRAP_TRACE;
        info->si_addr = (void __user *)regs->nip;
}

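/*
 * _exception() delivers the signal for a hardware exception: a trap taken
 * in kernel mode is fatal (die()), while a user-mode trap is reported via
 * force_sig_info() with the trap code recorded in thread.trap_nr.
 */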
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
        siginfo_t info;
        const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                "at %08lx nip %08lx lr %08lx code %x\n";
        const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                "at %016lx nip %016lx lr %016lx code %x\n";

        if (!user_mode(regs)) {
                die("Exception in kernel mode", regs, signr);
                return;
        }

        if (show_unhandled_signals && unhandled_signal(current, signr)) {
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, signr,
                                   addr, regs->nip, regs->link, code);
        }

        if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
                local_irq_enable();

        current->thread.trap_nr = code;
        memset(&info, 0, sizeof(info));
        info.si_signo = signr;
        info.si_code = code;
        info.si_addr = (void __user *) addr;
        force_sig_info(signr, &info, current);
}

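/*
 * A system reset (vector 0x100) is treated like an NMI: nmi_enter()/
 * nmi_exit() bracket the handler unless we are already nested, and an
 * unrecoverable reset (MSR_RI clear) ends in panic().
 */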
void system_reset_exception(struct pt_regs *regs)
{
        /*
         * Avoid crashes in case of nested NMI exceptions. Recoverability
         * is determined by RI and in_nmi.
         */
        bool nested = in_nmi();

        if (!nested)
                nmi_enter();

        /* See if any machine dependent calls */
        if (ppc_md.system_reset_exception) {
                if (ppc_md.system_reset_exception(regs))
                        goto out;
        }

        die("System Reset", regs, SIGABRT);

out:
#ifdef CONFIG_PPC_BOOK3S_64
        BUG_ON(get_paca()->in_nmi == 0);
        if (get_paca()->in_nmi > 1)
                panic("Unrecoverable nested System Reset");
#endif
        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable System Reset");

        if (!nested)
                nmi_exit();

        /* What should we do here? We could issue a shutdown or hard reset. */
}

#ifdef CONFIG_PPC64
/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
long machine_check_early(struct pt_regs *regs)
{
        long handled = 0;

        __this_cpu_inc(irq_stat.mce_exceptions);

        if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
                handled = cur_cpu_spec->machine_check_early(regs);
        return handled;
}
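
/*
 * Hypervisor Maintenance Interrupts are handled in real mode; the
 * wait_for_subcore_guest_exit()/wait_for_tb_resync() calls coordinate all
 * threads of the core so the timebase is resynchronised exactly once after
 * the OPAL handler has run.
 */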
long hmi_exception_realmode(struct pt_regs *regs)
{
        __this_cpu_inc(irq_stat.hmi_exceptions);

        wait_for_subcore_guest_exit();

        if (ppc_md.hmi_exception_early)
                ppc_md.hmi_exception_early(regs);

        wait_for_tb_resync();

        return 0;
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
        unsigned long msr = regs->msr;
        const struct exception_table_entry *entry;
        unsigned int *nip = (unsigned int *)regs->nip;

        if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
            && (entry = search_exception_tables(regs->nip)) != NULL) {
                /*
                 * Check that it's a sync instruction, or somewhere
                 * in the twi; isync; nop sequence that inb/inw/inl uses.
                 * As the address is in the exception table
                 * we should be able to read the instr there.
                 * For the debug message, we look at the preceding
                 * load or store.
                 */
                if (*nip == PPC_INST_NOP)
                        nip -= 2;
                else if (*nip == PPC_INST_ISYNC)
                        --nip;
                if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
                        unsigned int rb;

                        --nip;
                        rb = (*nip >> 11) & 0x1f;
                        printk(KERN_DEBUG "%s bad port %lx at %p\n",
                               (*nip & 0x100) ? "OUT to" : "IN from",
                               regs->gpr[rb] - _IO_BASE, nip);
                        regs->msr |= MSR_RI;
                        regs->nip = extable_fixup(entry);
                        return 1;
                }
        }
#endif /* CONFIG_PPC32 */
        return 0;
}

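/*
 * Where the cause of a machine check or program exception is reported
 * differs by CPU family: BookE parts use the ESR/MCSR SPRs, while everyone
 * else encodes the reason in the upper bits of SRR1 (saved in regs->msr).
 */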
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)        ((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)     ((regs)->dsisr)
#else
#define get_mc_reason(regs)     (mfspr(SPRN_MCSR))
#endif
#define REASON_FP               ESR_FP
#define REASON_ILLEGAL          (ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED       ESR_PPR
#define REASON_TRAP             ESR_PTR

/* single-step stuff */
#define single_stepping(regs)   (current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)        ((regs)->msr)
#define get_mc_reason(regs)     ((regs)->msr)
#define REASON_TM               0x200000
#define REASON_FP               0x100000
#define REASON_ILLEGAL          0x80000
#define REASON_PRIVILEGED       0x40000
#define REASON_TRAP             0x20000

#define single_stepping(regs)   ((regs)->msr & MSR_SE)
#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
#endif

#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        if (reason & ESR_IMCP) {
                printk("Instruction");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
        } else
                printk("Data");
        printk(" machine check in kernel mode.\n");

        return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        if (reason & ESR_IMCP) {
                printk("Instruction Synchronous Machine Check exception\n");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
        } else {
                u32 mcsr = mfspr(SPRN_MCSR);

                if (mcsr & MCSR_IB)
                        printk("Instruction Read PLB Error\n");
                if (mcsr & MCSR_DRB)
                        printk("Data Read PLB Error\n");
                if (mcsr & MCSR_DWB)
                        printk("Data Write PLB Error\n");
                if (mcsr & MCSR_TLBP)
                        printk("TLB Parity Error\n");
                if (mcsr & MCSR_ICP) {
                        flush_instruction_cache();
                        printk("I-Cache Parity Error\n");
                }
                if (mcsr & MCSR_DCSP)
                        printk("D-Cache Search Parity Error\n");
                if (mcsr & MCSR_DCFP)
                        printk("D-Cache Flush Parity Error\n");
                if (mcsr & MCSR_IMPE)
                        printk("Machine Check exception is imprecise\n");

                /* Clear MCSR */
                mtspr(SPRN_MCSR, mcsr);
        }
        return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);
        u32 mcsr;

        printk(KERN_ERR "Machine check in kernel mode.\n");
        if (reason & ESR_IMCP) {
                printk(KERN_ERR
                       "Instruction Synchronous Machine Check exception\n");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
                return 0;
        }
        mcsr = mfspr(SPRN_MCSR);
        if (mcsr & MCSR_IB)
                printk(KERN_ERR "Instruction Read PLB Error\n");
        if (mcsr & MCSR_DRB)
                printk(KERN_ERR "Data Read PLB Error\n");
        if (mcsr & MCSR_DWB)
                printk(KERN_ERR "Data Write PLB Error\n");
        if (mcsr & MCSR_TLBP)
                printk(KERN_ERR "TLB Parity Error\n");
        if (mcsr & MCSR_ICP) {
                flush_instruction_cache();
                printk(KERN_ERR "I-Cache Parity Error\n");
        }
        if (mcsr & MCSR_DCSP)
                printk(KERN_ERR "D-Cache Search Parity Error\n");
        if (mcsr & PPC47x_MCSR_GPR)
                printk(KERN_ERR "GPR Parity Error\n");
        if (mcsr & PPC47x_MCSR_FPR)
                printk(KERN_ERR "FPR Parity Error\n");
        if (mcsr & PPC47x_MCSR_IPR)
                printk(KERN_ERR "Machine Check exception is imprecise\n");

        /* Clear MCSR */
        mtspr(SPRN_MCSR, mcsr);

        return 0;
}

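/*
 * The e500/e500mc family decodes machine checks from MCSR; a non-zero
 * return tells machine_check_exception() the error was recovered and the
 * kernel need not die.
 */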
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
        unsigned long mcsr = mfspr(SPRN_MCSR);
        unsigned long reason = mcsr;
        int recoverable = 1;

        if (reason & MCSR_LD) {
                recoverable = fsl_rio_mcheck_exception(regs);
                if (recoverable == 1)
                        goto silent_out;
        }

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");

        if (reason & MCSR_ICPERR) {
                printk("Instruction Cache Parity Error\n");

                /*
                 * This is recoverable by invalidating the i-cache.
                 */
                mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
                while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
                        ;

                /*
                 * This will generally be accompanied by an instruction
                 * fetch error report -- only treat MCSR_IF as fatal
                 * if it wasn't due to an L1 parity error.
                 */
                reason &= ~MCSR_IF;
        }

        if (reason & MCSR_DCPERR_MC) {
                printk("Data Cache Parity Error\n");

                /*
                 * In write shadow mode we auto-recover from the error, but it
                 * may still get logged and cause a machine check. We should
                 * only treat the non-write shadow case as non-recoverable.
                 */
                if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
                        recoverable = 0;
        }

        if (reason & MCSR_L2MMU_MHIT) {
                printk("Hit on multiple TLB entries\n");
                recoverable = 0;
        }

        if (reason & MCSR_NMI)
                printk("Non-maskable interrupt\n");

        if (reason & MCSR_IF) {
                printk("Instruction Fetch Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_LD) {
                printk("Load Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_ST) {
                printk("Store Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_LDG) {
                printk("Guarded Load Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_TLBSYNC)
                printk("Simultaneous tlbsync operations\n");

        if (reason & MCSR_BSL2_ERR) {
                printk("Level 2 Cache Error\n");
                recoverable = 0;
        }

        if (reason & MCSR_MAV) {
                u64 addr;

                addr = mfspr(SPRN_MCAR);
                addr |= (u64)mfspr(SPRN_MCARU) << 32;

                printk("Machine Check %s Address: %#llx\n",
                       reason & MCSR_MEA ? "Effective" : "Physical", addr);
        }

silent_out:
        mtspr(SPRN_MCSR, mcsr);
        return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        if (reason & MCSR_BUS_RBERR) {
                if (fsl_rio_mcheck_exception(regs))
                        return 1;
                if (fsl_pci_mcheck_exception(regs))
                        return 1;
        }

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");
        if (reason & MCSR_ICPERR)
                printk("Instruction Cache Parity Error\n");
        if (reason & MCSR_DCP_PERR)
                printk("Data Cache Push Parity Error\n");
        if (reason & MCSR_DCPERR)
                printk("Data Cache Parity Error\n");
        if (reason & MCSR_BUS_IAERR)
                printk("Bus - Instruction Address Error\n");
        if (reason & MCSR_BUS_RAERR)
                printk("Bus - Read Address Error\n");
        if (reason & MCSR_BUS_WAERR)
                printk("Bus - Write Address Error\n");
        if (reason & MCSR_BUS_IBERR)
                printk("Bus - Instruction Data Error\n");
        if (reason & MCSR_BUS_RBERR)
                printk("Bus - Read Data Bus Error\n");
        if (reason & MCSR_BUS_WBERR)
                printk("Bus - Write Data Bus Error\n");
        if (reason & MCSR_BUS_IPERR)
                printk("Bus - Instruction Parity Error\n");
        if (reason & MCSR_BUS_RPERR)
                printk("Bus - Read Parity Error\n");

        return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
        return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");
        if (reason & MCSR_CP_PERR)
                printk("Cache Push Parity Error\n");
        if (reason & MCSR_CPERR)
                printk("Cache Parity Error\n");
        if (reason & MCSR_EXCP_ERR)
                printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
        if (reason & MCSR_BUS_IRERR)
                printk("Bus - Read Bus Error on instruction fetch\n");
        if (reason & MCSR_BUS_DRERR)
                printk("Bus - Read Bus Error on data load\n");
        if (reason & MCSR_BUS_WRERR)
                printk("Bus - Write Bus Error on buffered store or cache line push\n");

        return 0;
}
#elif defined(CONFIG_PPC_8xx)
int machine_check_8xx(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        pr_err("Machine check in kernel mode.\n");
        pr_err("Caused by (from SRR1=%lx): ", reason);
        if (reason & 0x40000000)
                pr_err("Fetch error at address %lx\n", regs->nip);
        else
                pr_err("Data access error at address %lx\n", regs->dar);

#ifdef CONFIG_PCI
        /* the qspan pci read routines can cause machine checks -- Cort
         *
         * yuck !!! that totally needs to go away ! There are better ways
         * to deal with that than having a wart in the mcheck handler.
         * -- BenH
         */
        bad_page_fault(regs, regs->dar, SIGBUS);
        return 1;
#else
        return 0;
#endif
}
#else
int machine_check_generic(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from SRR1=%lx): ", reason);
        switch (reason & 0x601F0000) {
        case 0x80000:
                printk("Machine check signal\n");
                break;
        case 0:         /* for 601 */
        case 0x40000:
        case 0x140000:  /* 7450 MSS error and TEA */
                printk("Transfer error ack signal\n");
                break;
        case 0x20000:
                printk("Data parity error signal\n");
                break;
        case 0x10000:
                printk("Address parity error signal\n");
                break;
        case 0x20000000:
                printk("L1 Data Cache error\n");
                break;
        case 0x40000000:
                printk("L1 Instruction Cache error\n");
                break;
        case 0x00100000:
                printk("L2 data cache parity error\n");
                break;
        default:
                printk("Unknown values in msr\n");
        }
        return 0;
}
|
2007-12-21 07:39:21 +03:00
|
|
|
#endif /* everything else */
|
2007-09-20 23:11:20 +04:00
|
|
|
|
|
|
|
void machine_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int recover = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/* See if any machine dependent code wants to handle this.  In
	 * theory, we would want to call the CPU first, and call the
	 * ppc_md. one if the CPU one returns a positive number. However
	 * there is existing code that assumes the board gets a first
	 * chance, so let's keep it that way for now and fix things
	 * later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");

bail:
	exception_exit(prev_state);
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

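/*
 * Hypervisor Maintenance Interrupt (HMI).  The substantive handling
 * lives in the platform hook, if one is registered; this wrapper only
 * supplies the usual irq_enter()/irq_exit() bookkeeping around it.
 */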
void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

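/*
 * Map an FPSCR image to a SIGFPE si_code.  An IEEE exception is
 * reported only when both its status bit (FPSCR_VX/OX/UX/ZX/XX) and
 * the matching enable bit (FPSCR_VE/OE/UE/ZE/XE) are set, mirroring
 * the hardware's enabled-exception semantics.
 */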
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

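/*
 * Deliver a SIGFPE for an IEEE FP exception.  The live FP state is
 * flushed to the thread struct first so that the FPSCR we parse below
 * is current.
 */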
static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
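/*
 * Note on byte placement: the string instructions fill each target GPR
 * starting at its most-significant byte, which is why the loop below
 * computes shift = 8 * (3 - (pos & 0x3)).
 */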
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0) {
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

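/*
 * popcntb counts the set bits in each byte of GPR rs and deposits the
 * eight per-byte counts in the corresponding bytes of GPR ra.  The
 * three masked-add steps below are the classic SWAR bit-count
 * reduction, summing within 2-, then 4-, then 8-bit fields; it stops
 * at byte granularity rather than folding to a single total.  For
 * example, rs = 0x0103070f1f3f7fff yields ra = 0x0102030405060708.
 */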
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

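/*
 * isel rT,rA,rB,BC: if CR bit BC is set, rT gets rA (or 0 when rA is
 * r0), otherwise rT gets rB.  E.g. isel r3,r4,r5,2 sets r3 from r4
 * when CR0[EQ] is set and from r5 otherwise.
 */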
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

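/*
 * Top-level instruction emulation dispatch.  Returns 0 when the
 * instruction at regs->nip was emulated (the caller then advances nip
 * past it), -EFAULT when a memory access faulted, and -EINVAL when we
 * do not emulate the instruction.
 */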
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn. */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

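/*
 * BUG()/WARN() trap sites live in kernel text, so any kernel address
 * is acceptable to the generic bug handling code.
 */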
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

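/*
 * Run the software FP emulator.  do_mathemu() returns 0 when the
 * instruction was emulated, 1 when it raised an enabled IEEE exception
 * (delivered here as a SIGFPE), and -EFAULT on a bad memory access;
 * any other result makes us return -1 so the caller falls through to
 * its illegal-instruction handling.
 */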
#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = __parse_fpscr(current->thread.fp_state.fpscr);

		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

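/*
 * Program check: dispatch on the reason bits.  In order, we handle
 * IEEE FP exceptions, trap instructions (debugger, kprobes, BUG()),
 * TM "Bad Thing" checks, math emulation, and instruction emulation,
 * falling back to SIGILL when nothing claims the exception.
 */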
void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		if (kprobe_handler(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  A TM SPR is written while transactional.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/* If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%x)\n", regs->nip, reason);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel, skip down to sending a
	 * SIGILL.  The subsequent cases all relate to emulating instructions,
	 * which we should only do for userspace.  We also do not want to
	 * enable interrupts for kernel faults because that might lead to
	 * further faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);

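/*
 * Alignment interrupt.  fix_alignment() returns 1 when it emulated the
 * access, -EFAULT when the operand address was bad, and anything else
 * means the access could not be fixed up, which we report as SIGBUS.
 */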
void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

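/*
 * Called from the SLB miss handler for an access to a completely bogus
 * address (or, with the radix MMU, one outside the ranges mapped by
 * the page tables): deliver SIGSEGV for a user access, oops for a
 * kernel one.
 */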
void slb_miss_bad_addr(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs))
		_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
	else
		bad_page_fault(regs, regs->dar, SIGSEGV);

	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

2005-10-01 12:43:42 +04:00
|
|
|
void kernel_fp_unavailable_exception(struct pt_regs *regs)
|
|
|
|
{
|
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs->msr |= MSR_TM;
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
		 "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}

void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
		[FSCR_MSGP_LG] = "MSGP",
		[FSCR_SCV_LG] = "SCV",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
			regs->nip += 4;
			emulate_single_step(regs);
		}
		return;
	}

	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
		 * told us not to do TM, or the kernel is not built with TM
		 * support.
		 *
		 * If both of those things are true, then userspace can spam the
		 * console by triggering the printk() below just by continually
		 * doing tbegin (or any TM instruction). So in that case just
		 * send the process a SIGILL immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	if ((hv || status >= 2) &&
	    (status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);

out:
	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif
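As a side note on the two field extractions above: the FSCR/HFSCR status lives
in the top byte of the register (hence value >> 56), and the RS/RT register
number of a mfspr/mtspr sits in bits 21-25 of the instruction word. A small
stand-alone sketch with hand-picked sample values (the authoritative encoding
masks such as PPC_INST_MTSPR_DSCR_USER_MASK live in asm/ppc-opcode.h; the
sample instruction word below is illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Mirror the decoding done in facility_unavailable_exception(). */
int main(void)
{
	uint64_t fscr = 0x0200000000000000ULL;	/* status byte = 2 (DSCR) */
	uint32_t instword = 0x7c6303a6;		/* sample "mtspr 0x03, r3" word */

	uint8_t status = fscr >> 56;		/* which facility trapped */
	uint32_t rs = (instword >> 21) & 0x1f;	/* GPR operand of the mtspr */

	printf("status=%u rs=r%u\n", status, rs);	/* prints: status=2 rs=r3 */
	return 0;
}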

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
powerpc: Don't corrupt transactional state when using FP/VMX in kernel
Currently, when we have a process using the transactional memory
facilities on POWER8 (that is, the processor is in transactional
or suspended state), and the process enters the kernel and the
kernel then uses the floating-point or vector (VMX/Altivec) facility,
we end up corrupting the user-visible FP/VMX/VSX state. This
happens, for example, if a page fault causes a copy-on-write
operation, because the copy_page function will use VMX to do the
copy on POWER8. The test program below demonstrates the bug.
The bug happens because when FP/VMX state for a transactional process
is stored in the thread_struct, we store the checkpointed state in
.fp_state/.vr_state and the transactional (current) state in
.transact_fp/.transact_vr. However, when the kernel wants to use
FP/VMX, it calls enable_kernel_fp() or enable_kernel_altivec(),
which saves the current state in .fp_state/.vr_state. Furthermore,
when we return to the user process we return with FP/VMX/VSX
disabled. The next time the process uses FP/VMX/VSX, we don't know
which set of state (the current register values, .fp_state/.vr_state,
or .transact_fp/.transact_vr) we should be using, since we have no
way to tell if we are still in the same transaction, and if not,
whether the previous transaction succeeded or failed.
Thus it is necessary to strictly adhere to the rule that if FP has
been enabled at any point in a transaction, we must keep FP enabled
for the user process with the current transactional state in the
FP registers, until we detect that it is no longer in a transaction.
Similarly for VMX; once enabled it must stay enabled until the
process is no longer transactional.
In order to keep this rule, we add a new thread_info flag which we
test when returning from the kernel to userspace, called TIF_RESTORE_TM.
This flag indicates that there is FP/VMX/VSX state to be restored
before entering userspace, and when it is set the .tm_orig_msr field
in the thread_struct indicates what state needs to be restored.
The restoration is done by restore_tm_state(). The TIF_RESTORE_TM
bit is set by new giveup_fpu/altivec_maybe_transactional helpers,
which are called from enable_kernel_fp/altivec, giveup_vsx, and
flush_fp/altivec_to_thread instead of giveup_fpu/altivec.
The other thing to be done is to get the transactional FP/VMX/VSX
state from .fp_state/.vr_state when doing reclaim, if that state
has been saved there by giveup_fpu/altivec_maybe_transactional.
Having done this, we set the FP/VMX bit in the thread's MSR after
reclaim to indicate that that part of the state is now valid
(having been reclaimed from the processor's checkpointed state).
Finally, in the signal handling code, we move the clearing of the
transactional state bits in the thread's MSR a bit earlier, before
calling flush_fp_to_thread(), so that we don't unnecessarily set
the TIF_RESTORE_TM bit.
This is the test program (the listing here had its includes and raw-opcode
TM macros elided; both are restored below so it compiles, the opcode values
being those believed to match the kernel's TM selftests):

/* Michael Neuling 4/12/2013
 *
 * See if the altivec state is leaked out of an aborted transaction due to
 * kernel vmx copy loops.
 *
 *   gcc -m64 htm_vmxcopy.c -o htm_vmxcopy
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

/* We don't use all of these, but for reference: */
#define TBEGIN   ".long 0x7C00051D ;"	/* tbegin.   */
#define TEND     ".long 0x7C00055D ;"	/* tend.     */
#define TABORT   ".long 0x7C00071D ;"	/* tabort.   */
#define TSUSPEND ".long 0x7C0005DD ;"	/* tsuspend. */
#define TRESUME  ".long 0x7C2005DD ;"	/* tresume.  */

int main(int argc, char *argv[])
{
	long double vecin = 1.3;
	long double vecout;
	unsigned long pgsize = getpagesize();
	int i;
	int fd;
	int size = pgsize*16;
	char tmpfile[] = "/tmp/page_faultXXXXXX";
	char buf[pgsize];
	char *a;
	uint64_t aborted = 0;

	fd = mkstemp(tmpfile);
	assert(fd >= 0);
	memset(buf, 0, pgsize);
	for (i = 0; i < size; i += pgsize)
		assert(write(fd, buf, pgsize) == pgsize);
	unlink(tmpfile);

	a = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
	assert(a != MAP_FAILED);

	asm __volatile__(
		"lxvd2x 40,0,%[vecinptr] ; " // set 40 to initial value
		TBEGIN
		"beq 3f ;"
		TSUSPEND
		"xxlxor 40,40,40 ; " // set 40 to 0
		"std 5, 0(%[map]) ;" // cause kernel vmx copy page
		TABORT
		TRESUME
		TEND
		"li %[res], 0 ;"
		"b 5f ;"
		"3: ;" // Abort handler
		"li %[res], 1 ;"
		"5: ;"
		"stxvd2x 40,0,%[vecoutptr] ; "
		: [res]"=r"(aborted)
		: [vecinptr]"r"(&vecin),
		  [vecoutptr]"r"(&vecout),
		  [map]"r"(a)
		: "memory", "r0", "r3", "r4", "r5", "r6", "r7");

	if (aborted && (vecin != vecout)) {
		printf("FAILED: vector state leaked on abort %f != %f\n",
		       (double)vecin, (double)vecout);
		exit(1);
	}
	munmap(a, size);
	close(fd);
	printf("PASSED!\n");
	return 0;
}
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 * If VMX is in use, the VRs now hold checkpointed values,
	 * so we don't want to load the VRs from the thread_struct.
	 */
	tm_recheckpoint(&current->thread, MSR_FP);

	/* If VMX is in use, get the transactional values back */
	if (regs->msr & MSR_VEC) {
		msr_check_and_set(MSR_VEC);
		load_vr_state(&current->thread.vr_state);
		/* At this point all the VSX state is loaded, so enable it */
		regs->msr |= MSR_VSX;
	}
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, MSR_VEC);
	current->thread.used_vr = 1;

	if (regs->msr & MSR_FP) {
		msr_check_and_set(MSR_FP);
		load_fp_state(&current->thread.fp_state);
		regs->msr |= MSR_VSX;
	}
}

void vsx_unavailable_tm(struct pt_regs *regs)
{
	unsigned long orig_msr = regs->msr;

	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* If FP and VMX are already loaded, we have all the state we need */
	if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
		regs->msr |= MSR_VSX;
		return;
	}

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;

	/* This loads & recheckpoints FP and VRs; but we have
	 * to be sure not to overwrite previously-valid state.
	 */
	tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);

	msr_check_and_set(orig_msr & (MSR_FP | MSR_VEC));

	if (orig_msr & MSR_FP)
		load_fp_state(&current->thread.fp_state);
	if (orig_msr & MSR_VEC)
		load_vr_state(&current->thread.vr_state);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
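To make the regs->msr & ~orig_msr step above concrete, here is a small worked
sketch of the mask arithmetic (bit positions per the 64-bit MSR definitions in
asm/reg.h): only facilities that were not already live at trap time are
selected for recheckpointing, so live register state is never overwritten.

#include <stdint.h>
#include <stdio.h>

#define MSR_FP	(1UL << 13)
#define MSR_VEC	(1UL << 25)
#define MSR_VSX	(1UL << 23)

/* Work through the mask logic used by vsx_unavailable_tm(). */
int main(void)
{
	uint64_t orig_msr = MSR_FP;	/* FP was live at the trap, VEC was not */
	uint64_t new_msr = orig_msr | MSR_VEC | MSR_FP | MSR_VSX;
	uint64_t to_recheckpoint = new_msr & ~orig_msr;

	printf("recheckpoint VEC? %s, FP? %s\n",
	       (to_recheckpoint & MSR_VEC) ? "yes" : "no",
	       (to_recheckpoint & MSR_FP) ? "yes" : "no");
	return 0;	/* prints: recheckpoint VEC? yes, FP? no */
}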

void performance_monitor_exception(struct pt_regs *regs)
{
powerpc: Replace __get_cpu_var uses
This still has not been merged and now powerpc is the only arch that does
not have this change. Sorry about missing linuxppc-dev before.
V1->V2
- Fix up to work against 3.18-rc1
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.
Other use cases are for storing and retrieving data from the current
processors percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.
__get_cpu_var() is defined as:
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.
this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and fewer registers
are used when code is generated.
At the end of the patch set all uses of __get_cpu_var have been removed so
the macro is removed too.
The patch set includes passes over all arches as well. Once these operations
are used throughout then specialized macros can be defined in non-x86
arches as well in order to optimize per cpu access by e.g. using a global
register that may be set to the per cpu base.
Transformations done to __get_cpu_var():
1. Determine the address of the percpu instance of the current processor.
   DEFINE_PER_CPU(int, y);
   int *x = &__get_cpu_var(y);
   Converts to
   int *x = this_cpu_ptr(&y);
2. Same as #1 but this time an array structure is involved.
   DEFINE_PER_CPU(int, y[20]);
   int *x = __get_cpu_var(y);
   Converts to
   int *x = this_cpu_ptr(y);
3. Retrieve the content of the current processors instance of a per cpu
   variable.
   DEFINE_PER_CPU(int, y);
   int x = __get_cpu_var(y);
   Converts to
   int x = __this_cpu_read(y);
4. Retrieve the content of a percpu struct.
   DEFINE_PER_CPU(struct mystruct, y);
   struct mystruct x = __get_cpu_var(y);
   Converts to
   memcpy(&x, this_cpu_ptr(&y), sizeof(x));
5. Assignment to a per cpu variable.
   DEFINE_PER_CPU(int, y);
   __get_cpu_var(y) = x;
   Converts to
   __this_cpu_write(y, x);
6. Increment/decrement etc. of a per cpu variable.
   DEFINE_PER_CPU(int, y);
   __get_cpu_var(y)++;
   Converts to
   __this_cpu_inc(y);
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: Paul Mackerras <paulus@samba.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
[mpe: Fix build errors caused by set/or_softirq_pending(), and rework
assignment in __set_breakpoint() to use memcpy().]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
		    regs, SIGFPE);
	}

	if (!emulate_math(regs))
		return;

	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_8xx */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler.
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}
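The SPEFSCR-to-si_code mapping above is only consulted for the exception
classes a task has opted into via its fpexc_mode. As a hedged user-space
sketch (standard prctl(2) flags, though whether a given SPE binary actually
overflows is up to its own arithmetic), this is how a task would request
SIGFPE delivery on overflow:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Ask for precise FP exception reporting on overflow; the handler
	 * above then maps SPEFSCR_FOVF to FPE_FLTOVF when raising SIGFPE. */
	if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE | PR_FP_EXC_OVF) != 0)
		perror("prctl(PR_SET_FPEXC)");

	/* ...SPE floating-point code that overflows now gets SIGFPE... */
	return 0;
}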

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}
NOKPROBE_SYMBOL(unrecoverable_exception);

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}
NOKPROBE_SYMBOL(kernel_bad_stack);

void __init trap_init(void)
{
}

#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */
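For completeness, a user-space sketch of how the counters created above could
be read back. The path assumes debugfs is mounted at /sys/kernel/debug and
that powerpc_debugfs_root is the usual "powerpc" directory; both are
assumptions about the running system, not guaranteed by this file.

#include <stdio.h>

int main(void)
{
	FILE *f;
	unsigned int count;

	/* Enable the ratelimited "used emulated ... instruction" warnings. */
	f = fopen("/sys/kernel/debug/powerpc/emulated_instructions/do_warn", "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}

	/* Dump one of the per-instruction emulation counters. */
	f = fopen("/sys/kernel/debug/powerpc/emulated_instructions/unaligned", "r");
	if (f) {
		if (fscanf(f, "%u", &count) == 1)
			printf("unaligned emulations: %u\n", count);
		fclose(f);
	}
	return 0;
}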