2019-06-03 08:44:50 +03:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2012-03-05 15:49:33 +04:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
|
|
*/
|
|
|
|
#ifndef __ASM_DEBUG_MONITORS_H
|
|
|
|
#define __ASM_DEBUG_MONITORS_H
|
|
|
|
|
2015-07-24 18:37:47 +03:00
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/types.h>
|
2016-02-23 10:56:45 +03:00
|
|
|
#include <asm/brk-imm.h>
|
2015-07-24 18:37:43 +03:00
|
|
|
#include <asm/esr.h>
|
2015-07-24 18:37:41 +03:00
|
|
|
#include <asm/insn.h>
|
2015-07-24 18:37:47 +03:00
|
|
|
#include <asm/ptrace.h>
|
2015-07-24 18:37:41 +03:00
|
|
|
|
2014-05-07 15:13:14 +04:00
|
|
|
/* Low-level stepping controls. */
|
|
|
|
#define DBG_MDSCR_SS (1 << 0)
|
|
|
|
#define DBG_SPSR_SS (1 << 21)
|
|
|
|
|
|
|
|
/* MDSCR_EL1 enabling bits */
|
|
|
|
#define DBG_MDSCR_KDE (1 << 13)
|
|
|
|
#define DBG_MDSCR_MDE (1 << 15)
|
|
|
|
#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
|
|
|
|
|
2012-03-05 15:49:33 +04:00
|
|
|
#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
|
|
|
|
|
|
|
|
/* AArch64 */
|
|
|
|
#define DBG_ESR_EVT_HWBP 0x0
|
|
|
|
#define DBG_ESR_EVT_HWSS 0x1
|
|
|
|
#define DBG_ESR_EVT_HWWP 0x2
|
|
|
|
#define DBG_ESR_EVT_BRK 0x6
|
|
|
|
|
2014-01-28 15:20:18 +04:00
|
|
|
/*
|
|
|
|
* Break point instruction encoding
|
|
|
|
*/
|
2015-07-24 18:37:41 +03:00
|
|
|
#define BREAK_INSTR_SIZE AARCH64_INSN_SIZE
|
2014-01-28 15:20:18 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* BRK instruction encoding
|
|
|
|
* The #imm16 value should be placed at bits[20:5] within BRK ins
|
|
|
|
*/
|
|
|
|
#define AARCH64_BREAK_MON 0xd4200000
|
|
|
|
|
2014-09-16 20:42:33 +04:00
|
|
|
/*
|
|
|
|
* BRK instruction for provoking a fault on purpose
|
|
|
|
* Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
|
|
|
|
*/
|
|
|
|
#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
|
|
|
|
|
2015-07-24 18:37:46 +03:00
|
|
|
#define AARCH64_BREAK_KGDB_DYN_DBG \
|
|
|
|
(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
|
2014-01-28 15:20:18 +04:00
|
|
|
|
|
|
|
#define CACHE_FLUSH_IS_SAFE 1
|
|
|
|
|
arm64: Kprobes with single stepping support
Add support for basic kernel probes(kprobes) and jump probes
(jprobes) for ARM64.
Kprobes utilizes software breakpoint and single step debug
exceptions supported on ARM v8.
A software breakpoint is placed at the probe address to trap the
kernel execution into the kprobe handler.
ARM v8 supports enabling single stepping before the break exception
return (ERET), with next PC in exception return address (ELR_EL1). The
kprobe handler prepares an executable memory slot for out-of-line
execution with a copy of the original instruction being probed, and
enables single stepping. The PC is set to the out-of-line slot address
before the ERET. With this scheme, the instruction is executed with the
exact same register context except for the PC (and DAIF) registers.
Debug mask (PSTATE.D) is enabled only when single stepping a recursive
kprobe, e.g.: during kprobes reenter so that probed instruction can be
single stepped within the kprobe handler -exception- context.
The recursion depth of kprobe is always 2, i.e. upon probe re-entry,
any further re-entry is prevented by not calling handlers and the case
counted as a missed kprobe).
Single stepping from the x-o-l slot has a drawback for PC-relative accesses
like branching and symbolic literals access as the offset from the new PC
(slot address) may not be ensured to fit in the immediate value of
the opcode. Such instructions need simulation, so reject
probing them.
Instructions generating exceptions or cpu mode change are rejected
for probing.
Exclusive load/store instructions are rejected too. Additionally, the
code is checked to see if it is inside an exclusive load/store sequence
(code from Pratyush).
System instructions are mostly enabled for stepping, except MSR/MRS
accesses to "DAIF" flags in PSTATE, which are not safe for
probing.
This also changes arch/arm64/include/asm/ptrace.h to use
include/asm-generic/ptrace.h.
Thanks to Steve Capper and Pratyush Anand for several suggested
Changes.
Signed-off-by: Sandeepa Prabhu <sandeepa.s.prabhu@gmail.com>
Signed-off-by: David A. Long <dave.long@linaro.org>
Signed-off-by: Pratyush Anand <panand@redhat.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2016-07-08 19:35:48 +03:00
|
|
|
/* kprobes BRK opcodes with ESR encoding */
|
2019-02-26 18:06:42 +03:00
|
|
|
#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5))
|
2016-11-02 12:10:46 +03:00
|
|
|
/* uprobes BRK opcodes with ESR encoding */
|
2019-02-26 18:06:42 +03:00
|
|
|
#define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5))
|
arm64: Kprobes with single stepping support
Add support for basic kernel probes(kprobes) and jump probes
(jprobes) for ARM64.
Kprobes utilizes software breakpoint and single step debug
exceptions supported on ARM v8.
A software breakpoint is placed at the probe address to trap the
kernel execution into the kprobe handler.
ARM v8 supports enabling single stepping before the break exception
return (ERET), with next PC in exception return address (ELR_EL1). The
kprobe handler prepares an executable memory slot for out-of-line
execution with a copy of the original instruction being probed, and
enables single stepping. The PC is set to the out-of-line slot address
before the ERET. With this scheme, the instruction is executed with the
exact same register context except for the PC (and DAIF) registers.
Debug mask (PSTATE.D) is enabled only when single stepping a recursive
kprobe, e.g.: during kprobes reenter so that probed instruction can be
single stepped within the kprobe handler -exception- context.
The recursion depth of kprobe is always 2, i.e. upon probe re-entry,
any further re-entry is prevented by not calling handlers and the case
counted as a missed kprobe).
Single stepping from the x-o-l slot has a drawback for PC-relative accesses
like branching and symbolic literals access as the offset from the new PC
(slot address) may not be ensured to fit in the immediate value of
the opcode. Such instructions need simulation, so reject
probing them.
Instructions generating exceptions or cpu mode change are rejected
for probing.
Exclusive load/store instructions are rejected too. Additionally, the
code is checked to see if it is inside an exclusive load/store sequence
(code from Pratyush).
System instructions are mostly enabled for stepping, except MSR/MRS
accesses to "DAIF" flags in PSTATE, which are not safe for
probing.
This also changes arch/arm64/include/asm/ptrace.h to use
include/asm-generic/ptrace.h.
Thanks to Steve Capper and Pratyush Anand for several suggested
Changes.
Signed-off-by: Sandeepa Prabhu <sandeepa.s.prabhu@gmail.com>
Signed-off-by: David A. Long <dave.long@linaro.org>
Signed-off-by: Pratyush Anand <panand@redhat.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2016-07-08 19:35:48 +03:00
|
|
|
|
2012-03-05 15:49:33 +04:00
|
|
|
/* AArch32 */
|
|
|
|
#define DBG_ESR_EVT_BKPT 0x4
|
|
|
|
#define DBG_ESR_EVT_VECC 0x5
|
|
|
|
|
|
|
|
#define AARCH32_BREAK_ARM 0x07f001f0
|
|
|
|
#define AARCH32_BREAK_THUMB 0xde01
|
|
|
|
#define AARCH32_BREAK_THUMB2_LO 0xf7f0
|
|
|
|
#define AARCH32_BREAK_THUMB2_HI 0xa000
|
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
struct task_struct;
|
|
|
|
|
|
|
|
#define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
|
|
|
|
|
2013-12-04 09:50:20 +04:00
|
|
|
#define DBG_HOOK_HANDLED 0
|
|
|
|
#define DBG_HOOK_ERROR 1
|
|
|
|
|
|
|
|
/*
 * Hook invoked on a single-step debug exception.
 *
 * fn receives the faulting register state and the ESR value; it
 * presumably returns DBG_HOOK_HANDLED when it consumed the step, or
 * DBG_HOOK_ERROR to pass the exception on (see the constants above) —
 * the dispatch loop is outside this header, so confirm at the caller.
 */
struct step_hook {
	struct list_head node;	/* membership in the registered-hook list */
	int (*fn)(struct pt_regs *regs, unsigned int esr);
};
|
|
|
|
|
2019-02-26 15:52:47 +03:00
|
|
|
void register_user_step_hook(struct step_hook *hook);
|
|
|
|
void unregister_user_step_hook(struct step_hook *hook);
|
|
|
|
|
|
|
|
void register_kernel_step_hook(struct step_hook *hook);
|
|
|
|
void unregister_kernel_step_hook(struct step_hook *hook);
|
2013-12-04 09:50:20 +04:00
|
|
|
|
|
|
|
/*
 * Hook invoked on a BRK instruction debug exception whose #imm16
 * value matches imm (under mask).
 *
 * fn receives the faulting register state and the ESR value; it
 * presumably returns DBG_HOOK_HANDLED or DBG_HOOK_ERROR (see the
 * constants above) — confirm against the dispatcher.
 */
struct break_hook {
	struct list_head node;	/* membership in the registered-hook list */
	int (*fn)(struct pt_regs *regs, unsigned int esr);
	u16 imm;	/* BRK #imm16 value this hook claims */
	u16 mask; /* These bits are ignored when comparing with imm */
};
|
|
|
|
|
2019-02-26 15:52:47 +03:00
|
|
|
void register_user_break_hook(struct break_hook *hook);
|
|
|
|
void unregister_user_break_hook(struct break_hook *hook);
|
|
|
|
|
|
|
|
void register_kernel_break_hook(struct break_hook *hook);
|
|
|
|
void unregister_kernel_break_hook(struct break_hook *hook);
|
2013-12-04 09:50:20 +04:00
|
|
|
|
2012-03-05 15:49:33 +04:00
|
|
|
u8 debug_monitors_arch(void);
|
|
|
|
|
2015-07-27 20:36:54 +03:00
|
|
|
/*
 * Exception level on whose behalf the debug monitors are being
 * enabled or disabled (see enable/disable_debug_monitors() below).
 */
enum dbg_active_el {
	DBG_ACTIVE_EL0 = 0,	/* userspace (EL0) debugging */
	DBG_ACTIVE_EL1,		/* kernel (EL1) debugging */
};
|
|
|
|
|
2015-07-27 20:36:54 +03:00
|
|
|
void enable_debug_monitors(enum dbg_active_el el);
|
|
|
|
void disable_debug_monitors(enum dbg_active_el el);
|
2012-03-05 15:49:33 +04:00
|
|
|
|
|
|
|
void user_rewind_single_step(struct task_struct *task);
|
|
|
|
void user_fastforward_single_step(struct task_struct *task);
|
|
|
|
|
|
|
|
void kernel_enable_single_step(struct pt_regs *regs);
|
|
|
|
void kernel_disable_single_step(void);
|
|
|
|
int kernel_active_single_step(void);
|
|
|
|
|
|
|
|
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int reinstall_suspended_bps(struct pt_regs *regs);
#else
/*
 * Stub for kernels built without hardware breakpoint support: there
 * are no suspended breakpoints to reinstall, so report the facility
 * as unavailable.  Callers can treat any non-zero return as "nothing
 * was reinstalled".
 */
static inline int reinstall_suspended_bps(struct pt_regs *regs)
{
	return -ENODEV;
}
#endif
|
|
|
|
|
2013-03-16 12:48:13 +04:00
|
|
|
int aarch32_break_handler(struct pt_regs *regs);
|
|
|
|
|
2020-05-14 02:06:37 +03:00
|
|
|
void debug_traps_init(void);
|
|
|
|
|
2012-03-05 15:49:33 +04:00
|
|
|
#endif /* __ASSEMBLY */
|
|
|
|
#endif /* __ASM_DEBUG_MONITORS_H */
|