Merge commit 'paulus-perf/master' into next
Commit 5f07aa7524
@@ -141,6 +141,7 @@ config PPC
	select GENERIC_ATOMIC64 if PPC32
	select HAVE_PERF_EVENTS
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64

config EARLY_PRINTK
	bool

@@ -30,6 +30,7 @@
#define PPC_STLCX stringify_in_c(stdcx.)
#define PPC_CNTLZL stringify_in_c(cntlzd)
#define PPC_LR_STKOFF 16
#define PPC_MIN_STKFRM 112

/* Move to CR, single-entry optimized version. Only available
 * on POWER4 and later.
@@ -55,6 +56,7 @@
#define PPC_CNTLZL stringify_in_c(cntlzw)
#define PPC_MTOCRF stringify_in_c(mtcrf)
#define PPC_LR_STKOFF 4
#define PPC_MIN_STKFRM 16

#endif

@@ -517,6 +517,10 @@ static inline int cpu_has_feature(unsigned long feature)
		& feature);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#define HBP_NUM 1
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

@@ -0,0 +1,74 @@
/*
 * PowerPC BookIII S hardware breakpoint definitions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright 2010, IBM Corporation.
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 *
 */

#ifndef _PPC_BOOK3S_64_HW_BREAKPOINT_H
#define _PPC_BOOK3S_64_HW_BREAKPOINT_H

#ifdef __KERNEL__
#ifdef CONFIG_HAVE_HW_BREAKPOINT

struct arch_hw_breakpoint {
	bool extraneous_interrupt;
	u8 len; /* length of the target data symbol */
	int type;
	unsigned long address;
};

#include <linux/kdebug.h>
#include <asm/reg.h>
#include <asm/system.h>

struct perf_event;
struct pmu;
struct perf_sample_data;

#define HW_BREAKPOINT_ALIGN 0x7
/* Maximum permissible length of any HW Breakpoint */
#define HW_BREAKPOINT_LEN 0x8

extern int hw_breakpoint_slots(int type);
extern int arch_bp_generic_fields(int type, int *gen_bp_type);
extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
void hw_breakpoint_pmu_read(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);

extern struct pmu perf_ops_bp;
extern void ptrace_triggered(struct perf_event *bp, int nmi,
			struct perf_sample_data *data, struct pt_regs *regs);
static inline void hw_breakpoint_disable(void)
{
	set_dabr(0);
}
extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);

#else /* CONFIG_HAVE_HW_BREAKPOINT */
static inline void hw_breakpoint_disable(void) { }
static inline void thread_change_pc(struct task_struct *tsk,
					struct pt_regs *regs) { }
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* __KERNEL__ */
#endif /* _PPC_BOOK3S_64_HW_BREAKPOINT_H */

@@ -52,13 +52,17 @@
#define PPC_INST_WAIT 0x7c00007c
#define PPC_INST_TLBIVAX 0x7c000624
#define PPC_INST_TLBSRX_DOT 0x7c0006a5
#define PPC_INST_XXLOR 0xf0000510

/* macros to insert fields into opcodes */
#define __PPC_RA(a) (((a) & 0x1f) << 16)
#define __PPC_RB(b) (((b) & 0x1f) << 11)
#define __PPC_RS(s) (((s) & 0x1f) << 21)
#define __PPC_RT(s) __PPC_RS(s)
#define __PPC_XA(a) ((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3))
#define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
#define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
#define __PPC_XT(s) __PPC_XS(s)
#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
#define __PPC_WC(w) (((w) & 0x3) << 21)
/*
@@ -106,9 +110,12 @@
 * the 128 bit load store instructions based on that.
 */
#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))
#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
#define STXVD2X(s, a, b) stringify_in_c(.long PPC_INST_STXVD2X | \
				VSX_XX1((s), (a), (b)))
#define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \
				VSX_XX1((s), (a), (b)))
#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
				VSX_XX3((t), (a), (b)))

#endif /* _ASM_POWERPC_PPC_OPCODE_H */

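For reference (not part of this commit), the field macros above can be checked by expanding them by hand in ordinary user-space C; the printed word is nothing more than the arithmetic of the definitions in this hunk, and the register number 34 is just an arbitrary example.

/* Minimal sketch: replicate the VSX field macros and print one opcode word. */
#include <stdio.h>

#define __PPC_XA(a)	((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3))
#define __PPC_XB(b)	((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
#define __PPC_XS(s)	((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
#define __PPC_XT(s)	__PPC_XS(s)
#define PPC_INST_XXLOR	0xf0000510
#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))

int main(void)
{
	/* The low 5 bits of each VSX register number go into the usual
	 * T/A/B fields; the high (6th) bit lands in the low opcode bits,
	 * exactly as the shifts above encode it. */
	printf("0x%08x\n", PPC_INST_XXLOR | VSX_XX3(34, 34, 34)); /* prints 0xf0421517 */
	return 0;
}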
@@ -209,6 +209,14 @@ struct thread_struct {
#ifdef CONFIG_PPC64
	unsigned long start_tb; /* Start purr when proc switched in */
	unsigned long accum_tb; /* Total accumilated purr for process */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event *ptrace_bps[HBP_NUM];
	/*
	 * Helps identify source of single-step exception and subsequent
	 * hw-breakpoint enablement
	 */
	struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
	unsigned long dabr; /* Data address breakpoint register */
#ifdef CONFIG_ALTIVEC

@@ -34,6 +34,7 @@ obj-y += vdso32/
obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
				   signal_64.o ptrace32.o \
				   paca.o nvram_64.o firmware.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_ppc970.o cpu_setup_pa6t.o
obj64-$(CONFIG_RELOCATABLE)	+= reloc_64.o
obj-$(CONFIG_PPC_BOOK3E_64)	+= exceptions-64e.o

@@ -828,6 +828,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD

@@ -0,0 +1,364 @@
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 *
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return HBP_NUM;
	return 0;	/* no instruction breakpoints available */
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot = &__get_cpu_var(bp_per_reg);

	*slot = bp;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (current->thread.last_hit_ubp != bp)
		set_dabr(info->address | info->type | DABR_TRANSLATION);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct perf_event **slot = &__get_cpu_var(bp_per_reg);

	if (*slot != bp) {
		WARN_ONCE(1, "Can't find the breakpoint");
		return;
	}

	*slot = NULL;
	set_dabr(0);
}

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 */
	if (bp->ctx->task)
		bp->ctx->task->thread.last_hit_ubp = NULL;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	return is_kernel_addr(info->address);
}

int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	switch (type) {
	case DABR_DATA_READ:
		*gen_bp_type = HW_BREAKPOINT_R;
		break;
	case DABR_DATA_WRITE:
		*gen_bp_type = HW_BREAKPOINT_W;
		break;
	case (DABR_DATA_WRITE | DABR_DATA_READ):
		*gen_bp_type = (HW_BREAKPOINT_W | HW_BREAKPOINT_R);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	int ret = -EINVAL;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	if (!bp)
		return ret;

	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = DABR_DATA_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = DABR_DATA_WRITE;
		break;
	case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
		info->type = (DABR_DATA_READ | DABR_DATA_WRITE);
		break;
	default:
		return ret;
	}

	info->address = bp->attr.bp_addr;
	info->len = bp->attr.bp_len;

	/*
	 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
	 * and breakpoint addresses are aligned to nearest double-word
	 * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
	 * 'symbolsize' should satisfy the check below.
	 */
	if (info->len >
	    (HW_BREAKPOINT_LEN - (info->address & HW_BREAKPOINT_ALIGN)))
		return -EINVAL;
	return 0;
}

/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;

	if (likely(!tsk->thread.last_hit_ubp))
		return;

	info = counter_arch_bp(tsk->thread.last_hit_ubp);
	regs->msr &= ~MSR_SE;
	set_dabr(info->address | info->type | DABR_TRANSLATION);
	tsk->thread.last_hit_ubp = NULL;
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int rc = NOTIFY_STOP;
	struct perf_event *bp;
	struct pt_regs *regs = args->regs;
	int stepped = 1;
	struct arch_hw_breakpoint *info;
	unsigned int instr;
	unsigned long dar = regs->dar;

	/* Disable breakpoints during exception handling */
	set_dabr(0);

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	bp = __get_cpu_var(bp_per_reg);
	if (!bp)
		goto out;
	info = counter_arch_bp(bp);

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (bp->overflow_handler == ptrace_triggered) {
		perf_bp_event(bp, regs);
		rc = NOTIFY_DONE;
		goto out;
	}

	/*
	 * Verify if dar lies within the address range occupied by the symbol
	 * being watched to filter extraneous exceptions. If it doesn't,
	 * we still need to single-step the instruction, but we don't
	 * generate an event.
	 */
	info->extraneous_interrupt = !((bp->attr.bp_addr <= dar) &&
			(dar - bp->attr.bp_addr < bp->attr.bp_len));

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		bp->ctx->task->thread.last_hit_ubp = bp;
		regs->msr |= MSR_SE;
		goto out;
	}

	stepped = 0;
	instr = 0;
	if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
		stepped = emulate_step(regs, instr);

	/*
	 * emulate_step() could not execute it. We've failed in reliably
	 * handling the hw-breakpoint. Unregister it and throw a warning
	 * message to let the user know about it.
	 */
	if (!stepped) {
		WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
			"0x%lx will be disabled.", info->address);
		perf_event_disable(bp);
		goto out;
	}
	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	if (!info->extraneous_interrupt)
		perf_bp_event(bp, regs);

	set_dabr(info->address | info->type | DABR_TRANSLATION);
out:
	rcu_read_unlock();
	return rc;
}

/*
 * Handle single-step exceptions following a DABR hit.
 */
int __kprobes single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *bp_info;

	bp = current->thread.last_hit_ubp;
	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	if (!bp)
		return NOTIFY_DONE;

	bp_info = counter_arch_bp(bp);

	/*
	 * We shall invoke the user-defined callback function in the single
	 * stepping handler to confirm to 'trigger-after-execute' semantics
	 */
	if (!bp_info->extraneous_interrupt)
		perf_bp_event(bp, regs);

	set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION);
	current->thread.last_hit_ubp = NULL;

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	unregister_hw_breakpoint(t->ptrace_bps[0]);
	t->ptrace_bps[0] = NULL;
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

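For orientation (not part of this commit), the consumer side of this arch code looks roughly like the sketch below: a kernel-module data watchpoint modelled on samples/hw_breakpoint/data_breakpoint.c from the same kernel era. The handler signature mirrors ptrace_triggered above; the register_wide_hw_breakpoint() two-argument signature, the __percpu return type, and the jiffies_64 target symbol are assumptions of that era, not taken from this diff.

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static struct perf_event * __percpu *wp;

static void wp_handler(struct perf_event *bp, int nmi,
		       struct perf_sample_data *data, struct pt_regs *regs)
{
	/* Runs after the access completes ('trigger-after-execute' above). */
	printk(KERN_INFO "write to watched symbol, nip=%pS\n", (void *)regs->nip);
}

static int __init wp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	/* jiffies_64 is a u64, 8-byte aligned, so len 8 passes the
	 * HW_BREAKPOINT_LEN/HW_BREAKPOINT_ALIGN check in
	 * arch_validate_hwbkpt_settings() above. */
	attr.bp_addr = kallsyms_lookup_name("jiffies_64");
	attr.bp_len = HW_BREAKPOINT_LEN_8;
	attr.bp_type = HW_BREAKPOINT_W;

	wp = register_wide_hw_breakpoint(&attr, wp_handler);
	if (IS_ERR((void __force *)wp))
		return PTR_ERR((void __force *)wp);
	return 0;
}

static void __exit wp_exit(void)
{
	unregister_wide_hw_breakpoint(wp);
}

module_init(wp_init);
module_exit(wp_exit);
MODULE_LICENSE("GPL");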
@@ -25,6 +25,7 @@
#include <asm/sections.h>	/* _end */
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>

int default_machine_kexec_prepare(struct kimage *image)
{
@@ -165,6 +166,7 @@ static void kexec_smp_down(void *arg)
	while(kexec_all_irq_disabled == 0)
		cpu_relax();
	mb(); /* make sure all irqs are disabled before this */
	hw_breakpoint_disable();
	/*
	 * Now every CPU has IRQs off, we can clear out any pending
	 * IPIs and be sure that no more will come in after this.
@@ -180,6 +182,7 @@ static void kexec_prepare_cpus_wait(int wait_state)
{
	int my_cpu, i, notified=-1;

	hw_breakpoint_disable();
	my_cpu = get_cpu();
	/* Make sure each CPU has atleast made it to the state we need */
	for_each_online_cpu(i) {

@@ -37,6 +37,7 @@
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -462,8 +463,14 @@ struct task_struct *__switch_to(struct task_struct *prev,
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
	/*
	 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
	 * schedule DABR
	 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

@@ -642,7 +649,11 @@ void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINTS
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINTS */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
}

void
@@ -660,6 +671,9 @@ void prepare_to_copy(struct task_struct *tsk)
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	flush_spe_to_thread(current);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(tsk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

/*

@@ -32,6 +32,8 @@
#ifdef CONFIG_PPC32
#include <linux/module.h>
#endif
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

#include <asm/uaccess.h>
#include <asm/page.h>
@@ -866,9 +868,34 @@ void user_disable_single_step(struct task_struct *task)
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void ptrace_triggered(struct perf_event *bp, int nmi,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret;
	struct thread_struct *thread = &(task->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
	 * For embedded processors we support one DAC and no IAC's at the
	 * moment.
@@ -896,6 +923,43 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
	/* Ensure breakpoint translation bit is set */
	if (data && !(data & DABR_TRANSLATION))
		return -EIO;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
		if (bp) {
			unregister_hw_breakpoint(bp);
			thread->ptrace_bps[0] = NULL;
		}
		return 0;
	}
	if (bp) {
		attr = bp->attr;
		attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
		arch_bp_generic_fields(data &
					(DABR_DATA_WRITE | DABR_DATA_READ),
					&attr.bp_type);
		ret = modify_user_hw_breakpoint(bp, &attr);
		if (ret)
			return ret;
		thread->ptrace_bps[0] = bp;
		thread->dabr = data;
		return 0;
	}

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
	arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ),
				&attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					ptrace_triggered, task);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	/* Move contents to the DABR register */
	task->thread.dabr = data;

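For context (not part of this commit), the tracer side of this ptrace path composes the DABR value itself: the upper bits carry the 8-byte-aligned watch address and the low bits carry the translation/read/write flags, and the kernel above rejects requests without DABR_TRANSLATION. A minimal user-space sketch follows; the DABR_* bit values mirror arch/powerpc/include/asm/reg.h and the powerpc-specific PTRACE_SET_DEBUGREG request number is an assumption here.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <unistd.h>

#define DABR_TRANSLATION	(1UL << 2)	/* values as in asm/reg.h (assumed) */
#define DABR_DATA_WRITE		(1UL << 1)
#define DABR_DATA_READ		(1UL << 0)

#ifndef PTRACE_SET_DEBUGREG
#define PTRACE_SET_DEBUGREG	26		/* powerpc-specific request (assumed) */
#endif

/* Watch 'addr' for writes in the stopped, traced child 'pid'. */
static long set_watchpoint(pid_t pid, unsigned long addr)
{
	unsigned long dabr = (addr & ~7UL) | DABR_TRANSLATION | DABR_DATA_WRITE;

	return ptrace(PTRACE_SET_DEBUGREG, pid, 0, dabr);
}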
@@ -11,6 +11,7 @@

#include <linux/tracehook.h>
#include <linux/signal.h>
#include <asm/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

@@ -149,6 +150,8 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
	if (current->thread.dabr)
		set_dabr(current->thread.dabr);
#endif
	/* Re-enable the breakpoints for the signal stack */
	thread_change_pc(current, regs);

	if (is32) {
		if (ka.sa.sa_flags & SA_SIGINFO)

@@ -688,7 +688,7 @@ void RunModeException(struct pt_regs *regs)

void __kprobes single_step_exception(struct pt_regs *regs)
{
	regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			5, SIGTRAP) == NOTIFY_STOP)
@@ -707,10 +707,8 @@ void __kprobes single_step_exception(struct pt_regs *regs)
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs)) {
		clear_single_step(regs);
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)

@@ -18,8 +18,9 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o

obj-$(CONFIG_PPC64)	+= copypage_64.o copyuser_64.o \
			   memcpy_64.o usercopy_64.o mem_64.o string.o
obj-$(CONFIG_XMON)	+= sstep.o
obj-$(CONFIG_KPROBES)	+= sstep.o
obj-$(CONFIG_XMON)	+= sstep.o ldstfp.o
obj-$(CONFIG_KPROBES)	+= sstep.o ldstfp.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= sstep.o ldstfp.o

ifeq ($(CONFIG_PPC64),y)
obj-$(CONFIG_SMP)	+= locks.o

@@ -0,0 +1,375 @@
/*
 * Floating-point, VMX/Altivec and VSX loads and stores
 * for use in instruction emulation.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <linux/errno.h>

#define STKFRM	(PPC_MIN_STKFRM + 16)

	.macro	extab	instr,handler
	.section	__ex_table,"a"
	PPC_LONG	\instr,\handler
	.previous
	.endm

	.macro	inst32	op
reg = 0
	.rept	32
20:	\op	reg,0,r4
	b	3f
	extab	20b,99f
reg = reg + 1
	.endr
	.endm

/* Get the contents of frN into fr0; N is in r3. */
_GLOBAL(get_fpr)
	mflr	r0
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	blr			/* fr0 is already in fr0 */
	nop
reg = 1
	.rept	31
	fmr	fr0,reg
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Put the contents of fr0 into frN; N is in r3. */
_GLOBAL(put_fpr)
	mflr	r0
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	blr			/* fr0 is already in fr0 */
	nop
reg = 1
	.rept	31
	fmr	reg,fr0
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Load FP reg N from float at *p.  N is in r3, p in r4. */
_GLOBAL(do_lfs)
	PPC_STLU	r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	ori	r7,r6,MSR_FP
	cmpwi	cr7,r3,0
	mtmsrd	r7
	isync
	beq	cr7,1f
	stfd	fr0,STKFRM-16(r1)
1:	li	r9,-EFAULT
2:	lfs	fr0,0(r4)
	li	r9,0
3:	bl	put_fpr
	beq	cr7,4f
	lfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

/* Load FP reg N from double at *p.  N is in r3, p in r4. */
_GLOBAL(do_lfd)
	PPC_STLU	r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	ori	r7,r6,MSR_FP
	cmpwi	cr7,r3,0
	mtmsrd	r7
	isync
	beq	cr7,1f
	stfd	fr0,STKFRM-16(r1)
1:	li	r9,-EFAULT
2:	lfd	fr0,0(r4)
	li	r9,0
3:	beq	cr7,4f
	bl	put_fpr
	lfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

/* Store FP reg N to float at *p.  N is in r3, p in r4. */
_GLOBAL(do_stfs)
	PPC_STLU	r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	ori	r7,r6,MSR_FP
	cmpwi	cr7,r3,0
	mtmsrd	r7
	isync
	beq	cr7,1f
	stfd	fr0,STKFRM-16(r1)
	bl	get_fpr
1:	li	r9,-EFAULT
2:	stfs	fr0,0(r4)
	li	r9,0
3:	beq	cr7,4f
	lfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

/* Store FP reg N to double at *p.  N is in r3, p in r4. */
_GLOBAL(do_stfd)
	PPC_STLU	r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	ori	r7,r6,MSR_FP
	cmpwi	cr7,r3,0
	mtmsrd	r7
	isync
	beq	cr7,1f
	stfd	fr0,STKFRM-16(r1)
	bl	get_fpr
1:	li	r9,-EFAULT
2:	stfd	fr0,0(r4)
	li	r9,0
3:	beq	cr7,4f
	lfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

#ifdef CONFIG_ALTIVEC
/* Get the contents of vrN into vr0; N is in r3. */
_GLOBAL(get_vr)
	mflr	r0
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	blr			/* vr0 is already in vr0 */
	nop
reg = 1
	.rept	31
	vor	vr0,reg,reg	/* assembler doesn't know vmr? */
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Put the contents of vr0 into vrN; N is in r3. */
_GLOBAL(put_vr)
	mflr	r0
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	blr			/* vr0 is already in vr0 */
	nop
reg = 1
	.rept	31
	vor	reg,vr0,vr0
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Load vector reg N from *p.  N is in r3, p in r4. */
_GLOBAL(do_lvx)
	PPC_STLU	r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VEC@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	mtmsrd	r7
	isync
	beq	cr7,1f
	stvx	vr0,r1,r8
1:	li	r9,-EFAULT
2:	lvx	vr0,0,r4
	li	r9,0
3:	beq	cr7,4f
	bl	put_vr
	lvx	vr0,r1,r8
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

/* Store vector reg N to *p.  N is in r3, p in r4. */
_GLOBAL(do_stvx)
	PPC_STLU	r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VEC@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	mtmsrd	r7
	isync
	beq	cr7,1f
	stvx	vr0,r1,r8
	bl	get_vr
1:	li	r9,-EFAULT
2:	stvx	vr0,0,r4
	li	r9,0
3:	beq	cr7,4f
	lvx	vr0,r1,r8
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/* Get the contents of vsrN into vsr0; N is in r3. */
_GLOBAL(get_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vsr0 is already in vsr0 */
	nop
reg = 1
	.rept	63
	XXLOR(0,reg,reg)
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Put the contents of vsr0 into vsrN; N is in r3. */
_GLOBAL(put_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vr0 is already in vr0 */
	nop
reg = 1
	.rept	63
	XXLOR(reg,0,0)
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Load VSX reg N from vector doubleword *p.  N is in r3, p in r4. */
_GLOBAL(do_lxvd2x)
	PPC_STLU	r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	mtmsrd	r7
	isync
	beq	cr7,1f
	STXVD2X(0,r1,r8)
1:	li	r9,-EFAULT
2:	LXVD2X(0,0,r4)
	li	r9,0
3:	beq	cr7,4f
	bl	put_vsr
	LXVD2X(0,r1,r8)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

/* Store VSX reg N to vector doubleword *p.  N is in r3, p in r4. */
_GLOBAL(do_stxvd2x)
	PPC_STLU	r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	mtmsrd	r7
	isync
	beq	cr7,1f
	STXVD2X(0,r1,r8)
	bl	get_vsr
1:	li	r9,-EFAULT
2:	STXVD2X(0,0,r4)
	li	r9,0
3:	beq	cr7,4f
	LXVD2X(0,r1,r8)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

#endif /* CONFIG_VSX */

(File diff not shown because it is too large.)

@@ -241,6 +241,17 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Contraints to check before allowing this new breakpoint counter:
 *
@@ -339,6 +350,7 @@ void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);