// SPDX-License-Identifier: GPL-2.0-only
/* ptrace.c: Sparc process tracing support.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *
 * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
 * and David Mosberger.
 *
 * Added Linux support -miguel (weird, eh?, the original code was meant
 * to emulate SunOS).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <trace/syscall.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/context_tracking.h>

#include <asm/asi.h>
#include <linux/uaccess.h>
#include <asm/psrcompat.h>
#include <asm/visasm.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#include "entry.h"

/* #define ALLOW_INIT_TRACING */

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(n, r) \
	{.name = n, .offset = (PT_V9_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME("g0", G0),
	REG_OFFSET_NAME("g1", G1),
	REG_OFFSET_NAME("g2", G2),
	REG_OFFSET_NAME("g3", G3),
	REG_OFFSET_NAME("g4", G4),
	REG_OFFSET_NAME("g5", G5),
	REG_OFFSET_NAME("g6", G6),
	REG_OFFSET_NAME("g7", G7),

	REG_OFFSET_NAME("i0", I0),
	REG_OFFSET_NAME("i1", I1),
	REG_OFFSET_NAME("i2", I2),
	REG_OFFSET_NAME("i3", I3),
	REG_OFFSET_NAME("i4", I4),
	REG_OFFSET_NAME("i5", I5),
	REG_OFFSET_NAME("i6", I6),
	REG_OFFSET_NAME("i7", I7),

	REG_OFFSET_NAME("tstate", TSTATE),
	REG_OFFSET_NAME("pc", TPC),
	REG_OFFSET_NAME("npc", TNPC),
	REG_OFFSET_NAME("y", Y),
	REG_OFFSET_NAME("lr", I7),

	REG_OFFSET_END,
};

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do */
}

/* To get the necessary page struct, access_process_vm() first calls
 * get_user_pages().  This has done a flush_dcache_page() on the
 * accessed page.  Then our caller (copy_{to,from}_user_page()) did
 * a memcpy to read/write the data from that page.
 *
 * Now, the only thing we have to do is:
 * 1) flush the D-cache if it's possible that an illegal alias
 *    has been created
 * 2) flush the I-cache if this is pre-cheetah and we did a write
 */
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	BUG_ON(len > PAGE_SIZE);

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef DCACHE_ALIASING_POSSIBLE
	/* If bit 13 of the kernel address we used to access the
	 * user page is the same as the virtual address that page
	 * is mapped to in the user's address space, we can skip the
	 * D-cache flush.
	 */
	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
		unsigned long start = __pa(kaddr);
		unsigned long end = start + len;
		unsigned long dcache_line_size;

		dcache_line_size = local_cpu_data().dcache_line_size;

		if (tlb_type == spitfire) {
			for (; start < end; start += dcache_line_size)
				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
		} else {
			start &= ~(dcache_line_size - 1);
			for (; start < end; start += dcache_line_size)
				__asm__ __volatile__(
					"stxa %%g0, [%0] %1\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (start),
					  "i" (ASI_DCACHE_INVALIDATE));
		}
	}
#endif
	if (write && tlb_type == spitfire) {
		unsigned long start = (unsigned long) kaddr;
		unsigned long end = start + len;
		unsigned long icache_line_size;

		icache_line_size = local_cpu_data().icache_line_size;

		for (; start < end; start += icache_line_size)
			flushi(start);
	}

	preempt_enable();
}
EXPORT_SYMBOL_GPL(flush_ptrace_access);
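
/* Copy a block of data to or from the tracee's address space.  When the
 * target is the current task the ordinary uaccess copies suffice;
 * otherwise the transfer goes through access_process_vm().
 */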
static int get_from_target(struct task_struct *target, unsigned long uaddr,
			   void *kbuf, int len)
{
	if (target == current) {
		if (copy_from_user(kbuf, (void __user *) uaddr, len))
			return -EFAULT;
	} else {
		int len2 = access_process_vm(target, uaddr, kbuf, len,
					     FOLL_FORCE);
		if (len2 != len)
			return -EFAULT;
	}
	return 0;
}

static int set_to_target(struct task_struct *target, unsigned long uaddr,
			 void *kbuf, int len)
{
	if (target == current) {
		if (copy_to_user((void __user *) uaddr, kbuf, len))
			return -EFAULT;
	} else {
		int len2 = access_process_vm(target, uaddr, kbuf, len,
					     FOLL_FORCE | FOLL_WRITE);
		if (len2 != len)
			return -EFAULT;
	}
	return 0;
}

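/* Fetch or store the register window spilled on the tracee's user stack.
 * A 32-bit task normally uses 32-bit save areas; if it runs on a 64-bit
 * (biased) stack, bit zero of %sp is set and the full 64-bit window at
 * %sp + STACK_BIAS is used instead.
 */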
static int regwindow64_get(struct task_struct *target,
			   const struct pt_regs *regs,
			   struct reg_window *wbuf)
{
	unsigned long rw_addr = regs->u_regs[UREG_I6];

	if (!test_thread_64bit_stack(rw_addr)) {
		struct reg_window32 win32;
		int i;

		if (get_from_target(target, rw_addr, &win32, sizeof(win32)))
			return -EFAULT;
		for (i = 0; i < 8; i++)
			wbuf->locals[i] = win32.locals[i];
		for (i = 0; i < 8; i++)
			wbuf->ins[i] = win32.ins[i];
	} else {
		rw_addr += STACK_BIAS;
		if (get_from_target(target, rw_addr, wbuf, sizeof(*wbuf)))
			return -EFAULT;
	}

	return 0;
}

static int regwindow64_set(struct task_struct *target,
			   const struct pt_regs *regs,
			   struct reg_window *wbuf)
{
	unsigned long rw_addr = regs->u_regs[UREG_I6];

	if (!test_thread_64bit_stack(rw_addr)) {
		struct reg_window32 win32;
		int i;

		for (i = 0; i < 8; i++)
			win32.locals[i] = wbuf->locals[i];
		for (i = 0; i < 8; i++)
			win32.ins[i] = wbuf->ins[i];

		if (set_to_target(target, rw_addr, &win32, sizeof(win32)))
			return -EFAULT;
	} else {
		rw_addr += STACK_BIAS;
		if (set_to_target(target, rw_addr, wbuf, sizeof(*wbuf)))
			return -EFAULT;
	}

	return 0;
}

enum sparc_regset {
	REGSET_GENERAL,
	REGSET_FP,
};

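/* NT_PRSTATUS regset: the 16 globals/outs from pt_regs, the 16 locals/ins
 * from the current register window on the user stack, then TSTATE, TPC,
 * TNPC and Y -- 36 64-bit slots in total.
 */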
static int genregs64_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	const struct pt_regs *regs = task_pt_regs(target);
	struct reg_window window;

	if (target == current)
		flushw_user();

	membuf_write(&to, regs->u_regs, 16 * sizeof(u64));
	if (!to.left)
		return 0;
	if (regwindow64_get(target, regs, &window))
		return -EFAULT;
	membuf_write(&to, &window, 16 * sizeof(u64));
	/* TSTATE, TPC, TNPC */
	membuf_write(&to, &regs->tstate, 3 * sizeof(u64));
	return membuf_store(&to, (u64)regs->y);
}

static int genregs64_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	if (target == current)
		flushw_user();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->u_regs,
				 0, 16 * sizeof(u64));
	if (!ret && count && pos < (32 * sizeof(u64))) {
		struct reg_window window;

		if (regwindow64_get(target, regs, &window))
			return -EFAULT;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &window,
					 16 * sizeof(u64),
					 32 * sizeof(u64));

		if (!ret &&
		    regwindow64_set(target, regs, &window))
			return -EFAULT;
	}

	if (!ret && count > 0) {
		unsigned long tstate;

		/* TSTATE */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &tstate,
					 32 * sizeof(u64),
					 33 * sizeof(u64));
		if (!ret) {
			/* Only the condition codes and the "in syscall"
			 * state can be modified in the %tstate register.
			 */
			tstate &= (TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
			regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
			regs->tstate |= tstate;
		}
	}

	if (!ret) {
		/* TPC, TNPC */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->tpc,
					 33 * sizeof(u64),
					 35 * sizeof(u64));
	}

	if (!ret) {
		unsigned long y = regs->y;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &y,
					 35 * sizeof(u64),
					 36 * sizeof(u64));
		if (!ret)
			regs->y = y;
	}

	if (!ret)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  36 * sizeof(u64), -1);

	return ret;
}

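/* NT_PRFPREG regset: the FP register file as 32 64-bit words (F0 --> F63),
 * then FSR, GSR and FPRS.  Halves of the FP file that have not been saved
 * (FPRS_DL/FPRS_DU clear) read back as zero.
 */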
static int fpregs64_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	struct thread_info *t = task_thread_info(target);
	unsigned long fprs;

	if (target == current)
		save_and_clear_fpu();

	fprs = t->fpsaved[0];

	if (fprs & FPRS_DL)
		membuf_write(&to, t->fpregs, 16 * sizeof(u64));
	else
		membuf_zero(&to, 16 * sizeof(u64));

	if (fprs & FPRS_DU)
		membuf_write(&to, t->fpregs + 16, 16 * sizeof(u64));
	else
		membuf_zero(&to, 16 * sizeof(u64));
	if (fprs & FPRS_FEF) {
		membuf_store(&to, t->xfsr[0]);
		membuf_store(&to, t->gsr[0]);
	} else {
		membuf_zero(&to, 2 * sizeof(u64));
	}
	return membuf_store(&to, fprs);
}

static int fpregs64_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->xfsr,
					 32 * sizeof(u64),
					 33 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->gsr,
					 33 * sizeof(u64),
					 34 * sizeof(u64));

	fprs = task_thread_info(target)->fpsaved[0];
	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fprs,
					 34 * sizeof(u64),
					 35 * sizeof(u64));
	}

	fprs |= (FPRS_FEF | FPRS_DL | FPRS_DU);
	task_thread_info(target)->fpsaved[0] = fprs;

	if (!ret)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  35 * sizeof(u64), -1);
	return ret;
}

static const struct user_regset sparc64_regsets[] = {
	/* Format is:
	 *	G0 --> G7
	 *	O0 --> O7
	 *	L0 --> L7
	 *	I0 --> I7
	 *	TSTATE, TPC, TNPC, Y
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = 36,
		.size = sizeof(u64), .align = sizeof(u64),
		.regset_get = genregs64_get, .set = genregs64_set
	},
	/* Format is:
	 *	F0 --> F63
	 *	FSR
	 *	GSR
	 *	FPRS
	 */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = 35,
		.size = sizeof(u64), .align = sizeof(u64),
		.regset_get = fpregs64_get, .set = fpregs64_set
	},
};

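/* Layout used by PTRACE_GETREGS64/PTRACE_SETREGS64: %g1-%g7 and %o0-%o7
 * from pt_regs, a constant zero word, then TSTATE, TPC, TNPC and Y
 * (20 64-bit slots).
 */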
static int getregs64_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	const struct pt_regs *regs = task_pt_regs(target);

	if (target == current)
		flushw_user();

	membuf_write(&to, regs->u_regs + 1, 15 * sizeof(u64));
	membuf_store(&to, (u64)0);
	membuf_write(&to, &regs->tstate, 3 * sizeof(u64));
	return membuf_store(&to, (u64)regs->y);
}

static int setregs64_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned long y = regs->y;
	unsigned long tstate;
	int ret;

	if (target == current)
		flushw_user();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->u_regs + 1,
				 0 * sizeof(u64),
				 15 * sizeof(u64));
	if (ret)
		return ret;
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
				  15 * sizeof(u64), 16 * sizeof(u64));
	/* TSTATE */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &tstate,
				 16 * sizeof(u64),
				 17 * sizeof(u64));
	if (ret)
		return ret;
	/* Only the condition codes and the "in syscall"
	 * state can be modified in the %tstate register.
	 */
	tstate &= (TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
	regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
	regs->tstate |= tstate;

	/* TPC, TNPC */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->tpc,
				 17 * sizeof(u64),
				 19 * sizeof(u64));
	if (ret)
		return ret;
	/* Y */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &y,
				 19 * sizeof(u64),
				 20 * sizeof(u64));
	if (!ret)
		regs->y = y;
	return ret;
}

static const struct user_regset ptrace64_regsets[] = {
	/* Format is:
	 *	G1 --> G7
	 *	O0 --> O7
	 *	0
	 *	TSTATE, TPC, TNPC, Y
	 */
	[REGSET_GENERAL] = {
		.n = 20, .size = sizeof(u64),
		.regset_get = getregs64_get, .set = setregs64_set,
	},
};

static const struct user_regset_view ptrace64_view = {
	.regsets = ptrace64_regsets, .n = ARRAY_SIZE(ptrace64_regsets)
};

static const struct user_regset_view user_sparc64_view = {
	.name = "sparc64", .e_machine = EM_SPARCV9,
	.regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
};

#ifdef CONFIG_COMPAT
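/* Compat (32-bit sparc) view of the general registers: the 16 globals/outs
 * truncated to 32 bits, the 16 locals/ins read from the tracee's 32-bit
 * stack frame, then PSR, PC, nPC, Y, WIM and TBR (WIM/TBR read as zero).
 */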
static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	const struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[16];
	int i;

	if (target == current)
		flushw_user();

	for (i = 0; i < 16; i++)
		membuf_store(&to, (u32)regs->u_regs[i]);
	if (!to.left)
		return 0;
	if (get_from_target(target, regs->u_regs[UREG_I6],
			    uregs, sizeof(uregs)))
		return -EFAULT;
	membuf_write(&to, uregs, 16 * sizeof(u32));
	membuf_store(&to, (u32)tstate_to_psr(regs->tstate));
	membuf_store(&to, (u32)(regs->tpc));
	membuf_store(&to, (u32)(regs->tnpc));
	membuf_store(&to, (u32)(regs->y));
	return membuf_zero(&to, 2 * sizeof(u32));
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	compat_ulong_t __user *reg_window;
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target == current)
		flushw_user();

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf) {
		for (; count > 0 && pos < 16; count--)
			regs->u_regs[pos++] = *k++;

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		reg_window -= 16;
		if (target == current) {
			for (; count > 0 && pos < 32; count--) {
				if (put_user(*k++, &reg_window[pos++]))
					return -EFAULT;
			}
		} else {
			for (; count > 0 && pos < 32; count--) {
				if (access_process_vm(target,
						      (unsigned long)
						      &reg_window[pos],
						      (void *) k,
						      sizeof(*k),
						      FOLL_FORCE | FOLL_WRITE)
				    != sizeof(*k))
					return -EFAULT;
				k++;
				pos++;
			}
		}
	} else {
		for (; count > 0 && pos < 16; count--) {
			if (get_user(reg, u++))
				return -EFAULT;
			regs->u_regs[pos++] = reg;
		}

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		reg_window -= 16;
		if (target == current) {
			for (; count > 0 && pos < 32; count--) {
				if (get_user(reg, u++) ||
				    put_user(reg, &reg_window[pos++]))
					return -EFAULT;
			}
		} else {
			for (; count > 0 && pos < 32; count--) {
				if (get_user(reg, u++))
					return -EFAULT;
				if (access_process_vm(target,
						      (unsigned long)
						      &reg_window[pos],
						      &reg, sizeof(reg),
						      FOLL_FORCE | FOLL_WRITE)
				    != sizeof(reg))
					return -EFAULT;
				pos++;
				u++;
			}
		}
	}
	while (count > 0) {
		unsigned long tstate;

		if (kbuf)
			reg = *k++;
		else if (get_user(reg, u++))
			return -EFAULT;

		switch (pos) {
		case 32: /* PSR */
			tstate = regs->tstate;
			tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
			tstate |= psr_to_tstate_icc(reg);
			if (reg & PSR_SYSCALL)
				tstate |= TSTATE_SYSCALL;
			regs->tstate = tstate;
			break;
		case 33: /* PC */
			regs->tpc = reg;
			break;
		case 34: /* NPC */
			regs->tnpc = reg;
			break;
		case 35: /* Y */
			regs->y = reg;
			break;
		case 36: /* WIM */
		case 37: /* TBR */
			break;
		default:
			goto finish;
		}

		pos++;
		count--;
	}
finish:
	pos *= sizeof(reg);
	count *= sizeof(reg);

	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
				  38 * sizeof(reg), -1);
	return 0;
}

static int fpregs32_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	struct thread_info *t = task_thread_info(target);
	bool enabled;

	if (target == current)
		save_and_clear_fpu();

	enabled = t->fpsaved[0] & FPRS_FEF;

	membuf_write(&to, t->fpregs, 32 * sizeof(u32));
	membuf_zero(&to, sizeof(u32));
	if (enabled)
		membuf_store(&to, (u32)t->xfsr[0]);
	else
		membuf_zero(&to, sizeof(u32));
	membuf_store(&to, (u32)((enabled << 8) | (8 << 16)));
	return membuf_zero(&to, 64 * sizeof(u32));
}

static int fpregs32_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u32));
	if (!ret)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  32 * sizeof(u32),
					  33 * sizeof(u32));
	if (!ret && count > 0) {
		compat_ulong_t fsr;
		unsigned long val;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fsr,
					 33 * sizeof(u32),
					 34 * sizeof(u32));
		if (!ret) {
			val = task_thread_info(target)->xfsr[0];
			val &= 0xffffffff00000000UL;
			val |= fsr;
			task_thread_info(target)->xfsr[0] = val;
		}
	}

	fprs |= (FPRS_FEF | FPRS_DL);
	task_thread_info(target)->fpsaved[0] = fprs;

	if (!ret)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  34 * sizeof(u32), -1);
	return ret;
}

static const struct user_regset sparc32_regsets[] = {
	/* Format is:
	 *	G0 --> G7
	 *	O0 --> O7
	 *	L0 --> L7
	 *	I0 --> I7
	 *	PSR, PC, nPC, Y, WIM, TBR
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = 38,
		.size = sizeof(u32), .align = sizeof(u32),
		.regset_get = genregs32_get, .set = genregs32_set
	},
	/* Format is:
	 *	F0 --> F31
	 *	empty 32-bit word
	 *	FSR (32-bit word)
	 *	FPU QUEUE COUNT (8-bit char)
	 *	FPU QUEUE ENTRYSIZE (8-bit char)
	 *	FPU ENABLED (8-bit char)
	 *	empty 8-bit char
	 *	FPU QUEUE (64 32-bit ints)
	 */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = 99,
		.size = sizeof(u32), .align = sizeof(u32),
		.regset_get = fpregs32_get, .set = fpregs32_set
	},
};

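/* Layout used by the 32-bit PTRACE_GETREGS/PTRACE_SETREGS requests:
 * PSR, PC, nPC and Y followed by %g1-%g7 and %o0-%o7 (19 32-bit words).
 */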
static int getregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int i;

	if (target == current)
		flushw_user();

	membuf_store(&to, (u32)tstate_to_psr(regs->tstate));
	membuf_store(&to, (u32)(regs->tpc));
	membuf_store(&to, (u32)(regs->tnpc));
	membuf_store(&to, (u32)(regs->y));
	for (i = 1; i < 16; i++)
		membuf_store(&to, (u32)regs->u_regs[i]);
	return to.left;
}

static int setregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned long tstate;
	u32 uregs[19];
	int i, ret;

	if (target == current)
		flushw_user();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 uregs,
				 0, 19 * sizeof(u32));
	if (ret)
		return ret;

	tstate = regs->tstate;
	tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
	tstate |= psr_to_tstate_icc(uregs[0]);
	if (uregs[0] & PSR_SYSCALL)
		tstate |= TSTATE_SYSCALL;
	regs->tstate = tstate;
	regs->tpc = uregs[1];
	regs->tnpc = uregs[2];
	regs->y = uregs[3];

	for (i = 1; i < 15; i++)
		regs->u_regs[i] = uregs[3 + i];
	return 0;
}

static int getfpregs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	struct thread_info *t = task_thread_info(target);

	if (target == current)
		save_and_clear_fpu();

	membuf_write(&to, t->fpregs, 32 * sizeof(u32));
	if (t->fpsaved[0] & FPRS_FEF)
		membuf_store(&to, (u32)t->xfsr[0]);
	else
		membuf_zero(&to, sizeof(u32));
	return membuf_zero(&to, 35 * sizeof(u32));
}

static int setfpregs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u32));
	if (!ret) {
		compat_ulong_t fsr;
		unsigned long val;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fsr,
					 32 * sizeof(u32),
					 33 * sizeof(u32));
		if (!ret) {
			val = task_thread_info(target)->xfsr[0];
			val &= 0xffffffff00000000UL;
			val |= fsr;
			task_thread_info(target)->xfsr[0] = val;
		}
	}

	fprs |= (FPRS_FEF | FPRS_DL);
	task_thread_info(target)->fpsaved[0] = fprs;
	return ret;
}

static const struct user_regset ptrace32_regsets[] = {
	[REGSET_GENERAL] = {
		.n = 19, .size = sizeof(u32),
		.regset_get = getregs_get, .set = setregs_set,
	},
	[REGSET_FP] = {
		.n = 68, .size = sizeof(u32),
		.regset_get = getfpregs_get, .set = setfpregs_set,
	},
};

static const struct user_regset_view ptrace32_view = {
	.regsets = ptrace32_regsets, .n = ARRAY_SIZE(ptrace32_regsets)
};

static const struct user_regset_view user_sparc32_view = {
	.name = "sparc", .e_machine = EM_SPARC,
	.regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
};
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_sparc32_view;
#endif
	return &user_sparc64_view;
}

#ifdef CONFIG_COMPAT
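/* User-visible layout handed out by the 32-bit PTRACE_GETFPREGS/SETFPREGS
 * requests: 32 FP registers, FSR, a few status words and the 16-entry FP
 * queue, 68 32-bit words in total.
 */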
struct compat_fps {
	unsigned int regs[32];
	unsigned int fsr;
	unsigned int flags;
	unsigned int extra;
	unsigned int fpqd;
	struct compat_fq {
		unsigned int insnaddr;
		unsigned int insn;
	} fpq[16];
};

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	compat_ulong_t caddr2 = task_pt_regs(current)->u_regs[UREG_I4];
	struct pt_regs32 __user *pregs;
	struct compat_fps __user *fps;
	unsigned long addr2 = caddr2;
	unsigned long addr = caddr;
	unsigned long data = cdata;
	int ret;

	pregs = (struct pt_regs32 __user *) addr;
	fps = (struct compat_fps __user *) addr;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = (addr != 0) ? -EIO : 0;
		break;

	case PTRACE_GETREGS:
		ret = copy_regset_to_user(child, &ptrace32_view,
					  REGSET_GENERAL, 0,
					  19 * sizeof(u32),
					  pregs);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child, &ptrace32_view,
					    REGSET_GENERAL, 0,
					    19 * sizeof(u32),
					    pregs);
		break;

	case PTRACE_GETFPREGS:
		ret = copy_regset_to_user(child, &ptrace32_view,
					  REGSET_FP, 0,
					  68 * sizeof(u32),
					  fps);
		break;

	case PTRACE_SETFPREGS:
		ret = copy_regset_from_user(child, &ptrace32_view,
					    REGSET_FP, 0,
					    33 * sizeof(u32),
					    fps);
		break;

	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr,
				      (char __user *)addr2, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, (char __user *) addr2,
				       addr, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		if (request == PTRACE_SPARC_DETACH)
			request = PTRACE_DETACH;
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

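/* User-visible layout handed out by PTRACE_GETFPREGS64/SETFPREGS64: the
 * 64 32-bit FP registers plus the 64-bit FSR, i.e. the 33 u64 copied in
 * arch_ptrace() below.
 */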
struct fps {
	unsigned int regs[64];
	unsigned long fsr;
};

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	const struct user_regset_view *view = task_user_regset_view(current);
	unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
	struct pt_regs __user *pregs;
	struct fps __user *fps;
	void __user *addr2p;
	int ret;

	pregs = (struct pt_regs __user *) addr;
	fps = (struct fps __user *) addr;
	addr2p = (void __user *) addr2;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = (addr != 0) ? -EIO : 0;
		break;

	case PTRACE_GETREGS64:
		ret = copy_regset_to_user(child, &ptrace64_view,
					  REGSET_GENERAL, 0,
					  19 * sizeof(u64),
					  pregs);
		break;

	case PTRACE_SETREGS64:
		ret = copy_regset_from_user(child, &ptrace64_view,
					    REGSET_GENERAL, 0,
					    19 * sizeof(u64),
					    pregs);
		break;

	case PTRACE_GETFPREGS64:
		ret = copy_regset_to_user(child, view, REGSET_FP,
					  0 * sizeof(u64),
					  33 * sizeof(u64),
					  fps);
		break;

	case PTRACE_SETFPREGS64:
		ret = copy_regset_from_user(child, view, REGSET_FP,
					    0 * sizeof(u64),
					    33 * sizeof(u64),
					    fps);
		break;

	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr, addr2p, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, addr2p, addr, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		if (request == PTRACE_SPARC_DETACH)
			request = PTRACE_DETACH;
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

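/* Hooks run by the system call trap code around every syscall: the
 * seccomp check, context-tracking user exit/entry, ptrace syscall-trace
 * reporting, the raw syscall tracepoints and audit.  syscall_trace_enter()
 * returns whatever ptrace_report_syscall_entry() returned (0 when no
 * tracer intervenes).
 */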
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	int ret = 0;

	/* do the secure computing check first */
	secure_computing_strict(regs->u_regs[UREG_G1]);

	if (test_thread_flag(TIF_NOHZ))
		user_exit();

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ret = ptrace_report_syscall_entry(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->u_regs[UREG_G1]);

	audit_syscall_entry(regs->u_regs[UREG_G1], regs->u_regs[UREG_I0],
			    regs->u_regs[UREG_I1], regs->u_regs[UREG_I2],
			    regs->u_regs[UREG_I3]);

	return ret;
}

asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_NOHZ))
		user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->u_regs[UREG_I0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, 0);

	if (test_thread_flag(TIF_NOHZ))
		user_enter();
}

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static inline int regs_within_kernel_stack(struct pt_regs *regs,
					   unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS;
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(ksp & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS;
	unsigned long *addr = (unsigned long *)ksp;
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}