Add arch_ptrace_stop
This adds support to allow asm/ptrace.h to define two new macros,
arch_ptrace_stop_needed and arch_ptrace_stop.  These control special
machine-specific actions to be done before a ptrace stop.  The new code
compiles away to nothing when the new macros are not defined.  This is
the case on all machines to begin with.  On ia64, these macros will be
defined to solve the long-standing issue of ptrace vs register backing
store.

Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Petr Tesarik <ptesarik@suse.cz>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 941d2380e9
Commit: 1a669c2f16
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -204,6 +204,41 @@ static inline void user_enable_block_step(struct task_struct *task)
 }
 #endif	/* arch_has_block_step */
 
+#ifndef arch_ptrace_stop_needed
+/**
+ * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
+ * @code:	current->exit_code value ptrace will stop with
+ * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
+ *
+ * This is called with the siglock held, to decide whether or not it's
+ * necessary to release the siglock and call arch_ptrace_stop() with the
+ * same @code and @info arguments.  It can be defined to a constant if
+ * arch_ptrace_stop() is never required, or always is.  On machines where
+ * this makes sense, it should be defined to a quick test to optimize out
+ * calling arch_ptrace_stop() when it would be superfluous.  For example,
+ * if the thread has not been back to user mode since the last stop, the
+ * thread state might indicate that nothing needs to be done.
+ */
+#define arch_ptrace_stop_needed(code, info)	(0)
+#endif
+
+#ifndef arch_ptrace_stop
+/**
+ * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
+ * @code:	current->exit_code value ptrace will stop with
+ * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
+ *
+ * This is called with no locks held when arch_ptrace_stop_needed() has
+ * just returned nonzero.  It is allowed to block, e.g. for user memory
+ * access.  The arch can have machine-specific work to be done before
+ * ptrace stops.  On ia64, register backing store gets written back to user
+ * memory here.  Since this can be costly (requires dropping the siglock),
+ * we only do it when the arch requires it for this particular stop, as
+ * indicated by arch_ptrace_stop_needed().
+ */
+#define arch_ptrace_stop(code, info)		do { } while (0)
+#endif
+
 #endif
 
 #endif
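For illustration only (not part of this commit): a sketch of what an architecture's asm/ptrace.h override of these two hooks might look like. The helpers arch_user_state_dirty() and arch_sync_user_state() are invented names for this sketch; the generic code only requires that the first be a cheap test safe to run with the siglock held, and that the second be allowed to block.

/* Hypothetical arch override -- names are illustrative, not from any real port. */
#define arch_ptrace_stop_needed(code, info) \
	arch_user_state_dirty(current)		/* quick check, called with siglock held */

#define arch_ptrace_stop(code, info) \
	arch_sync_user_state(current)		/* may block, e.g. fault on user pages */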
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1577,6 +1577,17 @@ static inline int may_ptrace_stop(void)
 	return 1;
 }
 
+/*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static int sigkill_pending(struct task_struct *tsk)
+{
+	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
+		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
+		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
+}
+
 /*
  * This must be called with current->sighand->siglock held.
  *
@@ -1590,6 +1601,26 @@ static inline int may_ptrace_stop(void)
  */
 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 {
+	int killed = 0;
+
+	if (arch_ptrace_stop_needed(exit_code, info)) {
+		/*
+		 * The arch code has something special to do before a
+		 * ptrace stop.  This is allowed to block, e.g. for faults
+		 * on user stack pages.  We can't keep the siglock while
+		 * calling arch_ptrace_stop, so we must release it now.
+		 * To preserve proper semantics, we must do this before
+		 * any signal bookkeeping like checking group_stop_count.
+		 * Meanwhile, a SIGKILL could come in before we retake the
+		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
+		 * So after regaining the lock, we must check for SIGKILL.
+		 */
+		spin_unlock_irq(&current->sighand->siglock);
+		arch_ptrace_stop(exit_code, info);
+		spin_lock_irq(&current->sighand->siglock);
+		killed = sigkill_pending(current);
+	}
+
 	/*
 	 * If there is a group stop in progress,
 	 * we must participate in the bookkeeping.
@@ -1605,7 +1636,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 	spin_unlock_irq(&current->sighand->siglock);
 	try_to_freeze();
 	read_lock(&tasklist_lock);
-	if (may_ptrace_stop()) {
+	if (!unlikely(killed) && may_ptrace_stop()) {
 		do_notify_parent_cldstop(current, CLD_TRAPPED);
 		read_unlock(&tasklist_lock);
 		schedule();
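To illustrate the commit message's point that the new code compiles away to nothing when an architecture defines neither macro, here is a sketch of what the guard added to ptrace_stop() expands to under the generic fallback definitions from include/linux/ptrace.h:

/* Sketch of the preprocessed result on an unmodified architecture. */
int killed = 0;

if ((0)) {				/* arch_ptrace_stop_needed(exit_code, info) */
	spin_unlock_irq(&current->sighand->siglock);
	do { } while (0);		/* arch_ptrace_stop(exit_code, info) */
	spin_lock_irq(&current->sighand->siglock);
	killed = sigkill_pending(current);
}
/* The condition is a constant zero, so the compiler eliminates the whole block. */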