// SPDX-License-Identifier: GPL-2.0-only
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/ftrace.h>
#include <linux/export.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>

#include "kstack.h"
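
/*
 * Walk the kernel stack of the thread described by @tp and record the
 * return address of each frame into @trace.  When @skip_sched is true,
 * addresses inside scheduler functions are not recorded.
 */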
static void __save_stack_trace(struct thread_info *tp,
			       struct stack_trace *trace,
			       bool skip_sched)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	struct task_struct *t;
	int graph = 0;
#endif
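
	/*
	 * For the current thread, flush the register windows out to the
	 * stack so the frames are visible in memory and read %fp
	 * directly; for any other thread, use the kernel stack pointer
	 * saved in its thread_info.
	 */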
	if (tp == current_thread_info()) {
		stack_trace_flush();
		__asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
	} else {
		ksp = tp->ksp;
	}

	fp = ksp + STACK_BIAS;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	t = tp->task;
#endif
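
	/*
	 * Walk the stack one frame at a time.  Saved stack and frame
	 * pointers on sparc64 are offset by STACK_BIAS, which is added
	 * back to obtain the real frame address.
	 */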
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(tp, fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);
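
		/*
		 * A trap frame has a struct pt_regs saved immediately
		 * above the register window.  Take the PC and frame
		 * pointer from pt_regs in that case, stopping at the
		 * user/kernel boundary; otherwise follow the normal
		 * caller PC and saved frame pointer.
		 */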
		if (kstack_is_trap_frame(tp, regs)) {
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		if (trace->skip > 0)
			trace->skip--;
		else if (!skip_sched || !in_sched_functions(pc)) {
			trace->entries[trace->nr_entries++] = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
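			/*
			 * On sparc the return goes to the saved address
			 * plus 8, so a saved PC of return_to_handler - 8
			 * means the function graph tracer has redirected
			 * this return; recover the real return address
			 * from the ret_stack and record it as well.
			 */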
			if ((pc + 8UL) == (unsigned long) &return_to_handler) {
				struct ftrace_ret_stack *ret_stack;
				ret_stack = ftrace_graph_get_ret_stack(t,
								       graph);
				if (ret_stack) {
					pc = ret_stack->ret;
					if (trace->nr_entries <
					    trace->max_entries)
						trace->entries[trace->nr_entries++] = pc;
					graph++;
				}
			}
#endif
		}
	} while (trace->nr_entries < trace->max_entries);
}
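
/* Capture the current task's kernel stack into @trace. */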
void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current_thread_info(), trace, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
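
/*
 * Capture @tsk's kernel stack into @trace, skipping entries that fall
 * inside scheduler functions.
 */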
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct thread_info *tp = task_thread_info(tsk);

	__save_stack_trace(tp, trace, true);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);