// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/kernel/unwind.c
 *
 * Copyright (C) 2008 ARM Limited
 *
 * Stack unwinding support for ARM
 *
 * An ARM EABI version of gcc is required to generate the unwind
 * tables. For information about the structure of the unwind tables,
 * see "Exception Handling ABI for the ARM Architecture" at:
 *
 * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html
 */

#ifndef __CHECKER__
#if !defined (__ARM_EABI__)
#warning Your compiler does not have EABI support.
#warning ARM unwind is known to compile only with EABI compilers.
#warning Change compiler or disable ARM_UNWIND option.
#endif
#endif /* __CHECKER__ */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
#include <asm/unwind.h>

/* Dummy functions to avoid linker complaints */
void __aeabi_unwind_cpp_pr0(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0);

void __aeabi_unwind_cpp_pr1(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1);

void __aeabi_unwind_cpp_pr2(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);

struct unwind_ctrl_block {
	unsigned long vrs[16];		/* virtual register set */
	const unsigned long *insn;	/* pointer to the current instructions word */
	unsigned long sp_low;		/* lowest value of sp allowed */
	unsigned long sp_high;		/* highest value of sp allowed */
	unsigned long *lr_addr;		/* address of LR value on the stack */
	/*
	 * 1 : check for stack overflow for each register pop.
	 * 0 : save overhead if there is plenty of stack remaining.
	 */
	int check_each_pop;
	int entries;			/* number of entries left to interpret */
	int byte;			/* current byte number in the instructions word */
};
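
/*
 * Note: Thumb-2 code conventionally uses r7 as the frame pointer, while ARM
 * code uses r11; the virtual register set above is indexed with these names.
 */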
enum regs {
#ifdef CONFIG_THUMB2_KERNEL
	FP = 7,
#else
	FP = 11,
#endif
	SP = 13,
	LR = 14,
	PC = 15
};

extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];

static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);

/* Convert a prel31 symbol to an absolute address */
#define prel31_to_addr(ptr)				\
({							\
	/* sign-extend to 32 bits */			\
	long offset = (((long)*(ptr)) << 1) >> 1;	\
	(unsigned long)(ptr) + offset;			\
})
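
/*
 * A prel31 value is a signed 31-bit offset relative to the address of the
 * word that holds it; e.g. an index word containing 0x7ffffffc decodes as
 * -4, i.e. the address four bytes below the word itself.
 */
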
/*
 * Binary search in the unwind index. The entries are
 * guaranteed to be sorted in ascending order by the linker.
 *
 * start = first entry
 * origin = first entry with positive offset (or stop if there is no such entry)
 * stop - 1 = last entry
 */
static const struct unwind_idx *search_index(unsigned long addr,
				       const struct unwind_idx *start,
				       const struct unwind_idx *origin,
				       const struct unwind_idx *stop)
{
	unsigned long addr_prel31;

	pr_debug("%s(%08lx, %p, %p, %p)\n",
			__func__, addr, start, origin, stop);

	/*
	 * only search in the section with the matching sign. This way the
	 * prel31 numbers can be compared as unsigned longs.
	 */
	if (addr < (unsigned long)start)
		/* negative offsets: [start; origin) */
		stop = origin;
	else
		/* positive offsets: [origin; stop) */
		start = origin;

	/* prel31 for address relative to start */
	addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;

	while (start < stop - 1) {
		const struct unwind_idx *mid = start + ((stop - start) >> 1);

		/*
		 * As addr_prel31 is relative to start an offset is needed to
		 * make it relative to mid.
		 */
		if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
				mid->addr_offset)
			stop = mid;
		else {
			/* keep addr_prel31 relative to start */
			addr_prel31 -= ((unsigned long)mid -
					(unsigned long)start);
			start = mid;
		}
	}

	if (likely(start->addr_offset <= addr_prel31))
		return start;
	else {
		pr_warn("unwind: Unknown symbol address %08lx\n", addr);
		return NULL;
	}
}
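
/*
 * Find the index entry at which the prel31 offsets change sign: entries
 * before it hold negative offsets, entries from it onwards hold positive
 * ones (see the comment above search_index()).
 */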
static const struct unwind_idx *unwind_find_origin(
		const struct unwind_idx *start, const struct unwind_idx *stop)
{
	pr_debug("%s(%p, %p)\n", __func__, start, stop);
	while (start < stop) {
		const struct unwind_idx *mid = start + ((stop - start) >> 1);

		if (mid->addr_offset >= 0x40000000)
			/* negative offset */
			start = mid + 1;
		else
			/* positive offset */
			stop = mid;
	}
	pr_debug("%s -> %p\n", __func__, stop);
	return stop;
}

static const struct unwind_idx *unwind_find_idx(unsigned long addr)
{
	const struct unwind_idx *idx = NULL;
	unsigned long flags;

	pr_debug("%s(%08lx)\n", __func__, addr);

	if (core_kernel_text(addr)) {
		if (unlikely(!__origin_unwind_idx))
			__origin_unwind_idx =
				unwind_find_origin(__start_unwind_idx,
						__stop_unwind_idx);

		/* main unwind table */
		idx = search_index(addr, __start_unwind_idx,
				   __origin_unwind_idx,
				   __stop_unwind_idx);
	} else {
		/* module unwind tables */
		struct unwind_table *table;

		raw_spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->begin_addr &&
			    addr < table->end_addr) {
				idx = search_index(addr, table->start,
						   table->origin,
						   table->stop);
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&unwind_lock, flags);
	}

	pr_debug("%s: idx = %p\n", __func__, idx);
	return idx;
}
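
/*
 * Unwind opcodes are packed most-significant byte first within each 32-bit
 * word of the table; ctrl->byte therefore counts down towards byte 0 and
 * then wraps to byte 3 of the next word.
 */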
static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
{
	unsigned long ret;

	if (ctrl->entries <= 0) {
		pr_warn("unwind: Corrupt unwind table\n");
		return 0;
	}

	ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff;

	if (ctrl->byte == 0) {
		ctrl->insn++;
		ctrl->entries--;
		ctrl->byte = 3;
	} else
		ctrl->byte--;

	return ret;
}

/* Before popping a register, check whether it is feasible or not */
static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
				unsigned long **vsp, unsigned int reg)
{
	if (unlikely(ctrl->check_each_pop))
		if (*vsp >= (unsigned long *)ctrl->sp_high)
			return -URC_FAILURE;

	/* Use READ_ONCE_NOCHECK here to avoid this memory access
	 * from being tracked by KASAN.
	 */
	ctrl->vrs[reg] = READ_ONCE_NOCHECK(*(*vsp));
	if (reg == 14)
		ctrl->lr_addr = *vsp;
	(*vsp)++;
	return URC_OK;
}

/* Helper functions to execute the instructions */
static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl,
					    unsigned long mask)
{
	unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
	int load_sp, reg = 4;

	load_sp = mask & (1 << (13 - 4));
	while (mask) {
		if (mask & 1)
			if (unwind_pop_register(ctrl, &vsp, reg))
				return -URC_FAILURE;
		mask >>= 1;
		reg++;
	}
	if (!load_sp) {
		ctrl->vrs[SP] = (unsigned long)vsp;
	} else {
		ctrl->sp_low = ctrl->vrs[SP];
		ctrl->sp_high = ALIGN(ctrl->sp_low, THREAD_SIZE);
	}

	return URC_OK;
}

static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
					unsigned long insn)
{
	unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
	int reg;

	/* pop R4-R[4+bbb] */
	for (reg = 4; reg <= 4 + (insn & 7); reg++)
		if (unwind_pop_register(ctrl, &vsp, reg))
			return -URC_FAILURE;

	if (insn & 0x8)
		if (unwind_pop_register(ctrl, &vsp, 14))
			return -URC_FAILURE;

	ctrl->vrs[SP] = (unsigned long)vsp;

	return URC_OK;
}

static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
					    unsigned long mask)
{
	unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
	int reg = 0;

	/* pop R0-R3 according to mask */
	while (mask) {
		if (mask & 1)
			if (unwind_pop_register(ctrl, &vsp, reg))
				return -URC_FAILURE;
		mask >>= 1;
		reg++;
	}
	ctrl->vrs[SP] = (unsigned long)vsp;

	return URC_OK;
}
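
/*
 * Summary of the EHABI unwind byte-codes interpreted below (see the
 * "Exception Handling ABI for the ARM Architecture" document referenced at
 * the top of this file for the full encoding):
 *
 *   00xxxxxx            vsp += (xxxxxx << 2) + 4
 *   01xxxxxx            vsp -= (xxxxxx << 2) + 4
 *   1000iiii iiiiiiii   pop {r4-r15} under mask (all-zero mask: refuse to unwind)
 *   1001nnnn            vsp = r[nnnn]           (nnnn != 13, 15)
 *   10100nnn            pop r4-r[4+nnn]
 *   10101nnn            pop r4-r[4+nnn], r14
 *   10110000            finish
 *   10110001 0000iiii   pop r0-r3 under mask    (iiii != 0000)
 *   10110010 uleb128    vsp += 0x204 + (uleb128 << 2)
 *
 * Anything else is rejected as unhandled.
 */
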
/*
 * Execute the current unwind instruction.
 */
static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
{
	unsigned long insn = unwind_get_byte(ctrl);
	int ret = URC_OK;

	pr_debug("%s: insn = %08lx\n", __func__, insn);

	if ((insn & 0xc0) == 0x00)
		ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
	else if ((insn & 0xc0) == 0x40) {
		ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
		ctrl->sp_low = ctrl->vrs[SP];
	} else if ((insn & 0xf0) == 0x80) {
		unsigned long mask;

		insn = (insn << 8) | unwind_get_byte(ctrl);
		mask = insn & 0x0fff;
		if (mask == 0) {
			pr_warn("unwind: 'Refuse to unwind' instruction %04lx\n",
				insn);
			return -URC_FAILURE;
		}

		ret = unwind_exec_pop_subset_r4_to_r13(ctrl, mask);
		if (ret)
			goto error;
	} else if ((insn & 0xf0) == 0x90 &&
		   (insn & 0x0d) != 0x0d) {
		ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
		ctrl->sp_low = ctrl->vrs[SP];
		ctrl->sp_high = ALIGN(ctrl->sp_low, THREAD_SIZE);
	} else if ((insn & 0xf0) == 0xa0) {
		ret = unwind_exec_pop_r4_to_rN(ctrl, insn);
		if (ret)
			goto error;
	} else if (insn == 0xb0) {
		if (ctrl->vrs[PC] == 0)
			ctrl->vrs[PC] = ctrl->vrs[LR];
		/* no further processing */
		ctrl->entries = 0;
	} else if (insn == 0xb1) {
		unsigned long mask = unwind_get_byte(ctrl);

		if (mask == 0 || mask & 0xf0) {
			pr_warn("unwind: Spare encoding %04lx\n",
				(insn << 8) | mask);
			return -URC_FAILURE;
		}

		ret = unwind_exec_pop_subset_r0_to_r3(ctrl, mask);
		if (ret)
			goto error;
	} else if (insn == 0xb2) {
		unsigned long uleb128 = unwind_get_byte(ctrl);

		ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
	} else {
		pr_warn("unwind: Unhandled instruction %02lx\n", insn);
		return -URC_FAILURE;
	}

	pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__,
		 ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]);

error:
	return ret;
}

/*
 * Unwind a single frame: starting from frame->pc and frame->sp, replace the
 * frame's register values (fp, sp, lr, pc) with those of the caller.
 */
int unwind_frame(struct stackframe *frame)
{
	const struct unwind_idx *idx;
	struct unwind_ctrl_block ctrl;

	/* store the highest address on the stack to avoid crossing it */
	ctrl.sp_low = frame->sp;
	ctrl.sp_high = ALIGN(ctrl.sp_low - THREAD_SIZE, THREAD_ALIGN)
		       + THREAD_SIZE;

	pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
		 frame->pc, frame->lr, frame->sp);

	idx = unwind_find_idx(frame->pc);
	if (!idx) {
		if (frame->pc && kernel_text_address(frame->pc))
			pr_warn("unwind: Index not found %08lx\n", frame->pc);
		return -URC_FAILURE;
	}

	ctrl.vrs[FP] = frame->fp;
	ctrl.vrs[SP] = frame->sp;
	ctrl.vrs[LR] = frame->lr;
	ctrl.vrs[PC] = 0;

	if (idx->insn == 1)
		/* can't unwind */
		return -URC_FAILURE;
	else if (frame->pc == prel31_to_addr(&idx->addr_offset)) {
		/*
		 * Unwinding is tricky when we're halfway through the prologue,
		 * since the stack frame that the unwinder expects may not be
		 * fully set up yet. However, one thing we do know for sure is
		 * that if we are unwinding from the very first instruction of
		 * a function, we are still effectively in the stack frame of
		 * the caller, and the unwind info has no relevance yet.
		 */
		if (frame->pc == frame->lr)
			return -URC_FAILURE;
		frame->sp_low = frame->sp;
		frame->pc = frame->lr;
		return URC_OK;
	} else if ((idx->insn & 0x80000000) == 0)
		/* prel31 to the unwind table */
		ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
	else if ((idx->insn & 0xff000000) == 0x80000000)
		/* only personality routine 0 supported in the index */
		ctrl.insn = &idx->insn;
	else {
		pr_warn("unwind: Unsupported personality routine %08lx in the index at %p\n",
			idx->insn, idx);
		return -URC_FAILURE;
	}

	/* check the personality routine */
	if ((*ctrl.insn & 0xff000000) == 0x80000000) {
		ctrl.byte = 2;
		ctrl.entries = 1;
	} else if ((*ctrl.insn & 0xff000000) == 0x81000000) {
		ctrl.byte = 1;
		ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);
	} else {
		pr_warn("unwind: Unsupported personality routine %08lx at %p\n",
			*ctrl.insn, ctrl.insn);
		return -URC_FAILURE;
	}

	ctrl.check_each_pop = 0;

	while (ctrl.entries > 0) {
		int urc;
		if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs))
			ctrl.check_each_pop = 1;
		urc = unwind_exec_insn(&ctrl);
		if (urc < 0)
			return urc;
		if (ctrl.vrs[SP] < ctrl.sp_low || ctrl.vrs[SP] > ctrl.sp_high)
			return -URC_FAILURE;
	}

	if (ctrl.vrs[PC] == 0)
		ctrl.vrs[PC] = ctrl.vrs[LR];

	/* check for infinite loop */
	if (frame->pc == ctrl.vrs[PC] && frame->sp == ctrl.vrs[SP])
		return -URC_FAILURE;

	frame->fp = ctrl.vrs[FP];
	frame->sp = ctrl.vrs[SP];
	frame->lr = ctrl.vrs[LR];
	frame->pc = ctrl.vrs[PC];
	frame->sp_low = ctrl.sp_low;
	frame->lr_addr = ctrl.lr_addr;

	return URC_OK;
}
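
/*
 * Dump a backtrace starting either from the exception state in @regs (if
 * non-NULL), from the current call site when @tsk is the running task, or
 * from the context saved by __switch_to for a sleeping task.
 */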
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		      const char *loglvl)
{
	struct stackframe frame;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	if (regs) {
		arm_get_current_stackframe(regs, &frame);
		/* PC might be corrupted, use LR in that case. */
		if (!kernel_text_address(regs->ARM_pc))
			frame.pc = regs->ARM_lr;
	} else if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)unwind_backtrace;
	} else {
		/* task blocked in __switch_to */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		/*
		 * The function calling __switch_to cannot be a leaf function
		 * so LR is recovered from the stack.
		 */
		frame.lr = 0;
		frame.pc = thread_saved_pc(tsk);
	}

	while (1) {
		int urc;
		unsigned long where = frame.pc;

		urc = unwind_frame(&frame);
		if (urc < 0)
			break;
		if (in_entry_text(where))
			dump_mem(loglvl, "Exception stack", frame.sp_low,
				 frame.sp_low + sizeof(struct pt_regs));

		dump_backtrace_entry(where, frame.pc, 0, loglvl);
	}
}
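
/*
 * Register an exception index table (e.g. a module's .ARM.exidx section)
 * covering the text range [text_addr, text_addr + text_size).
 */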
struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
				      unsigned long text_addr,
				      unsigned long text_size)
{
	unsigned long flags;
	struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);

	pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
		 text_addr, text_size);

	if (!tab)
		return tab;

	tab->start = (const struct unwind_idx *)start;
	tab->stop = (const struct unwind_idx *)(start + size);
	tab->origin = unwind_find_origin(tab->start, tab->stop);
	tab->begin_addr = text_addr;
	tab->end_addr = text_addr + text_size;

	raw_spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&tab->list, &unwind_tables);
	raw_spin_unlock_irqrestore(&unwind_lock, flags);

	return tab;
}

void unwind_table_del(struct unwind_table *tab)
{
	unsigned long flags;

	if (!tab)
		return;

	raw_spin_lock_irqsave(&unwind_lock, flags);
	list_del(&tab->list);
	raw_spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(tab);
}