/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _POWERPC_PERF_CALLCHAIN_H
#define _POWERPC_PERF_CALLCHAIN_H
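
/*
 * User callchain samplers for 64-bit and 32-bit tasks, implemented in
 * callchain_64.c and callchain_32.c respectively.
 */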
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs);
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs);
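
/*
 * A user stack pointer must be non-NULL, naturally aligned for the
 * task's ABI (4 bytes on 32-bit, 8 bytes on 64-bit), and must leave
 * room for the minimum stack frame below STACK_TOP.
 */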
static inline bool invalid_user_sp(unsigned long sp)
{
	unsigned long mask = is_32bit_task() ? 3 : 7;
	unsigned long top = STACK_TOP - (is_32bit_task() ? 16 : 32);

	return (!sp || (sp & mask) || (sp > top));
}

/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables. Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static inline int __read_user_stack(const void __user *ptr, void *ret,
				    size_t size)
{
	unsigned long addr = (unsigned long)ptr;

	/* Reject reads that leave the user address space or are misaligned. */
	if (addr > TASK_SIZE - size || (addr & (size - 1)))
		return -EFAULT;

	/* Non-faulting copy: fails instead of faulting the page in. */
	return copy_from_user_nofault(ret, ptr, size);
}
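
/*
 * Typical usage (illustrative sketch, modeled on the callers in
 * callchain_32.c and callchain_64.c): read one naturally aligned
 * word from the user stack:
 *
 *	static int read_user_stack_64(const unsigned long __user *ptr,
 *				      unsigned long *ret)
 *	{
 *		return __read_user_stack(ptr, ret, sizeof(*ret));
 *	}
 */
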
#endif /* _POWERPC_PERF_CALLCHAIN_H */