Merge branch 'for-next/uaccess' into for-next/core
* for-next/uaccess:
  : uaccess routines clean-up and set_fs() removal
  arm64: mark __system_matches_cap as __maybe_unused
  arm64: uaccess: remove vestigal UAO support
  arm64: uaccess: remove redundant PAN toggling
  arm64: uaccess: remove addr_limit_user_check()
  arm64: uaccess: remove set_fs()
  arm64: uaccess cleanup macro naming
  arm64: uaccess: split user/kernel routines
  arm64: uaccess: refactor __{get,put}_user
  arm64: uaccess: simplify __copy_user_flushcache()
  arm64: uaccess: rename privileged uaccess routines
  arm64: sdei: explicitly simulate PAN/UAO entry
  arm64: sdei: move uaccess logic to arch/arm64/
  arm64: head.S: always initialize PSTATE
  arm64: head.S: cleanup SCTLR_ELx initialization
  arm64: head.S: rename el2_setup -> init_kernel_el
  arm64: add C wrappers for SET_PSTATE_*()
  arm64: ensure ERET from kthread is illegal
Commit e0f7a8d5e8
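(For context, not part of the diff below: with set_fs()/KERNEL_DS gone, code that used to widen the address limit in order to read possibly-faulting kernel memory goes through the *_nofault() helpers that the HAVE_GET_KERNEL_NOFAULT / __get_kernel_nofault() plumbing in this series provides. A minimal kernel-context sketch; the caller and its names are invented for illustration, and get_kernel_nofault() returns 0 on success or -EFAULT on a fault.)

#include <linux/uaccess.h>

/* Hypothetical caller, for illustration only. */
static int read_kernel_long(const unsigned long *addr, unsigned long *val)
{
	/*
	 * Old pattern, removed by this series:
	 *   mm_segment_t old_fs = get_fs();
	 *   set_fs(KERNEL_DS);
	 *   err = __get_user(*val, (unsigned long __user *)addr);
	 *   set_fs(old_fs);
	 */
	return get_kernel_nofault(*val, addr);	/* 0 or -EFAULT */
}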
@ -195,7 +195,6 @@ config ARM64
select PCI_SYSCALL if PCI
select POWER_RESET
select POWER_SUPPLY
select SET_FS
select SPARSE_IRQ
select SWIOTLB
select SYSCTL_EXCEPTION_TRACE

@ -1428,27 +1427,6 @@ endmenu

menu "ARMv8.2 architectural features"

config ARM64_UAO
bool "Enable support for User Access Override (UAO)"
default y
help
User Access Override (UAO; part of the ARMv8.2 Extensions)
causes the 'unprivileged' variant of the load/store instructions to
be overridden to be privileged.

This option changes get_user() and friends to use the 'unprivileged'
variant of the load/store instructions. This ensures that user-space
really did have access to the supplied memory. When addr_limit is
set to kernel memory the UAO bit will be set, allowing privileged
access to kernel memory.

Choosing this option will cause copy_to_user() et al to use user-space
memory permissions.

The feature is detected at runtime, the kernel will use the
regular load/store instructions if the cpu does not implement the
feature.

config ARM64_PMEM
bool "Enable support for persistent memory"
select ARCH_HAS_PMEM_API

@ -59,62 +59,32 @@ alternative_else_nop_endif
|
|||
#endif
|
||||
|
||||
/*
|
||||
* Generate the assembly for UAO alternatives with exception table entries.
|
||||
* Generate the assembly for LDTR/STTR with exception table entries.
|
||||
* This is complicated as there is no post-increment or pair versions of the
|
||||
* unprivileged instructions, and USER() only works for single instructions.
|
||||
*/
|
||||
#ifdef CONFIG_ARM64_UAO
|
||||
.macro uao_ldp l, reg1, reg2, addr, post_inc
|
||||
alternative_if_not ARM64_HAS_UAO
|
||||
8888: ldp \reg1, \reg2, [\addr], \post_inc;
|
||||
8889: nop;
|
||||
nop;
|
||||
alternative_else
|
||||
ldtr \reg1, [\addr];
|
||||
ldtr \reg2, [\addr, #8];
|
||||
add \addr, \addr, \post_inc;
|
||||
alternative_endif
|
||||
.macro user_ldp l, reg1, reg2, addr, post_inc
|
||||
8888: ldtr \reg1, [\addr];
|
||||
8889: ldtr \reg2, [\addr, #8];
|
||||
add \addr, \addr, \post_inc;
|
||||
|
||||
_asm_extable 8888b,\l;
|
||||
_asm_extable 8889b,\l;
|
||||
.endm
|
||||
|
||||
.macro uao_stp l, reg1, reg2, addr, post_inc
|
||||
alternative_if_not ARM64_HAS_UAO
|
||||
8888: stp \reg1, \reg2, [\addr], \post_inc;
|
||||
8889: nop;
|
||||
nop;
|
||||
alternative_else
|
||||
sttr \reg1, [\addr];
|
||||
sttr \reg2, [\addr, #8];
|
||||
add \addr, \addr, \post_inc;
|
||||
alternative_endif
|
||||
.macro user_stp l, reg1, reg2, addr, post_inc
|
||||
8888: sttr \reg1, [\addr];
|
||||
8889: sttr \reg2, [\addr, #8];
|
||||
add \addr, \addr, \post_inc;
|
||||
|
||||
_asm_extable 8888b,\l;
|
||||
_asm_extable 8889b,\l;
|
||||
.endm
|
||||
|
||||
.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
|
||||
alternative_if_not ARM64_HAS_UAO
|
||||
8888: \inst \reg, [\addr], \post_inc;
|
||||
nop;
|
||||
alternative_else
|
||||
\alt_inst \reg, [\addr];
|
||||
add \addr, \addr, \post_inc;
|
||||
alternative_endif
|
||||
.macro user_ldst l, inst, reg, addr, post_inc
|
||||
8888: \inst \reg, [\addr];
|
||||
add \addr, \addr, \post_inc;
|
||||
|
||||
_asm_extable 8888b,\l;
|
||||
.endm
|
||||
#else
|
||||
.macro uao_ldp l, reg1, reg2, addr, post_inc
|
||||
USER(\l, ldp \reg1, \reg2, [\addr], \post_inc)
|
||||
.endm
|
||||
.macro uao_stp l, reg1, reg2, addr, post_inc
|
||||
USER(\l, stp \reg1, \reg2, [\addr], \post_inc)
|
||||
.endm
|
||||
.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
|
||||
USER(\l, \inst \reg, [\addr], \post_inc)
|
||||
.endm
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
@ -16,8 +16,6 @@
|
|||
#define ARM64_WORKAROUND_CAVIUM_23154 6
|
||||
#define ARM64_WORKAROUND_834220 7
|
||||
#define ARM64_HAS_NO_HW_PREFETCH 8
|
||||
#define ARM64_HAS_UAO 9
|
||||
#define ARM64_ALT_PAN_NOT_UAO 10
|
||||
#define ARM64_HAS_VIRT_HOST_EXTN 11
|
||||
#define ARM64_WORKAROUND_CAVIUM_27456 12
|
||||
#define ARM64_HAS_32BIT_EL0 13
|
||||
|
|
|
@ -667,10 +667,16 @@ static __always_inline bool system_supports_fpsimd(void)
|
|||
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
|
||||
}
|
||||
|
||||
static inline bool system_uses_hw_pan(void)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_ARM64_PAN) &&
|
||||
cpus_have_const_cap(ARM64_HAS_PAN);
|
||||
}
|
||||
|
||||
static inline bool system_uses_ttbr0_pan(void)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
|
||||
!cpus_have_const_cap(ARM64_HAS_PAN);
|
||||
!system_uses_hw_pan();
|
||||
}
|
||||
|
||||
static __always_inline bool system_supports_sve(void)
|
||||
|
@ -762,6 +768,13 @@ static inline bool cpu_has_hw_af(void)
|
|||
ID_AA64MMFR1_HADBS_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool cpu_has_pan(void)
|
||||
{
|
||||
u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
|
||||
return cpuid_feature_extract_unsigned_field(mmfr1,
|
||||
ID_AA64MMFR1_PAN_SHIFT);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM64_AMU_EXTN
|
||||
/* Check whether the cpu supports the Activity Monitors Unit (AMU) */
|
||||
extern bool cpu_has_amu_feat(int cpu);
|
||||
|
|
|
@ -10,6 +10,5 @@
|
|||
#include <linux/sched.h>
|
||||
|
||||
extern unsigned long arch_align_stack(unsigned long sp);
|
||||
void uao_thread_switch(struct task_struct *next);
|
||||
|
||||
#endif /* __ASM_EXEC_H */
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
do { \
|
||||
unsigned int loops = FUTEX_MAX_LOOPS; \
|
||||
\
|
||||
uaccess_enable(); \
|
||||
uaccess_enable_privileged(); \
|
||||
asm volatile( \
|
||||
" prfm pstl1strm, %2\n" \
|
||||
"1: ldxr %w1, %2\n" \
|
||||
|
@ -39,7 +39,7 @@ do { \
|
|||
"+r" (loops) \
|
||||
: "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
|
||||
: "memory"); \
|
||||
uaccess_disable(); \
|
||||
uaccess_disable_privileged(); \
|
||||
} while (0)
|
||||
|
||||
static inline int
|
||||
|
@ -95,7 +95,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
|
|||
return -EFAULT;
|
||||
|
||||
uaddr = __uaccess_mask_ptr(_uaddr);
|
||||
uaccess_enable();
|
||||
uaccess_enable_privileged();
|
||||
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
|
||||
" prfm pstl1strm, %2\n"
|
||||
"1: ldxr %w1, %2\n"
|
||||
|
@ -118,7 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
|
|||
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
|
||||
: "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
|
||||
: "memory");
|
||||
uaccess_disable();
|
||||
uaccess_disable_privileged();
|
||||
|
||||
if (!ret)
|
||||
*uval = val;
|
||||
|
|
|
@ -8,9 +8,6 @@
|
|||
#ifndef __ASM_PROCESSOR_H
|
||||
#define __ASM_PROCESSOR_H
|
||||
|
||||
#define KERNEL_DS UL(-1)
|
||||
#define USER_DS ((UL(1) << VA_BITS) - 1)
|
||||
|
||||
/*
|
||||
* On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
|
||||
* no point in shifting all network buffers by 2 bytes just to make some IP
|
||||
|
@ -48,6 +45,7 @@
|
|||
|
||||
#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN)
|
||||
#define TASK_SIZE_64 (UL(1) << vabits_actual)
|
||||
#define TASK_SIZE_MAX (UL(1) << VA_BITS)
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
|
||||
|
|
|
@ -16,6 +16,11 @@
|
|||
#define CurrentEL_EL1 (1 << 2)
|
||||
#define CurrentEL_EL2 (2 << 2)
|
||||
|
||||
#define INIT_PSTATE_EL1 \
|
||||
(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h)
|
||||
#define INIT_PSTATE_EL2 \
|
||||
(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h)
|
||||
|
||||
/*
|
||||
* PMR values used to mask/unmask interrupts.
|
||||
*
|
||||
|
@ -188,8 +193,7 @@ struct pt_regs {
|
|||
s32 syscallno;
|
||||
u32 unused2;
|
||||
#endif
|
||||
|
||||
u64 orig_addr_limit;
|
||||
u64 sdei_ttbr1;
|
||||
/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
|
||||
u64 pmr_save;
|
||||
u64 stackframe[2];
|
||||
|
|
|
@ -98,6 +98,10 @@
|
|||
#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
|
||||
#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift))
|
||||
|
||||
#define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x))
|
||||
#define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x))
|
||||
#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x))
|
||||
|
||||
#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
|
||||
__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
|
||||
|
||||
|
@ -578,6 +582,9 @@
|
|||
#define ENDIAN_SET_EL2 0
|
||||
#endif
|
||||
|
||||
#define INIT_SCTLR_EL2_MMU_OFF \
|
||||
(SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
|
||||
|
||||
/* SCTLR_EL1 specific flags. */
|
||||
#define SCTLR_EL1_ATA0 (BIT(42))
|
||||
|
||||
|
@ -611,12 +618,15 @@
|
|||
#define ENDIAN_SET_EL1 0
|
||||
#endif
|
||||
|
||||
#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
|
||||
SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
|
||||
SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
|
||||
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
|
||||
SCTLR_ELx_ITFSB| SCTLR_ELx_ATA | SCTLR_EL1_ATA0 |\
|
||||
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
|
||||
#define INIT_SCTLR_EL1_MMU_OFF \
|
||||
(ENDIAN_SET_EL1 | SCTLR_EL1_RES1)
|
||||
|
||||
#define INIT_SCTLR_EL1_MMU_ON \
|
||||
(SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \
|
||||
SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
|
||||
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
|
||||
SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \
|
||||
SCTLR_EL1_RES1)
|
||||
|
||||
/* MAIR_ELx memory attributes (used by Linux) */
|
||||
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
|
||||
|
|
|
@ -18,14 +18,11 @@ struct task_struct;
|
|||
#include <asm/stack_pointer.h>
|
||||
#include <asm/types.h>
|
||||
|
||||
typedef unsigned long mm_segment_t;
|
||||
|
||||
/*
|
||||
* low level task data that entry.S needs immediate access to.
|
||||
*/
|
||||
struct thread_info {
|
||||
unsigned long flags; /* low level flags */
|
||||
mm_segment_t addr_limit; /* address limit */
|
||||
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
|
||||
u64 ttbr0; /* saved TTBR0_EL1 */
|
||||
#endif
|
||||
|
@ -66,8 +63,7 @@ void arch_release_task_struct(struct task_struct *tsk);
|
|||
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
|
||||
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
|
||||
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
|
||||
#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
|
||||
#define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */
|
||||
#define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */
|
||||
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
|
||||
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
|
||||
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
|
||||
|
@ -93,7 +89,6 @@ void arch_release_task_struct(struct task_struct *tsk);
|
|||
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
||||
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
|
||||
#define _TIF_UPROBE (1 << TIF_UPROBE)
|
||||
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
|
||||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_32BIT (1 << TIF_32BIT)
|
||||
#define _TIF_SVE (1 << TIF_SVE)
|
||||
|
@ -101,7 +96,7 @@ void arch_release_task_struct(struct task_struct *tsk);
|
|||
|
||||
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
|
||||
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
|
||||
_TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT)
|
||||
_TIF_UPROBE | _TIF_MTE_ASYNC_FAULT)
|
||||
|
||||
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
|
||||
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
|
||||
|
@ -119,7 +114,6 @@ void arch_release_task_struct(struct task_struct *tsk);
|
|||
{ \
|
||||
.flags = _TIF_FOREIGN_FPSTATE, \
|
||||
.preempt_count = INIT_PREEMPT_COUNT, \
|
||||
.addr_limit = KERNEL_DS, \
|
||||
INIT_SCS \
|
||||
}
|
||||
|
||||
|
|
|
@ -24,44 +24,18 @@
|
|||
#include <asm/memory.h>
|
||||
#include <asm/extable.h>
|
||||
|
||||
#define get_fs() (current_thread_info()->addr_limit)
|
||||
|
||||
static inline void set_fs(mm_segment_t fs)
|
||||
{
|
||||
current_thread_info()->addr_limit = fs;
|
||||
|
||||
/*
|
||||
* Prevent a mispredicted conditional call to set_fs from forwarding
|
||||
* the wrong address limit to access_ok under speculation.
|
||||
*/
|
||||
spec_bar();
|
||||
|
||||
/* On user-mode return, check fs is correct */
|
||||
set_thread_flag(TIF_FSCHECK);
|
||||
|
||||
/*
|
||||
* Enable/disable UAO so that copy_to_user() etc can access
|
||||
* kernel memory with the unprivileged instructions.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
|
||||
else
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
|
||||
CONFIG_ARM64_UAO));
|
||||
}
|
||||
|
||||
#define uaccess_kernel() (get_fs() == KERNEL_DS)
|
||||
#define HAVE_GET_KERNEL_NOFAULT
|
||||
|
||||
/*
|
||||
* Test whether a block of memory is a valid user space address.
|
||||
* Returns 1 if the range is valid, 0 otherwise.
|
||||
*
|
||||
* This is equivalent to the following test:
|
||||
* (u65)addr + (u65)size <= (u65)current->addr_limit + 1
|
||||
* (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
|
||||
*/
|
||||
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
|
||||
{
|
||||
unsigned long ret, limit = current_thread_info()->addr_limit;
|
||||
unsigned long ret, limit = TASK_SIZE_MAX - 1;
|
||||
|
||||
/*
|
||||
* Asynchronous I/O running in a kernel thread does not have the
|
||||
|
@ -94,7 +68,6 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
|
|||
}
|
||||
|
||||
#define access_ok(addr, size) __range_ok(addr, size)
|
||||
#define user_addr_max get_fs
|
||||
|
||||
#define _ASM_EXTABLE(from, to) \
|
||||
" .pushsection __ex_table, \"a\"\n" \
|
||||
|
@ -186,47 +159,26 @@ static inline void __uaccess_enable_hw_pan(void)
|
|||
CONFIG_ARM64_PAN));
|
||||
}
|
||||
|
||||
#define __uaccess_disable(alt) \
|
||||
do { \
|
||||
if (!uaccess_ttbr0_disable()) \
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
|
||||
CONFIG_ARM64_PAN)); \
|
||||
} while (0)
|
||||
|
||||
#define __uaccess_enable(alt) \
|
||||
do { \
|
||||
if (!uaccess_ttbr0_enable()) \
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
|
||||
CONFIG_ARM64_PAN)); \
|
||||
} while (0)
|
||||
|
||||
static inline void uaccess_disable(void)
|
||||
static inline void uaccess_disable_privileged(void)
|
||||
{
|
||||
__uaccess_disable(ARM64_HAS_PAN);
|
||||
if (uaccess_ttbr0_disable())
|
||||
return;
|
||||
|
||||
__uaccess_enable_hw_pan();
|
||||
}
|
||||
|
||||
static inline void uaccess_enable(void)
|
||||
static inline void uaccess_enable_privileged(void)
|
||||
{
|
||||
__uaccess_enable(ARM64_HAS_PAN);
|
||||
if (uaccess_ttbr0_enable())
|
||||
return;
|
||||
|
||||
__uaccess_disable_hw_pan();
|
||||
}
|
||||
|
||||
/*
|
||||
* These functions are no-ops when UAO is present.
|
||||
*/
|
||||
static inline void uaccess_disable_not_uao(void)
|
||||
{
|
||||
__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
|
||||
}
|
||||
|
||||
static inline void uaccess_enable_not_uao(void)
|
||||
{
|
||||
__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
|
||||
}
|
||||
|
||||
/*
|
||||
* Sanitise a uaccess pointer such that it becomes NULL if above the
|
||||
* current addr_limit. In case the pointer is tagged (has the top byte set),
|
||||
* untag the pointer before checking.
|
||||
* Sanitise a uaccess pointer such that it becomes NULL if above the maximum
|
||||
* user address. In case the pointer is tagged (has the top byte set), untag
|
||||
* the pointer before checking.
|
||||
*/
|
||||
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
|
||||
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
|
||||
|
@ -237,7 +189,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
|
|||
" bics xzr, %3, %2\n"
|
||||
" csel %0, %1, xzr, eq\n"
|
||||
: "=&r" (safe_ptr)
|
||||
: "r" (ptr), "r" (current_thread_info()->addr_limit),
|
||||
: "r" (ptr), "r" (TASK_SIZE_MAX - 1),
|
||||
"r" (untagged_addr(ptr))
|
||||
: "cc");
|
||||
|
||||
|
@ -253,10 +205,9 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
|
|||
* The "__xxx_error" versions set the third argument to -EFAULT if an error
|
||||
* occurs, and leave it unchanged on success.
|
||||
*/
|
||||
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
|
||||
#define __get_mem_asm(load, reg, x, addr, err) \
|
||||
asm volatile( \
|
||||
"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
|
||||
alt_instr " " reg "1, [%2]\n", feature) \
|
||||
"1: " load " " reg "1, [%2]\n" \
|
||||
"2:\n" \
|
||||
" .section .fixup, \"ax\"\n" \
|
||||
" .align 2\n" \
|
||||
|
@ -268,35 +219,36 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
|
|||
: "+r" (err), "=&r" (x) \
|
||||
: "r" (addr), "i" (-EFAULT))
|
||||
|
||||
#define __raw_get_user(x, ptr, err) \
|
||||
#define __raw_get_mem(ldr, x, ptr, err) \
|
||||
do { \
|
||||
unsigned long __gu_val; \
|
||||
__chk_user_ptr(ptr); \
|
||||
uaccess_enable_not_uao(); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: \
|
||||
__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 2: \
|
||||
__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 4: \
|
||||
__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 8: \
|
||||
__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err)); \
|
||||
break; \
|
||||
default: \
|
||||
BUILD_BUG(); \
|
||||
} \
|
||||
uaccess_disable_not_uao(); \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
} while (0)
|
||||
|
||||
#define __raw_get_user(x, ptr, err) \
|
||||
do { \
|
||||
__chk_user_ptr(ptr); \
|
||||
uaccess_ttbr0_enable(); \
|
||||
__raw_get_mem("ldtr", x, ptr, err); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
} while (0)
|
||||
|
||||
#define __get_user_error(x, ptr, err) \
|
||||
do { \
|
||||
__typeof__(*(ptr)) __user *__p = (ptr); \
|
||||
|
@ -318,10 +270,19 @@ do { \
|
|||
|
||||
#define get_user __get_user
|
||||
|
||||
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
|
||||
#define __get_kernel_nofault(dst, src, type, err_label) \
|
||||
do { \
|
||||
int __gkn_err = 0; \
|
||||
\
|
||||
__raw_get_mem("ldr", *((type *)(dst)), \
|
||||
(__force type *)(src), __gkn_err); \
|
||||
if (unlikely(__gkn_err)) \
|
||||
goto err_label; \
|
||||
} while (0)
|
||||
|
||||
#define __put_mem_asm(store, reg, x, addr, err) \
|
||||
asm volatile( \
|
||||
"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
|
||||
alt_instr " " reg "1, [%2]\n", feature) \
|
||||
"1: " store " " reg "1, [%2]\n" \
|
||||
"2:\n" \
|
||||
" .section .fixup,\"ax\"\n" \
|
||||
" .align 2\n" \
|
||||
|
@ -332,32 +293,33 @@ do { \
|
|||
: "+r" (err) \
|
||||
: "r" (x), "r" (addr), "i" (-EFAULT))
|
||||
|
||||
#define __raw_put_user(x, ptr, err) \
|
||||
#define __raw_put_mem(str, x, ptr, err) \
|
||||
do { \
|
||||
__typeof__(*(ptr)) __pu_val = (x); \
|
||||
__chk_user_ptr(ptr); \
|
||||
uaccess_enable_not_uao(); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: \
|
||||
__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 2: \
|
||||
__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 4: \
|
||||
__put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__put_mem_asm(str, "%w", __pu_val, (ptr), (err)); \
|
||||
break; \
|
||||
case 8: \
|
||||
__put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
|
||||
(err), ARM64_HAS_UAO); \
|
||||
__put_mem_asm(str, "%x", __pu_val, (ptr), (err)); \
|
||||
break; \
|
||||
default: \
|
||||
BUILD_BUG(); \
|
||||
} \
|
||||
uaccess_disable_not_uao(); \
|
||||
} while (0)
|
||||
|
||||
#define __raw_put_user(x, ptr, err) \
|
||||
do { \
|
||||
__chk_user_ptr(ptr); \
|
||||
uaccess_ttbr0_enable(); \
|
||||
__raw_put_mem("sttr", x, ptr, err); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
} while (0)
|
||||
|
||||
#define __put_user_error(x, ptr, err) \
|
||||
|
@ -381,14 +343,24 @@ do { \
|
|||
|
||||
#define put_user __put_user
|
||||
|
||||
#define __put_kernel_nofault(dst, src, type, err_label) \
|
||||
do { \
|
||||
int __pkn_err = 0; \
|
||||
\
|
||||
__raw_put_mem("str", *((type *)(src)), \
|
||||
(__force type *)(dst), __pkn_err); \
|
||||
if (unlikely(__pkn_err)) \
|
||||
goto err_label; \
|
||||
} while(0)
|
||||
|
||||
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
|
||||
#define raw_copy_from_user(to, from, n) \
|
||||
({ \
|
||||
unsigned long __acfu_ret; \
|
||||
uaccess_enable_not_uao(); \
|
||||
uaccess_ttbr0_enable(); \
|
||||
__acfu_ret = __arch_copy_from_user((to), \
|
||||
__uaccess_mask_ptr(from), (n)); \
|
||||
uaccess_disable_not_uao(); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
__acfu_ret; \
|
||||
})
|
||||
|
||||
|
@ -396,10 +368,10 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
|
|||
#define raw_copy_to_user(to, from, n) \
|
||||
({ \
|
||||
unsigned long __actu_ret; \
|
||||
uaccess_enable_not_uao(); \
|
||||
uaccess_ttbr0_enable(); \
|
||||
__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to), \
|
||||
(from), (n)); \
|
||||
uaccess_disable_not_uao(); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
__actu_ret; \
|
||||
})
|
||||
|
||||
|
@ -407,10 +379,10 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
|
|||
#define raw_copy_in_user(to, from, n) \
|
||||
({ \
|
||||
unsigned long __aciu_ret; \
|
||||
uaccess_enable_not_uao(); \
|
||||
uaccess_ttbr0_enable(); \
|
||||
__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \
|
||||
__uaccess_mask_ptr(from), (n)); \
|
||||
uaccess_disable_not_uao(); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
__aciu_ret; \
|
||||
})
|
||||
|
||||
|
@ -421,9 +393,9 @@ extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned lo
|
|||
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
|
||||
{
|
||||
if (access_ok(to, n)) {
|
||||
uaccess_enable_not_uao();
|
||||
uaccess_ttbr0_enable();
|
||||
n = __arch_clear_user(__uaccess_mask_ptr(to), n);
|
||||
uaccess_disable_not_uao();
|
||||
uaccess_ttbr0_disable();
|
||||
}
|
||||
return n;
|
||||
}
|
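(Illustration, not from the patch: the __range_ok()/__uaccess_mask_ptr() hunks above replace the per-thread addr_limit with a fixed limit derived from TASK_SIZE_MAX, keeping the 65-bit overflow-safe comparison described in the comment. A stand-alone user-space model of that check, assuming a 52-bit VA space for the constant and ignoring the tagged-address handling the real code also performs:)

#include <stdbool.h>
#include <stdint.h>

#define TASK_SIZE_MAX (UINT64_C(1) << 52)	/* assumed VA size, for illustration */

/* Models "(u65)addr + (u65)size <= (u65)TASK_SIZE_MAX" without 65-bit types. */
static bool range_ok(uint64_t addr, uint64_t size)
{
	uint64_t end;

	if (__builtin_add_overflow(addr, size, &end))
		return false;	/* sum wrapped past 2^64, cannot be in range */
	return end <= TASK_SIZE_MAX;
}

(For example, range_ok(0, 16) and range_ok(TASK_SIZE_MAX, 0) pass, while range_ok(UINT64_MAX, 2) fails on the overflow check.)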
|
@ -277,7 +277,7 @@ static void __init register_insn_emulation_sysctl(void)
|
|||
|
||||
#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
|
||||
do { \
|
||||
uaccess_enable(); \
|
||||
uaccess_enable_privileged(); \
|
||||
__asm__ __volatile__( \
|
||||
" mov %w3, %w7\n" \
|
||||
"0: ldxr"B" %w2, [%4]\n" \
|
||||
|
@ -302,7 +302,7 @@ do { \
|
|||
"i" (-EFAULT), \
|
||||
"i" (__SWP_LL_SC_LOOPS) \
|
||||
: "memory"); \
|
||||
uaccess_disable(); \
|
||||
uaccess_disable_privileged(); \
|
||||
} while (0)
|
||||
|
||||
#define __user_swp_asm(data, addr, res, temp, temp2) \
|
||||
|
|
|
@ -30,7 +30,6 @@ int main(void)
|
|||
BLANK();
|
||||
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
|
||||
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
|
||||
DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
|
||||
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
|
||||
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
|
||||
#endif
|
||||
|
@ -70,7 +69,7 @@ int main(void)
|
|||
DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
|
||||
DEFINE(S_PC, offsetof(struct pt_regs, pc));
|
||||
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
|
||||
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
|
||||
DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1));
|
||||
DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
|
||||
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
|
||||
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
|
||||
|
|
|
@ -153,10 +153,6 @@ EXPORT_SYMBOL(cpu_hwcap_keys);
|
|||
.width = 0, \
|
||||
}
|
||||
|
||||
/* meta feature for alternatives */
|
||||
static bool __maybe_unused
|
||||
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
|
||||
|
||||
static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
|
||||
|
||||
static bool __system_matches_cap(unsigned int n);
|
||||
|
@ -1605,7 +1601,7 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
|
|||
WARN_ON_ONCE(in_interrupt());
|
||||
|
||||
sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
|
||||
asm(SET_PSTATE_PAN(1));
|
||||
set_pstate_pan(1);
|
||||
}
|
||||
#endif /* CONFIG_ARM64_PAN */
|
||||
|
||||
|
@ -1775,28 +1771,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
|||
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
|
||||
.matches = has_no_hw_prefetch,
|
||||
},
|
||||
#ifdef CONFIG_ARM64_UAO
|
||||
{
|
||||
.desc = "User Access Override",
|
||||
.capability = ARM64_HAS_UAO,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.matches = has_cpuid_feature,
|
||||
.sys_reg = SYS_ID_AA64MMFR2_EL1,
|
||||
.field_pos = ID_AA64MMFR2_UAO_SHIFT,
|
||||
.min_field_value = 1,
|
||||
/*
|
||||
* We rely on stop_machine() calling uao_thread_switch() to set
|
||||
* UAO immediately after patching.
|
||||
*/
|
||||
},
|
||||
#endif /* CONFIG_ARM64_UAO */
|
||||
#ifdef CONFIG_ARM64_PAN
|
||||
{
|
||||
.capability = ARM64_ALT_PAN_NOT_UAO,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.matches = cpufeature_pan_not_uao,
|
||||
},
|
||||
#endif /* CONFIG_ARM64_PAN */
|
||||
#ifdef CONFIG_ARM64_VHE
|
||||
{
|
||||
.desc = "Virtualization Host Extensions",
|
||||
|
@ -2667,7 +2641,7 @@ bool this_cpu_has_cap(unsigned int n)
|
|||
* - The SYSTEM_FEATURE cpu_hwcaps may not have been set.
|
||||
* In all other cases cpus_have_{const_}cap() should be used.
|
||||
*/
|
||||
static bool __system_matches_cap(unsigned int n)
|
||||
static bool __maybe_unused __system_matches_cap(unsigned int n)
|
||||
{
|
||||
if (n < ARM64_NCAPS) {
|
||||
const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
|
||||
|
@ -2747,12 +2721,6 @@ void __init setup_cpu_features(void)
|
|||
ARCH_DMA_MINALIGN);
|
||||
}
|
||||
|
||||
static bool __maybe_unused
|
||||
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
|
||||
{
|
||||
return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
|
||||
}
|
||||
|
||||
static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
|
||||
{
|
||||
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
|
||||
|
|
|
@ -216,12 +216,6 @@ alternative_else_nop_endif
|
|||
.else
|
||||
add x21, sp, #S_FRAME_SIZE
|
||||
get_current_task tsk
|
||||
/* Save the task's original addr_limit and set USER_DS */
|
||||
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
|
||||
str x20, [sp, #S_ORIG_ADDR_LIMIT]
|
||||
mov x20, #USER_DS
|
||||
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
|
||||
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
|
||||
.endif /* \el == 0 */
|
||||
mrs x22, elr_el1
|
||||
mrs x23, spsr_el1
|
||||
|
@ -279,12 +273,6 @@ alternative_else_nop_endif
|
|||
.macro kernel_exit, el
|
||||
.if \el != 0
|
||||
disable_daif
|
||||
|
||||
/* Restore the task's original addr_limit. */
|
||||
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
|
||||
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
|
||||
|
||||
/* No need to restore UAO, it will be restored from SPSR_EL1 */
|
||||
.endif
|
||||
|
||||
/* Restore pmr */
|
||||
|
@ -999,10 +987,9 @@ SYM_CODE_START(__sdei_asm_entry_trampoline)
|
|||
mov x4, xzr
|
||||
|
||||
/*
|
||||
* Use reg->interrupted_regs.addr_limit to remember whether to unmap
|
||||
* the kernel on exit.
|
||||
* Remember whether to unmap the kernel on exit.
|
||||
*/
|
||||
1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
|
||||
1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
|
||||
|
||||
#ifdef CONFIG_RANDOMIZE_BASE
|
||||
adr x4, tramp_vectors + PAGE_SIZE
|
||||
|
@ -1023,7 +1010,7 @@ NOKPROBE(__sdei_asm_entry_trampoline)
|
|||
* x4: struct sdei_registered_event argument from registration time.
|
||||
*/
|
||||
SYM_CODE_START(__sdei_asm_exit_trampoline)
|
||||
ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
|
||||
ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
|
||||
cbnz x4, 1f
|
||||
|
||||
tramp_unmap_kernel tmp=x4
|
||||
|
|
|
@ -89,7 +89,7 @@
|
|||
*/
|
||||
SYM_CODE_START(primary_entry)
|
||||
bl preserve_boot_args
|
||||
bl el2_setup // Drop to EL1, w0=cpu_boot_mode
|
||||
bl init_kernel_el // w0=cpu_boot_mode
|
||||
adrp x23, __PHYS_OFFSET
|
||||
and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0
|
||||
bl set_cpu_boot_mode_flag
|
||||
|
@ -467,24 +467,33 @@ EXPORT_SYMBOL(kimage_vaddr)
|
|||
.section ".idmap.text","awx"
|
||||
|
||||
/*
|
||||
* If we're fortunate enough to boot at EL2, ensure that the world is
|
||||
* sane before dropping to EL1.
|
||||
* Starting from EL2 or EL1, configure the CPU to execute at the highest
|
||||
* reachable EL supported by the kernel in a chosen default state. If dropping
|
||||
* from EL2 to EL1, configure EL2 before configuring EL1.
|
||||
*
|
||||
* Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
|
||||
* SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
|
||||
*
|
||||
* Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
|
||||
* booted in EL1 or EL2 respectively.
|
||||
*/
|
||||
SYM_FUNC_START(el2_setup)
|
||||
msr SPsel, #1 // We want to use SP_EL{1,2}
|
||||
SYM_FUNC_START(init_kernel_el)
|
||||
mrs x0, CurrentEL
|
||||
cmp x0, #CurrentEL_EL2
|
||||
b.eq 1f
|
||||
mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
|
||||
msr sctlr_el1, x0
|
||||
mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
|
||||
isb
|
||||
ret
|
||||
b.eq init_el2
|
||||
|
||||
1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
|
||||
SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
|
||||
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
|
||||
msr sctlr_el1, x0
|
||||
isb
|
||||
mov_q x0, INIT_PSTATE_EL1
|
||||
msr spsr_el1, x0
|
||||
msr elr_el1, lr
|
||||
mov w0, #BOOT_CPU_MODE_EL1
|
||||
eret
|
||||
|
||||
SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
|
||||
mov_q x0, INIT_SCTLR_EL2_MMU_OFF
|
||||
msr sctlr_el2, x0
|
||||
|
||||
#ifdef CONFIG_ARM64_VHE
|
||||
|
@ -593,9 +602,12 @@ set_hcr:
|
|||
|
||||
cbz x2, install_el2_stub
|
||||
|
||||
mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
|
||||
isb
|
||||
ret
|
||||
mov_q x0, INIT_PSTATE_EL2
|
||||
msr spsr_el2, x0
|
||||
msr elr_el2, lr
|
||||
mov w0, #BOOT_CPU_MODE_EL2
|
||||
eret
|
||||
|
||||
SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
|
||||
/*
|
||||
|
@ -605,7 +617,7 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
|
|||
* requires no configuration, and all non-hyp-specific EL2 setup
|
||||
* will be done via the _EL1 system register aliases in __cpu_setup.
|
||||
*/
|
||||
mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
|
||||
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
|
||||
msr sctlr_el1, x0
|
||||
|
||||
/* Coprocessor traps. */
|
||||
|
@ -627,14 +639,13 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
|
|||
7: adr_l x0, __hyp_stub_vectors
|
||||
msr vbar_el2, x0
|
||||
|
||||
/* spsr */
|
||||
mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
|
||||
PSR_MODE_EL1h)
|
||||
isb
|
||||
mov x0, #INIT_PSTATE_EL1
|
||||
msr spsr_el2, x0
|
||||
msr elr_el2, lr
|
||||
mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
|
||||
mov w0, #BOOT_CPU_MODE_EL2
|
||||
eret
|
||||
SYM_FUNC_END(el2_setup)
|
||||
SYM_FUNC_END(init_kernel_el)
|
||||
|
||||
/*
|
||||
* Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
|
||||
|
@ -684,7 +695,7 @@ SYM_DATA_END(__early_cpu_boot_status)
|
|||
* cores are held until we're ready for them to initialise.
|
||||
*/
|
||||
SYM_FUNC_START(secondary_holding_pen)
|
||||
bl el2_setup // Drop to EL1, w0=cpu_boot_mode
|
||||
bl init_kernel_el // w0=cpu_boot_mode
|
||||
bl set_cpu_boot_mode_flag
|
||||
mrs x0, mpidr_el1
|
||||
mov_q x1, MPIDR_HWID_BITMASK
|
||||
|
@ -702,7 +713,7 @@ SYM_FUNC_END(secondary_holding_pen)
|
|||
* be used where CPUs are brought online dynamically by the kernel.
|
||||
*/
|
||||
SYM_FUNC_START(secondary_entry)
|
||||
bl el2_setup // Drop to EL1
|
||||
bl init_kernel_el // w0=cpu_boot_mode
|
||||
bl set_cpu_boot_mode_flag
|
||||
b secondary_startup
|
||||
SYM_FUNC_END(secondary_entry)
|
||||
|
|
|
@ -422,16 +422,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
|
|||
if (clone_flags & CLONE_SETTLS)
|
||||
p->thread.uw.tp_value = tls;
|
||||
} else {
|
||||
/*
|
||||
* A kthread has no context to ERET to, so ensure any buggy
|
||||
* ERET is treated as an illegal exception return.
|
||||
*
|
||||
* When a user task is created from a kthread, childregs will
|
||||
* be initialized by start_thread() or start_compat_thread().
|
||||
*/
|
||||
memset(childregs, 0, sizeof(struct pt_regs));
|
||||
childregs->pstate = PSR_MODE_EL1h;
|
||||
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
|
||||
cpus_have_const_cap(ARM64_HAS_UAO))
|
||||
childregs->pstate |= PSR_UAO_BIT;
|
||||
|
||||
spectre_v4_enable_task_mitigation(p);
|
||||
|
||||
if (system_uses_irq_prio_masking())
|
||||
childregs->pmr_save = GIC_PRIO_IRQON;
|
||||
childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
|
||||
|
||||
p->thread.cpu_context.x19 = stack_start;
|
||||
p->thread.cpu_context.x20 = stk_sz;
|
||||
|
@ -461,17 +460,6 @@ static void tls_thread_switch(struct task_struct *next)
|
|||
write_sysreg(*task_user_tls(next), tpidr_el0);
|
||||
}
|
||||
|
||||
/* Restore the UAO state depending on next's addr_limit */
|
||||
void uao_thread_switch(struct task_struct *next)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_ARM64_UAO)) {
|
||||
if (task_thread_info(next)->addr_limit == KERNEL_DS)
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
|
||||
else
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Force SSBS state on context-switch, since it may be lost after migrating
|
||||
* from a CPU which treats the bit as RES0 in a heterogeneous system.
|
||||
|
@ -555,7 +543,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
|
|||
hw_breakpoint_thread_switch(next);
|
||||
contextidr_thread_switch(next);
|
||||
entry_task_switch(next);
|
||||
uao_thread_switch(next);
|
||||
ssbs_thread_switch(next);
|
||||
erratum_1418040_thread_switch(prev, next);
|
||||
|
||||
|
|
|
@ -538,12 +538,12 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
|
|||
|
||||
if (spectre_v4_mitigations_off()) {
|
||||
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
|
||||
asm volatile(SET_PSTATE_SSBS(1));
|
||||
set_pstate_ssbs(1);
|
||||
return SPECTRE_VULNERABLE;
|
||||
}
|
||||
|
||||
/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
|
||||
asm volatile(SET_PSTATE_SSBS(0));
|
||||
set_pstate_ssbs(0);
|
||||
return SPECTRE_MITIGATED;
|
||||
}
|
||||
|
||||
|
|
|
@ -178,12 +178,6 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
|
|||
sdei_api_event_context(i, &regs->regs[i]);
|
||||
}
|
||||
|
||||
/*
|
||||
* We didn't take an exception to get here, set PAN. UAO will be cleared
|
||||
* by sdei_event_handler()s force_uaccess_begin() call.
|
||||
*/
|
||||
__uaccess_enable_hw_pan();
|
||||
|
||||
err = sdei_event_handler(regs, arg);
|
||||
if (err)
|
||||
return SDEI_EV_FAILED;
|
||||
|
@ -222,12 +216,39 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
|
|||
return vbar + 0x480;
|
||||
}
|
||||
|
||||
static void __kprobes notrace __sdei_pstate_entry(void)
|
||||
{
|
||||
/*
|
||||
* The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
|
||||
* whether PSTATE bits are inherited unchanged or generated from
|
||||
* scratch, and the TF-A implementation always clears PAN and always
|
||||
* clears UAO. There are no other known implementations.
|
||||
*
|
||||
* Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
|
||||
* PSTATE is modified upon architectural exceptions, and so PAN is
|
||||
* either inherited or set per SCTLR_ELx.SPAN, and UAO is always
|
||||
* cleared.
|
||||
*
|
||||
* We must explicitly reset PAN to the expected state, including
|
||||
* clearing it when the host isn't using it, in case a VM had it set.
|
||||
*/
|
||||
if (system_uses_hw_pan())
|
||||
set_pstate_pan(1);
|
||||
else if (cpu_has_pan())
|
||||
set_pstate_pan(0);
|
||||
}
|
||||
|
||||
asmlinkage __kprobes notrace unsigned long
|
||||
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
/*
|
||||
* We didn't take an exception to get here, so the HW hasn't
|
||||
* set/cleared bits in PSTATE that we may rely on. Initialize PAN.
|
||||
*/
|
||||
__sdei_pstate_entry();
|
||||
|
||||
nmi_enter();
|
||||
|
||||
ret = _sdei_handler(regs, arg);
|
||||
|
|
|
@ -922,9 +922,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
|
|||
trace_hardirqs_off();
|
||||
|
||||
do {
|
||||
/* Check valid user FS if needed */
|
||||
addr_limit_user_check();
|
||||
|
||||
if (thread_flags & _TIF_NEED_RESCHED) {
|
||||
/* Unmask Debug and SError for the next task */
|
||||
local_daif_restore(DAIF_PROCCTX_NOIRQ);
|
||||
|
|
|
@ -99,7 +99,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
|
|||
|
||||
.pushsection ".idmap.text", "awx"
|
||||
SYM_CODE_START(cpu_resume)
|
||||
bl el2_setup // if in EL2 drop to EL1 cleanly
|
||||
bl init_kernel_el
|
||||
bl __cpu_setup
|
||||
/* enable the MMU early - so we can access sleep_save_stash by va */
|
||||
adrp x1, swapper_pg_dir
|
||||
|
|
|
@ -58,7 +58,6 @@ void notrace __cpu_suspend_exit(void)
|
|||
* features that might not have been set correctly.
|
||||
*/
|
||||
__uaccess_enable_hw_pan();
|
||||
uao_thread_switch(current);
|
||||
|
||||
/*
|
||||
* Restore HW breakpoint registers to sane values
|
||||
|
|
|
@ -24,20 +24,20 @@ SYM_FUNC_START(__arch_clear_user)
|
|||
subs x1, x1, #8
|
||||
b.mi 2f
|
||||
1:
|
||||
uao_user_alternative 9f, str, sttr, xzr, x0, 8
|
||||
user_ldst 9f, sttr, xzr, x0, 8
|
||||
subs x1, x1, #8
|
||||
b.pl 1b
|
||||
2: adds x1, x1, #4
|
||||
b.mi 3f
|
||||
uao_user_alternative 9f, str, sttr, wzr, x0, 4
|
||||
user_ldst 9f, sttr, wzr, x0, 4
|
||||
sub x1, x1, #4
|
||||
3: adds x1, x1, #2
|
||||
b.mi 4f
|
||||
uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
|
||||
user_ldst 9f, sttrh, wzr, x0, 2
|
||||
sub x1, x1, #2
|
||||
4: adds x1, x1, #1
|
||||
b.mi 5f
|
||||
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
|
||||
user_ldst 9f, sttrb, wzr, x0, 0
|
||||
5: mov x0, #0
|
||||
ret
|
||||
SYM_FUNC_END(__arch_clear_user)
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
*/
|
||||
|
||||
.macro ldrb1 reg, ptr, val
|
||||
uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val
|
||||
user_ldst 9998f, ldtrb, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro strb1 reg, ptr, val
|
||||
|
@ -29,7 +29,7 @@
|
|||
.endm
|
||||
|
||||
.macro ldrh1 reg, ptr, val
|
||||
uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val
|
||||
user_ldst 9998f, ldtrh, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro strh1 reg, ptr, val
|
||||
|
@ -37,7 +37,7 @@
|
|||
.endm
|
||||
|
||||
.macro ldr1 reg, ptr, val
|
||||
uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val
|
||||
user_ldst 9998f, ldtr, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro str1 reg, ptr, val
|
||||
|
@ -45,7 +45,7 @@
|
|||
.endm
|
||||
|
||||
.macro ldp1 reg1, reg2, ptr, val
|
||||
uao_ldp 9998f, \reg1, \reg2, \ptr, \val
|
||||
user_ldp 9998f, \reg1, \reg2, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro stp1 reg1, reg2, ptr, val
|
||||
|
|
|
@ -22,35 +22,35 @@
|
|||
* x0 - bytes not copied
|
||||
*/
|
||||
.macro ldrb1 reg, ptr, val
|
||||
uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val
|
||||
user_ldst 9998f, ldtrb, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro strb1 reg, ptr, val
|
||||
uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val
|
||||
user_ldst 9998f, sttrb, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro ldrh1 reg, ptr, val
|
||||
uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val
|
||||
user_ldst 9998f, ldtrh, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro strh1 reg, ptr, val
|
||||
uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val
|
||||
user_ldst 9998f, sttrh, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro ldr1 reg, ptr, val
|
||||
uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val
|
||||
user_ldst 9998f, ldtr, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro str1 reg, ptr, val
|
||||
uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val
|
||||
user_ldst 9998f, sttr, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro ldp1 reg1, reg2, ptr, val
|
||||
uao_ldp 9998f, \reg1, \reg2, \ptr, \val
|
||||
user_ldp 9998f, \reg1, \reg2, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro stp1 reg1, reg2, ptr, val
|
||||
uao_stp 9998f, \reg1, \reg2, \ptr, \val
|
||||
user_stp 9998f, \reg1, \reg2, \ptr, \val
|
||||
.endm
|
||||
|
||||
end .req x5
|
||||
|
|
|
@ -24,7 +24,7 @@
|
|||
.endm
|
||||
|
||||
.macro strb1 reg, ptr, val
|
||||
uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val
|
||||
user_ldst 9998f, sttrb, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro ldrh1 reg, ptr, val
|
||||
|
@ -32,7 +32,7 @@
|
|||
.endm
|
||||
|
||||
.macro strh1 reg, ptr, val
|
||||
uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val
|
||||
user_ldst 9998f, sttrh, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro ldr1 reg, ptr, val
|
||||
|
@ -40,7 +40,7 @@
|
|||
.endm
|
||||
|
||||
.macro str1 reg, ptr, val
|
||||
uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val
|
||||
user_ldst 9998f, sttr, \reg, \ptr, \val
|
||||
.endm
|
||||
|
||||
.macro ldp1 reg1, reg2, ptr, val
|
||||
|
@ -48,7 +48,7 @@
|
|||
.endm
|
||||
|
||||
.macro stp1 reg1, reg2, ptr, val
|
||||
uao_stp 9998f, \reg1, \reg2, \ptr, \val
|
||||
user_stp 9998f, \reg1, \reg2, \ptr, \val
|
||||
.endm
|
||||
|
||||
end .req x5
|
||||
|
|
|
@ -67,7 +67,7 @@ SYM_FUNC_START(mte_copy_tags_from_user)
|
|||
mov x3, x1
|
||||
cbz x2, 2f
|
||||
1:
|
||||
uao_user_alternative 2f, ldrb, ldtrb, w4, x1, 0
|
||||
user_ldst 2f, ldtrb, w4, x1, 0
|
||||
lsl x4, x4, #MTE_TAG_SHIFT
|
||||
stg x4, [x0], #MTE_GRANULE_SIZE
|
||||
add x1, x1, #1
|
||||
|
@ -94,7 +94,7 @@ SYM_FUNC_START(mte_copy_tags_to_user)
|
|||
1:
|
||||
ldg x4, [x1]
|
||||
ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE
|
||||
uao_user_alternative 2f, strb, sttrb, w4, x0, 0
|
||||
user_ldst 2f, sttrb, w4, x0, 0
|
||||
add x0, x0, #1
|
||||
add x1, x1, #MTE_GRANULE_SIZE
|
||||
subs x2, x2, #1
|
||||
|
|
|
@ -30,9 +30,7 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from,
|
|||
{
|
||||
unsigned long rc;
|
||||
|
||||
uaccess_enable_not_uao();
|
||||
rc = __arch_copy_from_user(to, from, n);
|
||||
uaccess_disable_not_uao();
|
||||
rc = raw_copy_from_user(to, from, n);
|
||||
|
||||
/* See above */
|
||||
__clean_dcache_area_pop(to, n - rc);
|
||||
|
|
|
@ -482,11 +482,6 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
|
|||
}
|
||||
|
||||
if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
|
||||
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
|
||||
if (regs->orig_addr_limit == KERNEL_DS)
|
||||
die_kernel_fault("access to user memory with fs=KERNEL_DS",
|
||||
addr, esr, regs);
|
||||
|
||||
if (is_el1_instruction_abort(esr))
|
||||
die_kernel_fault("execution of user memory",
|
||||
addr, esr, regs);
|
||||
|
|
|
@ -489,6 +489,6 @@ SYM_FUNC_START(__cpu_setup)
|
|||
/*
|
||||
* Prepare SCTLR
|
||||
*/
|
||||
mov_q x0, SCTLR_EL1_SET
|
||||
mov_q x0, INIT_SCTLR_EL1_MMU_ON
|
||||
ret // return to head.S
|
||||
SYM_FUNC_END(__cpu_setup)
|
||||
|
|
|
@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

/*
* The call to use to reach the firmware.

@ -1092,26 +1091,13 @@ int sdei_event_handler(struct pt_regs *regs,
struct sdei_registered_event *arg)
{
int err;
mm_segment_t orig_addr_limit;
u32 event_num = arg->event_num;

/*
* Save restore 'fs'.
* The architecture's entry code save/restores 'fs' when taking an
* exception from the kernel. This ensures addr_limit isn't inherited
* if you interrupted something that allowed the uaccess routines to
* access kernel memory.
* Do the same here because this doesn't come via the same entry code.
*/
orig_addr_limit = force_uaccess_begin();

err = arg->callback(event_num, regs, arg->callback_arg);
if (err)
pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
event_num, smp_processor_id(), err);

force_uaccess_end(orig_addr_limit);

return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);
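(For reference, reconstructed from the two hunks above rather than copied from the resulting file: with addr_limit gone there is nothing for sdei_event_handler() to save and restore, so after this change it reduces to roughly the following.)

int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	u32 event_num = arg->event_num;

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	return err;
}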