Merge branch 'for-next/mte-async-kernel-mode' into for-next/core

* for-next/mte-async-kernel-mode:
  : Add MTE asynchronous kernel mode support
  kasan, arm64: tests supports for HW_TAGS async mode
  arm64: mte: Report async tag faults before suspend
  arm64: mte: Enable async tag check fault
  arm64: mte: Conditionally compile mte_enable_kernel_*()
  arm64: mte: Enable TCO in functions that can read beyond buffer limits
  kasan: Add report for async mode
  arm64: mte: Drop arch_enable_tagging()
  kasan: Add KASAN mode kernel parameter
  arm64: mte: Add asynchronous mode support
Catalin Marinas 2021-04-15 14:00:47 +01:00
Parents a1e1eddef2 e80a76aa1a
Commit 604df13d7a
14 changed files with 329 additions and 23 deletions

View file

@@ -161,6 +161,15 @@ particular KASAN features.
- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
- ``kasan.mode=sync`` or ``=async`` controls whether KASAN is configured in
synchronous or asynchronous mode of execution (default: ``sync``).
Synchronous mode: a bad access is detected immediately when a tag
check fault occurs.
Asynchronous mode: a bad access detection is delayed. When a tag check
fault occurs, the information is stored in hardware (in the TFSR_EL1
register for arm64). The kernel periodically checks the hardware and
only reports tag faults during these checks.
- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
traces collection (default: ``on``).
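
As an illustration (not part of the documented text itself), a boot command line such as ``kasan=on kasan.mode=async`` selects the new asynchronous checking, while passing ``kasan.mode=sync`` or omitting the parameter keeps the default synchronous behaviour described above.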

View file

@@ -243,8 +243,10 @@ static inline const void *__tag_set(const void *addr, u8 tag)
}
#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging() mte_enable_kernel()
#define arch_enable_tagging_sync() mte_enable_kernel_sync()
#define arch_enable_tagging_async() mte_enable_kernel_async()
#define arch_set_tagging_report_once(state) mte_set_report_once(state)
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
#define arch_init_tags(max_tag) mte_init_tags(max_tag)
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)

View file

@@ -77,7 +77,8 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
} while (curr != end);
}
void mte_enable_kernel(void);
void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
void mte_init_tags(u64 max_tag);
void mte_set_report_once(bool state);
@@ -104,7 +105,11 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
}
static inline void mte_enable_kernel(void)
static inline void mte_enable_kernel_sync(void)
{
}
static inline void mte_enable_kernel_async(void)
{
}

View file

@@ -41,6 +41,7 @@ void mte_sync_tags(pte_t *ptep, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void flush_mte_state(void);
void mte_thread_switch(struct task_struct *next);
void mte_suspend_enter(void);
void mte_suspend_exit(void);
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
@@ -64,6 +65,9 @@ static inline void flush_mte_state(void)
static inline void mte_thread_switch(struct task_struct *next)
{
}
static inline void mte_suspend_enter(void)
{
}
static inline void mte_suspend_exit(void)
{
}
@@ -84,5 +88,49 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
#endif /* CONFIG_ARM64_MTE */
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DECLARE_STATIC_KEY_FALSE(mte_async_mode);
static inline bool system_uses_mte_async_mode(void)
{
return static_branch_unlikely(&mte_async_mode);
}
void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void)
{
mte_check_tfsr_el1();
}
static inline void mte_check_tfsr_exit(void)
{
/*
* The asynchronous faults are sync'ed automatically with
* TFSR_EL1 on kernel entry but for exit an explicit dsb()
* is required.
*/
dsb(nsh);
isb();
mte_check_tfsr_el1();
}
#else
static inline bool system_uses_mte_async_mode(void)
{
return false;
}
static inline void mte_check_tfsr_el1(void)
{
}
static inline void mte_check_tfsr_entry(void)
{
}
static inline void mte_check_tfsr_exit(void)
{
}
#endif /* CONFIG_KASAN_HW_TAGS */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_MTE_H */

View file

@@ -20,6 +20,7 @@
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>
@@ -188,6 +189,23 @@ static inline void __uaccess_enable_tco(void)
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
/*
* These functions disable tag checking only if in MTE async mode
* since the sync mode generates exceptions synchronously and the
* nofault or load_unaligned_zeropad can handle them.
*/
static inline void __uaccess_disable_tco_async(void)
{
if (system_uses_mte_async_mode())
__uaccess_disable_tco();
}
static inline void __uaccess_enable_tco_async(void)
{
if (system_uses_mte_async_mode())
__uaccess_enable_tco();
}
static inline void uaccess_disable_privileged(void)
{
__uaccess_disable_tco();
@@ -307,8 +325,10 @@ do { \
do { \
int __gkn_err = 0; \
\
__uaccess_enable_tco_async(); \
__raw_get_mem("ldr", *((type *)(dst)), \
(__force type *)(src), __gkn_err); \
__uaccess_disable_tco_async(); \
if (unlikely(__gkn_err)) \
goto err_label; \
} while (0)
@@ -380,8 +400,10 @@ do { \
do { \
int __pkn_err = 0; \
\
__uaccess_enable_tco_async(); \
__raw_put_mem("str", *((type *)(src)), \
(__force type *)(dst), __pkn_err); \
__uaccess_disable_tco_async(); \
if (unlikely(__pkn_err)) \
goto err_label; \
} while(0)
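
As a sketch of why the TCO toggling above matters (illustrative caller, not part of this diff): a read issued through the generic get_kernel_nofault() interface, which on arm64 is expected to land in the __get_kernel_nofault() macro patched here, may dereference a pointer whose tag does not match its memory, and in async mode such a probe must not latch a spurious fault in TFSR_EL1.

#include <linux/uaccess.h>

/*
 * Illustrative helper, not part of the kernel tree: best-effort read of
 * a kernel word that may be unmapped or carry a mismatched MTE tag.
 * A failed access is reported via the return value; with the TCO
 * handling above it does not leave a pending asynchronous tag fault.
 */
static bool peek_kernel_word(const void *addr, unsigned long *val)
{
	return get_kernel_nofault(*val, (const unsigned long *)addr) == 0;
}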

View file

@@ -55,6 +55,8 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret, offset;
__uaccess_enable_tco_async();
/* Load word from unaligned pointer addr */
asm(
"1: ldr %0, %3\n"
@@ -76,6 +78,8 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
: "=&r" (ret), "=&r" (offset)
: "r" (addr), "Q" (*(unsigned long *)addr));
__uaccess_disable_tco_async();
return ret;
}

View file

@@ -37,6 +37,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
lockdep_hardirqs_off(CALLER_ADDR0);
rcu_irq_enter_check_tick();
trace_hardirqs_off_finish();
mte_check_tfsr_entry();
}
/*
@@ -47,6 +49,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
lockdep_assert_irqs_disabled();
mte_check_tfsr_exit();
if (interrupts_enabled(regs)) {
if (regs->exit_rcu) {
trace_hardirqs_on_prepare();
@@ -293,6 +297,8 @@ asmlinkage void noinstr enter_from_user_mode(void)
asmlinkage void noinstr exit_to_user_mode(void)
{
mte_check_tfsr_exit();
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
user_enter_irqoff();

View file

@@ -26,6 +26,12 @@ u64 gcr_kernel_excl __ro_after_init;
static bool report_fault_once = true;
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DEFINE_STATIC_KEY_FALSE(mte_async_mode);
EXPORT_SYMBOL_GPL(mte_async_mode);
#endif
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
pte_t old_pte = READ_ONCE(*ptep);
@@ -107,13 +113,45 @@ void mte_init_tags(u64 max_tag)
write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
}
void mte_enable_kernel(void)
static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
/* Enable MTE Sync Mode for EL1. */
sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
isb();
pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
}
#ifdef CONFIG_KASAN_HW_TAGS
void mte_enable_kernel_sync(void)
{
/*
* Make sure we enter this function when no PE has set
* async mode previously.
*/
WARN_ONCE(system_uses_mte_async_mode(),
"MTE async mode enabled system wide!");
__mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
}
void mte_enable_kernel_async(void)
{
__mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);
/*
* MTE async mode is set system wide by the first PE that
* executes this function.
*
* Note: If in future KASAN acquires a runtime switching
* mode in between sync and async, this strategy needs
* to be reviewed.
*/
if (!system_uses_mte_async_mode())
static_branch_enable(&mte_async_mode);
}
#endif
void mte_set_report_once(bool state)
{
WRITE_ONCE(report_fault_once, state);
@@ -124,6 +162,29 @@ bool mte_report_once(void)
return READ_ONCE(report_fault_once);
}
#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
u64 tfsr_el1;
if (!system_supports_mte())
return;
tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
/*
* Note: isb() is not required after this direct write
* because there is no indirect read subsequent to it
* (per ARM DDI 0487F.c table D13-1).
*/
write_sysreg_s(0, SYS_TFSR_EL1);
kasan_report_async();
}
}
#endif
static void update_sctlr_el1_tcf0(u64 tcf0)
{
/* ISB required for the kernel uaccess routines */
@@ -189,6 +250,35 @@ void mte_thread_switch(struct task_struct *next)
/* avoid expensive SCTLR_EL1 accesses if no change */
if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
else
isb();
/*
* Check if an async tag exception occurred at EL1.
*
* Note: On the context switch path we rely on the dsb() present
* in __switch_to() to guarantee that the indirect writes to TFSR_EL1
* are synchronized before this point.
* isb() above is required for the same reason.
*
*/
mte_check_tfsr_el1();
}
void mte_suspend_enter(void)
{
if (!system_supports_mte())
return;
/*
* The barriers are required to guarantee that the indirect writes
* to TFSR_EL1 are synchronized before we report the state.
*/
dsb(nsh);
isb();
/* Report SYS_TFSR_EL1 before suspend entry */
mte_check_tfsr_el1();
}
void mte_suspend_exit(void)

View file

@@ -91,6 +91,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
unsigned long flags;
struct sleep_stack_data state;
/* Report any MTE async fault before going to suspend */
mte_suspend_enter();
/*
* From this point debug exceptions are disabled to prevent
* updates to mdscr register (saved and restored along with

View file

@@ -376,6 +376,12 @@ static inline void *kasan_reset_tag(const void *addr)
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/
#ifdef CONFIG_KASAN_HW_TAGS
void kasan_report_async(void);
#endif /* CONFIG_KASAN_HW_TAGS */
#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else

View file

@@ -69,10 +69,10 @@ static void kasan_test_exit(struct kunit *test)
* resource named "kasan_data". Do not use this name for KUnit resources
* outside of KASAN tests.
*
* For hardware tag-based KASAN, when a tag fault happens, tag checking is
* normally auto-disabled. When this happens, this test handler reenables
* tag checking. As tag checking can be only disabled or enabled per CPU, this
* handler disables migration (preemption).
* For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
* checking is auto-disabled. When this happens, this test handler reenables
* tag checking. As tag checking can be only disabled or enabled per CPU,
* this handler disables migration (preemption).
*
* Since the compiler doesn't see that the expression can change the fail_data
* fields, it can reorder or optimize away the accesses to those fields.
@@ -80,7 +80,8 @@ static void kasan_test_exit(struct kunit *test)
* expression to prevent that.
*/
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
!kasan_async_mode_enabled()) \
migrate_disable(); \
WRITE_ONCE(fail_data.report_expected, true); \
WRITE_ONCE(fail_data.report_found, false); \
@@ -92,12 +93,16 @@ static void kasan_test_exit(struct kunit *test)
barrier(); \
expression; \
barrier(); \
if (kasan_async_mode_enabled()) \
kasan_force_async_fault(); \
barrier(); \
KUNIT_EXPECT_EQ(test, \
READ_ONCE(fail_data.report_expected), \
READ_ONCE(fail_data.report_found)); \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
!kasan_async_mode_enabled()) { \
if (READ_ONCE(fail_data.report_found)) \
kasan_enable_tagging(); \
kasan_enable_tagging_sync(); \
migrate_enable(); \
} \
} while (0)
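
For reference, a minimal sketch of a test case built on this macro (the body is illustrative, not taken from this diff); with kasan.mode=async the out-of-bounds write below is only surfaced once the macro calls kasan_force_async_fault() and then compares report_expected with report_found.

#include <kunit/test.h>
#include <linux/slab.h>

/*
 * Illustrative test case: write one byte past a 128-byte allocation.
 * KASAN is expected to flag it in both sync and async configurations.
 */
static void kmalloc_oob_example(struct kunit *test)
{
	size_t size = 128;
	char *ptr;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	kfree(ptr);
}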

View file

@@ -25,6 +25,12 @@ enum kasan_arg {
KASAN_ARG_ON,
};
enum kasan_arg_mode {
KASAN_ARG_MODE_DEFAULT,
KASAN_ARG_MODE_SYNC,
KASAN_ARG_MODE_ASYNC,
};
enum kasan_arg_stacktrace {
KASAN_ARG_STACKTRACE_DEFAULT,
KASAN_ARG_STACKTRACE_OFF,
@@ -38,6 +44,7 @@ enum kasan_arg_fault {
};
static enum kasan_arg kasan_arg __ro_after_init;
static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
static enum kasan_arg_fault kasan_arg_fault __ro_after_init;
@@ -45,6 +52,10 @@ static enum kasan_arg_fault kasan_arg_fault __ro_after_init;
DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
EXPORT_SYMBOL(kasan_flag_enabled);
/* Whether the asynchronous mode is enabled. */
bool kasan_flag_async __ro_after_init;
EXPORT_SYMBOL_GPL(kasan_flag_async);
/* Whether to collect alloc/free stack traces. */
DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
@@ -68,6 +79,23 @@ static int __init early_kasan_flag(char *arg)
}
early_param("kasan", early_kasan_flag);
/* kasan.mode=sync/async */
static int __init early_kasan_mode(char *arg)
{
if (!arg)
return -EINVAL;
if (!strcmp(arg, "sync"))
kasan_arg_mode = KASAN_ARG_MODE_SYNC;
else if (!strcmp(arg, "async"))
kasan_arg_mode = KASAN_ARG_MODE_ASYNC;
else
return -EINVAL;
return 0;
}
early_param("kasan.mode", early_kasan_mode);
/* kasan.stacktrace=off/on */
static int __init early_kasan_flag_stacktrace(char *arg)
{
@@ -115,7 +143,15 @@ void kasan_init_hw_tags_cpu(void)
return;
hw_init_tags(KASAN_TAG_MAX);
hw_enable_tagging();
/*
* Enable async mode only when explicitly requested through
* the command line.
*/
if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
hw_enable_tagging_async();
else
hw_enable_tagging_sync();
}
/* kasan_init_hw_tags() is called once on boot CPU. */
@@ -132,6 +168,22 @@ void __init kasan_init_hw_tags(void)
/* Enable KASAN. */
static_branch_enable(&kasan_flag_enabled);
switch (kasan_arg_mode) {
case KASAN_ARG_MODE_DEFAULT:
/*
* Default to sync mode.
* Do nothing, kasan_flag_async keeps its default value.
*/
break;
case KASAN_ARG_MODE_SYNC:
/* Do nothing, kasan_flag_async keeps its default value. */
break;
case KASAN_ARG_MODE_ASYNC:
/* Async mode enabled. */
kasan_flag_async = true;
break;
}
switch (kasan_arg_stacktrace) {
case KASAN_ARG_STACKTRACE_DEFAULT:
/* Default to enabling stack trace collection. */
@@ -194,10 +246,16 @@ void kasan_set_tagging_report_once(bool state)
}
EXPORT_SYMBOL_GPL(kasan_set_tagging_report_once);
void kasan_enable_tagging(void)
void kasan_enable_tagging_sync(void)
{
hw_enable_tagging();
hw_enable_tagging_sync();
}
EXPORT_SYMBOL_GPL(kasan_enable_tagging);
EXPORT_SYMBOL_GPL(kasan_enable_tagging_sync);
void kasan_force_async_fault(void)
{
hw_force_async_tag_fault();
}
EXPORT_SYMBOL_GPL(kasan_force_async_fault);
#endif

View file

@@ -7,20 +7,37 @@
#include <linux/stackdepot.h>
#ifdef CONFIG_KASAN_HW_TAGS
#include <linux/static_key.h>
DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
extern bool kasan_flag_async __ro_after_init;
static inline bool kasan_stack_collection_enabled(void)
{
return static_branch_unlikely(&kasan_flag_stacktrace);
}
static inline bool kasan_async_mode_enabled(void)
{
return kasan_flag_async;
}
#else
static inline bool kasan_stack_collection_enabled(void)
{
return true;
}
static inline bool kasan_async_mode_enabled(void)
{
return false;
}
#endif
extern bool kasan_flag_panic __ro_after_init;
extern bool kasan_flag_async __ro_after_init;
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
@@ -275,8 +292,11 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#ifdef CONFIG_KASAN_HW_TAGS
#ifndef arch_enable_tagging
#define arch_enable_tagging()
#ifndef arch_enable_tagging_sync
#define arch_enable_tagging_sync()
#endif
#ifndef arch_enable_tagging_async
#define arch_enable_tagging_async()
#endif
#ifndef arch_init_tags
#define arch_init_tags(max_tag)
@@ -284,6 +304,9 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#ifndef arch_set_tagging_report_once
#define arch_set_tagging_report_once(state)
#endif
#ifndef arch_force_async_tag_fault
#define arch_force_async_tag_fault()
#endif
#ifndef arch_get_random_tag
#define arch_get_random_tag() (0xFF)
#endif
@@ -294,16 +317,19 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#define arch_set_mem_tag_range(addr, size, tag) ((void *)(addr))
#endif
#define hw_enable_tagging() arch_enable_tagging()
#define hw_enable_tagging_sync() arch_enable_tagging_sync()
#define hw_enable_tagging_async() arch_enable_tagging_async()
#define hw_init_tags(max_tag) arch_init_tags(max_tag)
#define hw_set_tagging_report_once(state) arch_set_tagging_report_once(state)
#define hw_force_async_tag_fault() arch_force_async_tag_fault()
#define hw_get_random_tag() arch_get_random_tag()
#define hw_get_mem_tag(addr) arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag) arch_set_mem_tag_range((addr), (size), (tag))
#else /* CONFIG_KASAN_HW_TAGS */
#define hw_enable_tagging()
#define hw_enable_tagging_sync()
#define hw_enable_tagging_async()
#define hw_set_tagging_report_once(state)
#endif /* CONFIG_KASAN_HW_TAGS */
@@ -311,12 +337,14 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
void kasan_set_tagging_report_once(bool state);
void kasan_enable_tagging(void);
void kasan_enable_tagging_sync(void);
void kasan_force_async_fault(void);
#else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
static inline void kasan_set_tagging_report_once(bool state) { }
static inline void kasan_enable_tagging(void) { }
static inline void kasan_enable_tagging_sync(void) { }
static inline void kasan_force_async_fault(void) { }
#endif /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */

View file

@@ -87,7 +87,8 @@ static void start_report(unsigned long *flags)
static void end_report(unsigned long *flags, unsigned long addr)
{
trace_error_report_end(ERROR_DETECTOR_KASAN, addr);
if (!kasan_async_mode_enabled())
trace_error_report_end(ERROR_DETECTOR_KASAN, addr);
pr_err("==================================================================\n");
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irqrestore(&report_lock, *flags);
@@ -360,6 +361,25 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
end_report(&flags, (unsigned long)object);
}
#ifdef CONFIG_KASAN_HW_TAGS
void kasan_report_async(void)
{
unsigned long flags;
#if IS_ENABLED(CONFIG_KUNIT)
if (current->kunit_test)
kasan_update_kunit_status(current->kunit_test);
#endif /* IS_ENABLED(CONFIG_KUNIT) */
start_report(&flags);
pr_err("BUG: KASAN: invalid-access\n");
pr_err("Asynchronous mode enabled: no access details available\n");
pr_err("\n");
dump_stack();
end_report(&flags, 0);
}
#endif /* CONFIG_KASAN_HW_TAGS */
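
Assembled from the pr_err() calls above (stack dump and separator lines elided), an asynchronous report therefore carries no faulting address, size or access direction:

BUG: KASAN: invalid-access
Asynchronous mode enabled: no access details available

The stack trace that follows in a real report shows the context in which the deferred TFSR_EL1 check ran (kernel entry/exit, context switch or suspend), not the access that originally faulted.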
static void __kasan_report(unsigned long addr, size_t size, bool is_write,
unsigned long ip)
{