x86/mce: Recover from poison found while copying from user space
Existing kernel code can only recover from a machine check on code that is tagged in the exception table with a fault handling recovery path.

Add two new fields in the task structure to pass information from the machine check handler to the "task_work" that is queued to run before the task returns to user mode:

+ mce_vaddr: will be initialized to the user virtual address of the fault in the case where the fault occurred in the kernel copying data from a user address. This is so that kill_me_maybe() can provide that information to the user SIGBUS handler.

+ mce_kflags: copy of the struct mce.kflags needed by kill_me_maybe() to determine if mce_vaddr is applicable to this error.

Add code to recover from a machine check while copying data from user space to the kernel. Action for this case is the same as if the user touched the poison directly; unmap the page and send a SIGBUS to the task.

Use a new helper function to share common code between the "fault in user mode" case and the "fault while copying from user" case.

New code paths will be activated by the next patch which sets MCE_IN_KERNEL_COPYIN.

Suggested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20201006210910.21062-6-tony.luck@intel.com
Parent: a2f73400e4
Commit: c0ab7ffce2
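The hunks below only add the new task_struct fields and the producer-side plumbing (queue_task_work()). As a rough sketch of how the consumer side could use mce_kflags and mce_vaddr once MCE_IN_KERNEL_COPYIN is set by the next patch (an illustration under those assumptions, not the verbatim code of this series; the function name is hypothetical):

/*
 * Sketch only: how the queued task_work could consume the new fields.
 * kill_me_maybe_sketch() is a made-up name for illustration.
 */
static void kill_me_maybe_sketch(struct callback_head *cb)
{
	struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
	int flags = MF_ACTION_REQUIRED;

	if (!p->mce_ripv)
		flags |= MF_MUST_KILL;

	/*
	 * Unmap/offline the poisoned page; on success nothing more is
	 * needed unless the fault happened while copying from user space.
	 */
	if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) &&
	    !(p->mce_kflags & MCE_IN_KERNEL_COPYIN))
		return;

	/*
	 * In the copy-from-user case the trap IP is a kernel address, so
	 * report the user virtual address being copied from instead.
	 */
	if (p->mce_vaddr != (void __user *)-1l)
		force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
}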
arch/x86/kernel/cpu/mce/core.c
@@ -1260,6 +1260,21 @@ static void kill_me_maybe(struct callback_head *cb)
 	kill_me_now(cb);
 }
 
+static void queue_task_work(struct mce *m, int kill_it)
+{
+	current->mce_addr = m->addr;
+	current->mce_kflags = m->kflags;
+	current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+	current->mce_whole_page = whole_page(m);
+
+	if (kill_it)
+		current->mce_kill_me.func = kill_me_now;
+	else
+		current->mce_kill_me.func = kill_me_maybe;
+
+	task_work_add(current, &current->mce_kill_me, true);
+}
+
 /*
  * The actual machine check handler. This only handles real
  * exceptions when something got corrupted coming in through int 18.
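queue_task_work() leans on the generic task_work machinery: do_machine_check() runs in an exception context where memory_failure() cannot be called directly, so the callback is queued to run in process context just before the task returns to user mode. A minimal, hypothetical example of that pattern (the example_* names are made up; the bool "notify" argument matches the kernel version this diff targets):

#include <linux/sched.h>
#include <linux/task_work.h>

/*
 * Hypothetical illustration of deferring work until 'current' returns
 * to user mode, mirroring what queue_task_work() does with
 * current->mce_kill_me.  Simplified: a real user needs one
 * callback_head per pending item (task_struct embeds its own).
 */
static struct callback_head example_work;

static void example_cb(struct callback_head *cb)
{
	/* Runs in process context: may sleep, take locks, send signals. */
}

static void example_defer(void)
{
	init_task_work(&example_work, example_cb);
	task_work_add(current, &example_work, true);	/* notify = true */
}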
@@ -1401,13 +1416,8 @@ noinstr void do_machine_check(struct pt_regs *regs)
 		/* If this triggers there is no way to recover. Die hard. */
 		BUG_ON(!on_thread_stack() || !user_mode(regs));
 
-		current->mce_addr = m.addr;
-		current->mce_ripv = !!(m.mcgstatus & MCG_STATUS_RIPV);
-		current->mce_whole_page = whole_page(&m);
-		current->mce_kill_me.func = kill_me_maybe;
-		if (kill_it)
-			current->mce_kill_me.func = kill_me_now;
-		task_work_add(current, &current->mce_kill_me, true);
+		queue_task_work(&m, kill_it);
+
 	} else {
 		/*
 		 * Handle an MCE which has happened in kernel space but from
@@ -1422,6 +1432,9 @@ noinstr void do_machine_check(struct pt_regs *regs)
 		if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
 			mce_panic("Failed kernel mode recovery", &m, msg);
 		}
+
+		if (m.kflags & MCE_IN_KERNEL_COPYIN)
+			queue_task_work(&m, kill_it);
 	}
 out:
 	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
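From user space, both recovery paths end the same way: the task receives a SIGBUS with si_code BUS_MCEERR_AR and the poisoned virtual address in si_addr. A small illustrative handler that consumes that information (user-space example code, not part of the patch):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Illustrative SIGBUS handler: reports the poisoned user virtual
 * address (si_addr) and the poison granularity (si_addr_lsb, log2 of
 * the affected range), then exits.  fprintf() is not async-signal-safe;
 * acceptable only because this demo exits immediately.
 */
static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;

	if (si->si_code == BUS_MCEERR_AR)
		fprintf(stderr, "memory error at %p (lsb %d)\n",
			si->si_addr, si->si_addr_lsb);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = sigbus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);

	/* ... work that may touch memory with hardware poison ... */
	return 0;
}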
include/linux/sched.h
@@ -1308,6 +1308,8 @@ struct task_struct {
 #endif
 
 #ifdef CONFIG_X86_MCE
+	void __user			*mce_vaddr;
+	__u64				mce_kflags;
 	u64				mce_addr;
 	__u64				mce_ripv : 1,
 					mce_whole_page : 1,