x86/fpu: Merge the two code paths in __fpu__restore_sig()

The ia32_fxstate case (32-bit with fxsr) and the other case (64-bit
frames or 32-bit frames without fxsr) both restore from kernel memory
and sanitize the content.

The !ia32_fxstate version restores missing xstates from the "init
state", while the ia32_fxstate version skips that step.

Merge the two code paths and keep the !ia32_fxstate version. In the
ia32_fxstate case, additionally copy only the user_i387_ia32_struct
data structure from the user buffer.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
Cc: kvm ML <kvm@vger.kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190403164156.19645-23-bigeasy@linutronix.de
Sebastian Andrzej Siewior 2019-04-03 18:41:51 +02:00, committed by Borislav Petkov
Parent 926b21f37b
Commit c2ff9e9a3d
1 changed file: 66 additions and 97 deletions

@@ -263,12 +263,17 @@ static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
 static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 {
+        struct user_i387_ia32_struct *envp = NULL;
+        int state_size = fpu_kernel_xstate_size;
         int ia32_fxstate = (buf != buf_fx);
         struct task_struct *tsk = current;
         struct fpu *fpu = &tsk->thread.fpu;
-        int state_size = fpu_kernel_xstate_size;
+        struct user_i387_ia32_struct env;
+        union fpregs_state *state;
         u64 xfeatures = 0;
         int fx_only = 0;
+        int ret = 0;
+        void *tmp;
 
         ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
                          IS_ENABLED(CONFIG_IA32_EMULATION));
@@ -303,105 +308,69 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                 }
         }
 
+        tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
+        if (!tmp)
+                return -ENOMEM;
+        state = PTR_ALIGN(tmp, 64);
+
+        if ((unsigned long)buf_fx % 64)
+                fx_only = 1;
+
         /*
          * For 32-bit frames with fxstate, copy the fxstate so it can be
          * reconstructed later.
          */
         if (ia32_fxstate) {
-                /*
-                 * For 32-bit frames with fxstate, copy the user state to the
-                 * thread's fpu state, reconstruct fxstate from the fsave
-                 * header. Validate and sanitize the copied state.
-                 */
-                struct user_i387_ia32_struct env;
-                union fpregs_state *state;
-                int err = 0;
-                void *tmp;
-
-                tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
-                if (!tmp)
-                        return -ENOMEM;
-                state = PTR_ALIGN(tmp, 64);
-
-                if (using_compacted_format()) {
-                        err = copy_user_to_xstate(&state->xsave, buf_fx);
-                } else {
-                        err = __copy_from_user(&state->xsave, buf_fx, state_size);
-
-                        if (!err && state_size > offsetof(struct xregs_state, header))
-                                err = validate_xstate_header(&state->xsave.header);
-                }
-
-                if (err || __copy_from_user(&env, buf, sizeof(env))) {
-                        err = -1;
-                } else {
-                        sanitize_restored_xstate(state, &env, xfeatures, fx_only);
-                        copy_kernel_to_fpregs(state);
-                }
-
-                kfree(tmp);
-                return err;
-        } else {
-                union fpregs_state *state;
-                void *tmp;
-                int ret;
-
-                tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
-                if (!tmp)
-                        return -ENOMEM;
-                state = PTR_ALIGN(tmp, 64);
-
-                /*
-                 * For 64-bit frames and 32-bit fsave frames, restore the user
-                 * state to the registers directly (with exceptions handled).
-                 */
-                if ((unsigned long)buf_fx % 64)
-                        fx_only = 1;
-
-                if (use_xsave() && !fx_only) {
-                        u64 init_bv = xfeatures_mask & ~xfeatures;
-
-                        if (using_compacted_format()) {
-                                ret = copy_user_to_xstate(&state->xsave, buf_fx);
-                        } else {
-                                ret = __copy_from_user(&state->xsave, buf_fx, state_size);
-
-                                if (!ret && state_size > offsetof(struct xregs_state, header))
-                                        ret = validate_xstate_header(&state->xsave.header);
-                        }
-                        if (ret)
-                                goto err_out;
-
-                        sanitize_restored_xstate(state, NULL, xfeatures, fx_only);
-
-                        if (unlikely(init_bv))
-                                copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-                        ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
-                } else if (use_fxsr()) {
-                        ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
-                        if (ret)
-                                goto err_out;
-
-                        if (use_xsave()) {
-                                u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
-                                copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-                        }
-                        state->fxsave.mxcsr &= mxcsr_feature_mask;
-
-                        ret = copy_kernel_to_fxregs_err(&state->fxsave);
-                } else {
-                        ret = __copy_from_user(&state->fsave, buf_fx, state_size);
-                        if (ret)
-                                goto err_out;
-                        ret = copy_kernel_to_fregs_err(&state->fsave);
-                }
-
-err_out:
-                kfree(tmp);
-                if (ret) {
-                        fpu__clear(fpu);
-                        return -1;
-                }
+                ret = __copy_from_user(&env, buf, sizeof(env));
+                if (ret)
+                        goto err_out;
+                envp = &env;
         }
 
-        return 0;
+        if (use_xsave() && !fx_only) {
+                u64 init_bv = xfeatures_mask & ~xfeatures;
+
+                if (using_compacted_format()) {
+                        ret = copy_user_to_xstate(&state->xsave, buf_fx);
+                } else {
+                        ret = __copy_from_user(&state->xsave, buf_fx, state_size);
+
+                        if (!ret && state_size > offsetof(struct xregs_state, header))
+                                ret = validate_xstate_header(&state->xsave.header);
+                }
+                if (ret)
+                        goto err_out;
+
+                sanitize_restored_xstate(state, envp, xfeatures, fx_only);
+
+                if (unlikely(init_bv))
+                        copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+                ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
+        } else if (use_fxsr()) {
+                ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
+                if (ret)
+                        goto err_out;
+
+                sanitize_restored_xstate(state, envp, xfeatures, fx_only);
+
+                if (use_xsave()) {
+                        u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
+                        copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+                }
+
+                ret = copy_kernel_to_fxregs_err(&state->fxsave);
+        } else {
+                ret = __copy_from_user(&state->fsave, buf_fx, state_size);
+                if (ret)
+                        goto err_out;
+                ret = copy_kernel_to_fregs_err(&state->fsave);
+        }
+
+err_out:
+        kfree(tmp);
+        if (ret)
+                fpu__clear(fpu);
+        return ret;
 }
 
 static inline int xstate_sigframe_size(void)