powerpc: Prepare for splitting giveup_{fpu, altivec, vsx} in two
This prepares for the decoupling of saving {fpu,altivec,vsx} registers and marking {fpu,altivec,vsx} as being unused by a thread.

Currently giveup_{fpu,altivec,vsx}() does both; however, optimisations to task switching can be made if these two operations are decoupled. save_all() will permit the saving of registers to thread structs and leave the thread's MSR with the bits enabled.

This patch introduces no functional change.

Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:
Parent: 70fe3d980f
Commit: de2a20aa72
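To make the saving-versus-marking-unused distinction concrete, here is a toy userspace model, not kernel code: the struct layout and helper names below are invented for illustration (only the MSR_FP bit position matches the real MSR_FP_LG). The giveup path does everything the save path does, plus clearing the facility bit in the thread's MSR.

/*
 * Toy userspace model -- not kernel code.  Both paths copy the live
 * register state into the thread struct; only the giveup path also
 * clears the MSR bit that marks the facility as in use.
 */
#include <stdio.h>

#define MSR_FP	(1UL << 13)	/* matches powerpc MSR_FP_LG */

struct toy_thread {
	unsigned long msr;	/* per-thread facility bits */
	double fp_state;	/* stand-in for the FP register file */
};

static void toy_save(struct toy_thread *t, double live_fp)
{
	if (t->msr & MSR_FP)
		t->fp_state = live_fp;	/* save; MSR_FP stays set */
}

static void toy_giveup(struct toy_thread *t, double live_fp)
{
	toy_save(t, live_fp);
	t->msr &= ~MSR_FP;		/* save AND mark FP as unused */
}

int main(void)
{
	struct toy_thread t = { .msr = MSR_FP, .fp_state = 0.0 };

	toy_save(&t, 1.5);
	printf("after save:   fp=%.1f MSR_FP set=%d\n", t.fp_state, !!(t.msr & MSR_FP));
	toy_giveup(&t, 2.5);
	printf("after giveup: fp=%.1f MSR_FP set=%d\n", t.fp_state, !!(t.msr & MSR_FP));
	return 0;
}

With the two operations split as described above, a task switch can save registers without forcing the thread to take a facility-unavailable fault the next time it uses them.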
arch/powerpc/include/asm/reg.h
@@ -75,6 +75,14 @@
 #define MSR_HV		0
 #endif
 
+/*
+ * To be used in shared book E/book S, this avoids needing to worry about
+ * book S/book E in shared code
+ */
+#ifndef MSR_SPE
+#define MSR_SPE		0
+#endif
+
 #define MSR_VEC		__MASK(MSR_VEC_LG)	/* Enable AltiVec */
 #define MSR_VSX		__MASK(MSR_VSX_LG)	/* Enable VSX */
 #define MSR_POW		__MASK(MSR_POW_LG)	/* Enable Power Management */
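The point of this fallback is that shared code, such as save_all() later in this patch, can test usermsr & MSR_SPE without an #ifdef: on Book3S, where MSR_SPE was previously undefined, the mask is now 0 and the test is constant-false, so the compiler drops the branch. A minimal standalone illustration (nothing below is kernel code):

#include <stdio.h>

#ifndef MSR_SPE
#define MSR_SPE	0	/* Book3S fallback, mirroring the hunk above */
#endif

/* Shared-code pattern: builds on both book E and book S. */
static void maybe_save_spe(unsigned long usermsr)
{
	if (usermsr & MSR_SPE)	/* constant-false when MSR_SPE == 0 */
		printf("would save SPE state\n");
	else
		printf("SPE branch optimised away\n");
}

int main(void)
{
	maybe_save_spe(~0UL);	/* even all-ones never matches a zero mask */
	return 0;
}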
arch/powerpc/include/asm/switch_to.h
@@ -34,6 +34,7 @@ static inline void disable_kernel_fp(void)
 	msr_check_and_clear(MSR_FP);
 }
 #else
+static inline void __giveup_fpu(struct task_struct *t) { }
 static inline void flush_fp_to_thread(struct task_struct *t) { }
 #endif
 
@@ -46,6 +47,8 @@ static inline void disable_kernel_altivec(void)
 {
 	msr_check_and_clear(MSR_VEC);
 }
+#else
+static inline void __giveup_altivec(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_VSX
@@ -57,6 +60,8 @@ static inline void disable_kernel_vsx(void)
 {
 	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
+#else
+static inline void __giveup_vsx(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_SPE
@@ -68,6 +73,8 @@ static inline void disable_kernel_spe(void)
 {
 	msr_check_and_clear(MSR_SPE);
 }
+#else
+static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
 static inline void clear_task_ebb(struct task_struct *t)
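All four switch_to.h hunks apply the same empty-stub idiom: when a facility is configured out, a do-nothing static inline with the matching signature lets common code call __giveup_*() unconditionally, and the compiler inlines the stub away. A self-contained sketch (CONFIG_TOY and __giveup_toy are made-up stand-ins for CONFIG_ALTIVEC/__giveup_altivec and friends):

#include <stdio.h>

struct task_struct { int pid; };	/* minimal stand-in */

#ifdef CONFIG_TOY
static inline void __giveup_toy(struct task_struct *t)
{
	printf("saving toy facility for pid %d\n", t->pid);
}
#else
/* Facility configured out: empty stub, costs nothing at runtime. */
static inline void __giveup_toy(struct task_struct *t) { }
#endif

int main(void)
{
	struct task_struct tsk = { .pid = 1 };

	__giveup_toy(&tsk);	/* caller needs no #ifdef */
	return 0;
}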
arch/powerpc/kernel/process.c
@@ -444,12 +444,41 @@ void restore_math(struct pt_regs *regs)
 	regs->msr = msr;
 }
 
+void save_all(struct task_struct *tsk)
+{
+	unsigned long usermsr;
+
+	if (!tsk->thread.regs)
+		return;
+
+	usermsr = tsk->thread.regs->msr;
+
+	if ((usermsr & msr_all_available) == 0)
+		return;
+
+	msr_check_and_set(msr_all_available);
+
+	if (usermsr & MSR_FP)
+		__giveup_fpu(tsk);
+
+	if (usermsr & MSR_VEC)
+		__giveup_altivec(tsk);
+
+	if (usermsr & MSR_VSX)
+		__giveup_vsx(tsk);
+
+	if (usermsr & MSR_SPE)
+		__giveup_spe(tsk);
+
+	msr_check_and_clear(msr_all_available);
+}
+
 void flush_all_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
 		preempt_disable();
 		BUG_ON(tsk != current);
-		giveup_all(tsk);
+		save_all(tsk);
 
 #ifdef CONFIG_SPE
 		if (tsk->thread.regs->msr & MSR_SPE)
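One detail worth noting in save_all(): the __giveup_*() helpers can only touch a facility's registers while that facility is enabled in the kernel's own MSR, so the saves are bracketed by msr_check_and_set(msr_all_available) and msr_check_and_clear(msr_all_available). A toy userspace model of that bracketing (every name below is a mock-up of the corresponding kernel helper, not the real implementation):

#include <stdio.h>

#define MSR_FP	(1UL << 13)
#define MSR_VEC	(1UL << 25)

static unsigned long cpu_msr;	/* mock of the CPU's current MSR */
static const unsigned long msr_all_available = MSR_FP | MSR_VEC;

static void msr_check_and_set(unsigned long bits)   { cpu_msr |= bits; }
static void msr_check_and_clear(unsigned long bits) { cpu_msr &= ~bits; }

static void save_all_model(unsigned long usermsr)
{
	if ((usermsr & usermsr & msr_all_available) == 0)
		return;				/* thread used none of them */

	msr_check_and_set(msr_all_available);	/* enable, so registers are accessible */

	if (usermsr & MSR_FP)
		printf("save FP regs (cpu_msr=%#lx)\n", cpu_msr);
	if (usermsr & MSR_VEC)
		printf("save VEC regs (cpu_msr=%#lx)\n", cpu_msr);

	msr_check_and_clear(msr_all_available);	/* disable again on the way out */
}

int main(void)
{
	save_all_model(MSR_FP);		/* thread had only FP live */
	return 0;
}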