[PATCH] powerpc: Fix handling of fpscr on 64-bit
The recent merge of fpu.S broke the handling of fpscr for ARCH=powerpc and CONFIG_PPC64=y. FP registers could be corrupted, leading to strange random application crashes.

The confusion arises because the thread_struct has (and requires) a 64-bit area to save the fpscr, since we use load/store double instructions to get it in to/out of the FPU. However, only the low 32 bits are actually used, so we want to treat it as a 32-bit quantity when manipulating its bits, to avoid extra load/stores on 32-bit. This patch replaces the current definition with a structure of two 32-bit quantities (pad and val), to clarify things as much as possible. The 'val' field is used when manipulating bits; the structure itself is used when obtaining the address for loading/unloading the value from the FPU.

While we're at it, consolidate the 4 (!) almost identical versions of cvt_fd() and cvt_df() (arch/ppc/kernel/misc.S, arch/ppc64/kernel/misc.S, arch/powerpc/kernel/misc_32.S, arch/powerpc/kernel/misc_64.S) into a single version in fpu.S. The new version takes a pointer to thread_struct and applies the correct offset itself, rather than taking a pointer to the fpscr field, again to avoid confusion as to which is the correct field to use.

Finally, this patch makes ARCH=ppc64 also use the consolidated fpu.S code, which it previously did not.

Built for G5 (ARCH=ppc64 and ARCH=powerpc), 32-bit powermac (ARCH=ppc and ARCH=powerpc) and Walnut (ARCH=ppc, CONFIG_MATH_EMULATION=y). Booted on G5 (ARCH=powerpc), and things which previously fell over no longer do.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
This commit is contained in:
Parent: fda262b897
Commit: 25c8a78b1e
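To make the new layout concrete, here is a minimal stand-alone C sketch of the trick the patch relies on. It is illustrative only, not kernel code: thread_struct_sketch and the userspace asserts are hypothetical stand-ins for the real thread_struct changed in the processor.h hunk below.

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the kernel's thread_struct. */
	struct thread_struct_sketch {
		double fpr[32];			/* fpr ... fpscr must be contiguous */
		struct {			/* 64-bit slot, addressed as a whole by lfd/stfd */
			unsigned int pad;	/* high-order word of the doubleword, unused */
			unsigned int val;	/* low-order word: the actual FPSCR bits */
		} fpscr;
	};

	int main(void)
	{
		struct thread_struct_sketch t = { .fpscr = { .val = 0 } };

		/* lfd/stfd would move all 8 bytes at &t.fpscr ... */
		assert(sizeof t.fpscr == 8);
		/* ... while C code manipulating bits touches only t.fpscr.val,
		 * which lives in the second word (offset 4). */
		assert((char *)&t.fpscr.val - (char *)&t.fpscr == 4);

		t.fpscr.val = 0;	/* e.g. "turn off all fp exceptions" */
		printf("fpscr slot: %zu bytes, val at offset 4\n", sizeof t.fpscr);
		return 0;
	}

Assembly such as stfd fr0,THREAD_FPSCR(r4) moves the whole 8-byte slot, while C code such as current->thread.fpscr.val = 0 touches only the meaningful word: PowerPC is big-endian, so the low-order word that mffs/stfd deposit the FPSCR bits into is the one at the higher address, which is why val follows pad.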
@@ -29,7 +29,6 @@ extra-$(CONFIG_44x) := head_44x.o
 extra-$(CONFIG_FSL_BOOKE)	:= head_fsl_booke.o
 extra-$(CONFIG_8xx)		:= head_8xx.o
 extra-$(CONFIG_PPC64)		+= entry_64.o
-extra-$(CONFIG_PPC_FPU)		+= fpu.o
 extra-y				+= vmlinux.lds
 
 obj-y				+= process.o init_task.o time.o \
@@ -49,7 +48,7 @@ else
 # stuff used from here for ARCH=ppc or ARCH=ppc64
 obj-$(CONFIG_PPC64)		+= traps.o process.o init_task.o time.o
 
-fpux-$(CONFIG_PPC32)		+= fpu.o
-extra-$(CONFIG_PPC_FPU)		+= $(fpux-y)
 
 endif
+
+extra-$(CONFIG_PPC_FPU)		+= fpu.o

@@ -48,7 +48,7 @@ _GLOBAL(load_up_fpu)
 	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
 	SAVE_32FPRS(0, r4)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r4)
+	stfd	fr0,THREAD_FPSCR(r4)
 	LDL	r5,PT_REGS(r4)
 	tophys(r5,r5)
 	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -71,7 +71,7 @@ _GLOBAL(load_up_fpu)
 	or	r12,r12,r4
 	std	r12,_MSR(r1)
 #endif
-	lfd	fr0,THREAD_FPSCR-4(r5)
+	lfd	fr0,THREAD_FPSCR(r5)
 	mtfsf	0xff,fr0
 	REST_32FPRS(0, r5)
 #ifndef CONFIG_SMP
@@ -104,7 +104,7 @@ _GLOBAL(giveup_fpu)
 	CMPI	0,r5,0
 	SAVE_32FPRS(0, r3)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r3)
+	stfd	fr0,THREAD_FPSCR(r3)
 	beq	1f
 	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	li	r3,MSR_FP|MSR_FE0|MSR_FE1
@@ -117,3 +117,28 @@ _GLOBAL(giveup_fpu)
 	STL	r5,OFF(last_task_used_math)(r4)
 #endif /* CONFIG_SMP */
 	blr
+
+/*
+ * These are used in the alignment trap handler when emulating
+ * single-precision loads and stores.
+ * We restore and save the fpscr so the task gets the same result
+ * and exceptions as if the cpu had performed the load or store.
+ */
+
+_GLOBAL(cvt_fd)
+	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
+	mtfsf	0xff,0
+	lfs	0,0(r3)
+	stfd	0,0(r4)
+	mffs	0
+	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
+	blr
+
+_GLOBAL(cvt_df)
+	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
+	mtfsf	0xff,0
+	lfd	0,0(r3)
+	stfs	0,0(r4)
+	mffs	0
+	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
+	blr

@@ -992,33 +992,6 @@ _GLOBAL(_get_SP)
 	mr	r3,r1		/* Close enough */
 	blr
 
-/*
- * These are used in the alignment trap handler when emulating
- * single-precision loads and stores.
- * We restore and save the fpscr so the task gets the same result
- * and exceptions as if the cpu had performed the load or store.
- */
-
-#ifdef CONFIG_PPC_FPU
-_GLOBAL(cvt_fd)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-#endif
-
 /*
  * Create a kernel thread
  *   kernel_thread(fn, arg, flags)

@@ -462,25 +462,6 @@ _GLOBAL(_outsl_ns)
 	sync
 	blr
 
-
-_GLOBAL(cvt_fd)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
 /*
  * identify_cpu and calls setup_cpu
  * In:	r3 = base of the cpu_specs array

@@ -665,7 +665,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #endif
 #endif /* CONFIG_SMP */
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
 	memset(current->thread.vr, 0, sizeof(current->thread.vr));
 	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));

@@ -403,7 +403,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 			   ELF_NFPREG * sizeof(double)))
 		return 1;
 
-	current->thread.fpscr = 0;	/* turn off all fp exceptions */
+	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
 
 #ifdef CONFIG_ALTIVEC
 	/* save altivec registers */

@@ -549,7 +549,7 @@ static void parse_fpe(struct pt_regs *regs)
 
 	flush_fp_to_thread(current);
 
-	fpscr = current->thread.fpscr;
+	fpscr = current->thread.fpscr.val;
 
 	/* Invalid operation */
 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))

@@ -375,7 +375,7 @@ fix_alignment(struct pt_regs *regs)
 #ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_fd(&data.f, &data.d, &current->thread.fpscr);
+		cvt_fd(&data.f, &data.d, &current->thread);
 		preempt_enable();
 #else
 		return 0;
@@ -385,7 +385,7 @@ fix_alignment(struct pt_regs *regs)
 #ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_df(&data.d, &data.f, &current->thread.fpscr);
+		cvt_df(&data.d, &data.f, &current->thread);
 		preempt_enable();
 #else
 		return 0;

@@ -967,33 +967,6 @@ _GLOBAL(_get_SP)
 	mr	r3,r1		/* Close enough */
 	blr
 
-/*
- * These are used in the alignment trap handler when emulating
- * single-precision loads and stores.
- * We restore and save the fpscr so the task gets the same result
- * and exceptions as if the cpu had performed the load or store.
- */
-
-#ifdef CONFIG_PPC_FPU
-_GLOBAL(cvt_fd)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-#endif
-
 /*
  * Create a kernel thread
  *   kernel_thread(fn, arg, flags)

@@ -542,7 +542,7 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
 	last_task_used_spe = NULL;
 #endif
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
 	memset(current->thread.vr, 0, sizeof(current->thread.vr));
 	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));

@@ -659,7 +659,7 @@ void program_check_exception(struct pt_regs *regs)
 		giveup_fpu(current);
 		preempt_enable();
 
-		fpscr = current->thread.fpscr;
+		fpscr = current->thread.fpscr.val;
 		fpscr &= fpscr << 22;	/* mask summary bits with enables */
 		if (fpscr & FPSCR_VX)
 			code = FPE_FLTINV;

@@ -166,7 +166,7 @@ extern int fp_pack_ds(void *, long, unsigned long, unsigned long, long, long);
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
-#define __FPU_FPSCR	(current->thread.fpscr)
+#define __FPU_FPSCR	(current->thread.fpscr.val)
 
 /* We only actually write to the destination register
  * if exceptions signalled (if any) will not trap.

@@ -197,6 +197,9 @@ config BOOTX_TEXT
 config POWER4
 	def_bool y
 
+config PPC_FPU
+	def_bool y
+
 config POWER4_ONLY
 	bool "Optimize for POWER4"
 	default n

@@ -80,6 +80,7 @@ endif
 CFLAGS += $(call cc-option,-funit-at-a-time)
 
 head-y := arch/ppc64/kernel/head.o
+head-y += arch/powerpc/kernel/fpu.o
 
 libs-y += arch/ppc64/lib/
 core-y += arch/ppc64/kernel/ arch/powerpc/kernel/

@@ -313,7 +313,7 @@ fix_alignment(struct pt_regs *regs)
 		/* Doing stfs, have to convert to single */
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread.fpscr);
+		cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread);
 		disable_kernel_fp();
 		preempt_enable();
 	}
@@ -349,7 +349,7 @@ fix_alignment(struct pt_regs *regs)
 		/* Doing lfs, have to convert to double */
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread.fpscr);
+		cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread);
 		disable_kernel_fp();
 		preempt_enable();
 	}

@@ -81,7 +81,7 @@ _stext:
 _GLOBAL(__start)
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
-	b	.__start_initialization_multiplatform
+	b	.__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 

@@ -747,6 +747,7 @@ bad_stack:
  * any task or sent any task a signal, you should use
  * ret_from_except or ret_from_except_lite instead of this.
  */
+	.globl	fast_exception_return
 fast_exception_return:
 	ld	r12,_MSR(r1)
 	ld	r11,_NIP(r1)
@@ -858,62 +859,6 @@ fp_unavailable_common:
 	bl	.kernel_fp_unavailable_exception
 	BUG_OPCODE
 
-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-	mfmsr	r5			/* grab the current MSR */
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_math@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save FP state to last_task_used_math's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32FPRS(0, r4)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r4)
-	/* Disable FP for last_task_used_math */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r6,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	ld	r4,THREAD_FPEXC_MODE(r5)
-	ori	r12,r12,MSR_FP
-	or	r12,r12,r4
-	std	r12,_MSR(r1)
-	lfd	fr0,THREAD_FPSCR(r5)
-	mtfsf	0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi	r4,r5,THREAD		/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	b	fast_exception_return
-
 	.align	7
 	.globl altivec_unavailable_common
 altivec_unavailable_common:

@@ -451,25 +451,6 @@ _GLOBAL(_outsl_ns)
 	sync
 	blr
 
-
-_GLOBAL(cvt_fd)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
 /*
  * identify_cpu and calls setup_cpu
  * In:	r3 = base of the cpu_specs array
@@ -655,38 +636,6 @@ _GLOBAL(disable_kernel_fp)
 	isync
 	blr
 
-/*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
-_GLOBAL(giveup_fpu)
-	mfmsr	r5
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-	cmpdi	0,r3,0
-	beqlr-				/* if no previous owner, done */
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	ld	r5,PT_REGS(r3)
-	cmpdi	0,r5,0
-	SAVE_32FPRS(0, r3)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r3)
-	beq	1f
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r3,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r3		/* disable FP for previous task */
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	ld	r4,last_task_used_math@got(r2)
-	std	r5,0(r4)
-#endif /* CONFIG_SMP */
-	blr
-
 #ifdef CONFIG_ALTIVEC
 
 #if 0 /* this has no callers for now */

@@ -133,7 +133,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	flush_fp_to_thread(current);
 
 	/* Make sure signal doesn't get spurrious FP exceptions */
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 
 #ifdef CONFIG_ALTIVEC
 	err |= __put_user(v_regs, &sc->v_regs);

@@ -162,10 +162,11 @@ struct thread_struct {
 	unsigned long	dbcr1;
 #endif
 	double		fpr[32];	/* Complete floating point set */
-#ifdef CONFIG_PPC32
-	unsigned long	fpscr_pad;	/* fpr ... fpscr must be contiguous */
-#endif
-	unsigned long	fpscr;		/* Floating point status */
+	struct {			/* fpr ... fpscr must be contiguous */
+
+		unsigned int pad;
+		unsigned int val;	/* Floating point status */
+	} fpscr;
 	int		fpexc_mode;	/* floating-point exception mode */
 #ifdef CONFIG_PPC64
 	unsigned long	start_tb;	/* Start purr when proc switched in */
@@ -207,7 +208,7 @@ struct thread_struct {
 	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
 	.fs = KERNEL_DS, \
 	.fpr = {0}, \
-	.fpscr = 0, \
+	.fpscr = { .val = 0, }, \
 	.fpexc_mode = MSR_FE0|MSR_FE1, \
 }
 #endif

@@ -132,8 +132,8 @@ extern int emulate_altivec(struct pt_regs *);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);

@@ -82,8 +82,8 @@ extern int emulate_altivec(struct pt_regs *);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);

@@ -120,8 +120,8 @@ extern void giveup_altivec(struct task_struct *);
 extern void disable_kernel_altivec(void);
 extern void enable_kernel_altivec(void);
 extern int emulate_altivec(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);