[ARM] vfp: make fpexc bit names less verbose
Use the fpexc abbreviated names instead of long verbose names for fpexc bits.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Parent: 21d1ca0453
Commit: 228adef16d
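
For reference, FPEXC is the VFP exception control register: bit 30 (the enable bit, renamed FPEXC_ENABLE -> FPEXC_EN here) gates access to the VFP, and bit 31 (the exception bit, renamed FPEXC_EXCEPTION -> FPEXC_EX) flags an exceptional state that needs software attention. The standalone C sketch below is not part of the patch; it only illustrates the renamed bit values and the kind of read-modify-write the patch touches. The pretend_fpexc variable stands in for the real register, which the kernel accesses with fmrx()/fmxr().

    #include <stdint.h>
    #include <stdio.h>

    #define FPEXC_EX  (1u << 31)   /* was FPEXC_EXCEPTION */
    #define FPEXC_EN  (1u << 30)   /* was FPEXC_ENABLE */

    int main(void)
    {
            uint32_t pretend_fpexc = FPEXC_EN | FPEXC_EX;  /* stand-in for fmrx(FPEXC) */

            printf("VFP enabled: %d\n", !!(pretend_fpexc & FPEXC_EN));  /* -> 1 */
            pretend_fpexc &= ~FPEXC_EN;     /* the disable pattern used throughout the patch */
            printf("VFP enabled: %d\n", !!(pretend_fpexc & FPEXC_EN));  /* -> 0 */
            return 0;
    }
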
@@ -74,14 +74,14 @@ vfp_support_entry:
 
         VFPFMRX r1, FPEXC               @ Is the VFP enabled?
         DBGSTR1 "fpexc %08x", r1
-        tst     r1, #FPEXC_ENABLE
+        tst     r1, #FPEXC_EN
         bne     look_for_VFP_exceptions @ VFP is already enabled
 
         DBGSTR1 "enable %x", r10
         ldr     r3, last_VFP_context_address
-        orr     r1, r1, #FPEXC_ENABLE   @ user FPEXC has the enable bit set
+        orr     r1, r1, #FPEXC_EN       @ user FPEXC has the enable bit set
         ldr     r4, [r3, r11, lsl #2]   @ last_VFP_context pointer
-        bic     r5, r1, #FPEXC_EXCEPTION @ make sure exceptions are disabled
+        bic     r5, r1, #FPEXC_EX       @ make sure exceptions are disabled
         cmp     r4, r10
         beq     check_for_exception     @ we are returning to the same
                                         @ process, so the registers are
@@ -124,7 +124,7 @@ no_old_VFP_process:
         VFPFMXR FPSCR, r5               @ restore status
 
 check_for_exception:
-        tst     r1, #FPEXC_EXCEPTION
+        tst     r1, #FPEXC_EX
         bne     process_exception       @ might as well handle the pending
                                         @ exception before retrying branch
                                         @ out before setting an FPEXC that
@@ -136,10 +136,10 @@ check_for_exception:
 
 
 look_for_VFP_exceptions:
-        tst     r1, #FPEXC_EXCEPTION
+        tst     r1, #FPEXC_EX
         bne     process_exception
         VFPFMRX r5, FPSCR
-        tst     r5, #FPSCR_IXE          @ IXE doesn't set FPEXC_EXCEPTION !
+        tst     r5, #FPSCR_IXE          @ IXE doesn't set FPEXC_EX !
         bne     process_exception
 
         @ Fall into hand on to next handler - appropriate coproc instr
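
The assembly hunks above are a pure rename; the control flow is unchanged: vfp_support_entry tests FPEXC_EN to decide whether the VFP is already enabled, then tests FPEXC_EX (or FPSCR's IXE bit) to decide whether a pending exception must be handled first. The standalone C sketch below only models that decision with hypothetical names (vfp_entry_decision and the enum are illustration only; the real logic is the assembly above).

    #include <stdint.h>

    #define FPEXC_EX  (1u << 31)
    #define FPEXC_EN  (1u << 30)

    enum vfp_action { VFP_HANDLE_EXCEPTION, VFP_ENABLE_AND_RESTORE, VFP_FALL_THROUGH };

    static enum vfp_action vfp_entry_decision(uint32_t fpexc)
    {
            if (!(fpexc & FPEXC_EN))
                    /* VFP not enabled yet: it gets re-enabled with exceptions
                     * masked, i.e. (fpexc | FPEXC_EN) & ~FPEXC_EX, before the
                     * saved state is restored. */
                    return VFP_ENABLE_AND_RESTORE;

            if (fpexc & FPEXC_EX)           /* look_for_VFP_exceptions path */
                    return VFP_HANDLE_EXCEPTION;

            return VFP_FALL_THROUGH;        /* hand on to the next handler */
    }
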
@@ -53,7 +53,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
                  * case the thread migrates to a different CPU. The
                  * restoring is done lazily.
                  */
-                if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
+                if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
                         vfp_save_state(last_VFP_context[cpu], fpexc);
                         last_VFP_context[cpu]->hard.cpu = cpu;
                 }
@@ -70,7 +70,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
                  * Always disable VFP so we can lazily save/restore the
                  * old state.
                  */
-                fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
+                fmxr(FPEXC, fpexc & ~FPEXC_EN);
                 return NOTIFY_DONE;
         }
 
@@ -81,13 +81,13 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
                  */
                 memset(vfp, 0, sizeof(union vfp_state));
 
-                vfp->hard.fpexc = FPEXC_ENABLE;
+                vfp->hard.fpexc = FPEXC_EN;
                 vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
 
                 /*
                  * Disable VFP to ensure we initialise it first.
                  */
-                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
+                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
         }
 
         /* flush and release case: Per-thread VFP cleanup. */
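
The vfp_notifier() hunks above implement lazy VFP context handling: on a thread switch the outgoing state is saved only if the VFP was actually enabled (FPEXC_EN set), and FPEXC_EN is then always cleared so the next VFP instruction traps and state is saved/restored lazily. The standalone sketch below only models that idea; vfp_switch_fpexc and the stub struct are hypothetical, the real code is vfp_notifier() above.

    #include <stdint.h>
    #include <stddef.h>

    #define FPEXC_EN  (1u << 30)

    struct vfp_state_stub { uint32_t saved_fpexc; /* plus the real VFP registers */ };

    /* Returns the value to write back to FPEXC after a context switch. */
    static uint32_t vfp_switch_fpexc(uint32_t fpexc, struct vfp_state_stub *last)
    {
            if ((fpexc & FPEXC_EN) && last)
                    last->saved_fpexc = fpexc;      /* stand-in for vfp_save_state() */

            /* Always disable VFP so the next use bounces and the new
             * thread's state is restored on demand (as the patch comments say). */
            return fpexc & ~FPEXC_EN;
    }
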
@@ -229,7 +229,7 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
         /*
          * Enable access to the VFP so we can handle the bounce.
          */
-        fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
+        fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
 
         orig_fpscr = fpscr = fmrx(FPSCR);
 
@@ -248,7 +248,7 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
         /*
          * Modify fpscr to indicate the number of iterations remaining
          */
-        if (fpexc & FPEXC_EXCEPTION) {
+        if (fpexc & FPEXC_EX) {
                 u32 len;
 
                 len = fpexc + (1 << FPEXC_LENGTH_BIT);
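
In VFP9_bounce() the patch again only renames the bit: on entry the handler keeps the VFP accessible ("Enable access to the VFP so we can handle the bounce") while clearing FPEXC_EX and the other exception-related bits, and the second hunk tests FPEXC_EX on the saved value when working out the iterations remaining. The sketch below models only the masking step; the values of FPEXC_INV, FPEXC_UFC and FPEXC_IOC are defined alongside FPEXC_EX/FPEXC_EN in the VFP header and are not part of this diff, so the ones used here are placeholders.

    #include <stdint.h>

    #define FPEXC_EX   (1u << 31)
    #define FPEXC_EN   (1u << 30)
    /* Placeholder values -- the real definitions live in the VFP header. */
    #define FPEXC_INV  (1u << 7)
    #define FPEXC_UFC  (1u << 3)
    #define FPEXC_IOC  (1u << 0)

    /* Value written to FPEXC on entry to the bounce handler: access stays
     * usable, the exception bookkeeping bits are cleared before handling. */
    static uint32_t vfp_bounce_entry_fpexc(uint32_t fpexc)
    {
            return fpexc & ~(FPEXC_EX | FPEXC_INV | FPEXC_UFC | FPEXC_IOC);
    }
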
@@ -26,8 +26,8 @@
 #define FPSID_REV_MASK          (0xF << FPSID_REV_BIT)
 
 /* FPEXC bits */
-#define FPEXC_EXCEPTION         (1<<31)
-#define FPEXC_ENABLE            (1<<30)
+#define FPEXC_EX                (1 << 31)
+#define FPEXC_EN                (1 << 30)
 
 /* FPSCR bits */
 #define FPSCR_DEFAULT_NAN       (1<<25)