/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/vfp/vfphw.S
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This code is called from the kernel's undefined instruction trap.
 * r9 holds the return address for successful handling.
 * lr holds the return address for unrecognised instructions.
 * r10 points at the start of the private FP workspace in the thread structure
 * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
 */
|
2014-04-02 13:57:48 +04:00
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/linkage.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <asm/thread_info.h>
|
|
|
|
#include <asm/vfpmacros.h>
|
2012-07-31 01:40:12 +04:00
|
|
|
#include <linux/kern_levels.h>
|
2014-04-02 13:57:48 +04:00
|
|
|
#include <asm/assembler.h>
|
|
|
|
#include <asm/asm-offsets.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
.macro DBGSTR, str
|
|
|
|
#ifdef DEBUG
|
|
|
|
stmfd sp!, {r0-r3, ip, lr}
|
2013-02-26 18:41:41 +04:00
|
|
|
ldr r0, =1f
|
printk: Userspace format indexing support
We have a number of systems industry-wide that have a subset of their
functionality that works as follows:
1. Receive a message from local kmsg, serial console, or netconsole;
2. Apply a set of rules to classify the message;
3. Do something based on this classification (like scheduling a
remediation for the machine), rinse, and repeat.
As a couple of examples of places we have this implemented just inside
Facebook, although this isn't a Facebook-specific problem, we have this
inside our netconsole processing (for alarm classification), and as part
of our machine health checking. We use these messages to determine
fairly important metrics around production health, and it's important
that we get them right.
While for some kinds of issues we have counters, tracepoints, or metrics
with a stable interface which can reliably indicate the issue, in order
to react to production issues quickly we need to work with the interface
which most kernel developers naturally use when developing: printk.
Most production issues come from unexpected phenomena, and as such
usually the code in question doesn't have easily usable tracepoints or
other counters available for the specific problem being mitigated. We
have a number of lines of monitoring defence against problems in
production (host metrics, process metrics, service metrics, etc), and
where it's not feasible to reliably monitor at another level, this kind
of pragmatic netconsole monitoring is essential.
As one would expect, monitoring using printk is rather brittle for a
number of reasons -- most notably that the message might disappear
entirely in a new version of the kernel, or that the message may change
in some way that the regex or other classification methods start to
silently fail.
One factor that makes this even harder is that, under normal operation,
many of these messages are never expected to be hit. For example, there
may be a rare hardware bug which one wants to detect if it was to ever
happen again, but its recurrence is not likely or anticipated. This
precludes using something like checking whether the printk in question
was printed somewhere fleetwide recently to determine whether the
message in question is still present or not, since we don't anticipate
that it should be printed anywhere, but still need to monitor for its
future presence in the long-term.
This class of issue has happened on a number of occasions, causing
unhealthy machines with hardware issues to remain in production for
longer than ideal. As a recent example, some monitoring around
blk_update_request fell out of date and caused semi-broken machines to
remain in production for longer than would be desirable.
Searching through the codebase to find the message is also extremely
fragile, because many of the messages are further constructed beyond
their callsite (eg. btrfs_printk and other module-specific wrappers,
each with their own functionality). Even if they aren't, guessing the
format and formulation of the underlying message based on the aesthetics
of the message emitted is not a recipe for success at scale, and our
previous issues with fleetwide machine health checking demonstrate as
much.
This provides a solution to the issue of silently changed or deleted
printks: we record pointers to all printk format strings known at
compile time into a new .printk_index section, both in vmlinux and
modules. At runtime, this can then be iterated by looking at
<debugfs>/printk/index/<module>, which emits the following format, both
readable by humans and able to be parsed by machines:
$ head -1 vmlinux; shuf -n 5 vmlinux
# <level[,flags]> filename:line function "format"
<5> block/blk-settings.c:661 disk_stack_limits "%s: Warning: Device %s is misaligned\n"
<4> kernel/trace/trace.c:8296 trace_create_file "Could not create tracefs '%s' entry\n"
<6> arch/x86/kernel/hpet.c:144 _hpet_print_config "hpet: %s(%d):\n"
<6> init/do_mounts.c:605 prepare_namespace "Waiting for root device %s...\n"
<6> drivers/acpi/osl.c:1410 acpi_no_auto_serialize_setup "ACPI: auto-serialization disabled\n"
This mitigates the majority of cases where we have a highly-specific
printk which we want to match on, as we can now enumerate and check
whether the format changed or the printk callsite disappeared entirely
in userspace. This allows us to catch changes to printks we monitor
earlier and decide what to do about it before it becomes problematic.
There is no additional runtime cost for printk callers or printk itself,
and the assembly generated is exactly the same.
Signed-off-by: Chris Down <chris@chrisdown.name>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Jessica Yu <jeyu@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Tested-by: Petr Mladek <pmladek@suse.com>
Reported-by: kernel test robot <lkp@intel.com>
Acked-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Acked-by: Jessica Yu <jeyu@kernel.org> # for module.{c,h}
Signed-off-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/e42070983637ac5e384f17fbdbe86d19c7b212a5.1623775748.git.chris@chrisdown.name
2021-06-15 19:52:53 +03:00
|
|
|
bl _printk
|
2013-02-26 18:41:41 +04:00
|
|
|
ldmfd sp!, {r0-r3, ip, lr}
|
|
|
|
|
|
|
|
.pushsection .rodata, "a"
|
|
|
|
1: .ascii KERN_DEBUG "VFP: \str\n"
|
|
|
|
.byte 0
|
|
|
|
.previous
|
2005-04-17 02:20:36 +04:00
|
|
|
#endif
|
|
|
|
.endm
|
|
|
|
|
|
|
|
.macro DBGSTR1, str, arg
|
|
|
|
#ifdef DEBUG
|
|
|
|
stmfd sp!, {r0-r3, ip, lr}
|
|
|
|
mov r1, \arg
|
2013-02-26 18:41:41 +04:00
|
|
|
ldr r0, =1f
|
printk: Userspace format indexing support
We have a number of systems industry-wide that have a subset of their
functionality that works as follows:
1. Receive a message from local kmsg, serial console, or netconsole;
2. Apply a set of rules to classify the message;
3. Do something based on this classification (like scheduling a
remediation for the machine), rinse, and repeat.
As a couple of examples of places we have this implemented just inside
Facebook, although this isn't a Facebook-specific problem, we have this
inside our netconsole processing (for alarm classification), and as part
of our machine health checking. We use these messages to determine
fairly important metrics around production health, and it's important
that we get them right.
While for some kinds of issues we have counters, tracepoints, or metrics
with a stable interface which can reliably indicate the issue, in order
to react to production issues quickly we need to work with the interface
which most kernel developers naturally use when developing: printk.
Most production issues come from unexpected phenomena, and as such
usually the code in question doesn't have easily usable tracepoints or
other counters available for the specific problem being mitigated. We
have a number of lines of monitoring defence against problems in
production (host metrics, process metrics, service metrics, etc), and
where it's not feasible to reliably monitor at another level, this kind
of pragmatic netconsole monitoring is essential.
As one would expect, monitoring using printk is rather brittle for a
number of reasons -- most notably that the message might disappear
entirely in a new version of the kernel, or that the message may change
in some way that the regex or other classification methods start to
silently fail.
One factor that makes this even harder is that, under normal operation,
many of these messages are never expected to be hit. For example, there
may be a rare hardware bug which one wants to detect if it was to ever
happen again, but its recurrence is not likely or anticipated. This
precludes using something like checking whether the printk in question
was printed somewhere fleetwide recently to determine whether the
message in question is still present or not, since we don't anticipate
that it should be printed anywhere, but still need to monitor for its
future presence in the long-term.
This class of issue has happened on a number of occasions, causing
unhealthy machines with hardware issues to remain in production for
longer than ideal. As a recent example, some monitoring around
blk_update_request fell out of date and caused semi-broken machines to
remain in production for longer than would be desirable.
Searching through the codebase to find the message is also extremely
fragile, because many of the messages are further constructed beyond
their callsite (eg. btrfs_printk and other module-specific wrappers,
each with their own functionality). Even if they aren't, guessing the
format and formulation of the underlying message based on the aesthetics
of the message emitted is not a recipe for success at scale, and our
previous issues with fleetwide machine health checking demonstrate as
much.
This provides a solution to the issue of silently changed or deleted
printks: we record pointers to all printk format strings known at
compile time into a new .printk_index section, both in vmlinux and
modules. At runtime, this can then be iterated by looking at
<debugfs>/printk/index/<module>, which emits the following format, both
readable by humans and able to be parsed by machines:
$ head -1 vmlinux; shuf -n 5 vmlinux
# <level[,flags]> filename:line function "format"
<5> block/blk-settings.c:661 disk_stack_limits "%s: Warning: Device %s is misaligned\n"
<4> kernel/trace/trace.c:8296 trace_create_file "Could not create tracefs '%s' entry\n"
<6> arch/x86/kernel/hpet.c:144 _hpet_print_config "hpet: %s(%d):\n"
<6> init/do_mounts.c:605 prepare_namespace "Waiting for root device %s...\n"
<6> drivers/acpi/osl.c:1410 acpi_no_auto_serialize_setup "ACPI: auto-serialization disabled\n"
This mitigates the majority of cases where we have a highly-specific
printk which we want to match on, as we can now enumerate and check
whether the format changed or the printk callsite disappeared entirely
in userspace. This allows us to catch changes to printks we monitor
earlier and decide what to do about it before it becomes problematic.
There is no additional runtime cost for printk callers or printk itself,
and the assembly generated is exactly the same.
Signed-off-by: Chris Down <chris@chrisdown.name>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Jessica Yu <jeyu@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Tested-by: Petr Mladek <pmladek@suse.com>
Reported-by: kernel test robot <lkp@intel.com>
Acked-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Acked-by: Jessica Yu <jeyu@kernel.org> # for module.{c,h}
Signed-off-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/e42070983637ac5e384f17fbdbe86d19c7b212a5.1623775748.git.chris@chrisdown.name
2021-06-15 19:52:53 +03:00
|
|
|
bl _printk
|
2013-02-26 18:41:41 +04:00
|
|
|
ldmfd sp!, {r0-r3, ip, lr}
|
|
|
|
|
|
|
|
.pushsection .rodata, "a"
|
|
|
|
1: .ascii KERN_DEBUG "VFP: \str\n"
|
|
|
|
.byte 0
|
|
|
|
.previous
|
2005-04-17 02:20:36 +04:00
|
|
|
#endif
|
|
|
|
.endm
|
|
|
|
|
|
|
|
.macro DBGSTR3, str, arg1, arg2, arg3
|
|
|
|
#ifdef DEBUG
|
|
|
|
stmfd sp!, {r0-r3, ip, lr}
|
|
|
|
mov r3, \arg3
|
|
|
|
mov r2, \arg2
|
|
|
|
mov r1, \arg1
|
2013-02-26 18:41:41 +04:00
|
|
|
ldr r0, =1f
|
printk: Userspace format indexing support
We have a number of systems industry-wide that have a subset of their
functionality that works as follows:
1. Receive a message from local kmsg, serial console, or netconsole;
2. Apply a set of rules to classify the message;
3. Do something based on this classification (like scheduling a
remediation for the machine), rinse, and repeat.
As a couple of examples of places we have this implemented just inside
Facebook, although this isn't a Facebook-specific problem, we have this
inside our netconsole processing (for alarm classification), and as part
of our machine health checking. We use these messages to determine
fairly important metrics around production health, and it's important
that we get them right.
While for some kinds of issues we have counters, tracepoints, or metrics
with a stable interface which can reliably indicate the issue, in order
to react to production issues quickly we need to work with the interface
which most kernel developers naturally use when developing: printk.
Most production issues come from unexpected phenomena, and as such
usually the code in question doesn't have easily usable tracepoints or
other counters available for the specific problem being mitigated. We
have a number of lines of monitoring defence against problems in
production (host metrics, process metrics, service metrics, etc), and
where it's not feasible to reliably monitor at another level, this kind
of pragmatic netconsole monitoring is essential.
As one would expect, monitoring using printk is rather brittle for a
number of reasons -- most notably that the message might disappear
entirely in a new version of the kernel, or that the message may change
in some way that the regex or other classification methods start to
silently fail.
One factor that makes this even harder is that, under normal operation,
many of these messages are never expected to be hit. For example, there
may be a rare hardware bug which one wants to detect if it was to ever
happen again, but its recurrence is not likely or anticipated. This
precludes using something like checking whether the printk in question
was printed somewhere fleetwide recently to determine whether the
message in question is still present or not, since we don't anticipate
that it should be printed anywhere, but still need to monitor for its
future presence in the long-term.
This class of issue has happened on a number of occasions, causing
unhealthy machines with hardware issues to remain in production for
longer than ideal. As a recent example, some monitoring around
blk_update_request fell out of date and caused semi-broken machines to
remain in production for longer than would be desirable.
Searching through the codebase to find the message is also extremely
fragile, because many of the messages are further constructed beyond
their callsite (eg. btrfs_printk and other module-specific wrappers,
each with their own functionality). Even if they aren't, guessing the
format and formulation of the underlying message based on the aesthetics
of the message emitted is not a recipe for success at scale, and our
previous issues with fleetwide machine health checking demonstrate as
much.
This provides a solution to the issue of silently changed or deleted
printks: we record pointers to all printk format strings known at
compile time into a new .printk_index section, both in vmlinux and
modules. At runtime, this can then be iterated by looking at
<debugfs>/printk/index/<module>, which emits the following format, both
readable by humans and able to be parsed by machines:
$ head -1 vmlinux; shuf -n 5 vmlinux
# <level[,flags]> filename:line function "format"
<5> block/blk-settings.c:661 disk_stack_limits "%s: Warning: Device %s is misaligned\n"
<4> kernel/trace/trace.c:8296 trace_create_file "Could not create tracefs '%s' entry\n"
<6> arch/x86/kernel/hpet.c:144 _hpet_print_config "hpet: %s(%d):\n"
<6> init/do_mounts.c:605 prepare_namespace "Waiting for root device %s...\n"
<6> drivers/acpi/osl.c:1410 acpi_no_auto_serialize_setup "ACPI: auto-serialization disabled\n"
This mitigates the majority of cases where we have a highly-specific
printk which we want to match on, as we can now enumerate and check
whether the format changed or the printk callsite disappeared entirely
in userspace. This allows us to catch changes to printks we monitor
earlier and decide what to do about it before it becomes problematic.
There is no additional runtime cost for printk callers or printk itself,
and the assembly generated is exactly the same.
Signed-off-by: Chris Down <chris@chrisdown.name>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Jessica Yu <jeyu@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Tested-by: Petr Mladek <pmladek@suse.com>
Reported-by: kernel test robot <lkp@intel.com>
Acked-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Acked-by: Jessica Yu <jeyu@kernel.org> # for module.{c,h}
Signed-off-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/e42070983637ac5e384f17fbdbe86d19c7b212a5.1623775748.git.chris@chrisdown.name
2021-06-15 19:52:53 +03:00
|
|
|
bl _printk
|
2013-02-26 18:41:41 +04:00
|
|
|
ldmfd sp!, {r0-r3, ip, lr}
|
|
|
|
|
|
|
|
.pushsection .rodata, "a"
|
|
|
|
1: .ascii KERN_DEBUG "VFP: \str\n"
|
|
|
|
.byte 0
|
|
|
|
.previous
|
2005-04-17 02:20:36 +04:00
|
|
|
#endif
|
|
|
|
.endm
|
|
|
|
|
|
|
|
|
|
|
|
@ VFP hardware support entry point.
@
@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
@  r2  = PC value to resume execution after successful emulation
@  r9  = normal "successful" return address
@  r10 = vfp_state union
@  r11 = CPU number
@  lr  = unrecognised instruction return address
@  IRQs enabled.
|
|
ENTRY(vfp_support_entry)
|
2005-04-17 02:20:36 +04:00
|
|
|
DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
|
|
|
|
|
ARM: 8991/1: use VFP assembler mnemonics if available
The integrated assembler of Clang 10 and earlier do not allow to access
the VFP registers through the coprocessor load/store instructions:
arch/arm/vfp/vfpmodule.c:342:2: error: invalid operand for instruction
fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));
^
arch/arm/vfp/vfpinstr.h:79:6: note: expanded from macro 'fmxr'
asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0"
^
<inline asm>:1:6: note: instantiated into assembly here
mcr p10, 7, r0, cr8, cr0, 0 @ fmxr FPEXC, r0
^
This has been addressed with Clang 11 [0]. However, to support earlier
versions of Clang and for better readability use of VFP assembler
mnemonics still is preferred.
Ideally we would replace this code with the unified assembler language
mnemonics vmrs/vmsr on call sites along with .fpu assembler directives.
The GNU assembler supports the .fpu directive at least since 2.17 (when
documentation has been added). Since Linux requires binutils 2.21 it is
safe to use .fpu directive. However, binutils does not allow to use
FPINST or FPINST2 as an argument to vmrs/vmsr instructions up to
binutils 2.24 (see binutils commit 16d02dc907c5):
arch/arm/vfp/vfphw.S: Assembler messages:
arch/arm/vfp/vfphw.S:162: Error: operand 0 must be FPSID or FPSCR pr FPEXC -- `vmsr FPINST,r6'
arch/arm/vfp/vfphw.S:165: Error: operand 0 must be FPSID or FPSCR pr FPEXC -- `vmsr FPINST2,r8'
arch/arm/vfp/vfphw.S:235: Error: operand 1 must be a VFP extension System Register -- `vmrs r3,FPINST'
arch/arm/vfp/vfphw.S:238: Error: operand 1 must be a VFP extension System Register -- `vmrs r12,FPINST2'
Use as-instr in Kconfig to check if FPINST/FPINST2 can be used. If they
can be used make use of .fpu directives and UAL VFP mnemonics for
register access.
This allows to build vfpmodule.c with Clang and its integrated assembler.
[0] https://reviews.llvm.org/D59733
Link: https://github.com/ClangBuiltLinux/linux/issues/905
Signed-off-by: Stefan Agner <stefan@agner.ch>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
2020-07-09 13:21:27 +03:00
|
|
|
.fpu vfpv2
|
2005-04-17 02:20:36 +04:00
|
|
|
VFPFMRX r1, FPEXC @ Is the VFP enabled?
|
|
|
|
DBGSTR1 "fpexc %08x", r1
|
2007-07-18 12:37:10 +04:00
|
|
|
tst r1, #FPEXC_EN
|
2005-04-17 02:20:36 +04:00
|
|
|
bne look_for_VFP_exceptions @ VFP is already enabled
|
|
|
|
|
|
|
|
DBGSTR1 "enable %x", r10
|
2011-07-09 16:44:04 +04:00
|
|
|
ldr r3, vfp_current_hw_state_address
|
2007-07-18 12:37:10 +04:00
|
|
|
orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set
|
2011-07-09 16:44:04 +04:00
|
|
|
ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer
|
2007-07-18 12:37:10 +04:00
|
|
|
bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled
|
2011-07-09 17:24:36 +04:00
|
|
|
cmp r4, r10 @ this thread owns the hw context?
|
ARM: vfp: fix a hole in VFP thread migration
Fix a hole in the VFP thread migration. Lets define two threads.
Thread 1, we'll call 'interesting_thread' which is a thread which is
running on CPU0, using VFP (so vfp_current_hw_state[0] =
&interesting_thread->vfpstate) and gets migrated off to CPU1, where
it continues execution of VFP instructions.
Thread 2, we'll call 'new_cpu0_thread' which is the thread which takes
over on CPU0. This has also been using VFP, and last used VFP on CPU0,
but doesn't use it again.
The following code will be executed twice:
cpu = thread->cpu;
/*
* On SMP, if VFP is enabled, save the old state in
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
vfp_current_hw_state[cpu]->hard.cpu = cpu;
}
/*
* Thread migration, just force the reloading of the
* state on the new CPU in case the VFP registers
* contain stale data.
*/
if (thread->vfpstate.hard.cpu != cpu)
vfp_current_hw_state[cpu] = NULL;
The first execution will be on CPU0 to switch away from 'interesting_thread'.
interesting_thread->cpu will be 0.
So, vfp_current_hw_state[0] points at interesting_thread->vfpstate.
The hardware state will be saved, along with the CPU number (0) that
it was executing on.
'thread' will be 'new_cpu0_thread' with new_cpu0_thread->cpu = 0.
Also, because it was executing on CPU0, new_cpu0_thread->vfpstate.hard.cpu = 0,
and so the thread migration check is not triggered.
This means that vfp_current_hw_state[0] remains pointing at interesting_thread.
The second execution will be on CPU1 to switch _to_ 'interesting_thread'.
So, 'thread' will be 'interesting_thread' and interesting_thread->cpu now
will be 1. The previous thread executing on CPU1 is not relevant to this
so we shall ignore that.
We get to the thread migration check. Here, we discover that
interesting_thread->vfpstate.hard.cpu = 0, yet interesting_thread->cpu is
now 1, indicating thread migration. We set vfp_current_hw_state[1] to
NULL.
So, at this point vfp_current_hw_state[] contains the following:
[0] = &interesting_thread->vfpstate
[1] = NULL
Our interesting thread now executes a VFP instruction, takes a fault
which loads the state into the VFP hardware. Now, through the assembly
we now have:
[0] = &interesting_thread->vfpstate
[1] = &interesting_thread->vfpstate
CPU1 stops due to ptrace (and so saves its VFP state) using the thread
switch code above), and CPU0 calls vfp_sync_hwstate().
if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
BANG, we corrupt interesting_thread's VFP state by overwriting the
more up-to-date state saved by CPU1 with the old VFP state from CPU0.
Fix this by ensuring that we have sane semantics for the various state
describing variables:
1. vfp_current_hw_state[] points to the current owner of the context
information stored in each CPUs hardware, or NULL if that state
information is invalid.
2. thread->vfpstate.hard.cpu always contains the most recent CPU number
which the state was loaded into or NR_CPUS if no CPU owns the state.
So, for a particular CPU to be a valid owner of the VFP state for a
particular thread t, two things must be true:
vfp_current_hw_state[cpu] == &t->vfpstate && t->vfpstate.hard.cpu == cpu.
and that is valid from the moment a CPU loads the saved VFP context
into the hardware. This gives clear and consistent semantics to
interpreting these variables.
This patch also fixes thread copying, ensuring that t->vfpstate.hard.cpu
is invalidated, otherwise CPU0 may believe it was the last owner. The
hole can happen thus:
- thread1 runs on CPU2 using VFP, migrates to CPU3, exits and thread_info
freed.
- New thread allocated from a previously running thread on CPU2, reusing
memory for thread1 and copying vfp.hard.cpu.
At this point, the following are true:
new_thread1->vfpstate.hard.cpu == 2
&new_thread1->vfpstate == vfp_current_hw_state[2]
Lastly, this also addresses thread flushing in a similar way to thread
copying. Hole is:
- thread runs on CPU0, using VFP, migrates to CPU1 but does not use VFP.
- thread calls execve(), so thread flush happens, leaving
vfp_current_hw_state[0] intact. This vfpstate is memset to 0 causing
thread->vfpstate.hard.cpu = 0.
- thread migrates back to CPU0 before using VFP.
At this point, the following are true:
thread->vfpstate.hard.cpu == 0
&thread->vfpstate == vfp_current_hw_state[0]
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-07-09 19:09:43 +04:00
|
|
|
#ifndef CONFIG_SMP
|
|
|
|
@ For UP, checking that this thread owns the hw context is
|
|
|
|
@ sufficient to determine that the hardware state is valid.
|
2011-07-09 17:24:36 +04:00
|
|
|
beq vfp_hw_state_valid
|
2005-04-17 02:20:36 +04:00
|
|
|
|
ARM: vfp: fix a hole in VFP thread migration
Fix a hole in the VFP thread migration. Lets define two threads.
Thread 1, we'll call 'interesting_thread' which is a thread which is
running on CPU0, using VFP (so vfp_current_hw_state[0] =
&interesting_thread->vfpstate) and gets migrated off to CPU1, where
it continues execution of VFP instructions.
Thread 2, we'll call 'new_cpu0_thread' which is the thread which takes
over on CPU0. This has also been using VFP, and last used VFP on CPU0,
but doesn't use it again.
The following code will be executed twice:
cpu = thread->cpu;
/*
* On SMP, if VFP is enabled, save the old state in
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
vfp_current_hw_state[cpu]->hard.cpu = cpu;
}
/*
* Thread migration, just force the reloading of the
* state on the new CPU in case the VFP registers
* contain stale data.
*/
if (thread->vfpstate.hard.cpu != cpu)
vfp_current_hw_state[cpu] = NULL;
The first execution will be on CPU0 to switch away from 'interesting_thread'.
interesting_thread->cpu will be 0.
So, vfp_current_hw_state[0] points at interesting_thread->vfpstate.
The hardware state will be saved, along with the CPU number (0) that
it was executing on.
'thread' will be 'new_cpu0_thread' with new_cpu0_thread->cpu = 0.
Also, because it was executing on CPU0, new_cpu0_thread->vfpstate.hard.cpu = 0,
and so the thread migration check is not triggered.
This means that vfp_current_hw_state[0] remains pointing at interesting_thread.
The second execution will be on CPU1 to switch _to_ 'interesting_thread'.
So, 'thread' will be 'interesting_thread' and interesting_thread->cpu now
will be 1. The previous thread executing on CPU1 is not relevant to this
so we shall ignore that.
We get to the thread migration check. Here, we discover that
interesting_thread->vfpstate.hard.cpu = 0, yet interesting_thread->cpu is
now 1, indicating thread migration. We set vfp_current_hw_state[1] to
NULL.
So, at this point vfp_current_hw_state[] contains the following:
[0] = &interesting_thread->vfpstate
[1] = NULL
Our interesting thread now executes a VFP instruction, takes a fault
which loads the state into the VFP hardware. Now, through the assembly
we now have:
[0] = &interesting_thread->vfpstate
[1] = &interesting_thread->vfpstate
CPU1 stops due to ptrace (and so saves its VFP state) using the thread
switch code above), and CPU0 calls vfp_sync_hwstate().
if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
BANG, we corrupt interesting_thread's VFP state by overwriting the
more up-to-date state saved by CPU1 with the old VFP state from CPU0.
Fix this by ensuring that we have sane semantics for the various state
describing variables:
1. vfp_current_hw_state[] points to the current owner of the context
information stored in each CPUs hardware, or NULL if that state
information is invalid.
2. thread->vfpstate.hard.cpu always contains the most recent CPU number
which the state was loaded into or NR_CPUS if no CPU owns the state.
So, for a particular CPU to be a valid owner of the VFP state for a
particular thread t, two things must be true:
vfp_current_hw_state[cpu] == &t->vfpstate && t->vfpstate.hard.cpu == cpu.
and that is valid from the moment a CPU loads the saved VFP context
into the hardware. This gives clear and consistent semantics to
interpreting these variables.
This patch also fixes thread copying, ensuring that t->vfpstate.hard.cpu
is invalidated, otherwise CPU0 may believe it was the last owner. The
hole can happen thus:
- thread1 runs on CPU2 using VFP, migrates to CPU3, exits and thread_info
freed.
- New thread allocated from a previously running thread on CPU2, reusing
memory for thread1 and copying vfp.hard.cpu.
At this point, the following are true:
new_thread1->vfpstate.hard.cpu == 2
&new_thread1->vfpstate == vfp_current_hw_state[2]
Lastly, this also addresses thread flushing in a similar way to thread
copying. Hole is:
- thread runs on CPU0, using VFP, migrates to CPU1 but does not use VFP.
- thread calls execve(), so thread flush happens, leaving
vfp_current_hw_state[0] intact. This vfpstate is memset to 0 causing
thread->vfpstate.hard.cpu = 0.
- thread migrates back to CPU0 before using VFP.
At this point, the following are true:
thread->vfpstate.hard.cpu == 0
&thread->vfpstate == vfp_current_hw_state[0]
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-07-09 19:09:43 +04:00
|
|
|
@ On UP, we lazily save the VFP context. As a different
|
|
|
|
@ thread wants ownership of the VFP hardware, save the old
|
|
|
|
@ state if there was a previous (valid) owner.
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
|
|
|
|
@ exceptions, so we can get at the
|
|
|
|
@ rest of it
|
|
|
|
|
|
|
|
DBGSTR1 "save old state %p", r4
|
ARM: vfp: fix a hole in VFP thread migration
Fix a hole in the VFP thread migration. Lets define two threads.
Thread 1, we'll call 'interesting_thread' which is a thread which is
running on CPU0, using VFP (so vfp_current_hw_state[0] =
&interesting_thread->vfpstate) and gets migrated off to CPU1, where
it continues execution of VFP instructions.
Thread 2, we'll call 'new_cpu0_thread' which is the thread which takes
over on CPU0. This has also been using VFP, and last used VFP on CPU0,
but doesn't use it again.
The following code will be executed twice:
cpu = thread->cpu;
/*
* On SMP, if VFP is enabled, save the old state in
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
vfp_current_hw_state[cpu]->hard.cpu = cpu;
}
/*
* Thread migration, just force the reloading of the
* state on the new CPU in case the VFP registers
* contain stale data.
*/
if (thread->vfpstate.hard.cpu != cpu)
vfp_current_hw_state[cpu] = NULL;
The first execution will be on CPU0 to switch away from 'interesting_thread'.
interesting_thread->cpu will be 0.
So, vfp_current_hw_state[0] points at interesting_thread->vfpstate.
The hardware state will be saved, along with the CPU number (0) that
it was executing on.
'thread' will be 'new_cpu0_thread' with new_cpu0_thread->cpu = 0.
Also, because it was executing on CPU0, new_cpu0_thread->vfpstate.hard.cpu = 0,
and so the thread migration check is not triggered.
This means that vfp_current_hw_state[0] remains pointing at interesting_thread.
The second execution will be on CPU1 to switch _to_ 'interesting_thread'.
So, 'thread' will be 'interesting_thread' and interesting_thread->cpu now
will be 1. The previous thread executing on CPU1 is not relevant to this
so we shall ignore that.
We get to the thread migration check. Here, we discover that
interesting_thread->vfpstate.hard.cpu = 0, yet interesting_thread->cpu is
now 1, indicating thread migration. We set vfp_current_hw_state[1] to
NULL.
So, at this point vfp_current_hw_state[] contains the following:
[0] = &interesting_thread->vfpstate
[1] = NULL
Our interesting thread now executes a VFP instruction, takes a fault
which loads the state into the VFP hardware. Now, through the assembly
we now have:
[0] = &interesting_thread->vfpstate
[1] = &interesting_thread->vfpstate
CPU1 stops due to ptrace (and so saves its VFP state) using the thread
switch code above), and CPU0 calls vfp_sync_hwstate().
if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
BANG, we corrupt interesting_thread's VFP state by overwriting the
more up-to-date state saved by CPU1 with the old VFP state from CPU0.
Fix this by ensuring that we have sane semantics for the various state
describing variables:
1. vfp_current_hw_state[] points to the current owner of the context
information stored in each CPUs hardware, or NULL if that state
information is invalid.
2. thread->vfpstate.hard.cpu always contains the most recent CPU number
which the state was loaded into or NR_CPUS if no CPU owns the state.
So, for a particular CPU to be a valid owner of the VFP state for a
particular thread t, two things must be true:
vfp_current_hw_state[cpu] == &t->vfpstate && t->vfpstate.hard.cpu == cpu.
and that is valid from the moment a CPU loads the saved VFP context
into the hardware. This gives clear and consistent semantics to
interpreting these variables.
This patch also fixes thread copying, ensuring that t->vfpstate.hard.cpu
is invalidated, otherwise CPU0 may believe it was the last owner. The
hole can happen thus:
- thread1 runs on CPU2 using VFP, migrates to CPU3, exits and thread_info
freed.
- New thread allocated from a previously running thread on CPU2, reusing
memory for thread1 and copying vfp.hard.cpu.
At this point, the following are true:
new_thread1->vfpstate.hard.cpu == 2
&new_thread1->vfpstate == vfp_current_hw_state[2]
Lastly, this also addresses thread flushing in a similar way to thread
copying. Hole is:
- thread runs on CPU0, using VFP, migrates to CPU1 but does not use VFP.
- thread calls execve(), so thread flush happens, leaving
vfp_current_hw_state[0] intact. This vfpstate is memset to 0 causing
thread->vfpstate.hard.cpu = 0.
- thread migrates back to CPU0 before using VFP.
At this point, the following are true:
thread->vfpstate.hard.cpu == 0
&thread->vfpstate == vfp_current_hw_state[0]
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-07-09 19:09:43 +04:00
|
|
|
cmp r4, #0 @ if the vfp_current_hw_state is NULL
|
|
|
|
beq vfp_reload_hw @ then the hw state needs reloading
|
2007-09-25 18:22:24 +04:00
|
|
|
VFPFSTMIA r4, r5 @ save the working registers
|
2005-04-17 02:20:36 +04:00
|
|
|
VFPFMRX r5, FPSCR @ current status
|
2009-05-30 17:00:18 +04:00
|
|
|
#ifndef CONFIG_CPU_FEROCEON
|
2007-11-22 20:32:01 +03:00
|
|
|
tst r1, #FPEXC_EX @ is there additional state to save?
|
2008-11-06 16:23:08 +03:00
|
|
|
beq 1f
|
|
|
|
VFPFMRX r6, FPINST @ FPINST (only if FPEXC.EX is set)
|
|
|
|
tst r1, #FPEXC_FP2V @ is there an FPINST2 to read?
|
|
|
|
beq 1f
|
|
|
|
VFPFMRX r8, FPINST2 @ FPINST2 if needed (and present)
|
|
|
|
1:
|
2009-05-30 17:00:18 +04:00
|
|
|
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2
|
ARM: vfp: fix a hole in VFP thread migration
Fix a hole in the VFP thread migration. Lets define two threads.
Thread 1, we'll call 'interesting_thread' which is a thread which is
running on CPU0, using VFP (so vfp_current_hw_state[0] =
&interesting_thread->vfpstate) and gets migrated off to CPU1, where
it continues execution of VFP instructions.
Thread 2, we'll call 'new_cpu0_thread' which is the thread which takes
over on CPU0. This has also been using VFP, and last used VFP on CPU0,
but doesn't use it again.
The following code will be executed twice:
cpu = thread->cpu;
/*
* On SMP, if VFP is enabled, save the old state in
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
vfp_current_hw_state[cpu]->hard.cpu = cpu;
}
/*
* Thread migration, just force the reloading of the
* state on the new CPU in case the VFP registers
* contain stale data.
*/
if (thread->vfpstate.hard.cpu != cpu)
vfp_current_hw_state[cpu] = NULL;
The first execution will be on CPU0 to switch away from 'interesting_thread'.
interesting_thread->cpu will be 0.
So, vfp_current_hw_state[0] points at interesting_thread->vfpstate.
The hardware state will be saved, along with the CPU number (0) that
it was executing on.
'thread' will be 'new_cpu0_thread' with new_cpu0_thread->cpu = 0.
Also, because it was executing on CPU0, new_cpu0_thread->vfpstate.hard.cpu = 0,
and so the thread migration check is not triggered.
This means that vfp_current_hw_state[0] remains pointing at interesting_thread.
The second execution will be on CPU1 to switch _to_ 'interesting_thread'.
So, 'thread' will be 'interesting_thread' and interesting_thread->cpu now
will be 1. The previous thread executing on CPU1 is not relevant to this
so we shall ignore that.
We get to the thread migration check. Here, we discover that
interesting_thread->vfpstate.hard.cpu = 0, yet interesting_thread->cpu is
now 1, indicating thread migration. We set vfp_current_hw_state[1] to
NULL.
So, at this point vfp_current_hw_state[] contains the following:
[0] = &interesting_thread->vfpstate
[1] = NULL
Our interesting thread now executes a VFP instruction, takes a fault
which loads the state into the VFP hardware. Now, through the assembly
we now have:
[0] = &interesting_thread->vfpstate
[1] = &interesting_thread->vfpstate
CPU1 stops due to ptrace (and so saves its VFP state) using the thread
switch code above), and CPU0 calls vfp_sync_hwstate().
if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
BANG, we corrupt interesting_thread's VFP state by overwriting the
more up-to-date state saved by CPU1 with the old VFP state from CPU0.
Fix this by ensuring that we have sane semantics for the various state
describing variables:
1. vfp_current_hw_state[] points to the current owner of the context
information stored in each CPUs hardware, or NULL if that state
information is invalid.
2. thread->vfpstate.hard.cpu always contains the most recent CPU number
which the state was loaded into or NR_CPUS if no CPU owns the state.
So, for a particular CPU to be a valid owner of the VFP state for a
particular thread t, two things must be true:
vfp_current_hw_state[cpu] == &t->vfpstate && t->vfpstate.hard.cpu == cpu.
and that is valid from the moment a CPU loads the saved VFP context
into the hardware. This gives clear and consistent semantics to
interpreting these variables.
This patch also fixes thread copying, ensuring that t->vfpstate.hard.cpu
is invalidated, otherwise CPU0 may believe it was the last owner. The
hole can happen thus:
- thread1 runs on CPU2 using VFP, migrates to CPU3, exits and thread_info
freed.
- New thread allocated from a previously running thread on CPU2, reusing
memory for thread1 and copying vfp.hard.cpu.
At this point, the following are true:
new_thread1->vfpstate.hard.cpu == 2
&new_thread1->vfpstate == vfp_current_hw_state[2]
Lastly, this also addresses thread flushing in a similar way to thread
copying. Hole is:
- thread runs on CPU0, using VFP, migrates to CPU1 but does not use VFP.
- thread calls execve(), so thread flush happens, leaving
vfp_current_hw_state[0] intact. This vfpstate is memset to 0 causing
thread->vfpstate.hard.cpu = 0.
- thread migrates back to CPU0 before using VFP.
At this point, the following are true:
thread->vfpstate.hard.cpu == 0
&thread->vfpstate == vfp_current_hw_state[0]
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-07-09 19:09:43 +04:00
|
|
|
vfp_reload_hw:
|
|
|
|
|
|
|
|
#else
|
|
|
|
@ For SMP, if this thread does not own the hw context, then we
|
|
|
|
@ need to reload it. No need to save the old state as on SMP,
|
|
|
|
@ we always save the state when we switch away from a thread.
|
|
|
|
bne vfp_reload_hw
|
|
|
|
|
|
|
|
@ This thread has ownership of the current hardware context.
|
|
|
|
@ However, it may have been migrated to another CPU, in which
|
|
|
|
@ case the saved state is newer than the hardware context.
|
|
|
|
@ Check this by looking at the CPU number which the state was
|
|
|
|
@ last loaded onto.
|
|
|
|
ldr ip, [r10, #VFP_CPU]
|
|
|
|
teq ip, r11
|
|
|
|
beq vfp_hw_state_valid
|
|
|
|
|
|
|
|
vfp_reload_hw:
|
|
|
|
@ We're loading this threads state into the VFP hardware. Update
|
|
|
|
@ the CPU number which contains the most up to date VFP context.
|
|
|
|
str r11, [r10, #VFP_CPU]
|
|
|
|
|
|
|
|
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
|
|
|
|
@ exceptions, so we can get at the
|
|
|
|
@ rest of it
|
2007-01-24 20:47:08 +03:00
|
|
|
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
DBGSTR1 "load state %p", r10
|
2011-07-09 16:44:04 +04:00
|
|
|
str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer
|
2005-04-17 02:20:36 +04:00
|
|
|
@ Load the saved state back into the VFP
|
2007-09-25 18:22:24 +04:00
|
|
|
VFPFLDMIA r10, r5 @ reload the working registers while
|
2005-04-17 02:20:36 +04:00
|
|
|
@ FPEXC is in a safe state
|
2006-03-26 00:58:00 +03:00
|
|
|
ldmia r10, {r1, r5, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2
|
2009-05-30 17:00:18 +04:00
|
|
|
#ifndef CONFIG_CPU_FEROCEON
|
2007-11-22 20:32:01 +03:00
|
|
|
tst r1, #FPEXC_EX @ is there additional state to restore?
|
2008-11-06 16:23:08 +03:00
|
|
|
beq 1f
|
|
|
|
VFPFMXR FPINST, r6 @ restore FPINST (only if FPEXC.EX is set)
|
|
|
|
tst r1, #FPEXC_FP2V @ is there an FPINST2 to write?
|
|
|
|
beq 1f
|
|
|
|
VFPFMXR FPINST2, r8 @ FPINST2 if needed (and present)
|
|
|
|
1:
|
2009-05-30 17:00:18 +04:00
|
|
|
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
VFPFMXR FPSCR, r5 @ restore status
|
|
|
|
|
2011-07-09 17:24:36 +04:00
|
|
|
@ The context stored in the VFP hardware is up to date with this thread
|
|
|
|
vfp_hw_state_valid:
|
2007-07-18 12:37:10 +04:00
|
|
|
tst r1, #FPEXC_EX
|
2005-04-17 02:20:36 +04:00
|
|
|
bne process_exception @ might as well handle the pending
|
|
|
|
@ exception before retrying branch
|
|
|
|
@ out before setting an FPEXC that
|
|
|
|
@ stops us reading stuff
|
2012-07-30 22:42:10 +04:00
|
|
|
VFPFMXR FPEXC, r1 @ Restore FPEXC last
|
|
|
|
sub r2, r2, #4 @ Retry current instruction - if Thumb
|
|
|
|
str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
|
|
|
|
@ else it's one 32-bit instruction, so
|
|
|
|
@ always subtract 4 from the following
|
|
|
|
@ instruction address.
|
2014-04-02 13:57:49 +04:00
|
|
|
dec_preempt_count_ti r10, r4
|
2014-06-30 19:29:12 +04:00
|
|
|
ret r9 @ we think we have handled things
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
|
|
|
|
look_for_VFP_exceptions:
|
2007-11-22 20:32:01 +03:00
|
|
|
@ Check for synchronous or asynchronous exception
|
|
|
|
tst r1, #FPEXC_EX | FPEXC_DEX
|
2005-04-17 02:20:36 +04:00
|
|
|
bne process_exception
|
2007-11-22 20:32:01 +03:00
|
|
|
@ On some implementations of the VFP subarch 1, setting FPSCR.IXE
|
|
|
|
@ causes all the CDP instructions to be bounced synchronously without
|
|
|
|
@ setting the FPEXC.EX bit
|
2005-04-17 02:20:36 +04:00
|
|
|
VFPFMRX r5, FPSCR
|
2007-11-22 20:32:01 +03:00
|
|
|
tst r5, #FPSCR_IXE
|
2005-04-17 02:20:36 +04:00
|
|
|
bne process_exception
|
|
|
|
|
2014-11-10 23:56:42 +03:00
|
|
|
tst r5, #FPSCR_LENGTH_MASK
|
|
|
|
beq skip
|
|
|
|
orr r1, r1, #FPEXC_DEX
|
|
|
|
b process_exception
|
|
|
|
skip:
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
@ Fall into hand on to next handler - appropriate coproc instr
|
|
|
|
@ not recognised by VFP
|
|
|
|
|
|
|
|
DBGSTR "not VFP"
|
2014-04-02 13:57:49 +04:00
|
|
|
dec_preempt_count_ti r10, r4
|
2014-06-30 19:29:12 +04:00
|
|
|
ret lr
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
process_exception:
|
|
|
|
DBGSTR "bounce"
|
|
|
|
mov r2, sp @ nothing stacked - regdump is at TOS
|
|
|
|
mov lr, r9 @ setup for a return to the user code.
|
|
|
|
|
|
|
|
@ Now call the C code to package up the bounce to the support code
|
|
|
|
@ r0 holds the trigger instruction
|
|
|
|
@ r1 holds the FPEXC value
|
|
|
|
@ r2 pointer to register dump
|
2007-11-22 20:32:01 +03:00
|
|
|
b VFP_bounce @ we have handled this - the support
|
2005-04-17 02:20:36 +04:00
|
|
|
@ code will raise an exception if
|
|
|
|
@ required. If not, the user code will
|
|
|
|
@ retry the faulted instruction
|
2008-08-28 14:22:32 +04:00
|
|
|
ENDPROC(vfp_support_entry)
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-08-28 14:22:32 +04:00
|
|
|
ENTRY(vfp_save_state)
|
2007-01-24 20:47:08 +03:00
|
|
|
@ Save the current VFP state
|
|
|
|
@ r0 - save location
|
|
|
|
@ r1 - FPEXC
|
|
|
|
DBGSTR1 "save VFP state %p", r0
|
2007-09-25 18:22:24 +04:00
|
|
|
VFPFSTMIA r0, r2 @ save the working registers
|
2007-01-24 20:47:08 +03:00
|
|
|
VFPFMRX r2, FPSCR @ current status
|
2007-11-22 20:32:01 +03:00
|
|
|
tst r1, #FPEXC_EX @ is there additional state to save?
|
2008-11-06 16:23:08 +03:00
|
|
|
beq 1f
|
|
|
|
VFPFMRX r3, FPINST @ FPINST (only if FPEXC.EX is set)
|
|
|
|
tst r1, #FPEXC_FP2V @ is there an FPINST2 to read?
|
|
|
|
beq 1f
|
|
|
|
VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present)
|
|
|
|
1:
|
2007-01-24 20:47:08 +03:00
|
|
|
stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2
|
2014-06-30 19:29:12 +04:00
|
|
|
ret lr
|
2008-08-28 14:22:32 +04:00
|
|
|
ENDPROC(vfp_save_state)
|
2007-01-24 20:47:08 +03:00
|
|
|
|
2010-11-29 21:43:22 +03:00
|
|
|
.align
|
2011-07-09 16:44:04 +04:00
|
|
|
vfp_current_hw_state_address:
|
|
|
|
.word vfp_current_hw_state
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-07-24 15:32:57 +04:00
|
|
|
.macro tbl_branch, base, tmp, shift
|
|
|
|
#ifdef CONFIG_THUMB2_KERNEL
|
|
|
|
adr \tmp, 1f
|
|
|
|
add \tmp, \tmp, \base, lsl \shift
|
2014-06-30 19:29:12 +04:00
|
|
|
ret \tmp
|
2009-07-24 15:32:57 +04:00
|
|
|
#else
|
|
|
|
add pc, pc, \base, lsl \shift
|
2005-04-17 02:20:36 +04:00
|
|
|
mov r0, r0
|
2009-07-24 15:32:57 +04:00
|
|
|
#endif
|
|
|
|
1:
|
|
|
|
.endm
|
|
|
|
|
|
|
|
ENTRY(vfp_get_float)
|
|
|
|
tbl_branch r0, r3, #3
|
2020-07-09 13:17:36 +03:00
|
|
|
.fpu vfpv2
|
2005-04-17 02:20:36 +04:00
|
|
|
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
|
2020-07-09 13:17:36 +03:00
|
|
|
1: vmov r0, s\dr
|
2014-06-30 19:29:12 +04:00
|
|
|
ret lr
|
2009-07-24 15:32:57 +04:00
|
|
|
.org 1b + 8
|
2020-07-09 13:17:36 +03:00
|
|
|
.endr
|
|
|
|
.irp dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
|
|
|
|
1: vmov r0, s\dr
|
2014-06-30 19:29:12 +04:00
|
|
|
ret lr
|
2009-07-24 15:32:57 +04:00
|
|
|
.org 1b + 8
|
2005-04-17 02:20:36 +04:00
|
|
|
.endr
|
2008-08-28 14:22:32 +04:00
|
|
|
ENDPROC(vfp_get_float)
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-08-28 14:22:32 +04:00
|
|
|
ENTRY(vfp_put_float)
|
2009-07-24 15:32:57 +04:00
|
|
|
tbl_branch r1, r3, #3
|
2020-07-09 13:17:36 +03:00
|
|
|
.fpu vfpv2
|
2005-04-17 02:20:36 +04:00
|
|
|
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
|
2020-07-09 13:17:36 +03:00
|
|
|
1: vmov s\dr, r0
|
2014-06-30 19:29:12 +04:00
|
|
|
ret lr
|
2009-07-24 15:32:57 +04:00
|
|
|
.org 1b + 8
|
2020-07-09 13:17:36 +03:00
|
|
|
.endr
|
|
|
|
.irp dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
|
|
|
|
1: vmov s\dr, r0
|
2014-06-30 19:29:12 +04:00
|
|
|
ret lr
|
2009-07-24 15:32:57 +04:00
|
|
|
.org 1b + 8
|
2005-04-17 02:20:36 +04:00
|
|
|
.endr
|
2008-08-28 14:22:32 +04:00
|
|
|
ENDPROC(vfp_put_float)
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-08-28 14:22:32 +04:00
|
|
|
ENTRY(vfp_get_double)
|
2009-07-24 15:32:57 +04:00
|
|
|
tbl_branch r0, r3, #3
|
2020-07-09 13:17:36 +03:00
|
|
|
.fpu vfpv2
|
2005-04-17 02:20:36 +04:00
|
|
|
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
|
2020-07-09 13:17:36 +03:00
|
|
|
1: vmov r0, r1, d\dr
|
2014-06-30 19:29:12 +04:00
|
|
|
ret lr
|
2009-07-24 15:32:57 +04:00
|
|
|
.org 1b + 8
|
2005-04-17 02:20:36 +04:00
|
|
|
.endr
|
2007-09-25 18:22:24 +04:00
|
|
|
#ifdef CONFIG_VFPv3
|
|
|
|
@ d16 - d31 registers
|
2020-07-09 13:17:36 +03:00
|
|
|
.fpu vfpv3
|
|
|
|
.irp dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
|
|
|
|
1: vmov r0, r1, d\dr
|
2014-06-30 19:29:12 +04:00
|
|
|
ret lr
|
2009-07-24 15:32:57 +04:00
|
|
|
.org 1b + 8
|
2007-09-25 18:22:24 +04:00
|
|
|
.endr
|
|
|
|
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-09-25 18:22:24 +04:00
|
|
|
@ virtual register 16 (or 32 if VFPv3) for compare with zero
|
2005-04-17 02:20:36 +04:00
|
|
|
mov r0, #0
|
|
|
|
mov r1, #0
|
2014-06-30 19:29:12 +04:00
|
|
|
ret lr
|
2008-08-28 14:22:32 +04:00
|
|
|
ENDPROC(vfp_get_double)
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-08-28 14:22:32 +04:00
|
|
|
ENTRY(vfp_put_double)
|
2009-07-24 15:32:57 +04:00
|
|
|
tbl_branch r2, r3, #3
|
2020-07-09 13:17:36 +03:00
|
|
|
.fpu vfpv2
|
2005-04-17 02:20:36 +04:00
|
|
|
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
|
2020-07-09 13:17:36 +03:00
|
|
|
1: vmov d\dr, r0, r1
|
2014-06-30 19:29:12 +04:00
|
|
|
ret lr
|
2009-07-24 15:32:57 +04:00
|
|
|
.org 1b + 8
|
2005-04-17 02:20:36 +04:00
|
|
|
.endr
|
2007-09-25 18:22:24 +04:00
|
|
|
#ifdef CONFIG_VFPv3
|
2020-07-09 13:17:36 +03:00
|
|
|
.fpu vfpv3
|
2007-09-25 18:22:24 +04:00
|
|
|
@ d16 - d31 registers
|
2020-07-09 13:17:36 +03:00
|
|
|
.irp dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
|
|
|
|
1: vmov d\dr, r0, r1
|
2014-06-30 19:29:12 +04:00
|
|
|
ret lr
|
2009-07-24 15:32:57 +04:00
|
|
|
.org 1b + 8
|
2007-09-25 18:22:24 +04:00
|
|
|
.endr
|
|
|
|
#endif
|
2008-08-28 14:22:32 +04:00
|
|
|
ENDPROC(vfp_put_double)
|