arm64: dts: agilex: fix gmac compatible
- The compatible for Agilex GMAC should be "altr,socfpga-stmmac-a10-s10"

Merge tag 'socfpga_dts_fix_for_v5.6_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux into arm/fixes

arm64: dts: agilex: fix gmac compatible

- The compatible for Agilex GMAC should be "altr,socfpga-stmmac-a10-s10"

* tag 'socfpga_dts_fix_for_v5.6_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux: (578 commits)
  arm64: dts: socfpga: agilex: Fix gmac compatible
  Linux 5.6-rc4
  KVM: VMX: check descriptor table exits on instruction emulation
  ext4: potential crash on allocation error in ext4_alloc_flex_bg_array()
  macintosh: therm_windtunnel: fix regression when instantiating devices
  jbd2: fix data races at struct journal_head
  kvm: x86: Limit the number of "kvm: disabled by bios" messages
  KVM: x86: avoid useless copy of cpufreq policy
  KVM: allow disabling -Werror
  KVM: x86: allow compiling as non-module with W=1
  KVM: Pre-allocate 1 cpumask variable per cpu for both pv tlb and pv ipis
  KVM: Introduce pv check helpers
  KVM: let declaration of kvm_get_running_vcpus match implementation
  KVM: SVM: allocate AVIC data structures based on kvm_amd module parameter
  MAINTAINERS: Correct Cadence PCI driver path
  io_uring: fix 32-bit compatability with sendmsg/recvmsg
  net: dsa: mv88e6xxx: Fix masking of egress port
  mlxsw: pci: Wait longer before accessing the device after reset
  sfc: fix timestamp reconstruction at 16-bit rollover points
  vsock: fix potential deadlock in transport->release()
  ...

Link: https://lore.kernel.org/r/20200303153509.28248-1-dinguyen@kernel.org
Signed-off-by: Olof Johansson <olof@lixom.net>
This commit is contained in: 820d15632e
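For quick reference, the corrected GMAC node shape (reproduced from the socfpga Agilex dtsi hunks further down; only the compatible string changes, the reg/interrupt values stay as they were):

    gmac0: ethernet@ff800000 {
        /* was "altr,socfpga-stmmac"; the A10/S10-style compatible is used,
         * presumably so the dwmac-socfpga glue treats Agilex like
         * Arria10/Stratix10 */
        compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
        reg = <0xff800000 0x2000>;
        interrupts = <0 90 4>;
        interrupt-names = "macirq";
    };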
@@ -100,6 +100,10 @@ modules.order
/include/ksym/
/arch/*/include/generated/

# Generated lkdtm tests
/tools/testing/selftests/lkdtm/*.sh
!/tools/testing/selftests/lkdtm/run.sh

# stgit generated dirs
patches-*
COPYING (2 changes)
@@ -16,3 +16,5 @@ In addition, other licenses may also apply. Please see:
Documentation/process/license-rules.rst

for more details.

All contributions to the Linux Kernel are subject to this COPYING file.
CREDITS (5 changes)
@@ -567,6 +567,11 @@ D: Original author of Amiga FFS filesystem
S: Orlando, Florida
S: USA

N: Paul Burton
E: paulburton@kernel.org
W: https://pburton.com
D: MIPS maintainer 2018-2020

N: Lennert Buytenhek
E: kernel@wantstofly.org
D: Original (2.4) rewrite of the ethernet bridging code
@@ -62,6 +62,30 @@ Or more shorter, written as following::
In both styles, same key words are automatically merged when parsing it
at boot time. So you can append similar trees or key-values.

Same-key Values
---------------

It is prohibited that two or more values or arrays share a same-key.
For example,::

  foo = bar, baz
  foo = qux  # !ERROR! we can not re-define same key

If you want to append the value to existing key as an array member,
you can use ``+=`` operator. For example::

  foo = bar, baz
  foo += qux

In this case, the key ``foo`` has ``bar``, ``baz`` and ``qux``.

However, a sub-key and a value can not co-exist under a parent key.
For example, following config is NOT allowed.::

  foo = value1
  foo.bar = value2 # !ERROR! subkey "bar" and value "value1" can NOT co-exist

Comments
--------

@@ -102,9 +126,13 @@ Boot Kernel With a Boot Config
==============================

Since the boot configuration file is loaded with initrd, it will be added
-to the end of the initrd (initramfs) image file. The Linux kernel decodes
-the last part of the initrd image in memory to get the boot configuration
-data.
+to the end of the initrd (initramfs) image file with size, checksum and
+12-byte magic word as below.
+
+[initrd][bootconfig][size(u32)][checksum(u32)][#BOOTCONFIG\n]
+
+The Linux kernel decodes the last part of the initrd image in memory to
+get the boot configuration data.
Because of this "piggyback" method, there is no need to change or
update the boot loader and the kernel image itself.
@ -136,6 +136,10 @@
|
|||
dynamic table installation which will install SSDT
|
||||
tables to /sys/firmware/acpi/tables/dynamic.
|
||||
|
||||
acpi_no_watchdog [HW,ACPI,WDT]
|
||||
Ignore the ACPI-based watchdog interface (WDAT) and let
|
||||
a native driver control the watchdog device instead.
|
||||
|
||||
acpi_rsdp= [ACPI,EFI,KEXEC]
|
||||
Pass the RSDP address to the kernel, mostly used
|
||||
on machines running EFI runtime service to boot the
|
||||
|
|
|
@ -129,7 +129,7 @@ this logic.
|
|||
|
||||
As a single binary will need to support both 48-bit and 52-bit VA
|
||||
spaces, the VMEMMAP must be sized large enough for 52-bit VAs and
|
||||
also must be sized large enought to accommodate a fixed PAGE_OFFSET.
|
||||
also must be sized large enough to accommodate a fixed PAGE_OFFSET.
|
||||
|
||||
Most code in the kernel should not need to consider the VA_BITS, for
|
||||
code that does need to know the VA size the variables are
|
||||
|
|
|
@ -44,8 +44,15 @@ The AArch64 Tagged Address ABI has two stages of relaxation depending
|
|||
how the user addresses are used by the kernel:
|
||||
|
||||
1. User addresses not accessed by the kernel but used for address space
|
||||
management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use
|
||||
of valid tagged pointers in this context is always allowed.
|
||||
management (e.g. ``mprotect()``, ``madvise()``). The use of valid
|
||||
tagged pointers in this context is allowed with the exception of
|
||||
``brk()``, ``mmap()`` and the ``new_address`` argument to
|
||||
``mremap()`` as these have the potential to alias with existing
|
||||
user addresses.
|
||||
|
||||
NOTE: This behaviour changed in v5.6 and so some earlier kernels may
|
||||
incorrectly accept valid tagged pointers for the ``brk()``,
|
||||
``mmap()`` and ``mremap()`` system calls.
|
||||
|
||||
2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
|
||||
relaxation is disabled by default and the application thread needs to
|
||||
|
|
|
@ -551,6 +551,7 @@ options to your ``.config``:
|
|||
Once the kernel is built and installed, a simple
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
modprobe example-test
|
||||
|
||||
...will run the tests.
|
||||
|
|
|
@ -43,9 +43,13 @@ properties:
|
|||
- enum:
|
||||
- allwinner,sun8i-h3-tcon-tv
|
||||
- allwinner,sun50i-a64-tcon-tv
|
||||
- allwinner,sun50i-h6-tcon-tv
|
||||
- const: allwinner,sun8i-a83t-tcon-tv
|
||||
|
||||
- items:
|
||||
- enum:
|
||||
- allwinner,sun50i-h6-tcon-tv
|
||||
- const: allwinner,sun8i-r40-tcon-tv
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
|
|
|
@ -33,24 +33,40 @@ properties:
|
|||
maxItems: 1
|
||||
|
||||
clocks:
|
||||
minItems: 2
|
||||
maxItems: 3
|
||||
items:
|
||||
- description: The CSI interface clock
|
||||
- description: The CSI ISP clock
|
||||
- description: The CSI DRAM clock
|
||||
oneOf:
|
||||
- items:
|
||||
- description: The CSI interface clock
|
||||
- description: The CSI DRAM clock
|
||||
|
||||
- items:
|
||||
- description: The CSI interface clock
|
||||
- description: The CSI ISP clock
|
||||
- description: The CSI DRAM clock
|
||||
|
||||
clock-names:
|
||||
minItems: 2
|
||||
maxItems: 3
|
||||
items:
|
||||
- const: bus
|
||||
- const: isp
|
||||
- const: ram
|
||||
oneOf:
|
||||
- items:
|
||||
- const: bus
|
||||
- const: ram
|
||||
|
||||
- items:
|
||||
- const: bus
|
||||
- const: isp
|
||||
- const: ram
|
||||
|
||||
resets:
|
||||
maxItems: 1
|
||||
|
||||
# FIXME: This should be made required eventually once every SoC will
|
||||
# have the MBUS declared.
|
||||
interconnects:
|
||||
maxItems: 1
|
||||
|
||||
# FIXME: This should be made required eventually once every SoC will
|
||||
# have the MBUS declared.
|
||||
interconnect-names:
|
||||
const: dma-mem
|
||||
|
||||
# See ./video-interfaces.txt for details
|
||||
port:
|
||||
type: object
|
||||
|
|
|
@ -347,6 +347,7 @@ examples:
|
|||
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
|
||||
|
||||
#iommu-cells = <1>;
|
||||
#reset-cells = <1>;
|
||||
};
|
||||
|
||||
external-memory-controller@7001b000 {
|
||||
|
@ -363,20 +364,23 @@ examples:
|
|||
timing-0 {
|
||||
clock-frequency = <12750000>;
|
||||
|
||||
nvidia,emc-zcal-cnt-long = <0x00000042>;
|
||||
nvidia,emc-auto-cal-interval = <0x001fffff>;
|
||||
nvidia,emc-ctt-term-ctrl = <0x00000802>;
|
||||
nvidia,emc-cfg = <0x73240000>;
|
||||
nvidia,emc-cfg-2 = <0x000008c5>;
|
||||
nvidia,emc-sel-dpd-ctrl = <0x00040128>;
|
||||
nvidia,emc-bgbias-ctl0 = <0x00000008>;
|
||||
nvidia,emc-auto-cal-config = <0xa1430000>;
|
||||
nvidia,emc-auto-cal-config2 = <0x00000000>;
|
||||
nvidia,emc-auto-cal-config3 = <0x00000000>;
|
||||
nvidia,emc-mode-reset = <0x80001221>;
|
||||
nvidia,emc-auto-cal-interval = <0x001fffff>;
|
||||
nvidia,emc-bgbias-ctl0 = <0x00000008>;
|
||||
nvidia,emc-cfg = <0x73240000>;
|
||||
nvidia,emc-cfg-2 = <0x000008c5>;
|
||||
nvidia,emc-ctt-term-ctrl = <0x00000802>;
|
||||
nvidia,emc-mode-1 = <0x80100003>;
|
||||
nvidia,emc-mode-2 = <0x80200008>;
|
||||
nvidia,emc-mode-4 = <0x00000000>;
|
||||
nvidia,emc-mode-reset = <0x80001221>;
|
||||
nvidia,emc-mrs-wait-cnt = <0x000e000e>;
|
||||
nvidia,emc-sel-dpd-ctrl = <0x00040128>;
|
||||
nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
|
||||
nvidia,emc-zcal-cnt-long = <0x00000042>;
|
||||
nvidia,emc-zcal-interval = <0x00000000>;
|
||||
|
||||
nvidia,emc-configuration = <
|
||||
0x00000000 /* EMC_RC */
|
||||
|
|
|
@ -124,7 +124,7 @@ not every application needs SDIO irq, e.g. MMC cards.
|
|||
pinctrl-1 = <&mmc1_idle>;
|
||||
pinctrl-2 = <&mmc1_sleep>;
|
||||
...
|
||||
interrupts-extended = <&intc 64 &gpio2 28 GPIO_ACTIVE_LOW>;
|
||||
interrupts-extended = <&intc 64 &gpio2 28 IRQ_TYPE_LEVEL_LOW>;
|
||||
};
|
||||
|
||||
mmc1_idle : pinmux_cirq_pin {
|
||||
|
|
|
@ -56,7 +56,6 @@ patternProperties:
|
|||
examples:
|
||||
- |
|
||||
davinci_mdio: mdio@5c030000 {
|
||||
compatible = "ti,davinci_mdio";
|
||||
reg = <0x5c030000 0x1000>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
|
|
@ -134,7 +134,7 @@ Sequential zone files can only be written sequentially, starting from the file
|
|||
end, that is, write operations can only be append writes. Zonefs makes no
|
||||
attempt at accepting random writes and will fail any write request that has a
|
||||
start offset not corresponding to the end of the file, or to the end of the last
|
||||
write issued and still in-flight (for asynchrnous I/O operations).
|
||||
write issued and still in-flight (for asynchronous I/O operations).
|
||||
|
||||
Since dirty page writeback by the page cache does not guarantee a sequential
|
||||
write pattern, zonefs prevents buffered writes and writeable shared mappings
|
||||
|
@ -142,7 +142,7 @@ on sequential files. Only direct I/O writes are accepted for these files.
|
|||
zonefs relies on the sequential delivery of write I/O requests to the device
|
||||
implemented by the block layer elevator. An elevator implementing the sequential
|
||||
write feature for zoned block device (ELEVATOR_F_ZBD_SEQ_WRITE elevator feature)
|
||||
must be used. This type of elevator (e.g. mq-deadline) is the set by default
|
||||
must be used. This type of elevator (e.g. mq-deadline) is set by default
|
||||
for zoned block devices on device initialization.
|
||||
|
||||
There are no restrictions on the type of I/O used for read operations in
|
||||
|
@ -196,7 +196,7 @@ additional conditions that result in I/O errors.
|
|||
may still happen in the case of a partial failure of a very large direct I/O
|
||||
operation split into multiple BIOs/requests or asynchronous I/O operations.
|
||||
If one of the write request within the set of sequential write requests
|
||||
issued to the device fails, all write requests after queued after it will
|
||||
issued to the device fails, all write requests queued after it will
|
||||
become unaligned and fail.
|
||||
|
||||
* Delayed write errors: similarly to regular block devices, if the device side
|
||||
|
@ -207,7 +207,7 @@ additional conditions that result in I/O errors.
|
|||
causing all data to be dropped after the sector that caused the error.
|
||||
|
||||
All I/O errors detected by zonefs are notified to the user with an error code
|
||||
return for the system call that trigered or detected the error. The recovery
|
||||
return for the system call that triggered or detected the error. The recovery
|
||||
actions taken by zonefs in response to I/O errors depend on the I/O type (read
|
||||
vs write) and on the reason for the error (bad sector, unaligned writes or zone
|
||||
condition change).
|
||||
|
@ -222,7 +222,7 @@ condition change).
|
|||
* A zone condition change to read-only or offline also always triggers zonefs
|
||||
I/O error recovery.
|
||||
|
||||
Zonefs minimal I/O error recovery may change a file size and a file access
|
||||
Zonefs minimal I/O error recovery may change a file size and file access
|
||||
permissions.
|
||||
|
||||
* File size changes:
|
||||
|
@ -237,7 +237,7 @@ permissions.
|
|||
A file size may also be reduced to reflect a delayed write error detected on
|
||||
fsync(): in this case, the amount of data effectively written in the zone may
|
||||
be less than originally indicated by the file inode size. After such I/O
|
||||
error, zonefs always fixes a file inode size to reflect the amount of data
|
||||
error, zonefs always fixes the file inode size to reflect the amount of data
|
||||
persistently stored in the file zone.
|
||||
|
||||
* Access permission changes:
|
||||
|
@ -281,11 +281,11 @@ Further notes:
|
|||
permissions to read-only applies to all files. The file system is remounted
|
||||
read-only.
|
||||
* Access permission and file size changes due to the device transitioning zones
|
||||
to the offline condition are permanent. Remounting or reformating the device
|
||||
to the offline condition are permanent. Remounting or reformatting the device
|
||||
with mkfs.zonefs (mkzonefs) will not change back offline zone files to a good
|
||||
state.
|
||||
* File access permission changes to read-only due to the device transitioning
|
||||
zones to the read-only condition are permanent. Remounting or reformating
|
||||
zones to the read-only condition are permanent. Remounting or reformatting
|
||||
the device will not re-enable file write access.
|
||||
* File access permission changes implied by the remount-ro, zone-ro and
|
||||
zone-offline mount options are temporary for zones in a good condition.
|
||||
|
@ -301,13 +301,13 @@ Mount options
|
|||
|
||||
zonefs define the "errors=<behavior>" mount option to allow the user to specify
|
||||
zonefs behavior in response to I/O errors, inode size inconsistencies or zone
|
||||
condition chages. The defined behaviors are as follow:
|
||||
condition changes. The defined behaviors are as follow:
|
||||
* remount-ro (default)
|
||||
* zone-ro
|
||||
* zone-offline
|
||||
* repair
|
||||
|
||||
The I/O error actions defined for each behavior is detailed in the previous
|
||||
The I/O error actions defined for each behavior are detailed in the previous
|
||||
section.
|
||||
|
||||
Zonefs User Space Tools
|
||||
|
|
|
@ -24,6 +24,7 @@ This driver implements support for Infineon Multi-phase XDPE122 family
|
|||
dual loop voltage regulators.
|
||||
The family includes XDPE12284 and XDPE12254 devices.
|
||||
The devices from this family complaint with:
|
||||
|
||||
- Intel VR13 and VR13HC rev 1.3, IMVP8 rev 1.2 and IMPVP9 rev 1.3 DC-DC
|
||||
converter specification.
|
||||
- Intel SVID rev 1.9. protocol.
|
||||
|
|
|
@ -765,7 +765,7 @@ is not sufficient this sometimes needs to be explicit.
|
|||
Example::
|
||||
|
||||
#arch/x86/boot/Makefile
|
||||
subdir- := compressed/
|
||||
subdir- := compressed
|
||||
|
||||
The above assignment instructs kbuild to descend down in the
|
||||
directory compressed/ when "make clean" is executed.
|
||||
|
@ -1379,9 +1379,6 @@ See subsequent chapter for the syntax of the Kbuild file.
|
|||
in arch/$(ARCH)/include/(uapi/)/asm, Kbuild will automatically generate
|
||||
a wrapper of the asm-generic one.
|
||||
|
||||
The convention is to list one subdir per line and
|
||||
preferably in alphabetic order.
|
||||
|
||||
8 Kbuild Variables
|
||||
==================
|
||||
|
||||
|
|
|
@ -487,8 +487,9 @@ phy_register_fixup_for_id()::
|
|||
The stubs set one of the two matching criteria, and set the other one to
|
||||
match anything.
|
||||
|
||||
When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module,
|
||||
unregister fixup and free allocate memory are required.
|
||||
When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module load
|
||||
time, the module needs to unregister the fixup and free allocated memory when
|
||||
it's unloaded.
|
||||
|
||||
Call one of following function before unloading module::
|
||||
|
||||
|
|
|
@ -13,7 +13,6 @@ Power Management
|
|||
drivers-testing
|
||||
energy-model
|
||||
freezing-of-tasks
|
||||
interface
|
||||
opp
|
||||
pci
|
||||
pm_qos_interface
|
||||
|
|
|
@ -244,23 +244,23 @@ disclosure of a particular issue, unless requested by a response team or by
|
|||
an involved disclosed party. The current ambassadors list:
|
||||
|
||||
============= ========================================================
|
||||
ARM
|
||||
ARM Grant Likely <grant.likely@arm.com>
|
||||
AMD Tom Lendacky <tom.lendacky@amd.com>
|
||||
IBM
|
||||
Intel Tony Luck <tony.luck@intel.com>
|
||||
Qualcomm Trilok Soni <tsoni@codeaurora.org>
|
||||
|
||||
Microsoft Sasha Levin <sashal@kernel.org>
|
||||
Microsoft James Morris <jamorris@linux.microsoft.com>
|
||||
VMware
|
||||
Xen Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
|
||||
Canonical Tyler Hicks <tyhicks@canonical.com>
|
||||
Canonical John Johansen <john.johansen@canonical.com>
|
||||
Debian Ben Hutchings <ben@decadent.org.uk>
|
||||
Oracle Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
||||
Red Hat Josh Poimboeuf <jpoimboe@redhat.com>
|
||||
SUSE Jiri Kosina <jkosina@suse.cz>
|
||||
|
||||
Amazon Peter Bowen <pzb@amzn.com>
|
||||
Amazon
|
||||
Google Kees Cook <keescook@chromium.org>
|
||||
============= ========================================================
|
||||
|
||||
|
|
|
@ -30,4 +30,4 @@ if [ -n "$parallel" ] ; then
|
|||
parallel="-j$parallel"
|
||||
fi
|
||||
|
||||
exec "$sphinx" "$parallel" "$@"
|
||||
exec "$sphinx" $parallel "$@"
|
||||
|
|
|
@ -183,7 +183,7 @@ CVE分配
|
|||
VMware
|
||||
Xen Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
|
||||
Canonical Tyler Hicks <tyhicks@canonical.com>
|
||||
Canonical John Johansen <john.johansen@canonical.com>
|
||||
Debian Ben Hutchings <ben@decadent.org.uk>
|
||||
Oracle Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
||||
Red Hat Josh Poimboeuf <jpoimboe@redhat.com>
|
||||
|
|
|
@ -4611,35 +4611,38 @@ unpins the VPA pages and releases all the device pages that are used to
|
|||
track the secure pages by hypervisor.
|
||||
|
||||
4.122 KVM_S390_NORMAL_RESET
|
||||
---------------------------
|
||||
|
||||
Capability: KVM_CAP_S390_VCPU_RESETS
|
||||
Architectures: s390
|
||||
Type: vcpu ioctl
|
||||
Parameters: none
|
||||
Returns: 0
|
||||
:Capability: KVM_CAP_S390_VCPU_RESETS
|
||||
:Architectures: s390
|
||||
:Type: vcpu ioctl
|
||||
:Parameters: none
|
||||
:Returns: 0
|
||||
|
||||
This ioctl resets VCPU registers and control structures according to
|
||||
the cpu reset definition in the POP (Principles Of Operation).
|
||||
|
||||
4.123 KVM_S390_INITIAL_RESET
|
||||
----------------------------
|
||||
|
||||
Capability: none
|
||||
Architectures: s390
|
||||
Type: vcpu ioctl
|
||||
Parameters: none
|
||||
Returns: 0
|
||||
:Capability: none
|
||||
:Architectures: s390
|
||||
:Type: vcpu ioctl
|
||||
:Parameters: none
|
||||
:Returns: 0
|
||||
|
||||
This ioctl resets VCPU registers and control structures according to
|
||||
the initial cpu reset definition in the POP. However, the cpu is not
|
||||
put into ESA mode. This reset is a superset of the normal reset.
|
||||
|
||||
4.124 KVM_S390_CLEAR_RESET
|
||||
--------------------------
|
||||
|
||||
Capability: KVM_CAP_S390_VCPU_RESETS
|
||||
Architectures: s390
|
||||
Type: vcpu ioctl
|
||||
Parameters: none
|
||||
Returns: 0
|
||||
:Capability: KVM_CAP_S390_VCPU_RESETS
|
||||
:Architectures: s390
|
||||
:Type: vcpu ioctl
|
||||
:Parameters: none
|
||||
:Returns: 0
|
||||
|
||||
This ioctl resets VCPU registers and control structures according to
|
||||
the clear cpu reset definition in the POP. However, the cpu is not put
|
||||
|
|
|
@ -19,7 +19,6 @@ x86-specific Documentation
|
|||
tlb
|
||||
mtrr
|
||||
pat
|
||||
intel_mpx
|
||||
intel-iommu
|
||||
intel_txt
|
||||
amd-memory-encryption
|
||||
|
|
MAINTAINERS (65 changes)
|
@ -3649,6 +3649,7 @@ F: sound/pci/oxygen/
|
|||
|
||||
C-SKY ARCHITECTURE
|
||||
M: Guo Ren <guoren@kernel.org>
|
||||
L: linux-csky@vger.kernel.org
|
||||
T: git https://github.com/c-sky/csky-linux.git
|
||||
S: Supported
|
||||
F: arch/csky/
|
||||
|
@ -3909,7 +3910,7 @@ S: Supported
|
|||
F: Documentation/filesystems/ceph.txt
|
||||
F: fs/ceph/
|
||||
|
||||
CERTIFICATE HANDLING:
|
||||
CERTIFICATE HANDLING
|
||||
M: David Howells <dhowells@redhat.com>
|
||||
M: David Woodhouse <dwmw2@infradead.org>
|
||||
L: keyrings@vger.kernel.org
|
||||
|
@ -3919,7 +3920,7 @@ F: certs/
|
|||
F: scripts/sign-file.c
|
||||
F: scripts/extract-cert.c
|
||||
|
||||
CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
|
||||
CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM
|
||||
L: devel@driverdev.osuosl.org
|
||||
S: Obsolete
|
||||
F: drivers/staging/wusbcore/
|
||||
|
@ -5932,12 +5933,12 @@ S: Maintained
|
|||
F: drivers/media/dvb-frontends/ec100*
|
||||
|
||||
ECRYPT FILE SYSTEM
|
||||
M: Tyler Hicks <tyhicks@canonical.com>
|
||||
M: Tyler Hicks <code@tyhicks.com>
|
||||
L: ecryptfs@vger.kernel.org
|
||||
W: http://ecryptfs.org
|
||||
W: https://launchpad.net/ecryptfs
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tyhicks/ecryptfs.git
|
||||
S: Supported
|
||||
S: Odd Fixes
|
||||
F: Documentation/filesystems/ecryptfs.txt
|
||||
F: fs/ecryptfs/
|
||||
|
||||
|
@ -7047,7 +7048,7 @@ L: kvm@vger.kernel.org
|
|||
S: Supported
|
||||
F: drivers/uio/uio_pci_generic.c
|
||||
|
||||
GENERIC VDSO LIBRARY:
|
||||
GENERIC VDSO LIBRARY
|
||||
M: Andy Lutomirski <luto@kernel.org>
|
||||
M: Thomas Gleixner <tglx@linutronix.de>
|
||||
M: Vincenzo Frascino <vincenzo.frascino@arm.com>
|
||||
|
@ -8392,7 +8393,7 @@ M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
|
|||
M: Rodrigo Vivi <rodrigo.vivi@intel.com>
|
||||
L: intel-gfx@lists.freedesktop.org
|
||||
W: https://01.org/linuxgraphics/
|
||||
B: https://01.org/linuxgraphics/documentation/how-report-bugs
|
||||
B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
|
||||
C: irc://chat.freenode.net/intel-gfx
|
||||
Q: http://patchwork.freedesktop.org/project/intel-gfx/
|
||||
T: git git://anongit.freedesktop.org/drm-intel
|
||||
|
@ -9278,7 +9279,7 @@ F: include/keys/trusted-type.h
|
|||
F: security/keys/trusted.c
|
||||
F: include/keys/trusted.h
|
||||
|
||||
KEYS/KEYRINGS:
|
||||
KEYS/KEYRINGS
|
||||
M: David Howells <dhowells@redhat.com>
|
||||
M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
|
||||
L: keyrings@vger.kernel.org
|
||||
|
@ -11114,14 +11115,12 @@ S: Maintained
|
|||
F: drivers/usb/image/microtek.*
|
||||
|
||||
MIPS
|
||||
M: Ralf Baechle <ralf@linux-mips.org>
|
||||
M: Paul Burton <paulburton@kernel.org>
|
||||
M: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
|
||||
L: linux-mips@vger.kernel.org
|
||||
W: http://www.linux-mips.org/
|
||||
T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
|
||||
Q: http://patchwork.linux-mips.org/project/linux-mips/list/
|
||||
S: Supported
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/mips/
|
||||
F: Documentation/mips/
|
||||
F: arch/mips/
|
||||
|
@ -11484,7 +11483,7 @@ F: drivers/scsi/mac_scsi.*
|
|||
F: drivers/scsi/sun3_scsi.*
|
||||
F: drivers/scsi/sun3_scsi_vme.c
|
||||
|
||||
NCSI LIBRARY:
|
||||
NCSI LIBRARY
|
||||
M: Samuel Mendoza-Jonas <sam@mendozajonas.com>
|
||||
S: Maintained
|
||||
F: net/ncsi/
|
||||
|
@ -12740,7 +12739,7 @@ M: Tom Joseph <tjoseph@cadence.com>
|
|||
L: linux-pci@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/pci/cdns,*.txt
|
||||
F: drivers/pci/controller/pcie-cadence*
|
||||
F: drivers/pci/controller/cadence/
|
||||
|
||||
PCI DRIVER FOR FREESCALE LAYERSCAPE
|
||||
M: Minghuan Lian <minghuan.Lian@nxp.com>
|
||||
|
@ -13512,7 +13511,7 @@ L: linuxppc-dev@lists.ozlabs.org
|
|||
S: Maintained
|
||||
F: drivers/block/ps3vram.c
|
||||
|
||||
PSAMPLE PACKET SAMPLING SUPPORT:
|
||||
PSAMPLE PACKET SAMPLING SUPPORT
|
||||
M: Yotam Gigi <yotam.gi@gmail.com>
|
||||
S: Maintained
|
||||
F: net/psample
|
||||
|
@ -14582,10 +14581,10 @@ F: drivers/media/pci/saa7146/
|
|||
F: include/media/drv-intf/saa7146*
|
||||
|
||||
SAFESETID SECURITY MODULE
|
||||
M: Micah Morton <mortonm@chromium.org>
|
||||
S: Supported
|
||||
F: security/safesetid/
|
||||
F: Documentation/admin-guide/LSM/SafeSetID.rst
|
||||
M: Micah Morton <mortonm@chromium.org>
|
||||
S: Supported
|
||||
F: security/safesetid/
|
||||
F: Documentation/admin-guide/LSM/SafeSetID.rst
|
||||
|
||||
SAMSUNG AUDIO (ASoC) DRIVERS
|
||||
M: Krzysztof Kozlowski <krzk@kernel.org>
|
||||
|
@ -16552,8 +16551,8 @@ M: Michael Jamet <michael.jamet@intel.com>
|
|||
M: Mika Westerberg <mika.westerberg@linux.intel.com>
|
||||
M: Yehezkel Bernat <YehezkelShB@gmail.com>
|
||||
L: linux-usb@vger.kernel.org
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
|
||||
F: Documentation/admin-guide/thunderbolt.rst
|
||||
F: drivers/thunderbolt/
|
||||
F: include/linux/thunderbolt.h
|
||||
|
@ -17080,7 +17079,7 @@ S: Maintained
|
|||
F: Documentation/admin-guide/ufs.rst
|
||||
F: fs/ufs/
|
||||
|
||||
UHID USERSPACE HID IO DRIVER:
|
||||
UHID USERSPACE HID IO DRIVER
|
||||
M: David Herrmann <dh.herrmann@googlemail.com>
|
||||
L: linux-input@vger.kernel.org
|
||||
S: Maintained
|
||||
|
@ -17094,18 +17093,18 @@ S: Maintained
|
|||
F: drivers/usb/common/ulpi.c
|
||||
F: include/linux/ulpi/
|
||||
|
||||
ULTRA-WIDEBAND (UWB) SUBSYSTEM:
|
||||
ULTRA-WIDEBAND (UWB) SUBSYSTEM
|
||||
L: devel@driverdev.osuosl.org
|
||||
S: Obsolete
|
||||
F: drivers/staging/uwb/
|
||||
|
||||
UNICODE SUBSYSTEM:
|
||||
UNICODE SUBSYSTEM
|
||||
M: Gabriel Krisman Bertazi <krisman@collabora.com>
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
S: Supported
|
||||
F: fs/unicode/
|
||||
|
||||
UNICORE32 ARCHITECTURE:
|
||||
UNICORE32 ARCHITECTURE
|
||||
M: Guan Xuetao <gxt@pku.edu.cn>
|
||||
W: http://mprc.pku.edu.cn/~guanxuetao/linux
|
||||
S: Maintained
|
||||
|
@ -17392,11 +17391,14 @@ F: drivers/usb/
|
|||
F: include/linux/usb.h
|
||||
F: include/linux/usb/
|
||||
|
||||
USB TYPEC PI3USB30532 MUX DRIVER
|
||||
M: Hans de Goede <hdegoede@redhat.com>
|
||||
USB TYPEC BUS FOR ALTERNATE MODES
|
||||
M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
|
||||
L: linux-usb@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/usb/typec/mux/pi3usb30532.c
|
||||
F: Documentation/ABI/testing/sysfs-bus-typec
|
||||
F: Documentation/driver-api/usb/typec_bus.rst
|
||||
F: drivers/usb/typec/altmodes/
|
||||
F: include/linux/usb/typec_altmode.h
|
||||
|
||||
USB TYPEC CLASS
|
||||
M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
|
||||
|
@ -17407,14 +17409,11 @@ F: Documentation/driver-api/usb/typec.rst
|
|||
F: drivers/usb/typec/
|
||||
F: include/linux/usb/typec.h
|
||||
|
||||
USB TYPEC BUS FOR ALTERNATE MODES
|
||||
M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
|
||||
USB TYPEC PI3USB30532 MUX DRIVER
|
||||
M: Hans de Goede <hdegoede@redhat.com>
|
||||
L: linux-usb@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/ABI/testing/sysfs-bus-typec
|
||||
F: Documentation/driver-api/usb/typec_bus.rst
|
||||
F: drivers/usb/typec/altmodes/
|
||||
F: include/linux/usb/typec_altmode.h
|
||||
F: drivers/usb/typec/mux/pi3usb30532.c
|
||||
|
||||
USB TYPEC PORT CONTROLLER DRIVERS
|
||||
M: Guenter Roeck <linux@roeck-us.net>
|
||||
|
@ -17791,7 +17790,7 @@ F: include/linux/vbox_utils.h
|
|||
F: include/uapi/linux/vbox*.h
|
||||
F: drivers/virt/vboxguest/
|
||||
|
||||
VIRTUAL BOX SHARED FOLDER VFS DRIVER:
|
||||
VIRTUAL BOX SHARED FOLDER VFS DRIVER
|
||||
M: Hans de Goede <hdegoede@redhat.com>
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
S: Maintained
|
||||
|
|
Makefile (6 changes)
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Kleptomaniac Octopus

# *DOCUMENTATION*
|
@ -68,6 +68,7 @@ unexport GREP_OPTIONS
|
|||
#
|
||||
# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
|
||||
# If KBUILD_VERBOSE equals 1 then the above command is displayed.
|
||||
# If KBUILD_VERBOSE equals 2 then give the reason why each target is rebuilt.
|
||||
#
|
||||
# To put more focus on warnings, be less verbose as default
|
||||
# Use 'make V=1' to see the full commands
|
||||
|
@ -1238,7 +1239,7 @@ ifneq ($(dtstree),)
|
|||
%.dtb: include/config/kernel.release scripts_dtc
|
||||
$(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
|
||||
|
||||
PHONY += dtbs dtbs_install dt_binding_check
|
||||
PHONY += dtbs dtbs_install dtbs_check
|
||||
dtbs dtbs_check: include/config/kernel.release scripts_dtc
|
||||
$(Q)$(MAKE) $(build)=$(dtstree)
|
||||
|
||||
|
@ -1258,6 +1259,7 @@ PHONY += scripts_dtc
|
|||
scripts_dtc: scripts_basic
|
||||
$(Q)$(MAKE) $(build)=scripts/dtc
|
||||
|
||||
PHONY += dt_binding_check
|
||||
dt_binding_check: scripts_dtc
|
||||
$(Q)$(MAKE) $(build)=Documentation/devicetree/bindings
|
||||
|
||||
|
|
|
@ -392,9 +392,6 @@ static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
|
|||
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
|
||||
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
|
||||
|
||||
static inline void kvm_arm_vhe_guest_enter(void) {}
|
||||
static inline void kvm_arm_vhe_guest_exit(void) {}
|
||||
|
||||
#define KVM_BP_HARDEN_UNKNOWN -1
|
||||
#define KVM_BP_HARDEN_WA_NEEDED 0
|
||||
#define KVM_BP_HARDEN_NOT_REQUIRED 1
|
||||
|
|
|
@ -102,7 +102,7 @@
|
|||
};
|
||||
|
||||
gmac0: ethernet@ff800000 {
|
||||
compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
|
||||
compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
|
||||
reg = <0xff800000 0x2000>;
|
||||
interrupts = <0 90 4>;
|
||||
interrupt-names = "macirq";
|
||||
|
@ -118,7 +118,7 @@
|
|||
};
|
||||
|
||||
gmac1: ethernet@ff802000 {
|
||||
compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
|
||||
compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
|
||||
reg = <0xff802000 0x2000>;
|
||||
interrupts = <0 91 4>;
|
||||
interrupt-names = "macirq";
|
||||
|
@ -134,7 +134,7 @@
|
|||
};
|
||||
|
||||
gmac2: ethernet@ff804000 {
|
||||
compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
|
||||
compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
|
||||
reg = <0xff804000 0x2000>;
|
||||
interrupts = <0 92 4>;
|
||||
interrupt-names = "macirq";
|
||||
|
|
|
@ -32,7 +32,7 @@ static inline void gic_write_eoir(u32 irq)
|
|||
isb();
|
||||
}
|
||||
|
||||
static inline void gic_write_dir(u32 irq)
|
||||
static __always_inline void gic_write_dir(u32 irq)
|
||||
{
|
||||
write_sysreg_s(irq, SYS_ICC_DIR_EL1);
|
||||
isb();
|
||||
|
|
|
@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void)
|
|||
return test_bit(ICACHEF_ALIASING, &__icache_flags);
|
||||
}
|
||||
|
||||
static inline int icache_is_vpipt(void)
|
||||
static __always_inline int icache_is_vpipt(void)
|
||||
{
|
||||
return test_bit(ICACHEF_VPIPT, &__icache_flags);
|
||||
}
|
||||
|
|
|
@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
|
|||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
extern void flush_dcache_page(struct page *);
|
||||
|
||||
static inline void __flush_icache_all(void)
|
||||
static __always_inline void __flush_icache_all(void)
|
||||
{
|
||||
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
|
||||
return;
|
||||
|
|
|
@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field)
|
|||
return cpuid_feature_extract_signed_field_width(features, field, 4);
|
||||
}
|
||||
|
||||
static inline unsigned int __attribute_const__
|
||||
static __always_inline unsigned int __attribute_const__
|
||||
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
|
||||
{
|
||||
return (u64)(features << (64 - width - field)) >> (64 - width);
|
||||
}
|
||||
|
||||
static inline unsigned int __attribute_const__
|
||||
static __always_inline unsigned int __attribute_const__
|
||||
cpuid_feature_extract_unsigned_field(u64 features, int field)
|
||||
{
|
||||
return cpuid_feature_extract_unsigned_field_width(features, field, 4);
|
||||
|
@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void)
|
|||
return val == 0x1;
|
||||
}
|
||||
|
||||
static inline bool system_supports_fpsimd(void)
|
||||
static __always_inline bool system_supports_fpsimd(void)
|
||||
{
|
||||
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
|
||||
}
|
||||
|
@ -575,13 +575,13 @@ static inline bool system_uses_ttbr0_pan(void)
|
|||
!cpus_have_const_cap(ARM64_HAS_PAN);
|
||||
}
|
||||
|
||||
static inline bool system_supports_sve(void)
|
||||
static __always_inline bool system_supports_sve(void)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_ARM64_SVE) &&
|
||||
cpus_have_const_cap(ARM64_SVE);
|
||||
}
|
||||
|
||||
static inline bool system_supports_cnp(void)
|
||||
static __always_inline bool system_supports_cnp(void)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_ARM64_CNP) &&
|
||||
cpus_have_const_cap(ARM64_HAS_CNP);
|
||||
|
|
|
@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
|
|||
}
|
||||
|
||||
#define __raw_writel __raw_writel
|
||||
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
|
||||
static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
|
||||
{
|
||||
asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
|
||||
}
|
||||
|
@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
|
|||
}
|
||||
|
||||
#define __raw_readl __raw_readl
|
||||
static inline u32 __raw_readl(const volatile void __iomem *addr)
|
||||
static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
|
||||
{
|
||||
u32 val;
|
||||
asm volatile(ALTERNATIVE("ldr %w0, [%1]",
|
||||
|
|
|
@ -36,7 +36,7 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu);
|
|||
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
|
||||
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
|
||||
|
||||
static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
|
||||
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !(vcpu->arch.hcr_el2 & HCR_RW);
|
||||
}
|
||||
|
@ -127,7 +127,7 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
|
|||
vcpu->arch.vsesr_el2 = vsesr;
|
||||
}
|
||||
|
||||
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
|
||||
}
|
||||
|
@ -153,17 +153,17 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long
|
|||
*__vcpu_elr_el1(vcpu) = v;
|
||||
}
|
||||
|
||||
static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
|
||||
}
|
||||
|
||||
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
|
||||
}
|
||||
|
||||
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (vcpu_mode_is_32bit(vcpu))
|
||||
return kvm_condition_valid32(vcpu);
|
||||
|
@ -181,13 +181,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
|
|||
* coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
|
||||
* AArch32 with banked registers.
|
||||
*/
|
||||
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
|
||||
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
|
||||
u8 reg_num)
|
||||
{
|
||||
return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
|
||||
}
|
||||
|
||||
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
|
||||
static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
|
||||
unsigned long val)
|
||||
{
|
||||
if (reg_num != 31)
|
||||
|
@ -264,12 +264,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
|
|||
return mode != PSR_MODE_EL0t;
|
||||
}
|
||||
|
||||
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->arch.fault.esr_el2;
|
||||
}
|
||||
|
||||
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 esr = kvm_vcpu_get_hsr(vcpu);
|
||||
|
||||
|
@ -279,12 +279,12 @@ static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
|
|||
return -1;
|
||||
}
|
||||
|
||||
static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->arch.fault.far_el2;
|
||||
}
|
||||
|
||||
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
|
||||
}
|
||||
|
@ -299,7 +299,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
|
|||
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
|
||||
}
|
||||
|
@ -319,17 +319,17 @@ static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
|
|||
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
|
||||
}
|
||||
|
||||
static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
|
||||
kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
|
||||
|
@ -340,18 +340,18 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
|
|||
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
|
||||
}
|
||||
|
||||
static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
|
||||
}
|
||||
|
||||
/* This one is not specific to Data Abort */
|
||||
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
|
||||
}
|
||||
|
||||
static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
|
||||
}
|
||||
|
@ -361,17 +361,17 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
|
|||
return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
|
||||
}
|
||||
|
||||
static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
|
||||
}
|
||||
|
||||
static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
switch (kvm_vcpu_trap_get_fault(vcpu)) {
|
||||
case FSC_SEA:
|
||||
|
@ -390,7 +390,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
|
|||
}
|
||||
}
|
||||
|
||||
static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
|
||||
static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 esr = kvm_vcpu_get_hsr(vcpu);
|
||||
return ESR_ELx_SYS64_ISS_RT(esr);
|
||||
|
@ -504,7 +504,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
|
|||
return data; /* Leave LE untouched */
|
||||
}
|
||||
|
||||
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
|
||||
static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
|
||||
{
|
||||
if (vcpu_mode_is_32bit(vcpu))
|
||||
kvm_skip_instr32(vcpu, is_wide_instr);
|
||||
|
@ -519,7 +519,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
|
|||
* Skip an instruction which has been emulated at hyp while most guest sysregs
|
||||
* are live.
|
||||
*/
|
||||
static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
|
||||
static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
|
||||
vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
|
||||
|
|
|
@ -626,38 +626,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
|
|||
static inline void kvm_clr_pmu_events(u32 clr) {}
|
||||
#endif
|
||||
|
||||
static inline void kvm_arm_vhe_guest_enter(void)
|
||||
{
|
||||
local_daif_mask();
|
||||
|
||||
/*
|
||||
* Having IRQs masked via PMR when entering the guest means the GIC
|
||||
* will not signal the CPU of interrupts of lower priority, and the
|
||||
* only way to get out will be via guest exceptions.
|
||||
* Naturally, we want to avoid this.
|
||||
*
|
||||
* local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
|
||||
* dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
|
||||
*/
|
||||
pmr_sync();
|
||||
}
|
||||
|
||||
static inline void kvm_arm_vhe_guest_exit(void)
|
||||
{
|
||||
/*
|
||||
* local_daif_restore() takes care to properly restore PSTATE.DAIF
|
||||
* and the GIC PMR if the host is using IRQ priorities.
|
||||
*/
|
||||
local_daif_restore(DAIF_PROCCTX_NOIRQ);
|
||||
|
||||
/*
|
||||
* When we exit from the guest we change a number of CPU configuration
|
||||
* parameters, such as traps. Make sure these changes take effect
|
||||
* before running the host or additional guests.
|
||||
*/
|
||||
isb();
|
||||
}
|
||||
|
||||
#define KVM_BP_HARDEN_UNKNOWN -1
|
||||
#define KVM_BP_HARDEN_WA_NEEDED 0
|
||||
#define KVM_BP_HARDEN_NOT_REQUIRED 1
|
||||
|
|
|
@ -47,6 +47,13 @@
|
|||
#define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1)
|
||||
#define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1)
|
||||
|
||||
/*
|
||||
* Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
|
||||
* static inline can allow the compiler to out-of-line this. KVM always wants
|
||||
* the macro version as its always inlined.
|
||||
*/
|
||||
#define __kvm_swab32(x) ___constant_swab32(x)
|
||||
|
||||
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
|
||||
|
||||
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
|
||||
|
|
|
@ -93,7 +93,7 @@ void kvm_update_va_mask(struct alt_instr *alt,
|
|||
__le32 *origptr, __le32 *updptr, int nr_inst);
|
||||
void kvm_compute_layout(void);
|
||||
|
||||
static inline unsigned long __kern_hyp_va(unsigned long v)
|
||||
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
|
||||
{
|
||||
asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
|
||||
"ror %0, %0, #1\n"
|
||||
|
@ -473,6 +473,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
|
|||
extern void *__kvm_bp_vect_base;
|
||||
extern int __kvm_harden_el2_vector_slot;
|
||||
|
||||
/* This is only called on a VHE system */
|
||||
static inline void *kvm_get_hyp_vector(void)
|
||||
{
|
||||
struct bp_hardening_data *data = arm64_get_bp_hardening_data();
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
#ifdef CONFIG_ARM64_LSE_ATOMICS
|
||||
|
||||
#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
|
||||
#define __LSE_PREAMBLE ".arch_extension lse\n"
|
||||
|
||||
#include <linux/compiler_types.h>
|
||||
#include <linux/export.h>
|
||||
|
|
|
@ -213,7 +213,7 @@ static inline unsigned long kaslr_offset(void)
|
|||
((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
|
||||
|
||||
#define untagged_addr(addr) ({ \
|
||||
u64 __addr = (__force u64)addr; \
|
||||
u64 __addr = (__force u64)(addr); \
|
||||
__addr &= __untagged_addr(__addr); \
|
||||
(__force __typeof__(addr))__addr; \
|
||||
})
|
||||
|
|
|
@ -83,7 +83,7 @@ static inline bool is_kernel_in_hyp_mode(void)
|
|||
return read_sysreg(CurrentEL) == CurrentEL_EL2;
|
||||
}
|
||||
|
||||
static inline bool has_vhe(void)
|
||||
static __always_inline bool has_vhe(void)
|
||||
{
|
||||
if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
|
||||
return true;
|
||||
|
|
|
@ -625,7 +625,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
|
|||
}
|
||||
|
||||
/* Switch to the guest for VHE systems running in EL2 */
|
||||
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
|
||||
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpu_context *host_ctxt;
|
||||
struct kvm_cpu_context *guest_ctxt;
|
||||
|
@ -678,7 +678,42 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
|
|||
|
||||
return exit_code;
|
||||
}
|
||||
NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
|
||||
NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
|
||||
|
||||
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ret;
|
||||
|
||||
local_daif_mask();
|
||||
|
||||
/*
|
||||
* Having IRQs masked via PMR when entering the guest means the GIC
|
||||
* will not signal the CPU of interrupts of lower priority, and the
|
||||
* only way to get out will be via guest exceptions.
|
||||
* Naturally, we want to avoid this.
|
||||
*
|
||||
* local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
|
||||
* dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
|
||||
*/
|
||||
pmr_sync();
|
||||
|
||||
ret = __kvm_vcpu_run_vhe(vcpu);
|
||||
|
||||
/*
|
||||
* local_daif_restore() takes care to properly restore PSTATE.DAIF
|
||||
* and the GIC PMR if the host is using IRQ priorities.
|
||||
*/
|
||||
local_daif_restore(DAIF_PROCCTX_NOIRQ);
|
||||
|
||||
/*
|
||||
* When we exit from the guest we change a number of CPU configuration
|
||||
* parameters, such as traps. Make sure these changes take effect
|
||||
* before running the host or additional guests.
|
||||
*/
|
||||
isb();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Switch to the guest for legacy non-VHE systems */
|
||||
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
|
||||
|
|
|
@ -69,14 +69,14 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
|
|||
u32 data = vcpu_get_reg(vcpu, rd);
|
||||
if (__is_be(vcpu)) {
|
||||
/* guest pre-swabbed data, undo this for writel() */
|
||||
data = swab32(data);
|
||||
data = __kvm_swab32(data);
|
||||
}
|
||||
writel_relaxed(data, addr);
|
||||
} else {
|
||||
u32 data = readl_relaxed(addr);
|
||||
if (__is_be(vcpu)) {
|
||||
/* guest expects swabbed data */
|
||||
data = swab32(data);
|
||||
data = __kvm_swab32(data);
|
||||
}
|
||||
vcpu_set_reg(vcpu, rd, data);
|
||||
}
|
||||
|
|
|
@ -9,7 +9,6 @@ config CSKY
|
|||
select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2
|
||||
select COMMON_CLK
|
||||
select CLKSRC_MMIO
|
||||
select CLKSRC_OF
|
||||
select CSKY_MPINTC if CPU_CK860
|
||||
select CSKY_MP_TIMER if CPU_CK860
|
||||
select CSKY_APB_INTC
|
||||
|
@ -37,6 +36,7 @@ config CSKY
|
|||
select GX6605S_TIMER if CPU_CK610
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
select HAVE_ARCH_AUDITSYSCALL
|
||||
select HAVE_COPY_THREAD_TLS
|
||||
select HAVE_DYNAMIC_FTRACE
|
||||
select HAVE_FUNCTION_TRACER
|
||||
select HAVE_FUNCTION_GRAPH_TRACER
|
||||
|
@ -47,8 +47,8 @@ config CSKY
|
|||
select HAVE_PERF_EVENTS
|
||||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_DMA_API_DEBUG
|
||||
select HAVE_DMA_CONTIGUOUS
|
||||
select HAVE_STACKPROTECTOR
|
||||
select HAVE_SYSCALL_TRACEPOINTS
|
||||
select MAY_HAVE_SPARSE_IRQ
|
||||
select MODULES_USE_ELF_RELA if MODULES
|
||||
|
@ -59,6 +59,11 @@ config CSKY
|
|||
select TIMER_OF
|
||||
select USB_ARCH_HAS_EHCI
|
||||
select USB_ARCH_HAS_OHCI
|
||||
select GENERIC_PCI_IOMAP
|
||||
select HAVE_PCI
|
||||
select PCI_DOMAINS_GENERIC if PCI
|
||||
select PCI_SYSCALL if PCI
|
||||
select PCI_MSI if PCI
|
||||
|
||||
config CPU_HAS_CACHEV2
|
||||
bool
|
||||
|
@ -75,7 +80,7 @@ config CPU_HAS_TLBI
|
|||
config CPU_HAS_LDSTEX
|
||||
bool
|
||||
help
|
||||
For SMP, CPU needs "ldex&stex" instrcutions to atomic operations.
|
||||
For SMP, CPU needs "ldex&stex" instructions for atomic operations.
|
||||
|
||||
config CPU_NEED_TLBSYNC
|
||||
bool
|
||||
|
@ -188,6 +193,40 @@ config CPU_PM_STOP
|
|||
bool "stop"
|
||||
endchoice
|
||||
|
||||
menuconfig HAVE_TCM
|
||||
bool "Tightly-Coupled/Sram Memory"
|
||||
select GENERIC_ALLOCATOR
|
||||
help
|
||||
The implementation are not only used by TCM (Tightly-Coupled Meory)
|
||||
but also used by sram on SOC bus. It follow existed linux tcm
|
||||
software interface, so that old tcm application codes could be
|
||||
re-used directly.
|
||||
|
||||
if HAVE_TCM
|
||||
config ITCM_RAM_BASE
|
||||
hex "ITCM ram base"
|
||||
default 0xffffffff
|
||||
|
||||
config ITCM_NR_PAGES
|
||||
int "Page count of ITCM size: NR*4KB"
|
||||
range 1 256
|
||||
default 32
|
||||
|
||||
config HAVE_DTCM
|
||||
bool "DTCM Support"
|
||||
|
||||
config DTCM_RAM_BASE
|
||||
hex "DTCM ram base"
|
||||
depends on HAVE_DTCM
|
||||
default 0xffffffff
|
||||
|
||||
config DTCM_NR_PAGES
|
||||
int "Page count of DTCM size: NR*4KB"
|
||||
depends on HAVE_DTCM
|
||||
range 1 256
|
||||
default 32
|
||||
endif
|
||||
|
||||
config CPU_HAS_VDSP
|
||||
bool "CPU has VDSP coprocessor"
|
||||
depends on CPU_HAS_FPU && CPU_HAS_FPUV2
|
||||
|
@ -196,6 +235,10 @@ config CPU_HAS_FPU
|
|||
bool "CPU has FPU coprocessor"
|
||||
depends on CPU_CK807 || CPU_CK810 || CPU_CK860
|
||||
|
||||
config CPU_HAS_ICACHE_INS
|
||||
bool "CPU has Icache invalidate instructions"
|
||||
depends on CPU_HAS_CACHEV2
|
||||
|
||||
config CPU_HAS_TEE
|
||||
bool "CPU has Trusted Execution Environment"
|
||||
depends on CPU_CK810
|
||||
|
@ -235,4 +278,6 @@ config HOTPLUG_CPU
|
|||
Say N if you want to disable CPU hotplug.
|
||||
endmenu
|
||||
|
||||
source "arch/csky/Kconfig.platforms"
|
||||
|
||||
source "kernel/Kconfig.hz"
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
menu "Platform drivers selection"
|
||||
|
||||
config ARCH_CSKY_DW_APB_ICTL
|
||||
bool "Select dw-apb interrupt controller"
|
||||
select DW_APB_ICTL
|
||||
default y
|
||||
help
|
||||
This enables support for snps dw-apb-ictl
|
||||
endmenu
|
|
@ -48,9 +48,8 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u
|
|||
|
||||
#define flush_icache_page(vma, page) do {} while (0);
|
||||
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
|
||||
|
||||
#define flush_icache_user_range(vma,page,addr,len) \
|
||||
flush_dcache_page(page)
|
||||
#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
|
||||
#define flush_icache_deferred(mm) do {} while (0);
|
||||
|
||||
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
|
||||
do { \
|
||||
|
|
|
@ -16,14 +16,16 @@
|
|||
#define LSAVE_A4 40
|
||||
#define LSAVE_A5 44
|
||||
|
||||
#define usp ss1
|
||||
|
||||
.macro USPTOKSP
|
||||
mtcr sp, ss1
|
||||
mtcr sp, usp
|
||||
mfcr sp, ss0
|
||||
.endm
|
||||
|
||||
.macro KSPTOUSP
|
||||
mtcr sp, ss0
|
||||
mfcr sp, ss1
|
||||
mfcr sp, usp
|
||||
.endm
|
||||
|
||||
.macro SAVE_ALL epc_inc
|
||||
|
@ -45,7 +47,13 @@
|
|||
add lr, r13
|
||||
stw lr, (sp, 8)
|
||||
|
||||
mov lr, sp
|
||||
addi lr, 32
|
||||
addi lr, 32
|
||||
addi lr, 16
|
||||
bt 2f
|
||||
mfcr lr, ss1
|
||||
2:
|
||||
stw lr, (sp, 16)
|
||||
|
||||
stw a0, (sp, 20)
|
||||
|
@ -79,9 +87,10 @@
|
|||
ldw a0, (sp, 12)
|
||||
mtcr a0, epsr
|
||||
btsti a0, 31
|
||||
bt 1f
|
||||
ldw a0, (sp, 16)
|
||||
mtcr a0, ss1
|
||||
|
||||
1:
|
||||
ldw a0, (sp, 24)
|
||||
ldw a1, (sp, 28)
|
||||
ldw a2, (sp, 32)
|
||||
|
@ -102,9 +111,9 @@
|
|||
addi sp, 32
|
||||
addi sp, 8
|
||||
|
||||
bt 1f
|
||||
bt 2f
|
||||
KSPTOUSP
|
||||
1:
|
||||
2:
|
||||
rte
|
||||
.endm
|
||||
|
||||
|
|
|
@ -6,46 +6,80 @@
|
|||
#include <linux/mm.h>
|
||||
#include <asm/cache.h>
|
||||
|
||||
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
|
||||
{
|
||||
unsigned long start;
|
||||
|
||||
start = (unsigned long) kmap_atomic(page);
|
||||
|
||||
cache_wbinv_range(start, start + PAGE_SIZE);
|
||||
|
||||
kunmap_atomic((void *)start);
|
||||
}
|
||||
|
||||
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long vaddr, int len)
|
||||
{
|
||||
unsigned long kaddr;
|
||||
|
||||
kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
|
||||
|
||||
cache_wbinv_range(kaddr, kaddr + len);
|
||||
|
||||
kunmap_atomic((void *)kaddr);
|
||||
}
|
||||
|
||||
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
|
||||
pte_t *pte)
|
||||
{
|
||||
unsigned long addr, pfn;
|
||||
unsigned long addr;
|
||||
struct page *page;
|
||||
|
||||
pfn = pte_pfn(*pte);
|
||||
if (unlikely(!pfn_valid(pfn)))
|
||||
page = pfn_to_page(pte_pfn(*pte));
|
||||
if (page == ZERO_PAGE(0))
|
||||
return;
|
||||
|
||||
page = pfn_to_page(pfn);
|
||||
if (page == ZERO_PAGE(0))
|
||||
if (test_and_set_bit(PG_dcache_clean, &page->flags))
|
||||
return;
|
||||
|
||||
addr = (unsigned long) kmap_atomic(page);
|
||||
|
||||
cache_wbinv_range(addr, addr + PAGE_SIZE);
|
||||
dcache_wb_range(addr, addr + PAGE_SIZE);
|
||||
|
||||
if (vma->vm_flags & VM_EXEC)
|
||||
icache_inv_range(addr, addr + PAGE_SIZE);
|
||||
|
||||
kunmap_atomic((void *) addr);
|
||||
}
|
||||
|
||||
void flush_icache_deferred(struct mm_struct *mm)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
cpumask_t *mask = &mm->context.icache_stale_mask;
|
||||
|
||||
if (cpumask_test_cpu(cpu, mask)) {
|
||||
cpumask_clear_cpu(cpu, mask);
|
||||
/*
|
||||
* Ensure the remote hart's writes are visible to this hart.
|
||||
* This pairs with a barrier in flush_icache_mm.
|
||||
*/
|
||||
smp_mb();
|
||||
local_icache_inv_all(NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void flush_icache_mm_range(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned int cpu;
|
||||
cpumask_t others, *mask;
|
||||
|
||||
preempt_disable();
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_ICACHE_INS
|
||||
if (mm == current->mm) {
|
||||
icache_inv_range(start, end);
|
||||
preempt_enable();
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Mark every hart's icache as needing a flush for this MM. */
|
||||
mask = &mm->context.icache_stale_mask;
|
||||
cpumask_setall(mask);
|
||||
|
||||
/* Flush this hart's I$ now, and mark it as flushed. */
|
||||
cpu = smp_processor_id();
|
||||
cpumask_clear_cpu(cpu, mask);
|
||||
local_icache_inv_all(NULL);
|
||||
|
||||
/*
|
||||
* Flush the I$ of other harts concurrently executing, and mark them as
|
||||
* flushed.
|
||||
*/
|
||||
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
|
||||
|
||||
if (mm != current->active_mm || !cpumask_empty(&others)) {
|
||||
on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
|
||||
cpumask_clear(mask);
|
||||
}
|
||||
|
||||
preempt_enable();
|
||||
}
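Taken together, the abiv2 rework above is the usual two-stage maintenance dance: flush_dcache_page() merely marks a page dirty for the caches by clearing PG_dcache_clean, update_mmu_cache() performs the write-back (and the icache invalidation for VM_EXEC mappings) once the page is actually installed in a user page table, and flush_icache_mm_range()/flush_icache_deferred() keep the other harts honest by marking their icaches stale and letting them invalidate lazily on their next switch_mm(). A minimal sketch of the producer side, with a hypothetical helper name, assuming a kernel-side writer filling a page that user space will later map:

#include <linux/highmem.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical helper, not part of this commit: fill a page that will later
 * be mapped into user space.  Under the scheme above, flush_dcache_page()
 * only clears PG_dcache_clean; the real write-back (plus icache invalidation
 * for executable mappings) is deferred to update_mmu_cache() when the page
 * is mapped.
 */
static void fill_user_visible_page(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr, src, len);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}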
|
||||
|
|
|
@ -13,24 +13,27 @@
|
|||
#define flush_cache_all() do { } while (0)
|
||||
#define flush_cache_mm(mm) do { } while (0)
|
||||
#define flush_cache_dup_mm(mm) do { } while (0)
|
||||
|
||||
#define flush_cache_range(vma, start, end) \
|
||||
do { \
|
||||
if (vma->vm_flags & VM_EXEC) \
|
||||
icache_inv_all(); \
|
||||
} while (0)
|
||||
|
||||
#define flush_cache_range(vma, start, end) do { } while (0)
|
||||
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||
#define flush_dcache_page(page) do { } while (0)
|
||||
|
||||
#define PG_dcache_clean PG_arch_1
|
||||
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
static inline void flush_dcache_page(struct page *page)
|
||||
{
|
||||
if (test_bit(PG_dcache_clean, &page->flags))
|
||||
clear_bit(PG_dcache_clean, &page->flags);
|
||||
}
|
||||
|
||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||
#define flush_icache_page(vma, page) do { } while (0)
|
||||
|
||||
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
|
||||
|
||||
void flush_icache_page(struct vm_area_struct *vma, struct page *page);
|
||||
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long vaddr, int len);
|
||||
void flush_icache_mm_range(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end);
|
||||
void flush_icache_deferred(struct mm_struct *mm);
|
||||
|
||||
#define flush_cache_vmap(start, end) do { } while (0)
|
||||
#define flush_cache_vunmap(start, end) do { } while (0)
|
||||
|
@ -38,7 +41,13 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
|
|||
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
|
||||
do { \
|
||||
memcpy(dst, src, len); \
|
||||
cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \
|
||||
if (vma->vm_flags & VM_EXEC) { \
|
||||
dcache_wb_range((unsigned long)dst, \
|
||||
(unsigned long)dst + len); \
|
||||
flush_icache_mm_range(current->mm, \
|
||||
(unsigned long)dst, \
|
||||
(unsigned long)dst + len); \
|
||||
} \
|
||||
} while (0)
|
||||
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
|
||||
memcpy(dst, src, len)
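The VM_EXEC test added to copy_to_user_page() above is what keeps ptrace-style text pokes coherent: a write into another task's code page goes through access_process_vm() and lands in this macro, so the traced mm's icache has to be brought up to date. Purely as illustration, the user-space side of that path (not part of the commit):

#include <sys/ptrace.h>
#include <sys/types.h>

/*
 * Illustrative debugger fragment: patch an instruction in a stopped tracee.
 * Inside the kernel this write is funnelled through access_process_vm() and
 * the copy_to_user_page() macro above.
 */
static long plant_breakpoint(pid_t pid, void *addr, long bkpt_insn)
{
	return ptrace(PTRACE_POKETEXT, pid, addr, (void *)bkpt_insn);
}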
|
||||
|
|
|
@ -31,7 +31,13 @@
|
|||
|
||||
mfcr lr, epsr
|
||||
stw lr, (sp, 12)
|
||||
btsti lr, 31
|
||||
bf 1f
|
||||
addi lr, sp, 152
|
||||
br 2f
|
||||
1:
|
||||
mfcr lr, usp
|
||||
2:
|
||||
stw lr, (sp, 16)
|
||||
|
||||
stw a0, (sp, 20)
|
||||
|
@ -64,8 +70,10 @@
|
|||
mtcr a0, epc
|
||||
ldw a0, (sp, 12)
|
||||
mtcr a0, epsr
|
||||
btsti a0, 31
|
||||
ldw a0, (sp, 16)
|
||||
mtcr a0, usp
|
||||
mtcr a0, ss0
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_HILO
|
||||
ldw a0, (sp, 140)
|
||||
|
@ -86,6 +94,9 @@
|
|||
addi sp, 40
|
||||
ldm r16-r30, (sp)
|
||||
addi sp, 72
|
||||
bf 1f
|
||||
mfcr sp, ss0
|
||||
1:
|
||||
rte
|
||||
.endm
|
||||
|
||||
|
|
|
@ -10,9 +10,6 @@ CONFIG_BSD_PROCESS_ACCT=y
|
|||
CONFIG_BSD_PROCESS_ACCT_V3=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
CONFIG_DEFAULT_DEADLINE=y
|
||||
CONFIG_CPU_CK807=y
|
||||
CONFIG_CPU_HAS_FPU=y
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
CONFIG_UNIX=y
|
||||
|
@ -27,10 +24,7 @@ CONFIG_SERIAL_NONSTANDARD=y
|
|||
CONFIG_SERIAL_8250=y
|
||||
CONFIG_SERIAL_8250_CONSOLE=y
|
||||
CONFIG_SERIAL_OF_PLATFORM=y
|
||||
CONFIG_TTY_PRINTK=y
|
||||
# CONFIG_VGA_CONSOLE is not set
|
||||
CONFIG_CSKY_MPTIMER=y
|
||||
CONFIG_GX6605S_TIMER=y
|
||||
CONFIG_PM_DEVFREQ=y
|
||||
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
|
||||
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
|
||||
|
@ -56,6 +50,4 @@ CONFIG_CRAMFS=y
|
|||
CONFIG_ROMFS_FS=y
|
||||
CONFIG_NFS_FS=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
|
|
|
@ -28,7 +28,6 @@ generic-y += local64.h
|
|||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mmiowb.h
|
||||
generic-y += module.h
|
||||
generic-y += pci.h
|
||||
generic-y += percpu.h
|
||||
generic-y += preempt.h
|
||||
generic-y += qrwlock.h
|
||||
|
|
|
@ -16,6 +16,7 @@ void dcache_wb_line(unsigned long start);
|
|||
|
||||
void icache_inv_range(unsigned long start, unsigned long end);
|
||||
void icache_inv_all(void);
|
||||
void local_icache_inv_all(void *priv);
|
||||
|
||||
void dcache_wb_range(unsigned long start, unsigned long end);
|
||||
void dcache_wbinv_all(void);
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
#ifndef __ASM_CSKY_CACHEFLUSH_H
|
||||
#define __ASM_CSKY_CACHEFLUSH_H
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <abi/cacheflush.h>
|
||||
|
||||
#endif /* __ASM_CSKY_CACHEFLUSH_H */
|
||||
|
|
|
@ -5,12 +5,16 @@
|
|||
#define __ASM_CSKY_FIXMAP_H
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm/memory.h>
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
#include <linux/threads.h>
|
||||
#include <asm/kmap_types.h>
|
||||
#endif
|
||||
|
||||
enum fixed_addresses {
|
||||
#ifdef CONFIG_HAVE_TCM
|
||||
FIX_TCM = TCM_NR_PAGES,
|
||||
#endif
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
FIX_KMAP_BEGIN,
|
||||
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
|
||||
|
@ -18,10 +22,13 @@ enum fixed_addresses {
|
|||
__end_of_fixed_addresses
|
||||
};
|
||||
|
||||
#define FIXADDR_TOP 0xffffc000
|
||||
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
|
||||
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
|
||||
|
||||
#include <asm-generic/fixmap.h>
|
||||
|
||||
extern void fixrange_init(unsigned long start, unsigned long end,
|
||||
pgd_t *pgd_base);
|
||||
extern void __init fixaddr_init(void);
|
||||
|
||||
#endif /* __ASM_CSKY_FIXMAP_H */
|
||||
|
|
|
@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_MEMORY_H
#define __ASM_CSKY_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>

#define FIXADDR_TOP	_AC(0xffffc000, UL)
#define PKMAP_BASE	_AC(0xff800000, UL)
#define VMALLOC_START	_AC(0xc0008000, UL)
#define VMALLOC_END	(PKMAP_BASE - (PAGE_SIZE * 2))

#ifdef CONFIG_HAVE_TCM
#ifdef CONFIG_HAVE_DTCM
#define TCM_NR_PAGES	(CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES)
#else
#define TCM_NR_PAGES	(CONFIG_ITCM_NR_PAGES)
#endif
#define FIXADDR_TCM	_AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL)
#endif

#endif
|
@ -7,6 +7,7 @@
|
|||
typedef struct {
|
||||
atomic64_t asid;
|
||||
void *vdso;
|
||||
cpumask_t icache_stale_mask;
|
||||
} mm_context_t;
|
||||
|
||||
#endif /* __ASM_CSKY_MMU_H */
|
||||
|
|
|
@ -43,5 +43,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
|||
|
||||
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
|
||||
write_mmu_entryhi(next->context.asid.counter);
|
||||
|
||||
flush_icache_deferred(next);
|
||||
}
|
||||
#endif /* __ASM_CSKY_MMU_CONTEXT_H */
|
||||
|
|
|
@ -0,0 +1,34 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#ifndef __ASM_CSKY_PCI_H
|
||||
#define __ASM_CSKY_PCI_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
|
||||
#define PCIBIOS_MIN_IO 0
|
||||
#define PCIBIOS_MIN_MEM 0
|
||||
|
||||
/* C-SKY shim does not initialize PCI bus */
|
||||
#define pcibios_assign_all_busses() 1
|
||||
|
||||
extern int isa_dma_bridge_buggy;
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
|
||||
{
|
||||
/* no legacy IRQ on csky */
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline int pci_proc_domain(struct pci_bus *bus)
|
||||
{
|
||||
/* always show the domain in /proc */
|
||||
return 1;
|
||||
}
|
||||
#endif /* CONFIG_PCI */
|
||||
|
||||
#endif /* __ASM_CSKY_PCI_H */
|
|
@ -5,6 +5,7 @@
|
|||
#define __ASM_CSKY_PGTABLE_H
|
||||
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/addrspace.h>
|
||||
#include <abi/pgtable-bits.h>
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
@ -16,11 +17,6 @@
|
|||
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
|
||||
#define FIRST_USER_ADDRESS 0UL
|
||||
|
||||
#define PKMAP_BASE (0xff800000)
|
||||
|
||||
#define VMALLOC_START (0xc0008000)
|
||||
#define VMALLOC_END (PKMAP_BASE - 2*PAGE_SIZE)
|
||||
|
||||
/*
|
||||
* C-SKY is two-level paging structure:
|
||||
*/
|
||||
|
|
|
@ -0,0 +1,29 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _ASM_STACKPROTECTOR_H
|
||||
#define _ASM_STACKPROTECTOR_H 1
|
||||
|
||||
#include <linux/random.h>
|
||||
#include <linux/version.h>
|
||||
|
||||
extern unsigned long __stack_chk_guard;
|
||||
|
||||
/*
|
||||
* Initialize the stackprotector canary value.
|
||||
*
|
||||
* NOTE: this must only be called from functions that never return,
|
||||
* and it must always be inlined.
|
||||
*/
|
||||
static __always_inline void boot_init_stack_canary(void)
|
||||
{
|
||||
unsigned long canary;
|
||||
|
||||
/* Try to get a semi random initial value. */
|
||||
get_random_bytes(&canary, sizeof(canary));
|
||||
canary ^= LINUX_VERSION_CODE;
|
||||
canary &= CANARY_MASK;
|
||||
|
||||
current->stack_canary = canary;
|
||||
__stack_chk_guard = current->stack_canary;
|
||||
}
|
||||
|
||||
#endif /* __ASM_SH_STACKPROTECTOR_H */
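boot_init_stack_canary() above only seeds the guard value; the per-function checks come from building with -fstack-protector, which compares a stashed copy of __stack_chk_guard on the way out of each instrumented function. A hand-written approximation of what the compiler emits, for illustration only:

extern unsigned long __stack_chk_guard;
extern void __stack_chk_fail(void);

/* Approximation of compiler-generated instrumentation; real kernels get
 * this automatically from -fstack-protector, never by hand. */
int protected_function(const char *src)
{
	unsigned long canary = __stack_chk_guard;	/* prologue: stash the guard */
	char buf[64];
	int n = 0;

	while (src[n] && n < 63) {			/* body writing into the buffer */
		buf[n] = src[n];
		n++;
	}
	buf[n] = '\0';

	if (canary != __stack_chk_guard)		/* epilogue: detect a smashed frame */
		__stack_chk_fail();
	return n;
}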
|
|
@ -0,0 +1,24 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
|
||||
#ifndef __ASM_CSKY_TCM_H
|
||||
#define __ASM_CSKY_TCM_H
|
||||
|
||||
#ifndef CONFIG_HAVE_TCM
|
||||
#error "You should not be including tcm.h unless you have a TCM!"
|
||||
#endif
|
||||
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/* Tag variables with this */
|
||||
#define __tcmdata __section(.tcm.data)
|
||||
/* Tag constants with this */
|
||||
#define __tcmconst __section(.tcm.rodata)
|
||||
/* Tag functions inside TCM called from outside TCM with this */
|
||||
#define __tcmfunc __section(.tcm.text) noinline
|
||||
/* Tag function inside TCM called from inside TCM with this */
|
||||
#define __tcmlocalfunc __section(.tcm.text)
|
||||
|
||||
void *tcm_alloc(size_t len);
|
||||
void tcm_free(void *addr, size_t len);
|
||||
|
||||
#endif
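A hedged usage sketch for the new interface, assuming CONFIG_HAVE_TCM=y: the __tcm* attributes place objects into the .tcm.* sections collected by the linker script further down, while tcm_alloc()/tcm_free() hand out whatever is left of the TCM pages from the gen_pool that arch/csky/mm/tcm.c sets up at boot. The tcm_demo_* names below are hypothetical.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/tcm.h>

static int __tcmdata hot_counter;		/* ends up in .tcm.data */

static int __tcmfunc hot_path(int x)		/* executed from .tcm.text */
{
	return x + ++hot_counter;
}

static void *scratch;

static int __init tcm_demo_init(void)
{
	scratch = tcm_alloc(256);		/* carve 256 bytes out of the TCM pool */
	if (!scratch)
		return -ENOMEM;

	return hot_path(0) > 0 ? 0 : -EINVAL;
}

static void __exit tcm_demo_exit(void)
{
	tcm_free(scratch, 256);
}

module_init(tcm_demo_init);
module_exit(tcm_demo_exit);
MODULE_LICENSE("GPL");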
|
|
@ -1,7 +1,10 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
|
||||
|
||||
#define __ARCH_WANT_STAT64
|
||||
#define __ARCH_WANT_NEW_STAT
|
||||
#define __ARCH_WANT_SYS_CLONE
|
||||
#define __ARCH_WANT_SYS_CLONE3
|
||||
#define __ARCH_WANT_SET_GET_RLIMIT
|
||||
#define __ARCH_WANT_TIME32_SYSCALLS
|
||||
#include <asm-generic/unistd.h>
|
||||
|
|
|
@ -17,10 +17,12 @@ ENTRY(csky_cmpxchg)
|
|||
mfcr a3, epc
|
||||
addi a3, TRAP0_SIZE
|
||||
|
||||
subi sp, 8
|
||||
subi sp, 16
|
||||
stw a3, (sp, 0)
|
||||
mfcr a3, epsr
|
||||
stw a3, (sp, 4)
|
||||
mfcr a3, usp
|
||||
stw a3, (sp, 8)
|
||||
|
||||
psrset ee
|
||||
#ifdef CONFIG_CPU_HAS_LDSTEX
|
||||
|
@ -47,7 +49,9 @@ ENTRY(csky_cmpxchg)
|
|||
mtcr a3, epc
|
||||
ldw a3, (sp, 4)
|
||||
mtcr a3, epsr
|
||||
addi sp, 8
|
||||
ldw a3, (sp, 8)
|
||||
mtcr a3, usp
|
||||
addi sp, 16
|
||||
KSPTOUSP
|
||||
rte
|
||||
END(csky_cmpxchg)
|
||||
|
|
|
@ -16,6 +16,12 @@
|
|||
|
||||
struct cpuinfo_csky cpu_data[NR_CPUS];
|
||||
|
||||
#ifdef CONFIG_STACKPROTECTOR
|
||||
#include <linux/stackprotector.h>
|
||||
unsigned long __stack_chk_guard __read_mostly;
|
||||
EXPORT_SYMBOL(__stack_chk_guard);
|
||||
#endif
|
||||
|
||||
asmlinkage void ret_from_fork(void);
|
||||
asmlinkage void ret_from_kernel_thread(void);
|
||||
|
||||
|
@ -34,10 +40,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
|
|||
return sw->r15;
|
||||
}
|
||||
|
||||
int copy_thread(unsigned long clone_flags,
|
||||
int copy_thread_tls(unsigned long clone_flags,
|
||||
unsigned long usp,
|
||||
unsigned long kthread_arg,
|
||||
struct task_struct *p)
|
||||
struct task_struct *p,
|
||||
unsigned long tls)
|
||||
{
|
||||
struct switch_stack *childstack;
|
||||
struct pt_regs *childregs = task_pt_regs(p);
|
||||
|
@ -64,7 +71,7 @@ int copy_thread(unsigned long clone_flags,
|
|||
childregs->usp = usp;
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
task_thread_info(p)->tp_value = childregs->tls
|
||||
= childregs->regs[0];
|
||||
= tls;
|
||||
|
||||
childregs->a0 = 0;
|
||||
childstack->r15 = (unsigned long) ret_from_fork;
|
||||
|
|
|
@ -47,9 +47,6 @@ static void __init csky_memblock_init(void)
|
|||
signed long size;
|
||||
|
||||
memblock_reserve(__pa(_stext), _end - _stext);
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
|
||||
#endif
|
||||
|
||||
early_init_fdt_reserve_self();
|
||||
early_init_fdt_scan_reserved_mem();
|
||||
|
@ -133,6 +130,8 @@ void __init setup_arch(char **cmdline_p)
|
|||
|
||||
sparse_init();
|
||||
|
||||
fixaddr_init();
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
kmap_init();
|
||||
#endif
|
||||
|
|
|
@ -120,7 +120,7 @@ void __init setup_smp_ipi(void)
|
|||
int rc;
|
||||
|
||||
if (ipi_irq == 0)
|
||||
panic("%s IRQ mapping failed\n", __func__);
|
||||
return;
|
||||
|
||||
rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
|
||||
&ipi_dummy_dev);
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
|
||||
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/of_clk.h>
|
||||
|
||||
void __init time_init(void)
|
||||
{
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
|
||||
#include <asm/vmlinux.lds.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/memory.h>
|
||||
|
||||
OUTPUT_ARCH(csky)
|
||||
ENTRY(_start)
|
||||
|
@ -53,6 +54,54 @@ SECTIONS
|
|||
RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
|
||||
_edata = .;
|
||||
|
||||
#ifdef CONFIG_HAVE_TCM
|
||||
.tcm_start : {
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
__tcm_start = .;
|
||||
}
|
||||
|
||||
.text_data_tcm FIXADDR_TCM : AT(__tcm_start)
|
||||
{
|
||||
. = ALIGN(4);
|
||||
__stcm_text_data = .;
|
||||
*(.tcm.text)
|
||||
*(.tcm.rodata)
|
||||
#ifndef CONFIG_HAVE_DTCM
|
||||
*(.tcm.data)
|
||||
#endif
|
||||
. = ALIGN(4);
|
||||
__etcm_text_data = .;
|
||||
}
|
||||
|
||||
. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm);
|
||||
|
||||
#ifdef CONFIG_HAVE_DTCM
|
||||
#define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE
|
||||
|
||||
.dtcm_start : {
|
||||
__dtcm_start = .;
|
||||
}
|
||||
|
||||
.data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start)
|
||||
{
|
||||
. = ALIGN(4);
|
||||
__stcm_data = .;
|
||||
*(.tcm.data)
|
||||
. = ALIGN(4);
|
||||
__etcm_data = .;
|
||||
}
|
||||
|
||||
. = ADDR(.dtcm_start) + SIZEOF(.data_tcm);
|
||||
|
||||
.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) {
|
||||
#else
|
||||
.tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) {
|
||||
#endif
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
__tcm_end = .;
|
||||
}
|
||||
#endif
|
||||
|
||||
EXCEPTION_TABLE(L1_CACHE_BYTES)
|
||||
BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES)
|
||||
VBR_BASE
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
ifeq ($(CONFIG_CPU_HAS_CACHEV2),y)
|
||||
obj-y += cachev2.o
|
||||
CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE)
|
||||
else
|
||||
obj-y += cachev1.o
|
||||
CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE)
|
||||
endif
|
||||
|
||||
obj-y += dma-mapping.o
|
||||
|
@ -14,3 +16,4 @@ obj-y += syscache.o
|
|||
obj-y += tlb.o
|
||||
obj-y += asid.o
|
||||
obj-y += context.o
|
||||
obj-$(CONFIG_HAVE_TCM) += tcm.o
|
||||
|
|
|
@ -94,6 +94,11 @@ void icache_inv_all(void)
|
|||
cache_op_all(INS_CACHE|CACHE_INV, 0);
|
||||
}
|
||||
|
||||
void local_icache_inv_all(void *priv)
|
||||
{
|
||||
cache_op_all(INS_CACHE|CACHE_INV, 0);
|
||||
}
|
||||
|
||||
void dcache_wb_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0);
|
||||
|
|
|
@ -3,15 +3,25 @@
|
|||
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/mm.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/barrier.h>
|
||||
|
||||
inline void dcache_wb_line(unsigned long start)
|
||||
#define INS_CACHE (1 << 0)
|
||||
#define CACHE_INV (1 << 4)
|
||||
|
||||
void local_icache_inv_all(void *priv)
|
||||
{
|
||||
asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
|
||||
mtcr("cr17", INS_CACHE|CACHE_INV);
|
||||
sync_is();
|
||||
}
|
||||
|
||||
void icache_inv_all(void)
|
||||
{
|
||||
on_each_cpu(local_icache_inv_all, NULL, 1);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_ICACHE_INS
|
||||
void icache_inv_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
|
||||
|
@ -20,10 +30,16 @@ void icache_inv_range(unsigned long start, unsigned long end)
|
|||
asm volatile("icache.iva %0\n"::"r"(i):"memory");
|
||||
sync_is();
|
||||
}
|
||||
|
||||
void icache_inv_all(void)
|
||||
#else
|
||||
void icache_inv_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
asm volatile("icache.ialls\n":::"memory");
|
||||
icache_inv_all();
|
||||
}
|
||||
#endif
|
||||
|
||||
inline void dcache_wb_line(unsigned long start)
|
||||
{
|
||||
asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
|
||||
sync_is();
|
||||
}
|
||||
|
||||
|
@ -36,27 +52,10 @@ void dcache_wb_range(unsigned long start, unsigned long end)
|
|||
sync_is();
|
||||
}
|
||||
|
||||
void dcache_inv_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
|
||||
|
||||
for (; i < end; i += L1_CACHE_BYTES)
|
||||
asm volatile("dcache.civa %0\n"::"r"(i):"memory");
|
||||
sync_is();
|
||||
}
|
||||
|
||||
void cache_wbinv_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
|
||||
|
||||
for (; i < end; i += L1_CACHE_BYTES)
|
||||
asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
|
||||
sync_is();
|
||||
|
||||
i = start & ~(L1_CACHE_BYTES - 1);
|
||||
for (; i < end; i += L1_CACHE_BYTES)
|
||||
asm volatile("icache.iva %0\n"::"r"(i):"memory");
|
||||
sync_is();
|
||||
dcache_wb_range(start, end);
|
||||
icache_inv_range(start, end);
|
||||
}
|
||||
EXPORT_SYMBOL(cache_wbinv_range);
|
||||
|
||||
|
|
|
@ -117,85 +117,29 @@ struct page *kmap_atomic_to_page(void *ptr)
|
|||
return pte_page(*pte);
|
||||
}
|
||||
|
||||
static void __init fixrange_init(unsigned long start, unsigned long end,
|
||||
pgd_t *pgd_base)
|
||||
{
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
int i, j, k;
|
||||
unsigned long vaddr;
|
||||
|
||||
vaddr = start;
|
||||
i = __pgd_offset(vaddr);
|
||||
j = __pud_offset(vaddr);
|
||||
k = __pmd_offset(vaddr);
|
||||
pgd = pgd_base + i;
|
||||
|
||||
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
|
||||
pud = (pud_t *)pgd;
|
||||
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
|
||||
pmd = (pmd_t *)pud;
|
||||
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
|
||||
if (pmd_none(*pmd)) {
|
||||
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
|
||||
if (!pte)
|
||||
panic("%s: Failed to allocate %lu bytes align=%lx\n",
|
||||
__func__, PAGE_SIZE,
|
||||
PAGE_SIZE);
|
||||
|
||||
set_pmd(pmd, __pmd(__pa(pte)));
|
||||
BUG_ON(pte != pte_offset_kernel(pmd, 0));
|
||||
}
|
||||
vaddr += PMD_SIZE;
|
||||
}
|
||||
k = 0;
|
||||
}
|
||||
j = 0;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void __init fixaddr_kmap_pages_init(void)
|
||||
static void __init kmap_pages_init(void)
|
||||
{
|
||||
unsigned long vaddr;
|
||||
pgd_t *pgd_base;
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
pgd_t *pgd;
|
||||
pmd_t *pmd;
|
||||
pud_t *pud;
|
||||
pte_t *pte;
|
||||
#endif
|
||||
pgd_base = swapper_pg_dir;
|
||||
|
||||
/*
|
||||
* Fixed mappings:
|
||||
*/
|
||||
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
|
||||
fixrange_init(vaddr, 0, pgd_base);
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
/*
|
||||
* Permanent kmaps:
|
||||
*/
|
||||
vaddr = PKMAP_BASE;
|
||||
fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
|
||||
fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
|
||||
|
||||
pgd = swapper_pg_dir + __pgd_offset(vaddr);
|
||||
pud = (pud_t *)pgd;
|
||||
pmd = pmd_offset(pud, vaddr);
|
||||
pte = pte_offset_kernel(pmd, vaddr);
|
||||
pkmap_page_table = pte;
|
||||
#endif
|
||||
}
|
||||
|
||||
void __init kmap_init(void)
|
||||
{
|
||||
unsigned long vaddr;
|
||||
|
||||
fixaddr_kmap_pages_init();
|
||||
kmap_pages_init();
|
||||
|
||||
vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <linux/swap.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/pfn.h>
|
||||
#include <linux/initrd.h>
|
||||
|
||||
#include <asm/setup.h>
|
||||
#include <asm/cachectl.h>
|
||||
|
@ -31,10 +32,50 @@
|
|||
|
||||
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
|
||||
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
|
||||
EXPORT_SYMBOL(invalid_pte_table);
|
||||
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
|
||||
__page_aligned_bss;
|
||||
EXPORT_SYMBOL(empty_zero_page);
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
static void __init setup_initrd(void)
|
||||
{
|
||||
unsigned long size;
|
||||
|
||||
if (initrd_start >= initrd_end) {
|
||||
pr_err("initrd not found or empty");
|
||||
goto disable;
|
||||
}
|
||||
|
||||
if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
|
||||
pr_err("initrd extends beyond end of memory");
|
||||
goto disable;
|
||||
}
|
||||
|
||||
size = initrd_end - initrd_start;
|
||||
|
||||
if (memblock_is_region_reserved(__pa(initrd_start), size)) {
|
||||
pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
|
||||
__pa(initrd_start), size);
|
||||
goto disable;
|
||||
}
|
||||
|
||||
memblock_reserve(__pa(initrd_start), size);
|
||||
|
||||
pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
|
||||
(void *)(initrd_start), size);
|
||||
|
||||
initrd_below_start_ok = 1;
|
||||
|
||||
return;
|
||||
|
||||
disable:
|
||||
initrd_start = initrd_end = 0;
|
||||
|
||||
pr_err(" - disabling initrd\n");
|
||||
}
|
||||
#endif
|
||||
|
||||
void __init mem_init(void)
|
||||
{
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
|
@ -46,6 +87,10 @@ void __init mem_init(void)
|
|||
#endif
|
||||
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
setup_initrd();
|
||||
#endif
|
||||
|
||||
memblock_free_all();
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
|
@ -101,3 +146,50 @@ void __init pre_mmu_init(void)
|
|||
/* Setup page mask to 4k */
|
||||
write_mmu_pagemask(0);
|
||||
}
|
||||
|
||||
void __init fixrange_init(unsigned long start, unsigned long end,
|
||||
pgd_t *pgd_base)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
int i, j, k;
|
||||
unsigned long vaddr;
|
||||
|
||||
vaddr = start;
|
||||
i = __pgd_offset(vaddr);
|
||||
j = __pud_offset(vaddr);
|
||||
k = __pmd_offset(vaddr);
|
||||
pgd = pgd_base + i;
|
||||
|
||||
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
|
||||
pud = (pud_t *)pgd;
|
||||
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
|
||||
pmd = (pmd_t *)pud;
|
||||
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
|
||||
if (pmd_none(*pmd)) {
|
||||
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
|
||||
if (!pte)
|
||||
panic("%s: Failed to allocate %lu bytes align=%lx\n",
|
||||
__func__, PAGE_SIZE,
|
||||
PAGE_SIZE);
|
||||
|
||||
set_pmd(pmd, __pmd(__pa(pte)));
|
||||
BUG_ON(pte != pte_offset_kernel(pmd, 0));
|
||||
}
|
||||
vaddr += PMD_SIZE;
|
||||
}
|
||||
k = 0;
|
||||
}
|
||||
j = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void __init fixaddr_init(void)
|
||||
{
|
||||
unsigned long vaddr;
|
||||
|
||||
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
|
||||
fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
|
||||
}
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
|
||||
#include <linux/syscalls.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/cachectl.h>
|
||||
|
||||
SYSCALL_DEFINE3(cacheflush,
|
||||
|
@ -13,17 +13,14 @@ SYSCALL_DEFINE3(cacheflush,
|
|||
{
|
||||
switch (cache) {
|
||||
case ICACHE:
|
||||
icache_inv_range((unsigned long)addr,
|
||||
(unsigned long)addr + bytes);
|
||||
break;
|
||||
case BCACHE:
|
||||
flush_icache_mm_range(current->mm,
|
||||
(unsigned long)addr,
|
||||
(unsigned long)addr + bytes);
|
||||
case DCACHE:
|
||||
dcache_wb_range((unsigned long)addr,
|
||||
(unsigned long)addr + bytes);
|
||||
break;
|
||||
case BCACHE:
|
||||
cache_wbinv_range((unsigned long)addr,
|
||||
(unsigned long)addr + bytes);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
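From user space the typical caller of this syscall is JIT-style code generation. A sketch, assuming the toolchain exposes __NR_cacheflush and that <asm/cachectl.h> provides the same ICACHE/DCACHE/BCACHE selectors used in the switch above (both are assumptions about the user-side headers):

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>

/*
 * Illustrative helper: after writing freshly generated instructions into a
 * buffer, write the dcache back and invalidate the icache before executing
 * it.  BCACHE covers both operations, per the handler above.
 */
static int sync_code_buffer(void *buf, unsigned long len)
{
	return syscall(__NR_cacheflush, buf, len, BCACHE);
}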
|
||||
|
|
|
@ -0,0 +1,169 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/fixmap.h>
|
||||
|
||||
#if (CONFIG_ITCM_RAM_BASE == 0xffffffff)
|
||||
#error "You should define ITCM_RAM_BASE"
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HAVE_DTCM
|
||||
#if (CONFIG_DTCM_RAM_BASE == 0xffffffff)
|
||||
#error "You should define DTCM_RAM_BASE"
|
||||
#endif
|
||||
|
||||
#if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE)
|
||||
#error "You should define correct DTCM_RAM_BASE"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
extern char __tcm_start, __tcm_end, __dtcm_start;
|
||||
|
||||
static struct gen_pool *tcm_pool;
|
||||
|
||||
static void __init tcm_mapping_init(void)
|
||||
{
|
||||
pte_t *tcm_pte;
|
||||
unsigned long vaddr, paddr;
|
||||
int i;
|
||||
|
||||
paddr = CONFIG_ITCM_RAM_BASE;
|
||||
|
||||
if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE)))
|
||||
goto panic;
|
||||
|
||||
#ifndef CONFIG_HAVE_DTCM
|
||||
for (i = 0; i < TCM_NR_PAGES; i++) {
|
||||
#else
|
||||
for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) {
|
||||
#endif
|
||||
vaddr = __fix_to_virt(FIX_TCM - i);
|
||||
|
||||
tcm_pte =
|
||||
pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
|
||||
|
||||
set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
|
||||
|
||||
flush_tlb_one(vaddr);
|
||||
|
||||
paddr = paddr + PAGE_SIZE;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HAVE_DTCM
|
||||
if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE)))
|
||||
goto panic;
|
||||
|
||||
paddr = CONFIG_DTCM_RAM_BASE;
|
||||
|
||||
for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) {
|
||||
vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i);
|
||||
|
||||
tcm_pte =
|
||||
pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr);
|
||||
|
||||
set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
|
||||
|
||||
flush_tlb_one(vaddr);
|
||||
|
||||
paddr = paddr + PAGE_SIZE;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_HAVE_DTCM
|
||||
memcpy((void *)__fix_to_virt(FIX_TCM),
|
||||
&__tcm_start, &__tcm_end - &__tcm_start);
|
||||
|
||||
pr_info("%s: mapping tcm va:0x%08lx to pa:0x%08x\n",
|
||||
__func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
|
||||
|
||||
pr_info("%s: __tcm_start va:0x%08lx size:%d\n",
|
||||
__func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start);
|
||||
#else
|
||||
memcpy((void *)__fix_to_virt(FIX_TCM),
|
||||
&__tcm_start, &__dtcm_start - &__tcm_start);
|
||||
|
||||
pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n",
|
||||
__func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
|
||||
|
||||
pr_info("%s: __itcm_start va:0x%08lx size:%d\n",
|
||||
__func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start);
|
||||
|
||||
memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
|
||||
&__dtcm_start, &__tcm_end - &__dtcm_start);
|
||||
|
||||
pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n",
|
||||
__func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
|
||||
CONFIG_DTCM_RAM_BASE);
|
||||
|
||||
pr_info("%s: __dtcm_start va:0x%08lx size:%d\n",
|
||||
__func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start);
|
||||
|
||||
#endif
|
||||
return;
|
||||
panic:
|
||||
panic("TCM init error");
|
||||
}
|
||||
|
||||
void *tcm_alloc(size_t len)
|
||||
{
|
||||
unsigned long vaddr;
|
||||
|
||||
if (!tcm_pool)
|
||||
return NULL;
|
||||
|
||||
vaddr = gen_pool_alloc(tcm_pool, len);
|
||||
if (!vaddr)
|
||||
return NULL;
|
||||
|
||||
return (void *) vaddr;
|
||||
}
|
||||
EXPORT_SYMBOL(tcm_alloc);
|
||||
|
||||
void tcm_free(void *addr, size_t len)
|
||||
{
|
||||
gen_pool_free(tcm_pool, (unsigned long) addr, len);
|
||||
}
|
||||
EXPORT_SYMBOL(tcm_free);
|
||||
|
||||
static int __init tcm_setup_pool(void)
|
||||
{
|
||||
#ifndef CONFIG_HAVE_DTCM
|
||||
u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE)
|
||||
- (u32) (&__tcm_end - &__tcm_start);
|
||||
|
||||
u32 tcm_pool_start = __fix_to_virt(FIX_TCM)
|
||||
+ (u32) (&__tcm_end - &__tcm_start);
|
||||
#else
|
||||
u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE)
|
||||
- (u32) (&__tcm_end - &__dtcm_start);
|
||||
|
||||
u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES)
|
||||
+ (u32) (&__tcm_end - &__dtcm_start);
|
||||
#endif
|
||||
int ret;
|
||||
|
||||
tcm_pool = gen_pool_create(2, -1);
|
||||
|
||||
ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1);
|
||||
if (ret) {
|
||||
pr_err("%s: gen_pool add failed!\n", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n",
|
||||
__func__, pool_size, tcm_pool_start);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init tcm_init(void)
|
||||
{
|
||||
tcm_mapping_init();
|
||||
|
||||
tcm_setup_pool();
|
||||
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(tcm_init);
|
|
@ -1,5 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <dt-bindings/clock/jz4740-cgu.h>
|
||||
#include <dt-bindings/clock/ingenic,tcu.h>
|
||||
|
||||
/ {
|
||||
#address-cells = <1>;
|
||||
|
@ -45,14 +46,6 @@
|
|||
#clock-cells = <1>;
|
||||
};
|
||||
|
||||
watchdog: watchdog@10002000 {
|
||||
compatible = "ingenic,jz4740-watchdog";
|
||||
reg = <0x10002000 0x10>;
|
||||
|
||||
clocks = <&cgu JZ4740_CLK_RTC>;
|
||||
clock-names = "rtc";
|
||||
};
|
||||
|
||||
tcu: timer@10002000 {
|
||||
compatible = "ingenic,jz4740-tcu", "simple-mfd";
|
||||
reg = <0x10002000 0x1000>;
|
||||
|
@ -73,6 +66,14 @@
|
|||
|
||||
interrupt-parent = <&intc>;
|
||||
interrupts = <23 22 21>;
|
||||
|
||||
watchdog: watchdog@0 {
|
||||
compatible = "ingenic,jz4740-watchdog";
|
||||
reg = <0x0 0xc>;
|
||||
|
||||
clocks = <&tcu TCU_CLK_WDT>;
|
||||
clock-names = "wdt";
|
||||
};
|
||||
};
|
||||
|
||||
rtc_dev: rtc@10003000 {
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <dt-bindings/clock/jz4780-cgu.h>
|
||||
#include <dt-bindings/clock/ingenic,tcu.h>
|
||||
#include <dt-bindings/dma/jz4780-dma.h>
|
||||
|
||||
/ {
|
||||
|
@ -67,6 +68,14 @@
|
|||
|
||||
interrupt-parent = <&intc>;
|
||||
interrupts = <27 26 25>;
|
||||
|
||||
watchdog: watchdog@0 {
|
||||
compatible = "ingenic,jz4780-watchdog";
|
||||
reg = <0x0 0xc>;
|
||||
|
||||
clocks = <&tcu TCU_CLK_WDT>;
|
||||
clock-names = "wdt";
|
||||
};
|
||||
};
|
||||
|
||||
rtc_dev: rtc@10003000 {
|
||||
|
@ -348,14 +357,6 @@
|
|||
status = "disabled";
|
||||
};
|
||||
|
||||
watchdog: watchdog@10002000 {
|
||||
compatible = "ingenic,jz4780-watchdog";
|
||||
reg = <0x10002000 0x10>;
|
||||
|
||||
clocks = <&cgu JZ4780_CLK_RTCLK>;
|
||||
clock-names = "rtc";
|
||||
};
|
||||
|
||||
nemc: nemc@13410000 {
|
||||
compatible = "ingenic,jz4780-nemc";
|
||||
reg = <0x13410000 0x10000>;
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <dt-bindings/clock/ingenic,tcu.h>
|
||||
#include <dt-bindings/clock/x1000-cgu.h>
|
||||
#include <dt-bindings/dma/x1000-dma.h>
|
||||
|
||||
|
@ -72,7 +73,7 @@
|
|||
compatible = "ingenic,x1000-watchdog", "ingenic,jz4780-watchdog";
|
||||
reg = <0x0 0x10>;
|
||||
|
||||
clocks = <&cgu X1000_CLK_RTCLK>;
|
||||
clocks = <&tcu TCU_CLK_WDT>;
|
||||
clock-names = "wdt";
|
||||
};
|
||||
};
|
||||
|
@ -158,7 +159,6 @@
|
|||
i2c0: i2c-controller@10050000 {
|
||||
compatible = "ingenic,x1000-i2c";
|
||||
reg = <0x10050000 0x1000>;
|
||||
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
|
@ -173,7 +173,6 @@
|
|||
i2c1: i2c-controller@10051000 {
|
||||
compatible = "ingenic,x1000-i2c";
|
||||
reg = <0x10051000 0x1000>;
|
||||
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
|
@ -188,7 +187,6 @@
|
|||
i2c2: i2c-controller@10052000 {
|
||||
compatible = "ingenic,x1000-i2c";
|
||||
reg = <0x10052000 0x1000>;
|
||||
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
|
|
|
@ -155,9 +155,11 @@
|
|||
* effective barrier as noted by commit 6b07d38aaa52 ("MIPS: Octeon: Use
|
||||
* optimized memory barrier primitives."). Here we specify that the affected
|
||||
* sync instructions should be emitted twice.
|
||||
* Note that this expression is evaluated by the assembler (not the compiler),
|
||||
* and that the assembler evaluates '==' as 0 or -1, not 0 or 1.
|
||||
*/
|
||||
#ifdef CONFIG_CPU_CAVIUM_OCTEON
|
||||
# define __SYNC_rpt(type) (1 + (type == __SYNC_wmb))
|
||||
# define __SYNC_rpt(type) (1 - (type == __SYNC_wmb))
|
||||
#else
|
||||
# define __SYNC_rpt(type) 1
|
||||
#endif
|
||||
|
|
|
@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
|
|||
{
|
||||
list_del(&v->list);
|
||||
if (v->load_addr)
|
||||
release_progmem(v);
|
||||
release_progmem(v->load_addr);
|
||||
kfree(v);
|
||||
}
|
||||
|
||||
|
|
|
@ -33,6 +33,7 @@ endif
|
|||
cflags-vdso := $(ccflags-vdso) \
|
||||
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
|
||||
-O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
|
||||
-mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \
|
||||
-fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
|
||||
$(call cc-option, -fno-asynchronous-unwind-tables) \
|
||||
$(call cc-option, -fno-stack-protector)
|
||||
|
@ -51,6 +52,8 @@ endif
|
|||
|
||||
CFLAGS_REMOVE_vgettimeofday.o = -pg
|
||||
|
||||
DISABLE_VDSO := n
|
||||
|
||||
#
|
||||
# For the pre-R6 code in arch/mips/vdso/vdso.h for locating
|
||||
# the base address of VDSO, the linker will emit a R_MIPS_PC32
|
||||
|
@ -64,11 +67,24 @@ CFLAGS_REMOVE_vgettimeofday.o = -pg
|
|||
ifndef CONFIG_CPU_MIPSR6
|
||||
ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
|
||||
$(warning MIPS VDSO requires binutils >= 2.25)
|
||||
obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
|
||||
ccflags-vdso += -DDISABLE_MIPS_VDSO
|
||||
DISABLE_VDSO := y
|
||||
endif
|
||||
endif
|
||||
|
||||
#
|
||||
# GCC (at least up to version 9.2) appears to emit function calls that make use
|
||||
# of the GOT when targeting microMIPS, which we can't use in the VDSO due to
|
||||
# the lack of relocations. As such, we disable the VDSO for microMIPS builds.
|
||||
#
|
||||
ifdef CONFIG_CPU_MICROMIPS
|
||||
DISABLE_VDSO := y
|
||||
endif
|
||||
|
||||
ifeq ($(DISABLE_VDSO),y)
|
||||
obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
|
||||
ccflags-vdso += -DDISABLE_MIPS_VDSO
|
||||
endif
|
||||
|
||||
# VDSO linker flags.
|
||||
VDSO_LDFLAGS := \
|
||||
-Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \
|
||||
|
@ -81,12 +97,18 @@ GCOV_PROFILE := n
|
|||
UBSAN_SANITIZE := n
|
||||
KCOV_INSTRUMENT := n
|
||||
|
||||
# Check that we don't have PIC 'jalr t9' calls left
|
||||
quiet_cmd_vdso_mips_check = VDSOCHK $@
|
||||
cmd_vdso_mips_check = if $(OBJDUMP) --disassemble $@ | egrep -h "jalr.*t9" > /dev/null; \
|
||||
then (echo >&2 "$@: PIC 'jalr t9' calls are not supported"; \
|
||||
rm -f $@; /bin/false); fi
|
||||
|
||||
#
|
||||
# Shared build commands.
|
||||
#
|
||||
|
||||
quiet_cmd_vdsold_and_vdso_check = LD $@
|
||||
cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
|
||||
cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check); $(cmd_vdso_mips_check)
|
||||
|
||||
quiet_cmd_vdsold = VDSO $@
|
||||
cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
|
||||
|
|
|
@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn)
|
|||
/*
|
||||
* Some number of bits at the level of the page table that points to
|
||||
* a hugepte are used to encode the size. This masks those bits.
|
||||
* On 8xx, HW assistance requires 4k alignment for the hugepte.
|
||||
*/
|
||||
#ifdef CONFIG_PPC_8xx
|
||||
#define HUGEPD_SHIFT_MASK 0xfff
|
||||
#else
|
||||
#define HUGEPD_SHIFT_MASK 0x3f
|
||||
#endif
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
|
|
@ -168,6 +168,10 @@ struct thread_struct {
|
|||
unsigned long srr1;
|
||||
unsigned long dar;
|
||||
unsigned long dsisr;
|
||||
#ifdef CONFIG_PPC_BOOK3S_32
|
||||
unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
|
||||
unsigned long lr, ctr;
|
||||
#endif
|
||||
#endif
|
||||
/* Debug Registers */
|
||||
struct debug_reg debug;
|
||||
|
|
|
@ -132,6 +132,18 @@ int main(void)
|
|||
OFFSET(SRR1, thread_struct, srr1);
|
||||
OFFSET(DAR, thread_struct, dar);
|
||||
OFFSET(DSISR, thread_struct, dsisr);
|
||||
#ifdef CONFIG_PPC_BOOK3S_32
|
||||
OFFSET(THR0, thread_struct, r0);
|
||||
OFFSET(THR3, thread_struct, r3);
|
||||
OFFSET(THR4, thread_struct, r4);
|
||||
OFFSET(THR5, thread_struct, r5);
|
||||
OFFSET(THR6, thread_struct, r6);
|
||||
OFFSET(THR8, thread_struct, r8);
|
||||
OFFSET(THR9, thread_struct, r9);
|
||||
OFFSET(THR11, thread_struct, r11);
|
||||
OFFSET(THLR, thread_struct, lr);
|
||||
OFFSET(THCTR, thread_struct, ctr);
|
||||
#endif
|
||||
#endif
|
||||
#ifdef CONFIG_SPE
|
||||
OFFSET(THREAD_EVR0, thread_struct, evr[0]);
|
||||
|
|
|
@ -1184,6 +1184,17 @@ void eeh_handle_special_event(void)
|
|||
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
|
||||
eeh_handle_normal_event(pe);
|
||||
} else {
|
||||
eeh_for_each_pe(pe, tmp_pe)
|
||||
eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
|
||||
edev->mode &= ~EEH_DEV_NO_HANDLER;
|
||||
|
||||
/* Notify all devices to be down */
|
||||
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
|
||||
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
|
||||
eeh_pe_report(
|
||||
"error_detected(permanent failure)", pe,
|
||||
eeh_report_failure, NULL);
|
||||
|
||||
pci_lock_rescan_remove();
|
||||
list_for_each_entry(hose, &hose_list, list_node) {
|
||||
phb_pe = eeh_phb_pe_get(hose);
|
||||
|
@ -1192,16 +1203,6 @@ void eeh_handle_special_event(void)
|
|||
(phb_pe->state & EEH_PE_RECOVERING))
|
||||
continue;
|
||||
|
||||
eeh_for_each_pe(pe, tmp_pe)
|
||||
eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
|
||||
edev->mode &= ~EEH_DEV_NO_HANDLER;
|
||||
|
||||
/* Notify all devices to be down */
|
||||
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
|
||||
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
|
||||
eeh_pe_report(
|
||||
"error_detected(permanent failure)", pe,
|
||||
eeh_report_failure, NULL);
|
||||
bus = eeh_pe_bus_get(phb_pe);
|
||||
if (!bus) {
|
||||
pr_err("%s: Cannot find PCI bus for "
|
||||
|
|
|
@ -783,7 +783,7 @@ fast_exception_return:
|
|||
1: lis r3,exc_exit_restart_end@ha
|
||||
addi r3,r3,exc_exit_restart_end@l
|
||||
cmplw r12,r3
|
||||
#if CONFIG_PPC_BOOK3S_601
|
||||
#ifdef CONFIG_PPC_BOOK3S_601
|
||||
bge 2b
|
||||
#else
|
||||
bge 3f
|
||||
|
@ -791,7 +791,7 @@ fast_exception_return:
|
|||
lis r4,exc_exit_restart@ha
|
||||
addi r4,r4,exc_exit_restart@l
|
||||
cmplw r12,r4
|
||||
#if CONFIG_PPC_BOOK3S_601
|
||||
#ifdef CONFIG_PPC_BOOK3S_601
|
||||
blt 2b
|
||||
#else
|
||||
blt 3f
|
||||
|
@ -1354,12 +1354,17 @@ _GLOBAL(enter_rtas)
|
|||
mtspr SPRN_SRR0,r8
|
||||
mtspr SPRN_SRR1,r9
|
||||
RFI
|
||||
1: tophys(r9,r1)
|
||||
1: tophys_novmstack r9, r1
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */
|
||||
mtmsr r0
|
||||
isync
|
||||
#endif
|
||||
lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
|
||||
lwz r9,8(r9) /* original msr value */
|
||||
addi r1,r1,INT_FRAME_SIZE
|
||||
li r0,0
|
||||
tophys(r7, r2)
|
||||
tophys_novmstack r7, r2
|
||||
stw r0, THREAD + RTAS_SP(r7)
|
||||
mtspr SPRN_SRR0,r8
|
||||
mtspr SPRN_SRR1,r9
|
||||
|
|
|
@ -290,17 +290,55 @@ MachineCheck:
|
|||
7: EXCEPTION_PROLOG_2
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
#ifdef CONFIG_PPC_CHRP
|
||||
bne cr1,1f
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
mfspr r4, SPRN_SPRG_THREAD
|
||||
tovirt(r4, r4)
|
||||
lwz r4, RTAS_SP(r4)
|
||||
cmpwi cr1, r4, 0
|
||||
#endif
|
||||
EXC_XFER_STD(0x200, machine_check_exception)
|
||||
#ifdef CONFIG_PPC_CHRP
|
||||
1: b machine_check_in_rtas
|
||||
beq cr1, machine_check_tramp
|
||||
b machine_check_in_rtas
|
||||
#else
|
||||
b machine_check_tramp
|
||||
#endif
|
||||
|
||||
/* Data access exception. */
|
||||
. = 0x300
|
||||
DO_KVM 0x300
|
||||
DataAccess:
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
mtspr SPRN_SPRG_SCRATCH0,r10
|
||||
mfspr r10, SPRN_SPRG_THREAD
|
||||
BEGIN_MMU_FTR_SECTION
|
||||
stw r11, THR11(r10)
|
||||
mfspr r10, SPRN_DSISR
|
||||
mfcr r11
|
||||
#ifdef CONFIG_PPC_KUAP
|
||||
andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
|
||||
#else
|
||||
andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
|
||||
#endif
|
||||
mfspr r10, SPRN_SPRG_THREAD
|
||||
beq hash_page_dsi
|
||||
.Lhash_page_dsi_cont:
|
||||
mtcr r11
|
||||
lwz r11, THR11(r10)
|
||||
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
||||
mtspr SPRN_SPRG_SCRATCH1,r11
|
||||
mfspr r11, SPRN_DAR
|
||||
stw r11, DAR(r10)
|
||||
mfspr r11, SPRN_DSISR
|
||||
stw r11, DSISR(r10)
|
||||
mfspr r11, SPRN_SRR0
|
||||
stw r11, SRR0(r10)
|
||||
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
|
||||
stw r11, SRR1(r10)
|
||||
mfcr r10
|
||||
andi. r11, r11, MSR_PR
|
||||
|
||||
EXCEPTION_PROLOG_1
|
||||
b handle_page_fault_tramp_1
|
||||
#else /* CONFIG_VMAP_STACK */
|
||||
EXCEPTION_PROLOG handle_dar_dsisr=1
|
||||
get_and_save_dar_dsisr_on_stack r4, r5, r11
|
||||
BEGIN_MMU_FTR_SECTION
|
||||
|
@ -316,11 +354,32 @@ BEGIN_MMU_FTR_SECTION
|
|||
FTR_SECTION_ELSE
|
||||
b handle_page_fault_tramp_2
|
||||
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
|
||||
#endif /* CONFIG_VMAP_STACK */
|
||||
|
||||
/* Instruction access exception. */
|
||||
. = 0x400
|
||||
DO_KVM 0x400
|
||||
InstructionAccess:
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
mtspr SPRN_SPRG_SCRATCH0,r10
|
||||
mtspr SPRN_SPRG_SCRATCH1,r11
|
||||
mfspr r10, SPRN_SPRG_THREAD
|
||||
mfspr r11, SPRN_SRR0
|
||||
stw r11, SRR0(r10)
|
||||
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
|
||||
stw r11, SRR1(r10)
|
||||
mfcr r10
|
||||
BEGIN_MMU_FTR_SECTION
|
||||
andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
|
||||
bne hash_page_isi
|
||||
.Lhash_page_isi_cont:
|
||||
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
|
||||
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
||||
andi. r11, r11, MSR_PR
|
||||
|
||||
EXCEPTION_PROLOG_1
|
||||
EXCEPTION_PROLOG_2
|
||||
#else /* CONFIG_VMAP_STACK */
|
||||
EXCEPTION_PROLOG
|
||||
andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
|
||||
beq 1f /* if so, try to put a PTE */
|
||||
|
@ -329,6 +388,7 @@ InstructionAccess:
|
|||
BEGIN_MMU_FTR_SECTION
|
||||
bl hash_page
|
||||
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
||||
#endif /* CONFIG_VMAP_STACK */
|
||||
1: mr r4,r12
|
||||
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
|
||||
stw r4, _DAR(r11)
|
||||
|
@ -344,7 +404,7 @@ Alignment:
|
|||
EXCEPTION_PROLOG handle_dar_dsisr=1
|
||||
save_dar_dsisr_on_stack r4, r5, r11
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
EXC_XFER_STD(0x600, alignment_exception)
|
||||
b alignment_exception_tramp
|
||||
|
||||
/* Program check exception */
|
||||
EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
|
||||
|
@ -645,15 +705,100 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
|
|||
|
||||
. = 0x3000
|
||||
|
||||
machine_check_tramp:
|
||||
EXC_XFER_STD(0x200, machine_check_exception)
|
||||
|
||||
alignment_exception_tramp:
|
||||
EXC_XFER_STD(0x600, alignment_exception)
|
||||
|
||||
handle_page_fault_tramp_1:
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
EXCEPTION_PROLOG_2 handle_dar_dsisr=1
|
||||
#endif
|
||||
lwz r4, _DAR(r11)
|
||||
lwz r5, _DSISR(r11)
|
||||
/* fall through */
|
||||
handle_page_fault_tramp_2:
|
||||
EXC_XFER_LITE(0x300, handle_page_fault)
|
||||
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
.macro save_regs_thread thread
|
||||
stw r0, THR0(\thread)
|
||||
stw r3, THR3(\thread)
|
||||
stw r4, THR4(\thread)
|
||||
stw r5, THR5(\thread)
|
||||
stw r6, THR6(\thread)
|
||||
stw r8, THR8(\thread)
|
||||
stw r9, THR9(\thread)
|
||||
mflr r0
|
||||
stw r0, THLR(\thread)
|
||||
mfctr r0
|
||||
stw r0, THCTR(\thread)
|
||||
.endm
|
||||
|
||||
.macro restore_regs_thread thread
|
||||
lwz r0, THLR(\thread)
|
||||
mtlr r0
|
||||
lwz r0, THCTR(\thread)
|
||||
mtctr r0
|
||||
lwz r0, THR0(\thread)
|
||||
lwz r3, THR3(\thread)
|
||||
lwz r4, THR4(\thread)
|
||||
lwz r5, THR5(\thread)
|
||||
lwz r6, THR6(\thread)
|
||||
lwz r8, THR8(\thread)
|
||||
lwz r9, THR9(\thread)
|
||||
.endm
|
||||
|
||||
hash_page_dsi:
|
||||
save_regs_thread r10
|
||||
mfdsisr r3
|
||||
mfdar r4
|
||||
mfsrr0 r5
|
||||
mfsrr1 r9
|
||||
rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
|
||||
bl hash_page
|
||||
mfspr r10, SPRN_SPRG_THREAD
|
||||
restore_regs_thread r10
|
||||
b .Lhash_page_dsi_cont
|
||||
|
||||
hash_page_isi:
|
||||
mr r11, r10
|
||||
mfspr r10, SPRN_SPRG_THREAD
|
||||
save_regs_thread r10
|
||||
li r3, 0
|
||||
lwz r4, SRR0(r10)
|
||||
lwz r9, SRR1(r10)
|
||||
bl hash_page
|
||||
mfspr r10, SPRN_SPRG_THREAD
|
||||
restore_regs_thread r10
|
||||
mr r10, r11
|
||||
b .Lhash_page_isi_cont
|
||||
|
||||
.globl fast_hash_page_return
|
||||
fast_hash_page_return:
|
||||
andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
|
||||
mfspr r10, SPRN_SPRG_THREAD
|
||||
restore_regs_thread r10
|
||||
bne 1f
|
||||
|
||||
/* DSI */
|
||||
mtcr r11
|
||||
lwz r11, THR11(r10)
|
||||
mfspr r10, SPRN_SPRG_SCRATCH0
|
||||
SYNC
|
||||
RFI
|
||||
|
||||
1: /* ISI */
|
||||
mtcr r11
|
||||
mfspr r11, SPRN_SPRG_SCRATCH1
|
||||
mfspr r10, SPRN_SPRG_SCRATCH0
|
||||
SYNC
|
||||
RFI
|
||||
|
||||
stack_overflow:
|
||||
vmap_stack_overflow_exception
|
||||
#endif
|
||||
|
||||
AltiVecUnavailable:
|
||||
EXCEPTION_PROLOG
|
||||
|
|
|
@ -64,11 +64,25 @@
|
|||
.endm
|
||||
|
||||
.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
|
||||
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
|
||||
BEGIN_MMU_FTR_SECTION
|
||||
mtcr r10
|
||||
FTR_SECTION_ELSE
|
||||
stw r10, _CCR(r11)
|
||||
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
|
||||
#else
|
||||
stw r10,_CCR(r11) /* save registers */
|
||||
#endif
|
||||
mfspr r10, SPRN_SPRG_SCRATCH0
|
||||
stw r12,GPR12(r11)
|
||||
stw r9,GPR9(r11)
|
||||
mfspr r10,SPRN_SPRG_SCRATCH0
|
||||
stw r10,GPR10(r11)
|
||||
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
|
||||
BEGIN_MMU_FTR_SECTION
|
||||
mfcr r10
|
||||
stw r10, _CCR(r11)
|
||||
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
||||
#endif
|
||||
mfspr r12,SPRN_SPRG_SCRATCH1
|
||||
stw r12,GPR11(r11)
|
||||
mflr r10
|
||||
|
@ -83,6 +97,11 @@
|
|||
stw r10, _DSISR(r11)
|
||||
.endif
|
||||
lwz r9, SRR1(r12)
|
||||
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
|
||||
BEGIN_MMU_FTR_SECTION
|
||||
andi. r10, r9, MSR_PR
|
||||
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
||||
#endif
|
||||
lwz r12, SRR0(r12)
|
||||
#else
|
||||
mfspr r12,SPRN_SRR0
|
||||
|
|
|
@ -256,7 +256,7 @@ InstructionTLBMiss:
|
|||
* set. All other Linux PTE bits control the behavior
|
||||
* of the MMU.
|
||||
*/
|
||||
rlwimi r10, r10, 0, 0x0f00 /* Clear bits 20-23 */
|
||||
rlwinm r10, r10, 0, ~0x0f00 /* Clear bits 20-23 */
|
||||
rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */
|
||||
ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
|
||||
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
|
||||
|
|
|
@ -166,7 +166,11 @@ BEGIN_FTR_SECTION
|
|||
mfspr r9,SPRN_HID0
|
||||
andis. r9,r9,HID0_NAP@h
|
||||
beq 1f
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
addis r9, r11, nap_save_msscr0@ha
|
||||
#else
|
||||
addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
|
||||
#endif
|
||||
lwz r9,nap_save_msscr0@l(r9)
|
||||
mtspr SPRN_MSSCR0, r9
|
||||
sync
|
||||
|
@ -174,7 +178,11 @@ BEGIN_FTR_SECTION
|
|||
1:
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
|
||||
BEGIN_FTR_SECTION
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
addis r9, r11, nap_save_hid1@ha
|
||||
#else
|
||||
addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
|
||||
#endif
|
||||
lwz r9,nap_save_hid1@l(r9)
|
||||
mtspr SPRN_HID1, r9
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
|
||||
|
|
|
@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
|
|||
* normal/non-checkpointed stack pointer.
|
||||
*/
|
||||
|
||||
unsigned long ret = tsk->thread.regs->gpr[1];
|
||||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
BUG_ON(tsk != current);
|
||||
|
||||
if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
|
||||
preempt_disable();
|
||||
tm_reclaim_current(TM_CAUSE_SIGNAL);
|
||||
if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
|
||||
return tsk->thread.ckpt_regs.gpr[1];
|
||||
ret = tsk->thread.ckpt_regs.gpr[1];
|
||||
|
||||
/*
|
||||
* If we treclaim, we must clear the current thread's TM bits
|
||||
* before re-enabling preemption. Otherwise we might be
|
||||
* preempted and have the live MSR[TS] changed behind our back
|
||||
* (tm_recheckpoint_new_task() would recheckpoint). Besides, we
|
||||
* enter the signal handler in non-transactional state.
|
||||
*/
|
||||
tsk->thread.regs->msr &= ~MSR_TS_MASK;
|
||||
preempt_enable();
|
||||
}
|
||||
#endif
|
||||
return tsk->thread.regs->gpr[1];
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
|
|||
*/
|
||||
static int save_tm_user_regs(struct pt_regs *regs,
|
||||
struct mcontext __user *frame,
|
||||
struct mcontext __user *tm_frame, int sigret)
|
||||
struct mcontext __user *tm_frame, int sigret,
|
||||
unsigned long msr)
|
||||
{
|
||||
unsigned long msr = regs->msr;
|
||||
|
||||
WARN_ON(tm_suspend_disabled);
|
||||
|
||||
/* Remove TM bits from thread's MSR. The MSR in the sigcontext
|
||||
* just indicates to userland that we were doing a transaction, but we
|
||||
* don't want to return in transactional state. This also ensures
|
||||
* that flush_fp_to_thread won't set TIF_RESTORE_TM again.
|
||||
*/
|
||||
regs->msr &= ~MSR_TS_MASK;
|
||||
|
||||
/* Save both sets of general registers */
|
||||
	if (save_general_regs(&current->thread.ckpt_regs, frame)
|
||||
|| save_general_regs(regs, tm_frame))
|
||||
|
@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
|
|||
int sigret;
|
||||
unsigned long tramp;
|
||||
struct pt_regs *regs = tsk->thread.regs;
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
/* Save the thread's msr before get_tm_stackpointer() changes it */
|
||||
unsigned long msr = regs->msr;
|
||||
#endif
|
||||
|
||||
BUG_ON(tsk != current);
|
||||
|
||||
|
@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
|
|||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
tm_frame = &rt_sf->uc_transact.uc_mcontext;
|
||||
if (MSR_TM_ACTIVE(regs->msr)) {
|
||||
if (MSR_TM_ACTIVE(msr)) {
|
||||
if (__put_user((unsigned long)&rt_sf->uc_transact,
|
||||
&rt_sf->uc.uc_link) ||
|
||||
__put_user((unsigned long)tm_frame,
|
||||
&rt_sf->uc_transact.uc_regs))
|
||||
goto badframe;
|
||||
if (save_tm_user_regs(regs, frame, tm_frame, sigret))
|
||||
if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
|
||||
goto badframe;
|
||||
}
|
||||
else
|
||||
|
@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
|
|||
int sigret;
|
||||
unsigned long tramp;
|
||||
struct pt_regs *regs = tsk->thread.regs;
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
/* Save the thread's msr before get_tm_stackpointer() changes it */
|
||||
unsigned long msr = regs->msr;
|
||||
#endif
|
||||
|
||||
BUG_ON(tsk != current);
|
||||
|
||||
|
@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
|
|||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
tm_mctx = &frame->mctx_transact;
|
||||
if (MSR_TM_ACTIVE(regs->msr)) {
|
||||
if (MSR_TM_ACTIVE(msr)) {
|
||||
if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
|
||||
sigret))
|
||||
sigret, msr))
|
||||
goto badframe;
|
||||
}
|
||||
else
|
||||
|
|
|

arch/powerpc/kernel/signal_64.c

@@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
 static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 				 struct sigcontext __user *tm_sc,
 				 struct task_struct *tsk,
-				 int signr, sigset_t *set, unsigned long handler)
+				 int signr, sigset_t *set, unsigned long handler,
+				 unsigned long msr)
 {
 	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
 	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
@@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
 #endif
 	struct pt_regs *regs = tsk->thread.regs;
-	unsigned long msr = tsk->thread.regs->msr;
 	long err = 0;
 
 	BUG_ON(tsk != current);
 
-	BUG_ON(!MSR_TM_ACTIVE(regs->msr));
+	BUG_ON(!MSR_TM_ACTIVE(msr));
 
 	WARN_ON(tm_suspend_disabled);
 
@@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	 */
 	msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
 
-	/* Remove TM bits from thread's MSR. The MSR in the sigcontext
-	 * just indicates to userland that we were doing a transaction, but we
-	 * don't want to return in transactional state. This also ensures
-	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
-	 */
-	regs->msr &= ~MSR_TS_MASK;
-
 #ifdef CONFIG_ALTIVEC
 	err |= __put_user(v_regs, &sc->v_regs);
 	err |= __put_user(tm_v_regs, &tm_sc->v_regs);
@@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
 	unsigned long newsp = 0;
 	long err = 0;
 	struct pt_regs *regs = tsk->thread.regs;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/* Save the thread's msr before get_tm_stackpointer() changes it */
+	unsigned long msr = regs->msr;
+#endif
 
 	BUG_ON(tsk != current);
 
@@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
 	err |= __put_user(0, &frame->uc.uc_flags);
 	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	if (MSR_TM_ACTIVE(regs->msr)) {
+	if (MSR_TM_ACTIVE(msr)) {
 		/* The ucontext_t passed to userland points to the second
 		 * ucontext_t (for transactional state) with its uc_link ptr.
 		 */
@@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
 		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
 					    &frame->uc_transact.uc_mcontext,
 					    tsk, ksig->sig, NULL,
-					    (unsigned long)ksig->ka.sa.sa_handler);
+					    (unsigned long)ksig->ka.sa.sa_handler,
+					    msr);
 	} else
 #endif
 	{

arch/powerpc/mm/book3s32/hash_low.S

@@ -25,12 +25,6 @@
 #include <asm/feature-fixups.h>
 #include <asm/code-patching-asm.h>
 
-#ifdef CONFIG_VMAP_STACK
-#define ADDR_OFFSET	0
-#else
-#define ADDR_OFFSET	PAGE_OFFSET
-#endif
-
 #ifdef CONFIG_SMP
 	.section .bss
 	.align	2
@@ -53,8 +47,8 @@ mmu_hash_lock:
 	.text
 _GLOBAL(hash_page)
 #ifdef CONFIG_SMP
-	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@h
-	ori	r8, r8, (mmu_hash_lock - ADDR_OFFSET)@l
+	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@h
+	ori	r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
 	lis	r0,0x0fff
 	b	10f
 11:	lwz	r6,0(r8)
@@ -72,12 +66,9 @@ _GLOBAL(hash_page)
 	cmplw	0,r4,r0
 	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT	/* test low addresses as user */
 	mfspr	r5, SPRN_SPRG_PGDIR	/* phys page-table root */
-#ifdef CONFIG_VMAP_STACK
-	tovirt(r5, r5)
-#endif
 	blt+	112f			/* assume user more likely */
-	lis	r5, (swapper_pg_dir - ADDR_OFFSET)@ha	/* if kernel address, use */
-	addi	r5 ,r5 ,(swapper_pg_dir - ADDR_OFFSET)@l	/* kernel page table */
+	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
+	addi	r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
 	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
 112:
 #ifndef CONFIG_PTE_64BIT
@@ -89,9 +80,6 @@ _GLOBAL(hash_page)
 	lwzx	r8,r8,r5		/* Get L1 entry */
 	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
 #endif
-#ifdef CONFIG_VMAP_STACK
-	tovirt(r8, r8)
-#endif
 #ifdef CONFIG_SMP
 	beq-	hash_page_out		/* return if no mapping */
 #else
@@ -143,30 +131,36 @@ retry:
 	bne-	retry			/* retry if someone got there first */
 
 	mfsrin	r3,r4			/* get segment reg for segment */
+#ifndef CONFIG_VMAP_STACK
 	mfctr	r0
 	stw	r0,_CTR(r11)
+#endif
 	bl	create_hpte		/* add the hash table entry */
 
 #ifdef CONFIG_SMP
 	eieio
-	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@ha
+	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
 	li	r0,0
-	stw	r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
+	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
 #endif
 
+#ifdef CONFIG_VMAP_STACK
+	b	fast_hash_page_return
+#else
 	/* Return from the exception */
 	lwz	r5,_CTR(r11)
 	mtctr	r5
 	lwz	r0,GPR0(r11)
 	lwz	r8,GPR8(r11)
 	b	fast_exception_return
+#endif
 
 #ifdef CONFIG_SMP
 hash_page_out:
 	eieio
-	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@ha
+	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
 	li	r0,0
-	stw	r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
+	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
 	blr
 #endif /* CONFIG_SMP */
 
@@ -341,7 +335,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
 	patch_site	1f, patch__hash_page_A1
 	patch_site	2f, patch__hash_page_A2
 	/* Get the address of the primary PTE group in the hash table (r3) */
-0:	lis	r0, (Hash_base - ADDR_OFFSET)@h	/* base address of hash table */
+0:	lis	r0, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
 1:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT	/* VSID -> hash */
 2:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT	/* PI -> hash */
 	xor	r3,r3,r0		/* make primary hash */
@@ -355,10 +349,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
 	beq+	10f			/* no PTE: go look for an empty slot */
 	tlbie	r4
 
-	lis	r4, (htab_hash_searches - ADDR_OFFSET)@ha
-	lwz	r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
+	lis	r4, (htab_hash_searches - PAGE_OFFSET)@ha
+	lwz	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
 	addi	r6,r6,1			/* count how many searches we do */
-	stw	r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
+	stw	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
 
 	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
 	mtctr	r0
@@ -390,10 +384,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
 	beq+	found_empty
 
 	/* update counter of times that the primary PTEG is full */
-	lis	r4, (primary_pteg_full - ADDR_OFFSET)@ha
-	lwz	r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
+	lis	r4, (primary_pteg_full - PAGE_OFFSET)@ha
+	lwz	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
 	addi	r6,r6,1
-	stw	r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
+	stw	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
 
 	patch_site	0f, patch__hash_page_C
 	/* Search the secondary PTEG for an empty slot */
@@ -427,8 +421,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
 	 * lockup here but that shouldn't happen
 	 */
 
-1:	lis	r4, (next_slot - ADDR_OFFSET)@ha	/* get next evict slot */
-	lwz	r6, (next_slot - ADDR_OFFSET)@l(r4)
+1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
+	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
 	addi	r6,r6,HPTE_SIZE		/* search for candidate */
 	andi.	r6,r6,7*HPTE_SIZE
 	stw	r6,next_slot@l(r4)

arch/powerpc/mm/book3s32/mmu.c

@@ -413,7 +413,7 @@ void __init MMU_init_hw(void)
 void __init MMU_init_hw_patch(void)
 {
 	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
-	unsigned int hash;
+	unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;
 
 	if (ppc_md.progress)
 		ppc_md.progress("hash:patch", 0x345);
@@ -425,11 +425,6 @@ void __init MMU_init_hw_patch(void)
 	/*
 	 * Patch up the instructions in hashtable.S:create_hpte
 	 */
-	if (IS_ENABLED(CONFIG_VMAP_STACK))
-		hash = (unsigned int)Hash;
-	else
-		hash = (unsigned int)Hash - PAGE_OFFSET;
-
 	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
 	modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
 	modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
@@ -439,8 +434,7 @@ void __init MMU_init_hw_patch(void)
 	/*
 	 * Patch up the instructions in hashtable.S:flush_hash_page
 	 */
-	modify_instruction_site(&patch__flush_hash_A0, 0xffff,
-				((unsigned int)Hash - PAGE_OFFSET) >> 16);
+	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
 	modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
 	modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
 	modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);

arch/powerpc/mm/hugetlbpage.c

@@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	if (pshift >= pdshift) {
 		cachep = PGT_CACHE(PTE_T_ORDER);
 		num_hugepd = 1 << (pshift - pdshift);
+		new = NULL;
 	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
-		cachep = PGT_CACHE(PTE_INDEX_SIZE);
+		cachep = NULL;
 		num_hugepd = 1;
+		new = pte_alloc_one(mm);
 	} else {
 		cachep = PGT_CACHE(pdshift - pshift);
 		num_hugepd = 1;
+		new = NULL;
 	}
 
-	if (!cachep) {
+	if (!cachep && !new) {
 		WARN_ONCE(1, "No page table cache created for hugetlb tables");
 		return -ENOMEM;
 	}
 
-	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
+	if (cachep)
+		new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
 
 	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
 	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	if (i < num_hugepd) {
 		for (i = i - 1 ; i >= 0; i--, hpdp--)
 			*hpdp = __hugepd(0);
-		kmem_cache_free(cachep, new);
+		if (cachep)
+			kmem_cache_free(cachep, new);
+		else
+			pte_free(mm, new);
 	} else {
 		kmemleak_ignore(new);
 	}
@@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	if (shift >= pdshift)
 		hugepd_free(tlb, hugepte);
 	else if (IS_ENABLED(CONFIG_PPC_8xx))
-		pgtable_free_tlb(tlb, hugepte,
-				 get_hugepd_cache_index(PTE_INDEX_SIZE));
+		pgtable_free_tlb(tlb, hugepte, 0);
 	else
 		pgtable_free_tlb(tlb, hugepte,
 				 get_hugepd_cache_index(pdshift - shift));
@@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void)
 		 * if we have pdshift and shift value same, we don't
 		 * use pgt cache for hugepd.
 		 */
-		if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
-			pgtable_cache_add(PTE_INDEX_SIZE);
-		else if (pdshift > shift)
-			pgtable_cache_add(pdshift - shift);
-		else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
+		if (pdshift > shift) {
+			if (!IS_ENABLED(CONFIG_PPC_8xx))
+				pgtable_cache_add(pdshift - shift);
+		} else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
+			   IS_ENABLED(CONFIG_PPC_8xx)) {
 			pgtable_cache_add(PTE_T_ORDER);
+		}
 
 		configured = true;
 	}

arch/powerpc/mm/kasan/kasan_init_32.c

@@ -185,8 +185,7 @@ u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
 
 static void __init kasan_early_hash_table(void)
 {
-	unsigned int hash = IS_ENABLED(CONFIG_VMAP_STACK) ? (unsigned int)early_hash :
-							    __pa(early_hash);
+	unsigned int hash = __pa(early_hash);
 
 	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
 	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);

arch/powerpc/xmon/xmon.c

@@ -3435,6 +3435,11 @@ getstring(char *s, int size)
 	int c;
 
 	c = skipbl();
+	if (c == '\n') {
+		*s = 0;
+		return;
+	}
+
 	do {
 		if( size > 1 ){
 			*s++ = c;

arch/riscv/boot/.gitignore

@@ -1,2 +1,4 @@
 Image
 Image.gz
+loader
+loader.lds

arch/riscv/include/asm/csr.h

@@ -72,6 +72,16 @@
 #define EXC_LOAD_PAGE_FAULT	13
 #define EXC_STORE_PAGE_FAULT	15
 
+/* PMP configuration */
+#define PMP_R			0x01
+#define PMP_W			0x02
+#define PMP_X			0x04
+#define PMP_A			0x18
+#define PMP_A_TOR		0x08
+#define PMP_A_NA4		0x10
+#define PMP_A_NAPOT		0x18
+#define PMP_L			0x80
+
 /* symbolic CSR names: */
 #define CSR_CYCLE		0xc00
 #define CSR_TIME		0xc01
@@ -100,6 +110,8 @@
 #define CSR_MCAUSE		0x342
 #define CSR_MTVAL		0x343
 #define CSR_MIP			0x344
+#define CSR_PMPCFG0		0x3a0
+#define CSR_PMPADDR0		0x3b0
 #define CSR_MHARTID		0xf14
 
 #ifdef CONFIG_RISCV_M_MODE

arch/riscv/kernel/head.S

@@ -58,6 +58,12 @@ _start_kernel:
 	/* Reset all registers except ra, a0, a1 */
 	call reset_regs
 
+	/* Setup a PMP to permit access to all of memory. */
+	li a0, -1
+	csrw CSR_PMPADDR0, a0
+	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
+	csrw CSR_PMPCFG0, a0
+
 	/*
 	 * The hartid in a0 is expected later on, and we have no firmware
 	 * to hand it to us.
Some files were not shown because too many files changed in this diff.