Merge branches 'cpuidle', 'fixes' and 'misc' into for-linus
This commit is contained in:
Коммит
5f5a00eaa1
|
@ -128,16 +128,44 @@ X!Edrivers/base/interface.c
|
|||
!Edrivers/base/platform.c
|
||||
!Edrivers/base/bus.c
|
||||
</sect1>
|
||||
<sect1><title>Device Drivers DMA Management</title>
|
||||
<sect1>
|
||||
<title>Buffer Sharing and Synchronization</title>
|
||||
<para>
|
||||
The dma-buf subsystem provides the framework for sharing buffers
|
||||
for hardware (DMA) access across multiple device drivers and
|
||||
subsystems, and for synchronizing asynchronous hardware access.
|
||||
</para>
|
||||
<para>
|
||||
This is used, for example, by drm "prime" multi-GPU support, but
|
||||
is of course not limited to GPU use cases.
|
||||
</para>
|
||||
<para>
|
||||
The three main components of this are: (1) dma-buf, representing
|
||||
a sg_table and exposed to userspace as a file descriptor to allow
|
||||
passing between devices, (2) fence, which provides a mechanism
|
||||
to signal when one device as finished access, and (3) reservation,
|
||||
which manages the shared or exclusive fence(s) associated with
|
||||
the buffer.
|
||||
</para>
|
||||
<sect2><title>dma-buf</title>
|
||||
!Edrivers/dma-buf/dma-buf.c
|
||||
!Edrivers/dma-buf/fence.c
|
||||
!Edrivers/dma-buf/seqno-fence.c
|
||||
!Iinclude/linux/fence.h
|
||||
!Iinclude/linux/seqno-fence.h
|
||||
!Iinclude/linux/dma-buf.h
|
||||
</sect2>
|
||||
<sect2><title>reservation</title>
|
||||
!Pdrivers/dma-buf/reservation.c Reservation Object Overview
|
||||
!Edrivers/dma-buf/reservation.c
|
||||
!Iinclude/linux/reservation.h
|
||||
</sect2>
|
||||
<sect2><title>fence</title>
|
||||
!Edrivers/dma-buf/fence.c
|
||||
!Iinclude/linux/fence.h
|
||||
!Edrivers/dma-buf/seqno-fence.c
|
||||
!Iinclude/linux/seqno-fence.h
|
||||
!Edrivers/dma-buf/sync_file.c
|
||||
!Iinclude/linux/sync_file.h
|
||||
</sect2>
|
||||
</sect1>
|
||||
<sect1><title>Device Drivers DMA Management</title>
|
||||
!Edrivers/base/dma-coherent.c
|
||||
!Edrivers/base/dma-mapping.c
|
||||
</sect1>
|
||||
|
|
|
@ -56,6 +56,7 @@ stable kernels.
|
|||
| ARM | MMU-500 | #841119,#826419 | N/A |
|
||||
| | | | |
|
||||
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
|
||||
| Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 |
|
||||
| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
|
||||
| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
|
||||
| Cavium | ThunderX SMMUv2 | #27704 | N/A |
|
||||
|
|
|
@ -62,6 +62,7 @@ Required properties:
|
|||
display-timings are used instead.
|
||||
|
||||
Optional properties (required if display-timings are used):
|
||||
- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
|
||||
- display-timings : A node that describes the display timings as defined in
|
||||
Documentation/devicetree/bindings/display/display-timing.txt.
|
||||
- fsl,data-mapping : should be "spwg" or "jeida"
|
||||
|
|
|
@ -1,141 +1,26 @@
|
|||
Each mount of the devpts filesystem is now distinct such that ptys
|
||||
and their indicies allocated in one mount are independent from ptys
|
||||
and their indicies in all other mounts.
|
||||
|
||||
To support containers, we now allow multiple instances of devpts filesystem,
|
||||
such that indices of ptys allocated in one instance are independent of indices
|
||||
allocated in other instances of devpts.
|
||||
All mounts of the devpts filesystem now create a /dev/pts/ptmx node
|
||||
with permissions 0000.
|
||||
|
||||
To preserve backward compatibility, this support for multiple instances is
|
||||
enabled only if:
|
||||
To retain backwards compatibility the a ptmx device node (aka any node
|
||||
created with "mknod name c 5 2") when opened will look for an instance
|
||||
of devpts under the name "pts" in the same directory as the ptmx device
|
||||
node.
|
||||
|
||||
- CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and
|
||||
- '-o newinstance' mount option is specified while mounting devpts
|
||||
|
||||
IOW, devpts now supports both single-instance and multi-instance semantics.
|
||||
|
||||
If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and
|
||||
this referred to as the "legacy" mode. In this mode, the new mount options
|
||||
(-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message
|
||||
on console.
|
||||
|
||||
If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the
|
||||
'newinstance' option (as in current start-up scripts) the new mount binds
|
||||
to the initial kernel mount of devpts. This mode is referred to as the
|
||||
'single-instance' mode and the current, single-instance semantics are
|
||||
preserved, i.e PTYs are common across the system.
|
||||
|
||||
The only difference between this single-instance mode and the legacy mode
|
||||
is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which
|
||||
can safely be ignored.
|
||||
|
||||
If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified,
|
||||
the mount is considered to be in the multi-instance mode and a new instance
|
||||
of the devpts fs is created. Any ptys created in this instance are independent
|
||||
of ptys in other instances of devpts. Like in the single-instance mode, the
|
||||
/dev/pts/ptmx node is present. To effectively use the multi-instance mode,
|
||||
open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or
|
||||
bind-mount.
|
||||
|
||||
Eg: A container startup script could do the following:
|
||||
|
||||
$ chmod 0666 /dev/pts/ptmx
|
||||
$ rm /dev/ptmx
|
||||
$ ln -s pts/ptmx /dev/ptmx
|
||||
$ ns_exec -cm /bin/bash
|
||||
|
||||
# We are now in new container
|
||||
|
||||
$ umount /dev/pts
|
||||
$ mount -t devpts -o newinstance lxcpts /dev/pts
|
||||
$ sshd -p 1234
|
||||
|
||||
where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs
|
||||
/bin/bash in the child process. A pty created by the sshd is not visible in
|
||||
the original mount of /dev/pts.
|
||||
As an option instead of placing a /dev/ptmx device node at /dev/ptmx
|
||||
it is possible to place a symlink to /dev/pts/ptmx at /dev/ptmx or
|
||||
to bind mount /dev/ptx/ptmx to /dev/ptmx. If you opt for using
|
||||
the devpts filesystem in this manner devpts should be mounted with
|
||||
the ptmxmode=0666, or chmod 0666 /dev/pts/ptmx should be called.
|
||||
|
||||
Total count of pty pairs in all instances is limited by sysctls:
|
||||
kernel.pty.max = 4096 - global limit
|
||||
kernel.pty.reserve = 1024 - reserve for initial instance
|
||||
kernel.pty.reserve = 1024 - reserved for filesystems mounted from the initial mount namespace
|
||||
kernel.pty.nr - current count of ptys
|
||||
|
||||
Per-instance limit could be set by adding mount option "max=<count>".
|
||||
This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve.
|
||||
In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit.
|
||||
|
||||
User-space changes
|
||||
------------------
|
||||
|
||||
In multi-instance mode (i.e '-o newinstance' mount option is specified at least
|
||||
once), following user-space issues should be noted.
|
||||
|
||||
1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored
|
||||
and no change is needed to system-startup scripts.
|
||||
|
||||
2. To effectively use multi-instance mode (i.e -o newinstance is specified)
|
||||
administrators or startup scripts should "redirect" open of /dev/ptmx to
|
||||
/dev/pts/ptmx using either a bind mount or symlink.
|
||||
|
||||
$ mount -t devpts -o newinstance devpts /dev/pts
|
||||
|
||||
followed by either
|
||||
|
||||
$ rm /dev/ptmx
|
||||
$ ln -s pts/ptmx /dev/ptmx
|
||||
$ chmod 666 /dev/pts/ptmx
|
||||
or
|
||||
$ mount -o bind /dev/pts/ptmx /dev/ptmx
|
||||
|
||||
3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it
|
||||
enables better error-reporting and treats both single-instance and
|
||||
multi-instance mounts similarly.
|
||||
|
||||
But this method requires that system-startup scripts set the mode of
|
||||
/dev/pts/ptmx correctly (default mode is 0000). The scripts can set the
|
||||
mode by, either
|
||||
|
||||
- adding ptmxmode mount option to devpts entry in /etc/fstab, or
|
||||
- using 'chmod 0666 /dev/pts/ptmx'
|
||||
|
||||
4. If multi-instance mode mount is needed for containers, but the system
|
||||
startup scripts have not yet been updated, container-startup scripts
|
||||
should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single-
|
||||
instance mounts.
|
||||
|
||||
Or, in general, container-startup scripts should use:
|
||||
|
||||
mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts
|
||||
if [ ! -L /dev/ptmx ]; then
|
||||
mount -o bind /dev/pts/ptmx /dev/ptmx
|
||||
fi
|
||||
|
||||
When all devpts mounts are multi-instance, /dev/ptmx can permanently be
|
||||
a symlink to pts/ptmx and the bind mount can be ignored.
|
||||
|
||||
5. A multi-instance mount that is not accompanied by the /dev/ptmx to
|
||||
/dev/pts/ptmx redirection would result in an unusable/unreachable pty.
|
||||
|
||||
mount -t devpts -o newinstance lxcpts /dev/pts
|
||||
|
||||
immediately followed by:
|
||||
|
||||
open("/dev/ptmx")
|
||||
|
||||
would create a pty, say /dev/pts/7, in the initial kernel mount.
|
||||
But /dev/pts/7 would be invisible in the new mount.
|
||||
|
||||
6. The permissions for /dev/pts/ptmx node should be specified when mounting
|
||||
/dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000).
|
||||
|
||||
mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts
|
||||
|
||||
The permissions can be later be changed as usual with 'chmod'.
|
||||
|
||||
chmod 666 /dev/pts/ptmx
|
||||
|
||||
7. A mount of devpts without the 'newinstance' option results in binding to
|
||||
initial kernel mount. This behavior while preserving legacy semantics,
|
||||
does not provide strict isolation in a container environment. i.e by
|
||||
mounting devpts without the 'newinstance' option, a container could
|
||||
get visibility into the 'host' or root container's devpts.
|
||||
|
||||
To workaround this and have strict isolation, all mounts of devpts,
|
||||
including the mount in the root container, should use the newinstance
|
||||
option.
|
||||
|
|
|
@ -170,21 +170,92 @@ document trapinfo
|
|||
address the kernel panicked.
|
||||
end
|
||||
|
||||
define dump_log_idx
|
||||
set $idx = $arg0
|
||||
if ($argc > 1)
|
||||
set $prev_flags = $arg1
|
||||
else
|
||||
set $prev_flags = 0
|
||||
end
|
||||
set $msg = ((struct printk_log *) (log_buf + $idx))
|
||||
set $prefix = 1
|
||||
set $newline = 1
|
||||
set $log = log_buf + $idx + sizeof(*$msg)
|
||||
|
||||
# prev & LOG_CONT && !(msg->flags & LOG_PREIX)
|
||||
if (($prev_flags & 8) && !($msg->flags & 4))
|
||||
set $prefix = 0
|
||||
end
|
||||
|
||||
# msg->flags & LOG_CONT
|
||||
if ($msg->flags & 8)
|
||||
# (prev & LOG_CONT && !(prev & LOG_NEWLINE))
|
||||
if (($prev_flags & 8) && !($prev_flags & 2))
|
||||
set $prefix = 0
|
||||
end
|
||||
# (!(msg->flags & LOG_NEWLINE))
|
||||
if (!($msg->flags & 2))
|
||||
set $newline = 0
|
||||
end
|
||||
end
|
||||
|
||||
if ($prefix)
|
||||
printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
|
||||
end
|
||||
if ($msg->text_len != 0)
|
||||
eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
|
||||
end
|
||||
if ($newline)
|
||||
printf "\n"
|
||||
end
|
||||
if ($msg->dict_len > 0)
|
||||
set $dict = $log + $msg->text_len
|
||||
set $idx = 0
|
||||
set $line = 1
|
||||
while ($idx < $msg->dict_len)
|
||||
if ($line)
|
||||
printf " "
|
||||
set $line = 0
|
||||
end
|
||||
set $c = $dict[$idx]
|
||||
if ($c == '\0')
|
||||
printf "\n"
|
||||
set $line = 1
|
||||
else
|
||||
if ($c < ' ' || $c >= 127 || $c == '\\')
|
||||
printf "\\x%02x", $c
|
||||
else
|
||||
printf "%c", $c
|
||||
end
|
||||
end
|
||||
set $idx = $idx + 1
|
||||
end
|
||||
printf "\n"
|
||||
end
|
||||
end
|
||||
document dump_log_idx
|
||||
Dump a single log given its index in the log buffer. The first
|
||||
parameter is the index into log_buf, the second is optional and
|
||||
specified the previous log buffer's flags, used for properly
|
||||
formatting continued lines.
|
||||
end
|
||||
|
||||
define dmesg
|
||||
set $i = 0
|
||||
set $end_idx = (log_end - 1) & (log_buf_len - 1)
|
||||
set $i = log_first_idx
|
||||
set $end_idx = log_first_idx
|
||||
set $prev_flags = 0
|
||||
|
||||
while ($i < logged_chars)
|
||||
set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
|
||||
|
||||
if ($idx + 100 <= $end_idx) || \
|
||||
($end_idx <= $idx && $idx + 100 < log_buf_len)
|
||||
printf "%.100s", &log_buf[$idx]
|
||||
set $i = $i + 100
|
||||
while (1)
|
||||
set $msg = ((struct printk_log *) (log_buf + $i))
|
||||
if ($msg->len == 0)
|
||||
set $i = 0
|
||||
else
|
||||
printf "%c", log_buf[$idx]
|
||||
set $i = $i + 1
|
||||
dump_log_idx $i $prev_flags
|
||||
set $i = $i + $msg->len
|
||||
set $prev_flags = $msg->flags
|
||||
end
|
||||
if ($i == $end_idx)
|
||||
loop_break
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -369,8 +369,6 @@ does not allocate any driver private context space.
|
|||
Switch configuration
|
||||
--------------------
|
||||
|
||||
- priv_size: additional size needed by the switch driver for its private context
|
||||
|
||||
- tag_protocol: this is to indicate what kind of tagging protocol is supported,
|
||||
should be a valid value from the dsa_tag_protocol enum
|
||||
|
||||
|
@ -416,11 +414,6 @@ PHY devices and link management
|
|||
to the switch port MDIO registers. If unavailable return a negative error
|
||||
code.
|
||||
|
||||
- poll_link: Function invoked by DSA to query the link state of the switch
|
||||
builtin Ethernet PHYs, per port. This function is responsible for calling
|
||||
netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a
|
||||
single call. Executes from workqueue context.
|
||||
|
||||
- adjust_link: Function invoked by the PHY library when a slave network device
|
||||
is attached to a PHY device. This function is responsible for appropriately
|
||||
configuring the switch port link parameters: speed, duplex, pause based on
|
||||
|
@ -542,6 +535,16 @@ Bridge layer
|
|||
Bridge VLAN filtering
|
||||
---------------------
|
||||
|
||||
- port_vlan_filtering: bridge layer function invoked when the bridge gets
|
||||
configured for turning on or off VLAN filtering. If nothing specific needs to
|
||||
be done at the hardware level, this callback does not need to be implemented.
|
||||
When VLAN filtering is turned on, the hardware must be programmed with
|
||||
rejecting 802.1Q frames which have VLAN IDs outside of the programmed allowed
|
||||
VLAN ID map/rules. If there is no PVID programmed into the switch port,
|
||||
untagged frames must be rejected as well. When turned off the switch must
|
||||
accept any 802.1Q frames irrespective of their VLAN ID, and untagged frames are
|
||||
allowed.
|
||||
|
||||
- port_vlan_prepare: bridge layer function invoked when the bridge prepares the
|
||||
configuration of a VLAN on the given port. If the operation is not supported
|
||||
by the hardware, this function should return -EOPNOTSUPP to inform the bridge
|
||||
|
|
|
@ -1036,15 +1036,17 @@ proxy_arp_pvlan - BOOLEAN
|
|||
|
||||
shared_media - BOOLEAN
|
||||
Send(router) or accept(host) RFC1620 shared media redirects.
|
||||
Overrides ip_secure_redirects.
|
||||
Overrides secure_redirects.
|
||||
shared_media for the interface will be enabled if at least one of
|
||||
conf/{all,interface}/shared_media is set to TRUE,
|
||||
it will be disabled otherwise
|
||||
default TRUE
|
||||
|
||||
secure_redirects - BOOLEAN
|
||||
Accept ICMP redirect messages only for gateways,
|
||||
listed in default gateway list.
|
||||
Accept ICMP redirect messages only to gateways listed in the
|
||||
interface's current gateway list. Even if disabled, RFC1122 redirect
|
||||
rules still apply.
|
||||
Overridden by shared_media.
|
||||
secure_redirects for the interface will be enabled if at least one of
|
||||
conf/{all,interface}/secure_redirects is set to TRUE,
|
||||
it will be disabled otherwise
|
||||
|
|
|
@ -826,7 +826,8 @@ The keyctl syscall functions are:
|
|||
(*) Compute a Diffie-Hellman shared secret or public key
|
||||
|
||||
long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
|
||||
char *buffer, size_t buflen);
|
||||
char *buffer, size_t buflen,
|
||||
void *reserved);
|
||||
|
||||
The params struct contains serial numbers for three keys:
|
||||
|
||||
|
@ -843,6 +844,8 @@ The keyctl syscall functions are:
|
|||
public key. If the base is the remote public key, the result is
|
||||
the shared secret.
|
||||
|
||||
The reserved argument must be set to NULL.
|
||||
|
||||
The buffer length must be at least the length of the prime, or zero.
|
||||
|
||||
If the buffer length is nonzero, the length of the result is
|
||||
|
|
|
@ -7989,6 +7989,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
|
|||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
|
||||
S: Odd Fixes
|
||||
F: Documentation/devicetree/bindings/net/
|
||||
F: drivers/net/
|
||||
F: include/linux/if_*
|
||||
F: include/linux/netdevice.h
|
||||
|
@ -8944,6 +8945,7 @@ M: Linus Walleij <linus.walleij@linaro.org>
|
|||
L: linux-gpio@vger.kernel.org
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/pinctrl/
|
||||
F: drivers/pinctrl/
|
||||
F: include/linux/pinctrl/
|
||||
|
||||
|
|
2
Makefile
2
Makefile
|
@ -1,7 +1,7 @@
|
|||
VERSION = 4
|
||||
PATCHLEVEL = 7
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc1
|
||||
EXTRAVERSION = -rc2
|
||||
NAME = Psychotic Stoned Sheep
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
|
|
@ -1186,6 +1186,60 @@ config ARM_ERRATA_773022
|
|||
loop buffer may deliver incorrect instructions. This
|
||||
workaround disables the loop buffer to avoid the erratum.
|
||||
|
||||
config ARM_ERRATA_818325_852422
|
||||
bool "ARM errata: A12: some seqs of opposed cond code instrs => deadlock or corruption"
|
||||
depends on CPU_V7
|
||||
help
|
||||
This option enables the workaround for:
|
||||
- Cortex-A12 818325: Execution of an UNPREDICTABLE STR or STM
|
||||
instruction might deadlock. Fixed in r0p1.
|
||||
- Cortex-A12 852422: Execution of a sequence of instructions might
|
||||
lead to either a data corruption or a CPU deadlock. Not fixed in
|
||||
any Cortex-A12 cores yet.
|
||||
This workaround for all both errata involves setting bit[12] of the
|
||||
Feature Register. This bit disables an optimisation applied to a
|
||||
sequence of 2 instructions that use opposing condition codes.
|
||||
|
||||
config ARM_ERRATA_821420
|
||||
bool "ARM errata: A12: sequence of VMOV to core registers might lead to a dead lock"
|
||||
depends on CPU_V7
|
||||
help
|
||||
This option enables the workaround for the 821420 Cortex-A12
|
||||
(all revs) erratum. In very rare timing conditions, a sequence
|
||||
of VMOV to Core registers instructions, for which the second
|
||||
one is in the shadow of a branch or abort, can lead to a
|
||||
deadlock when the VMOV instructions are issued out-of-order.
|
||||
|
||||
config ARM_ERRATA_825619
|
||||
bool "ARM errata: A12: DMB NSHST/ISHST mixed ... might cause deadlock"
|
||||
depends on CPU_V7
|
||||
help
|
||||
This option enables the workaround for the 825619 Cortex-A12
|
||||
(all revs) erratum. Within rare timing constraints, executing a
|
||||
DMB NSHST or DMB ISHST instruction followed by a mix of Cacheable
|
||||
and Device/Strongly-Ordered loads and stores might cause deadlock
|
||||
|
||||
config ARM_ERRATA_852421
|
||||
bool "ARM errata: A17: DMB ST might fail to create order between stores"
|
||||
depends on CPU_V7
|
||||
help
|
||||
This option enables the workaround for the 852421 Cortex-A17
|
||||
(r1p0, r1p1, r1p2) erratum. Under very rare timing conditions,
|
||||
execution of a DMB ST instruction might fail to properly order
|
||||
stores from GroupA and stores from GroupB.
|
||||
|
||||
config ARM_ERRATA_852423
|
||||
bool "ARM errata: A17: some seqs of opposed cond code instrs => deadlock or corruption"
|
||||
depends on CPU_V7
|
||||
help
|
||||
This option enables the workaround for:
|
||||
- Cortex-A17 852423: Execution of a sequence of instructions might
|
||||
lead to either a data corruption or a CPU deadlock. Not fixed in
|
||||
any Cortex-A17 cores yet.
|
||||
This is identical to Cortex-A12 erratum 852422. It is a separate
|
||||
config option from the A12 erratum due to the way errata are checked
|
||||
for and handled.
|
||||
|
||||
endmenu
|
||||
|
||||
source "arch/arm/common/Kconfig"
|
||||
|
|
|
@ -327,6 +327,7 @@ zImage: Image
|
|||
|
||||
$(BOOT_TARGETS): vmlinux
|
||||
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
|
||||
@$(kecho) ' Kernel: $(boot)/$@ is ready'
|
||||
|
||||
$(INSTALL_TARGETS):
|
||||
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
|
||||
|
|
|
@ -31,7 +31,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y)
|
|||
|
||||
$(obj)/xipImage: vmlinux FORCE
|
||||
$(call if_changed,objcopy)
|
||||
@$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
|
||||
@$(kecho) ' Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)'
|
||||
|
||||
$(obj)/Image $(obj)/zImage: FORCE
|
||||
@echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
|
||||
|
@ -46,14 +46,12 @@ $(obj)/xipImage: FORCE
|
|||
|
||||
$(obj)/Image: vmlinux FORCE
|
||||
$(call if_changed,objcopy)
|
||||
@$(kecho) ' Kernel: $@ is ready'
|
||||
|
||||
$(obj)/compressed/vmlinux: $(obj)/Image FORCE
|
||||
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
|
||||
|
||||
$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
|
||||
$(call if_changed,objcopy)
|
||||
@$(kecho) ' Kernel: $@ is ready'
|
||||
|
||||
endif
|
||||
|
||||
|
@ -78,14 +76,12 @@ fi
|
|||
$(obj)/uImage: $(obj)/zImage FORCE
|
||||
@$(check_for_multiple_loadaddr)
|
||||
$(call if_changed,uimage)
|
||||
@$(kecho) ' Image $@ is ready'
|
||||
|
||||
$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
|
||||
$(Q)$(MAKE) $(build)=$(obj)/bootp $@
|
||||
|
||||
$(obj)/bootpImage: $(obj)/bootp/bootp FORCE
|
||||
$(call if_changed,objcopy)
|
||||
@$(kecho) ' Kernel: $@ is ready'
|
||||
|
||||
PHONY += initrd install zinstall uinstall
|
||||
initrd:
|
||||
|
|
|
@ -480,13 +480,13 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
|
|||
.macro uaccess_save, tmp
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
mrc p15, 0, \tmp, c3, c0, 0
|
||||
str \tmp, [sp, #S_FRAME_SIZE]
|
||||
str \tmp, [sp, #SVC_DACR]
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro uaccess_restore
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
ldr r0, [sp, #S_FRAME_SIZE]
|
||||
ldr r0, [sp, #SVC_DACR]
|
||||
mcr p15, 0, r0, c3, c0, 0
|
||||
#endif
|
||||
.endm
|
||||
|
|
|
@ -44,9 +44,7 @@ extern void arm_heavy_mb(void);
|
|||
#define __arm_heavy_mb(x...) dsb(x)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_BARRIERS
|
||||
#include <mach/barriers.h>
|
||||
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
|
||||
#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
|
||||
#define mb() __arm_heavy_mb()
|
||||
#define rmb() dsb()
|
||||
#define wmb() __arm_heavy_mb(st)
|
||||
|
|
|
@ -10,8 +10,8 @@
|
|||
#include <asm/param.h> /* HZ */
|
||||
|
||||
#define MAX_UDELAY_MS 2
|
||||
#define UDELAY_MULT ((UL(2199023) * HZ) >> 11)
|
||||
#define UDELAY_SHIFT 30
|
||||
#define UDELAY_MULT UL(2047 * HZ + 483648 * HZ / 1000000)
|
||||
#define UDELAY_SHIFT 31
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
@ -34,7 +34,7 @@ extern struct arm_delay_ops {
|
|||
* it, it means that you're calling udelay() with an out of range value.
|
||||
*
|
||||
* With currently imposed limits, this means that we support a max delay
|
||||
* of 2000us. Further limits: HZ<=1000 and bogomips<=3355
|
||||
* of 2000us. Further limits: HZ<=1000
|
||||
*/
|
||||
extern void __bad_udelay(void);
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
|
||||
#define fd_outb(val,port) \
|
||||
do { \
|
||||
if ((port) == FD_DOR) \
|
||||
if ((port) == (u32)FD_DOR) \
|
||||
fd_setdor((val)); \
|
||||
else \
|
||||
outb((val),(port)); \
|
||||
|
|
|
@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
|
|||
|
||||
#define pmd_large(pmd) (pmd_val(pmd) & 2)
|
||||
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
|
||||
#define pmd_present(pmd) (pmd_val(pmd))
|
||||
|
||||
#define copy_pmd(pmdpd,pmdps) \
|
||||
do { \
|
||||
|
|
|
@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
|
|||
: !!(pmd_val(pmd) & (val)))
|
||||
#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
|
||||
|
||||
#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
|
||||
#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
|
||||
#define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
|
||||
static inline pte_t pte_mkspecial(pte_t pte)
|
||||
|
@ -249,10 +250,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
|
|||
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
|
||||
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
|
||||
|
||||
/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
|
||||
/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
|
||||
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
|
||||
{
|
||||
return __pmd(0);
|
||||
return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
|
||||
}
|
||||
|
||||
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
|
||||
|
|
|
@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
|
|||
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
|
||||
|
||||
#define pmd_none(pmd) (!pmd_val(pmd))
|
||||
#define pmd_present(pmd) (pmd_val(pmd))
|
||||
|
||||
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
|
||||
{
|
||||
|
|
|
@ -13,10 +13,20 @@
|
|||
#include <uapi/asm/ptrace.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/types.h>
|
||||
|
||||
struct pt_regs {
|
||||
unsigned long uregs[18];
|
||||
};
|
||||
|
||||
struct svc_pt_regs {
|
||||
struct pt_regs regs;
|
||||
u32 dacr;
|
||||
u32 addr_limit;
|
||||
};
|
||||
|
||||
#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
|
||||
|
||||
#define user_mode(regs) \
|
||||
(((regs)->ARM_cpsr & 0xf) == 0)
|
||||
|
||||
|
|
|
@ -104,14 +104,6 @@ static inline void set_fs(mm_segment_t fs)
|
|||
|
||||
#define segment_eq(a, b) ((a) == (b))
|
||||
|
||||
#define __addr_ok(addr) ({ \
|
||||
unsigned long flag; \
|
||||
__asm__("cmp %2, %0; movlo %0, #0" \
|
||||
: "=&r" (flag) \
|
||||
: "0" (current_thread_info()->addr_limit), "r" (addr) \
|
||||
: "cc"); \
|
||||
(flag == 0); })
|
||||
|
||||
/* We use 33-bit arithmetic here... */
|
||||
#define __range_ok(addr, size) ({ \
|
||||
unsigned long flag, roksum; \
|
||||
|
@ -238,49 +230,23 @@ extern int __put_user_2(void *, unsigned int);
|
|||
extern int __put_user_4(void *, unsigned int);
|
||||
extern int __put_user_8(void *, unsigned long long);
|
||||
|
||||
#define __put_user_x(__r2, __p, __e, __l, __s) \
|
||||
__asm__ __volatile__ ( \
|
||||
__asmeq("%0", "r0") __asmeq("%2", "r2") \
|
||||
__asmeq("%3", "r1") \
|
||||
"bl __put_user_" #__s \
|
||||
: "=&r" (__e) \
|
||||
: "0" (__p), "r" (__r2), "r" (__l) \
|
||||
: "ip", "lr", "cc")
|
||||
|
||||
#define __put_user_check(x, p) \
|
||||
#define __put_user_check(__pu_val, __ptr, __err, __s) \
|
||||
({ \
|
||||
unsigned long __limit = current_thread_info()->addr_limit - 1; \
|
||||
const typeof(*(p)) __user *__tmp_p = (p); \
|
||||
register const typeof(*(p)) __r2 asm("r2") = (x); \
|
||||
register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
|
||||
register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
|
||||
register const void __user *__p asm("r0") = __ptr; \
|
||||
register unsigned long __l asm("r1") = __limit; \
|
||||
register int __e asm("r0"); \
|
||||
unsigned int __ua_flags = uaccess_save_and_enable(); \
|
||||
switch (sizeof(*(__p))) { \
|
||||
case 1: \
|
||||
__put_user_x(__r2, __p, __e, __l, 1); \
|
||||
break; \
|
||||
case 2: \
|
||||
__put_user_x(__r2, __p, __e, __l, 2); \
|
||||
break; \
|
||||
case 4: \
|
||||
__put_user_x(__r2, __p, __e, __l, 4); \
|
||||
break; \
|
||||
case 8: \
|
||||
__put_user_x(__r2, __p, __e, __l, 8); \
|
||||
break; \
|
||||
default: __e = __put_user_bad(); break; \
|
||||
} \
|
||||
uaccess_restore(__ua_flags); \
|
||||
__e; \
|
||||
__asm__ __volatile__ ( \
|
||||
__asmeq("%0", "r0") __asmeq("%2", "r2") \
|
||||
__asmeq("%3", "r1") \
|
||||
"bl __put_user_" #__s \
|
||||
: "=&r" (__e) \
|
||||
: "0" (__p), "r" (__r2), "r" (__l) \
|
||||
: "ip", "lr", "cc"); \
|
||||
__err = __e; \
|
||||
})
|
||||
|
||||
#define put_user(x, p) \
|
||||
({ \
|
||||
might_fault(); \
|
||||
__put_user_check(x, p); \
|
||||
})
|
||||
|
||||
#else /* CONFIG_MMU */
|
||||
|
||||
/*
|
||||
|
@ -298,7 +264,7 @@ static inline void set_fs(mm_segment_t fs)
|
|||
}
|
||||
|
||||
#define get_user(x, p) __get_user(x, p)
|
||||
#define put_user(x, p) __put_user(x, p)
|
||||
#define __put_user_check __put_user_nocheck
|
||||
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
|
@ -389,36 +355,54 @@ do { \
|
|||
#define __get_user_asm_word(x, addr, err) \
|
||||
__get_user_asm(x, addr, err, ldr)
|
||||
|
||||
|
||||
#define __put_user_switch(x, ptr, __err, __fn) \
|
||||
do { \
|
||||
const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
|
||||
__typeof__(*(ptr)) __pu_val = (x); \
|
||||
unsigned int __ua_flags; \
|
||||
might_fault(); \
|
||||
__ua_flags = uaccess_save_and_enable(); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
|
||||
case 2: __fn(__pu_val, __pu_ptr, __err, 2); break; \
|
||||
case 4: __fn(__pu_val, __pu_ptr, __err, 4); break; \
|
||||
case 8: __fn(__pu_val, __pu_ptr, __err, 8); break; \
|
||||
default: __err = __put_user_bad(); break; \
|
||||
} \
|
||||
uaccess_restore(__ua_flags); \
|
||||
} while (0)
|
||||
|
||||
#define put_user(x, ptr) \
|
||||
({ \
|
||||
int __pu_err = 0; \
|
||||
__put_user_switch((x), (ptr), __pu_err, __put_user_check); \
|
||||
__pu_err; \
|
||||
})
|
||||
|
||||
#define __put_user(x, ptr) \
|
||||
({ \
|
||||
long __pu_err = 0; \
|
||||
__put_user_err((x), (ptr), __pu_err); \
|
||||
__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
|
||||
__pu_err; \
|
||||
})
|
||||
|
||||
#define __put_user_error(x, ptr, err) \
|
||||
({ \
|
||||
__put_user_err((x), (ptr), err); \
|
||||
__put_user_switch((x), (ptr), (err), __put_user_nocheck); \
|
||||
(void) 0; \
|
||||
})
|
||||
|
||||
#define __put_user_err(x, ptr, err) \
|
||||
do { \
|
||||
unsigned long __pu_addr = (unsigned long)(ptr); \
|
||||
unsigned int __ua_flags; \
|
||||
__typeof__(*(ptr)) __pu_val = (x); \
|
||||
__chk_user_ptr(ptr); \
|
||||
might_fault(); \
|
||||
__ua_flags = uaccess_save_and_enable(); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
|
||||
case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
|
||||
case 4: __put_user_asm_word(__pu_val, __pu_addr, err); break; \
|
||||
case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
|
||||
default: __put_user_bad(); \
|
||||
} \
|
||||
uaccess_restore(__ua_flags); \
|
||||
} while (0)
|
||||
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
|
||||
do { \
|
||||
unsigned long __pu_addr = (unsigned long)__pu_ptr; \
|
||||
__put_user_nocheck_##__size(x, __pu_addr, __err); \
|
||||
} while (0)
|
||||
|
||||
#define __put_user_nocheck_1 __put_user_asm_byte
|
||||
#define __put_user_nocheck_2 __put_user_asm_half
|
||||
#define __put_user_nocheck_4 __put_user_asm_word
|
||||
#define __put_user_nocheck_8 __put_user_asm_dword
|
||||
|
||||
#define __put_user_asm(x, __pu_addr, err, instr) \
|
||||
__asm__ __volatile__( \
|
||||
|
|
|
@ -107,7 +107,10 @@ int main(void)
|
|||
DEFINE(S_PC, offsetof(struct pt_regs, ARM_pc));
|
||||
DEFINE(S_PSR, offsetof(struct pt_regs, ARM_cpsr));
|
||||
DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
|
||||
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
|
||||
DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
|
||||
DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr));
|
||||
DEFINE(SVC_ADDR_LIMIT, offsetof(struct svc_pt_regs, addr_limit));
|
||||
DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs));
|
||||
BLANK();
|
||||
#ifdef CONFIG_CACHE_L2X0
|
||||
DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base));
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <asm/cputype.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/mach-types.h>
|
||||
|
@ -213,6 +214,8 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
|
|||
|
||||
#if defined(CONFIG_ARCH_MULTIPLATFORM) || defined(CONFIG_ARM_SINGLE_ARMV7M)
|
||||
DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
|
||||
.l2c_aux_val = 0x0,
|
||||
.l2c_aux_mask = ~0x0,
|
||||
MACHINE_END
|
||||
|
||||
mdesc_best = &__mach_desc_GENERIC_DT;
|
||||
|
|
|
@ -92,7 +92,7 @@
|
|||
* Invalid mode handlers
|
||||
*/
|
||||
.macro inv_entry, reason
|
||||
sub sp, sp, #S_FRAME_SIZE
|
||||
sub sp, sp, #PT_REGS_SIZE
|
||||
ARM( stmib sp, {r1 - lr} )
|
||||
THUMB( stmia sp, {r0 - r12} )
|
||||
THUMB( str sp, [sp, #S_SP] )
|
||||
|
@ -152,7 +152,7 @@ ENDPROC(__und_invalid)
|
|||
.macro svc_entry, stack_hole=0, trace=1, uaccess=1
|
||||
UNWIND(.fnstart )
|
||||
UNWIND(.save {r0 - pc} )
|
||||
sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
|
||||
sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
SPFIX( str r0, [sp] ) @ temporarily saved
|
||||
SPFIX( mov r0, sp )
|
||||
|
@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
|
|||
ldmia r0, {r3 - r5}
|
||||
add r7, sp, #S_SP - 4 @ here for interlock avoidance
|
||||
mov r6, #-1 @ "" "" "" ""
|
||||
add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
|
||||
add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
|
||||
SPFIX( addeq r2, r2, #4 )
|
||||
str r3, [sp, #-4]! @ save the "real" r0 copied
|
||||
@ from the exception stack
|
||||
|
@ -185,6 +185,12 @@ ENDPROC(__und_invalid)
|
|||
@
|
||||
stmia r7, {r2 - r6}
|
||||
|
||||
get_thread_info tsk
|
||||
ldr r0, [tsk, #TI_ADDR_LIMIT]
|
||||
mov r1, #TASK_SIZE
|
||||
str r1, [tsk, #TI_ADDR_LIMIT]
|
||||
str r0, [sp, #SVC_ADDR_LIMIT]
|
||||
|
||||
uaccess_save r0
|
||||
.if \uaccess
|
||||
uaccess_disable r0
|
||||
|
@ -213,7 +219,6 @@ __irq_svc:
|
|||
irq_handler
|
||||
|
||||
#ifdef CONFIG_PREEMPT
|
||||
get_thread_info tsk
|
||||
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
|
||||
ldr r0, [tsk, #TI_FLAGS] @ get flags
|
||||
teq r8, #0 @ if preempt count != 0
|
||||
|
@ -366,17 +371,17 @@ ENDPROC(__fiq_abt)
|
|||
/*
|
||||
* User mode handlers
|
||||
*
|
||||
* EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
|
||||
* EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
|
||||
*/
|
||||
|
||||
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
|
||||
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
|
||||
#error "sizeof(struct pt_regs) must be a multiple of 8"
|
||||
#endif
|
||||
|
||||
.macro usr_entry, trace=1, uaccess=1
|
||||
UNWIND(.fnstart )
|
||||
UNWIND(.cantunwind ) @ don't unwind the user space
|
||||
sub sp, sp, #S_FRAME_SIZE
|
||||
sub sp, sp, #PT_REGS_SIZE
|
||||
ARM( stmib sp, {r1 - r12} )
|
||||
THUMB( stmia sp, {r0 - r12} )
|
||||
|
||||
|
|
|
@ -145,7 +145,7 @@ ENTRY(vector_swi)
|
|||
#ifdef CONFIG_CPU_V7M
|
||||
v7m_exception_entry
|
||||
#else
|
||||
sub sp, sp, #S_FRAME_SIZE
|
||||
sub sp, sp, #PT_REGS_SIZE
|
||||
stmia sp, {r0 - r12} @ Calling r0 - r12
|
||||
ARM( add r8, sp, #S_PC )
|
||||
ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
|
||||
|
|
|
@ -90,7 +90,7 @@
|
|||
@ Linux expects to have irqs off. Do it here before taking stack space
|
||||
cpsid i
|
||||
|
||||
sub sp, #S_FRAME_SIZE-S_IP
|
||||
sub sp, #PT_REGS_SIZE-S_IP
|
||||
stmdb sp!, {r0-r11}
|
||||
|
||||
@ load saved r12, lr, return address and xPSR.
|
||||
|
@ -160,7 +160,7 @@
|
|||
ldmia sp!, {r0-r11}
|
||||
|
||||
@ restore main sp
|
||||
add sp, sp, #S_FRAME_SIZE-S_IP
|
||||
add sp, sp, #PT_REGS_SIZE-S_IP
|
||||
|
||||
cpsie i
|
||||
bx lr
|
||||
|
@ -215,7 +215,9 @@
|
|||
blne trace_hardirqs_off
|
||||
#endif
|
||||
.endif
|
||||
ldr r1, [sp, #SVC_ADDR_LIMIT]
|
||||
uaccess_restore
|
||||
str r1, [tsk, #TI_ADDR_LIMIT]
|
||||
|
||||
#ifndef CONFIG_THUMB2_KERNEL
|
||||
@ ARM mode SVC restore
|
||||
|
@ -259,7 +261,9 @@
|
|||
@ on the stack remains correct).
|
||||
@
|
||||
.macro svc_exit_via_fiq
|
||||
ldr r1, [sp, #SVC_ADDR_LIMIT]
|
||||
uaccess_restore
|
||||
str r1, [tsk, #TI_ADDR_LIMIT]
|
||||
#ifndef CONFIG_THUMB2_KERNEL
|
||||
@ ARM mode restore
|
||||
mov r0, sp
|
||||
|
@ -307,7 +311,7 @@
|
|||
.endif
|
||||
mov r0, r0 @ ARMv5T and earlier require a nop
|
||||
@ after ldm {}^
|
||||
add sp, sp, #\offset + S_FRAME_SIZE
|
||||
add sp, sp, #\offset + PT_REGS_SIZE
|
||||
movs pc, lr @ return & move spsr_svc into cpsr
|
||||
#elif defined(CONFIG_CPU_V7M)
|
||||
@ V7M restore.
|
||||
|
@ -334,7 +338,7 @@
|
|||
.else
|
||||
ldmdb sp, {r0 - r12} @ get calling r0 - r12
|
||||
.endif
|
||||
add sp, sp, #S_FRAME_SIZE - S_SP
|
||||
add sp, sp, #PT_REGS_SIZE - S_SP
|
||||
movs pc, lr @ return & move spsr_svc into cpsr
|
||||
#endif /* !CONFIG_THUMB2_KERNEL */
|
||||
.endm
|
||||
|
|
|
@ -73,7 +73,7 @@ __irq_entry:
|
|||
@ correctness they don't need to be restored. So only r8-r11 must be
|
||||
@ restored here. The easiest way to do so is to restore r0-r7, too.
|
||||
ldmia sp!, {r0-r11}
|
||||
add sp, #S_FRAME_SIZE-S_IP
|
||||
add sp, #PT_REGS_SIZE-S_IP
|
||||
cpsie i
|
||||
bx lr
|
||||
ENDPROC(__irq_entry)
|
||||
|
|
|
@ -96,19 +96,23 @@ void __show_regs(struct pt_regs *regs)
|
|||
unsigned long flags;
|
||||
char buf[64];
|
||||
#ifndef CONFIG_CPU_V7M
|
||||
unsigned int domain;
|
||||
unsigned int domain, fs;
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
/*
|
||||
* Get the domain register for the parent context. In user
|
||||
* mode, we don't save the DACR, so lets use what it should
|
||||
* be. For other modes, we place it after the pt_regs struct.
|
||||
*/
|
||||
if (user_mode(regs))
|
||||
if (user_mode(regs)) {
|
||||
domain = DACR_UACCESS_ENABLE;
|
||||
else
|
||||
domain = *(unsigned int *)(regs + 1);
|
||||
fs = get_fs();
|
||||
} else {
|
||||
domain = to_svc_pt_regs(regs)->dacr;
|
||||
fs = to_svc_pt_regs(regs)->addr_limit;
|
||||
}
|
||||
#else
|
||||
domain = get_domain();
|
||||
fs = get_fs();
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -144,7 +148,7 @@ void __show_regs(struct pt_regs *regs)
|
|||
if ((domain & domain_mask(DOMAIN_USER)) ==
|
||||
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
|
||||
segment = "none";
|
||||
else if (get_fs() == get_ds())
|
||||
else if (fs == get_ds())
|
||||
segment = "kernel";
|
||||
else
|
||||
segment = "user";
|
||||
|
|
|
@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
vfp_flush_hwstate(thread);
|
||||
thread->vfpstate.hard = new_vfp;
|
||||
vfp_flush_hwstate(thread);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -844,7 +844,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
|
|||
struct resource *res;
|
||||
|
||||
kernel_code.start = virt_to_phys(_text);
|
||||
kernel_code.end = virt_to_phys(_etext - 1);
|
||||
kernel_code.end = virt_to_phys(__init_begin - 1);
|
||||
kernel_data.start = virt_to_phys(_sdata);
|
||||
kernel_data.end = virt_to_phys(_end - 1);
|
||||
|
||||
|
|
|
@ -93,17 +93,53 @@ void erratum_a15_798181_init(void)
|
|||
unsigned int revidr = read_cpuid(CPUID_REVIDR);
|
||||
|
||||
/* Brahma-B15 r0p0..r0p2 affected
|
||||
* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
|
||||
if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2)
|
||||
* Cortex-A15 r0p0..r3p3 w/o ECO fix affected
|
||||
* Fixes applied to A15 with respect to the revision and revidr are:
|
||||
*
|
||||
* r0p0-r2p1: No fixes applied
|
||||
* r2p2,r2p3:
|
||||
* REVIDR[4]: 798181 Moving a virtual page that is being accessed
|
||||
* by an active process can lead to unexpected behavior
|
||||
* REVIDR[9]: Not defined
|
||||
* r2p4,r3p0,r3p1,r3p2:
|
||||
* REVIDR[4]: 798181 Moving a virtual page that is being accessed
|
||||
* by an active process can lead to unexpected behavior
|
||||
* REVIDR[9]: 798181 Moving a virtual page that is being accessed
|
||||
* by an active process can lead to unexpected behavior
|
||||
* - This is an update to a previously released ECO.
|
||||
* r3p3:
|
||||
* REVIDR[4]: Reserved
|
||||
* REVIDR[9]: 798181 Moving a virtual page that is being accessed
|
||||
* by an active process can lead to unexpected behavior
|
||||
* - This is an update to a previously released ECO.
|
||||
*
|
||||
* Handling:
|
||||
* REVIDR[9] set -> No WA
|
||||
* REVIDR[4] set, REVIDR[9] cleared -> Partial WA
|
||||
* Both cleared -> Full WA
|
||||
*/
|
||||
if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2) {
|
||||
erratum_a15_798181_handler = erratum_a15_798181_broadcast;
|
||||
else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2 &&
|
||||
(revidr & 0x210) != 0x210) {
|
||||
} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f2) {
|
||||
erratum_a15_798181_handler = erratum_a15_798181_broadcast;
|
||||
} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f4) {
|
||||
if (revidr & 0x10)
|
||||
erratum_a15_798181_handler =
|
||||
erratum_a15_798181_partial;
|
||||
else
|
||||
erratum_a15_798181_handler =
|
||||
erratum_a15_798181_broadcast;
|
||||
} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x413fc0f3) {
|
||||
if ((revidr & 0x210) == 0)
|
||||
erratum_a15_798181_handler =
|
||||
erratum_a15_798181_broadcast;
|
||||
else if (revidr & 0x10)
|
||||
erratum_a15_798181_handler =
|
||||
erratum_a15_798181_partial;
|
||||
} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x414fc0f0) {
|
||||
if ((revidr & 0x200) == 0)
|
||||
erratum_a15_798181_handler =
|
||||
erratum_a15_798181_partial;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -125,6 +125,8 @@ SECTIONS
|
|||
#ifdef CONFIG_DEBUG_ALIGN_RODATA
|
||||
. = ALIGN(1<<SECTION_SHIFT);
|
||||
#endif
|
||||
_etext = .; /* End of text section */
|
||||
|
||||
RO_DATA(PAGE_SIZE)
|
||||
|
||||
. = ALIGN(4);
|
||||
|
@ -155,8 +157,6 @@ SECTIONS
|
|||
|
||||
NOTES
|
||||
|
||||
_etext = .; /* End of text and rodata section */
|
||||
|
||||
#ifdef CONFIG_DEBUG_RODATA
|
||||
. = ALIGN(1<<SECTION_SHIFT);
|
||||
#else
|
||||
|
|
|
@ -29,7 +29,10 @@ else
|
|||
lib-y += io-readsw-armv4.o io-writesw-armv4.o
|
||||
endif
|
||||
|
||||
lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o
|
||||
ifeq ($(CONFIG_ARCH_RPC),y)
|
||||
lib-y += ecard.o io-acorn.o floppydma.o
|
||||
AFLAGS_delay-loop.o += -march=armv4
|
||||
endif
|
||||
|
||||
$(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
|
||||
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/linkage.h>
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/delay.h>
|
||||
|
||||
.text
|
||||
|
||||
.LC0: .word loops_per_jiffy
|
||||
|
@ -17,7 +18,6 @@
|
|||
|
||||
/*
|
||||
* r0 <= 2000
|
||||
* lpj <= 0x01ffffff (max. 3355 bogomips)
|
||||
* HZ <= 1000
|
||||
*/
|
||||
|
||||
|
@ -25,16 +25,11 @@ ENTRY(__loop_udelay)
|
|||
ldr r2, .LC1
|
||||
mul r0, r2, r0
|
||||
ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
|
||||
mov r1, #-1
|
||||
ldr r2, .LC0
|
||||
ldr r2, [r2] @ max = 0x01ffffff
|
||||
add r0, r0, r1, lsr #32-14
|
||||
mov r0, r0, lsr #14 @ max = 0x0001ffff
|
||||
add r2, r2, r1, lsr #32-10
|
||||
mov r2, r2, lsr #10 @ max = 0x00007fff
|
||||
mul r0, r2, r0 @ max = 2^32-1
|
||||
add r0, r0, r1, lsr #32-6
|
||||
movs r0, r0, lsr #6
|
||||
ldr r2, [r2]
|
||||
umull r1, r0, r2, r0
|
||||
adds r1, r1, #0xffffffff
|
||||
adcs r0, r0, r0
|
||||
reteq lr
|
||||
|
||||
/*
|
||||
|
|
|
@ -1025,12 +1025,6 @@ config ARM_DMA_MEM_BUFFERABLE
|
|||
|
||||
You are recommended say 'Y' here and debug any affected drivers.
|
||||
|
||||
config ARCH_HAS_BARRIERS
|
||||
bool
|
||||
help
|
||||
This option allows the use of custom mandatory barriers
|
||||
included via the mach/barriers.h file.
|
||||
|
||||
config ARM_HEAVY_MB
|
||||
bool
|
||||
|
||||
|
|
|
@ -49,6 +49,7 @@ struct arm_dma_alloc_args {
|
|||
pgprot_t prot;
|
||||
const void *caller;
|
||||
bool want_vaddr;
|
||||
int coherent_flag;
|
||||
};
|
||||
|
||||
struct arm_dma_free_args {
|
||||
|
@ -59,6 +60,9 @@ struct arm_dma_free_args {
|
|||
bool want_vaddr;
|
||||
};
|
||||
|
||||
#define NORMAL 0
|
||||
#define COHERENT 1
|
||||
|
||||
struct arm_dma_allocator {
|
||||
void *(*alloc)(struct arm_dma_alloc_args *args,
|
||||
struct page **ret_page);
|
||||
|
@ -272,7 +276,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
|
|||
return mask;
|
||||
}
|
||||
|
||||
static void __dma_clear_buffer(struct page *page, size_t size)
|
||||
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
|
||||
{
|
||||
/*
|
||||
* Ensure that the allocated pages are zeroed, and that any data
|
||||
|
@ -284,17 +288,21 @@ static void __dma_clear_buffer(struct page *page, size_t size)
|
|||
while (size > 0) {
|
||||
void *ptr = kmap_atomic(page);
|
||||
memset(ptr, 0, PAGE_SIZE);
|
||||
dmac_flush_range(ptr, ptr + PAGE_SIZE);
|
||||
if (coherent_flag != COHERENT)
|
||||
dmac_flush_range(ptr, ptr + PAGE_SIZE);
|
||||
kunmap_atomic(ptr);
|
||||
page++;
|
||||
size -= PAGE_SIZE;
|
||||
}
|
||||
outer_flush_range(base, end);
|
||||
if (coherent_flag != COHERENT)
|
||||
outer_flush_range(base, end);
|
||||
} else {
|
||||
void *ptr = page_address(page);
|
||||
memset(ptr, 0, size);
|
||||
dmac_flush_range(ptr, ptr + size);
|
||||
outer_flush_range(__pa(ptr), __pa(ptr) + size);
|
||||
if (coherent_flag != COHERENT) {
|
||||
dmac_flush_range(ptr, ptr + size);
|
||||
outer_flush_range(__pa(ptr), __pa(ptr) + size);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -302,7 +310,8 @@ static void __dma_clear_buffer(struct page *page, size_t size)
|
|||
* Allocate a DMA buffer for 'dev' of size 'size' using the
|
||||
* specified gfp mask. Note that 'size' must be page aligned.
|
||||
*/
|
||||
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
|
||||
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
|
||||
gfp_t gfp, int coherent_flag)
|
||||
{
|
||||
unsigned long order = get_order(size);
|
||||
struct page *page, *p, *e;
|
||||
|
@ -318,7 +327,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
|
|||
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
|
||||
__free_page(p);
|
||||
|
||||
__dma_clear_buffer(page, size);
|
||||
__dma_clear_buffer(page, size, coherent_flag);
|
||||
|
||||
return page;
|
||||
}
|
||||
|
@ -340,7 +349,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
|
|||
|
||||
static void *__alloc_from_contiguous(struct device *dev, size_t size,
|
||||
pgprot_t prot, struct page **ret_page,
|
||||
const void *caller, bool want_vaddr);
|
||||
const void *caller, bool want_vaddr,
|
||||
int coherent_flag);
|
||||
|
||||
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
|
||||
pgprot_t prot, struct page **ret_page,
|
||||
|
@ -405,10 +415,13 @@ static int __init atomic_pool_init(void)
|
|||
atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
|
||||
if (!atomic_pool)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* The atomic pool is only used for non-coherent allocations
|
||||
* so we must pass NORMAL for coherent_flag.
|
||||
*/
|
||||
if (dev_get_cma_area(NULL))
|
||||
ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
|
||||
&page, atomic_pool_init, true);
|
||||
&page, atomic_pool_init, true, NORMAL);
|
||||
else
|
||||
ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
|
||||
&page, atomic_pool_init, true);
|
||||
|
@ -522,7 +535,11 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
|
|||
{
|
||||
struct page *page;
|
||||
void *ptr = NULL;
|
||||
page = __dma_alloc_buffer(dev, size, gfp);
|
||||
/*
|
||||
* __alloc_remap_buffer is only called when the device is
|
||||
* non-coherent
|
||||
*/
|
||||
page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
|
||||
if (!page)
|
||||
return NULL;
|
||||
if (!want_vaddr)
|
||||
|
@ -577,7 +594,8 @@ static int __free_from_pool(void *start, size_t size)
|
|||
|
||||
static void *__alloc_from_contiguous(struct device *dev, size_t size,
|
||||
pgprot_t prot, struct page **ret_page,
|
||||
const void *caller, bool want_vaddr)
|
||||
const void *caller, bool want_vaddr,
|
||||
int coherent_flag)
|
||||
{
|
||||
unsigned long order = get_order(size);
|
||||
size_t count = size >> PAGE_SHIFT;
|
||||
|
@ -588,7 +606,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
|
|||
if (!page)
|
||||
return NULL;
|
||||
|
||||
__dma_clear_buffer(page, size);
|
||||
__dma_clear_buffer(page, size, coherent_flag);
|
||||
|
||||
if (!want_vaddr)
|
||||
goto out;
|
||||
|
@ -638,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
|
|||
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
|
||||
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
|
||||
#define __alloc_from_pool(size, ret_page) NULL
|
||||
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL
|
||||
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag) NULL
|
||||
#define __free_from_pool(cpu_addr, size) do { } while (0)
|
||||
#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
|
||||
#define __dma_free_remap(cpu_addr, size) do { } while (0)
|
||||
|
@ -649,7 +667,8 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
|
|||
struct page **ret_page)
|
||||
{
|
||||
struct page *page;
|
||||
page = __dma_alloc_buffer(dev, size, gfp);
|
||||
/* __alloc_simple_buffer is only called when the device is coherent */
|
||||
page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
|
@ -679,7 +698,7 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
|
|||
{
|
||||
return __alloc_from_contiguous(args->dev, args->size, args->prot,
|
||||
ret_page, args->caller,
|
||||
args->want_vaddr);
|
||||
args->want_vaddr, args->coherent_flag);
|
||||
}
|
||||
|
||||
static void cma_allocator_free(struct arm_dma_free_args *args)
|
||||
|
@ -746,6 +765,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
|
|||
.prot = prot,
|
||||
.caller = caller,
|
||||
.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
|
||||
.coherent_flag = is_coherent ? COHERENT : NORMAL,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_DMA_API_DEBUG
|
||||
|
@ -1253,7 +1273,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
|
|||
static const int iommu_order_array[] = { 9, 8, 4, 0 };
|
||||
|
||||
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
|
||||
gfp_t gfp, struct dma_attrs *attrs)
|
||||
gfp_t gfp, struct dma_attrs *attrs,
|
||||
int coherent_flag)
|
||||
{
|
||||
struct page **pages;
|
||||
int count = size >> PAGE_SHIFT;
|
||||
|
@ -1277,7 +1298,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
|
|||
if (!page)
|
||||
goto error;
|
||||
|
||||
__dma_clear_buffer(page, size);
|
||||
__dma_clear_buffer(page, size, coherent_flag);
|
||||
|
||||
for (i = 0; i < count; i++)
|
||||
pages[i] = page + i;
|
||||
|
@ -1327,7 +1348,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
|
|||
pages[i + j] = pages[i] + j;
|
||||
}
|
||||
|
||||
__dma_clear_buffer(pages[i], PAGE_SIZE << order);
|
||||
__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
|
||||
i += 1 << order;
|
||||
count -= 1 << order;
|
||||
}
|
||||
|
@ -1455,13 +1476,16 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void *__iommu_alloc_atomic(struct device *dev, size_t size,
|
||||
dma_addr_t *handle)
|
||||
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
|
||||
dma_addr_t *handle, int coherent_flag)
|
||||
{
|
||||
struct page *page;
|
||||
void *addr;
|
||||
|
||||
addr = __alloc_from_pool(size, &page);
|
||||
if (coherent_flag == COHERENT)
|
||||
addr = __alloc_simple_buffer(dev, size, gfp, &page);
|
||||
else
|
||||
addr = __alloc_from_pool(size, &page);
|
||||
if (!addr)
|
||||
return NULL;
|
||||
|
||||
|
@ -1477,14 +1501,18 @@ err_mapping:
|
|||
}
|
||||
|
||||
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
|
||||
dma_addr_t handle, size_t size)
|
||||
dma_addr_t handle, size_t size, int coherent_flag)
|
||||
{
|
||||
__iommu_remove_mapping(dev, handle, size);
__free_from_pool(cpu_addr, size);
if (coherent_flag == COHERENT)
__dma_free_buffer(virt_to_page(cpu_addr), size);
else
__free_from_pool(cpu_addr, size);
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs,
int coherent_flag)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
struct page **pages;

@@ -1493,8 +1521,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);

if (!gfpflags_allow_blocking(gfp))
return __iommu_alloc_atomic(dev, size, handle);
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
return __iommu_alloc_simple(dev, size, gfp, handle,
coherent_flag);

/*
* Following is a work-around (a.k.a. hack) to prevent pages
@@ -1505,7 +1534,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*/
gfp &= ~(__GFP_COMP);

pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
if (!pages)
return NULL;

@@ -1530,7 +1559,19 @@ err_buffer:
return NULL;
}

static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
}

static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
}

static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{

@@ -1540,8 +1581,6 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;

vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

if (!pages)
return -ENXIO;

@@ -1562,19 +1601,34 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,

return 0;
}
static int arm_iommu_mmap_attrs(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

static int arm_coherent_iommu_mmap_attrs(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/*
* free a page as defined by the above mapping.
* Must not be called with IRQs disabled.
*/
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs)
void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag)
{
struct page **pages;
size = PAGE_ALIGN(size);

if (__in_atomic_pool(cpu_addr, size)) {
__iommu_free_atomic(dev, cpu_addr, handle, size);
if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
return;
}

@@ -1593,6 +1647,18 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
__iommu_free_buffer(dev, pages, size, attrs);
}

void arm_iommu_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
{
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
}

void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
{
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
}

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size, struct dma_attrs *attrs)

@@ -1997,9 +2063,9 @@ struct dma_map_ops iommu_ops = {
};

struct dma_map_ops iommu_coherent_ops = {
.alloc = arm_iommu_alloc_attrs,
.free = arm_iommu_free_attrs,
.mmap = arm_iommu_mmap_attrs,
.alloc = arm_coherent_iommu_alloc_attrs,
.free = arm_coherent_iommu_free_attrs,
.mmap = arm_coherent_iommu_mmap_attrs,
.get_sgtable = arm_iommu_get_sgtable,

.map_page = arm_coherent_iommu_map_page,

@@ -362,6 +362,39 @@ __ca15_errata:
#endif
b __errata_finish

__ca12_errata:
#ifdef CONFIG_ARM_ERRATA_818325_852422
mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register
orr r10, r10, #1 << 12 @ set bit #12
mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_821420
mrc p15, 0, r10, c15, c0, 2 @ read internal feature reg
orr r10, r10, #1 << 1 @ set bit #1
mcr p15, 0, r10, c15, c0, 2 @ write internal feature reg
#endif
#ifdef CONFIG_ARM_ERRATA_825619
mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register
orr r10, r10, #1 << 24 @ set bit #24
mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
b __errata_finish

__ca17_errata:
#ifdef CONFIG_ARM_ERRATA_852421
cmp r6, #0x12 @ only present up to r1p2
mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrle r10, r10, #1 << 24 @ set bit #24
mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_852423
cmp r6, #0x12 @ only present up to r1p2
mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrle r10, r10, #1 << 12 @ set bit #12
mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
b __errata_finish

__v7_pj4b_setup:
#ifdef CONFIG_CPU_PJ4B

@@ -443,6 +476,16 @@ __v7_setup_cont:
teq r0, r10
beq __ca9_errata

/* Cortex-A12 Errata */
ldr r10, =0x00000c0d @ Cortex-A12 primary part number
teq r0, r10
beq __ca12_errata

/* Cortex-A17 Errata */
ldr r10, =0x00000c0e @ Cortex-A17 primary part number
teq r0, r10
beq __ca17_errata

/* Cortex-A15 Errata */
ldr r10, =0x00000c0f @ Cortex-A15 primary part number
teq r0, r10

@@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT
config MMU
def_bool y

config ARM64_PAGE_SHIFT
int
default 16 if ARM64_64K_PAGES
default 14 if ARM64_16K_PAGES
default 12

config ARM64_CONT_SHIFT
int
default 5 if ARM64_64K_PAGES
default 7 if ARM64_16K_PAGES
default 4

config ARCH_MMAP_RND_BITS_MIN
default 14 if ARM64_64K_PAGES
default 16 if ARM64_16K_PAGES

@@ -426,6 +438,15 @@ config CAVIUM_ERRATUM_22375

If unsure, say Y.

config CAVIUM_ERRATUM_23144
bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
depends on NUMA
default y
help
ITS SYNC command hang for cross node io and collections/cpu mapping.

If unsure, say Y.

config CAVIUM_ERRATUM_23154
bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
default y

@@ -12,7 +12,8 @@ config ARM64_PTDUMP
who are working in architecture specific areas of the kernel.
It is probably not a good idea to enable this feature in a production
kernel.
If in doubt, say "N"

If in doubt, say N.

config PID_IN_CONTEXTIDR
bool "Write the current PID to the CONTEXTIDR register"

@@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
value.

config DEBUG_SET_MODULE_RONX
bool "Set loadable kernel module data as NX and text as RO"
depends on MODULES
help
This option helps catch unintended modifications to loadable
kernel module's text and read-only data. It also prevents execution
of module data. Such protection may interfere with run-time code
patching and dynamic kernel tracing - and they might also protect
against certain classes of kernel exploits.
If in doubt, say "N".
bool "Set loadable kernel module data as NX and text as RO"
depends on MODULES
default y
help
Is this is set, kernel module text and rodata will be made read-only.
This is to help catch accidental or malicious attempts to change the
kernel's executable code.

If in doubt, say Y.

config DEBUG_RODATA
bool "Make kernel text and rodata read-only"

@@ -56,7 +57,7 @@ config DEBUG_RODATA
is to help catch accidental or malicious attempts to change the
kernel's executable code.

If in doubt, say Y
If in doubt, say Y.

config DEBUG_ALIGN_RODATA
depends on DEBUG_RODATA

@@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA
alignment and potentially wasted space. Turn on this option if
performance is more important than memory pressure.

If in doubt, say N
If in doubt, say N.

source "drivers/hwtracing/coresight/Kconfig"

@@ -60,7 +60,9 @@ head-y := arch/arm64/kernel/head.o

# The byte offset of the kernel image in RAM from the start of RAM.
ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
else
TEXT_OFFSET := 0x00080000
endif

@@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif

#ifdef CONFIG_COMPAT

#ifdef __AARCH64EB__
#define COMPAT_ELF_PLATFORM ("v8b")
#else
#define COMPAT_ELF_PLATFORM ("v8l")
#endif

#ifdef CONFIG_COMPAT

#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)

/* AArch32 registers. */

@@ -55,8 +55,9 @@
#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))

/*
* PAGE_OFFSET - the virtual address of the start of the kernel image (top
* PAGE_OFFSET - the virtual address of the start of the linear map (top
* (VA_BITS - 1))
* KIMAGE_VADDR - the virtual address of the start of the kernel image
* VA_BITS - the maximum number of bits for virtual addresses.
* VA_START - the first kernel virtual address.
* TASK_SIZE - the maximum size of a user space task.

@@ -23,16 +23,8 @@

/* PAGE_SHIFT determines the page size */
/* CONT_SHIFT determines the number of pages which can be tracked together */
#ifdef CONFIG_ARM64_64K_PAGES
#define PAGE_SHIFT 16
#define CONT_SHIFT 5
#elif defined(CONFIG_ARM64_16K_PAGES)
#define PAGE_SHIFT 14
#define CONT_SHIFT 7
#else
#define PAGE_SHIFT 12
#define CONT_SHIFT 4
#endif
#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT
#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))

@@ -80,19 +80,6 @@ static inline void set_fs(mm_segment_t fs)

#define segment_eq(a, b) ((a) == (b))

/*
* Return 1 if addr < current->addr_limit, 0 otherwise.
*/
#define __addr_ok(addr) \
({ \
unsigned long flag; \
asm("cmp %1, %0; cset %0, lo" \
: "=&r" (flag) \
: "r" (addr), "0" (current_thread_info()->addr_limit) \
: "cc"); \
flag; \
})

/*
* Test whether a block of memory is a valid user space address.
* Returns 1 if the range is valid, 0 otherwise.

@@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)

#define __NR_compat_syscalls 390
#define __NR_compat_syscalls 394
#endif

#define __ARCH_WANT_SYS_CLONE

@@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat)
__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
#define __NR_membarrier 389
__SYSCALL(__NR_membarrier, sys_membarrier)
#define __NR_mlock2 390
__SYSCALL(__NR_mlock2, sys_mlock2)
#define __NR_copy_file_range 391
__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
#define __NR_preadv2 392
__SYSCALL(__NR_preadv2, compat_sys_preadv2)
#define __NR_pwritev2 393
__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)

/*
* Please add new compat syscalls above this comment and update

@ -22,6 +22,8 @@
|
|||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/bug.h>
|
||||
#include <linux/compat.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/personality.h>
|
||||
|
@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = {
|
|||
static int c_show(struct seq_file *m, void *v)
|
||||
{
|
||||
int i, j;
|
||||
bool compat = personality(current->personality) == PER_LINUX32;
|
||||
|
||||
for_each_online_cpu(i) {
|
||||
struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
|
||||
|
@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v)
|
|||
* "processor". Give glibc what it expects.
|
||||
*/
|
||||
seq_printf(m, "processor\t: %d\n", i);
|
||||
if (compat)
|
||||
seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
|
||||
MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
|
||||
|
||||
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
|
||||
loops_per_jiffy / (500000UL/HZ),
|
||||
|
@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v)
|
|||
* software which does already (at least for 32-bit).
|
||||
*/
|
||||
seq_puts(m, "Features\t:");
|
||||
if (personality(current->personality) == PER_LINUX32) {
|
||||
if (compat) {
|
||||
#ifdef CONFIG_COMPAT
|
||||
for (j = 0; compat_hwcap_str[j]; j++)
|
||||
if (compat_elf_hwcap & (1 << j))
|
||||
|
|
|
@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
|
|||
void __user *pc = (void __user *)instruction_pointer(regs);
|
||||
console_verbose();
|
||||
|
||||
pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
|
||||
handler[reason], esr, esr_get_class_string(esr));
|
||||
pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
|
||||
handler[reason], smp_processor_id(), esr,
|
||||
esr_get_class_string(esr));
|
||||
__show_regs(regs);
|
||||
|
||||
info.si_signo = SIGILL;
|
||||
|
|
|
@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
|
|||
* Make sure stores to the GIC via the memory mapped interface
|
||||
* are now visible to the system register interface.
|
||||
*/
|
||||
dsb(st);
|
||||
if (!cpu_if->vgic_sre)
|
||||
dsb(st);
|
||||
|
||||
cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
|
||||
|
||||
|
@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
|
|||
if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
|
||||
continue;
|
||||
|
||||
if (cpu_if->vgic_elrsr & (1 << i)) {
|
||||
if (cpu_if->vgic_elrsr & (1 << i))
|
||||
cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
|
||||
continue;
|
||||
}
|
||||
else
|
||||
cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
|
||||
|
||||
cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
|
||||
__gic_v3_set_lr(0, i);
|
||||
}
|
||||
|
||||
|
@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
|
|||
|
||||
val = read_gicreg(ICC_SRE_EL2);
|
||||
write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
|
||||
isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
|
||||
write_gicreg(1, ICC_SRE_EL1);
|
||||
|
||||
if (!cpu_if->vgic_sre) {
|
||||
/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
|
||||
isb();
|
||||
write_gicreg(1, ICC_SRE_EL1);
|
||||
}
|
||||
}
|
||||
|
||||
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
|
||||
|
@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
|
|||
* been actually programmed with the value we want before
|
||||
* starting to mess with the rest of the GIC.
|
||||
*/
|
||||
write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
|
||||
isb();
|
||||
if (!cpu_if->vgic_sre) {
|
||||
write_gicreg(0, ICC_SRE_EL1);
|
||||
isb();
|
||||
}
|
||||
|
||||
val = read_gicreg(ICH_VTR_EL2);
|
||||
max_lr_idx = vtr_to_max_lr_idx(val);
|
||||
|
@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
|
|||
* (re)distributors. This ensure the guest will read the
|
||||
* correct values from the memory-mapped interface.
|
||||
*/
|
||||
isb();
|
||||
dsb(sy);
|
||||
if (!cpu_if->vgic_sre) {
|
||||
isb();
|
||||
dsb(sy);
|
||||
}
|
||||
vcpu->arch.vgic_cpu.live_lrs = live_lrs;
|
||||
|
||||
/*
|
||||
* Prevent the guest from touching the GIC system registers if
|
||||
* SRE isn't enabled for GICv3 emulation.
|
||||
*/
|
||||
if (!cpu_if->vgic_sre) {
|
||||
write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
|
||||
ICC_SRE_EL2);
|
||||
}
|
||||
write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
|
||||
ICC_SRE_EL2);
|
||||
}
|
||||
|
||||
void __hyp_text __vgic_v3_init_lrs(void)
|
||||
|
|
|
@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool access_gic_sre(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
if (p->is_write)
|
||||
return ignore_write(vcpu, p);
|
||||
|
||||
p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
|
@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||
access_gic_sgi },
|
||||
/* ICC_SRE_EL1 */
|
||||
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
|
||||
trap_raz_wi },
|
||||
access_gic_sre },
|
||||
|
||||
/* CONTEXTIDR_EL1 */
|
||||
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
|
||||
|
|
|
@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = {
|
|||
|
||||
struct pg_level {
|
||||
const struct prot_bits *bits;
|
||||
const char *name;
|
||||
size_t num;
|
||||
u64 mask;
|
||||
};
|
||||
|
@ -157,15 +158,19 @@ struct pg_level {
|
|||
static struct pg_level pg_level[] = {
|
||||
{
|
||||
}, { /* pgd */
|
||||
.name = "PGD",
|
||||
.bits = pte_bits,
|
||||
.num = ARRAY_SIZE(pte_bits),
|
||||
}, { /* pud */
|
||||
.name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
|
||||
.bits = pte_bits,
|
||||
.num = ARRAY_SIZE(pte_bits),
|
||||
}, { /* pmd */
|
||||
.name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
|
||||
.bits = pte_bits,
|
||||
.num = ARRAY_SIZE(pte_bits),
|
||||
}, { /* pte */
|
||||
.name = "PTE",
|
||||
.bits = pte_bits,
|
||||
.num = ARRAY_SIZE(pte_bits),
|
||||
},
|
||||
|
@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
|
|||
delta >>= 10;
|
||||
unit++;
|
||||
}
|
||||
seq_printf(st->seq, "%9lu%c", delta, *unit);
|
||||
seq_printf(st->seq, "%9lu%c %s", delta, *unit,
|
||||
pg_level[st->level].name);
|
||||
if (pg_level[st->level].bits)
|
||||
dump_prot(st, pg_level[st->level].bits,
|
||||
pg_level[st->level].num);
|
||||
|
|
|
@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt)
|
|||
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
|
||||
} else if (ps == PUD_SIZE) {
|
||||
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
|
||||
} else if (ps == (PAGE_SIZE * CONT_PTES)) {
|
||||
hugetlb_add_hstate(CONT_PTE_SHIFT);
|
||||
} else if (ps == (PMD_SIZE * CONT_PMDS)) {
|
||||
hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
|
||||
} else {
|
||||
hugetlb_bad_size();
|
||||
pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
|
||||
|
@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt)
|
|||
return 1;
|
||||
}
|
||||
__setup("hugepagesz=", setup_hugepagesz);
|
||||
|
||||
#ifdef CONFIG_ARM64_64K_PAGES
|
||||
static __init int add_default_hugepagesz(void)
|
||||
{
|
||||
if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
|
||||
hugetlb_add_hstate(CONT_PMD_SHIFT);
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(add_default_hugepagesz);
|
||||
#endif
|
||||
|
|
|
@ -8,6 +8,8 @@ struct pt_regs;
|
|||
void parisc_terminate(char *msg, struct pt_regs *regs,
|
||||
int code, unsigned long offset) __noreturn __cold;
|
||||
|
||||
void die_if_kernel(char *str, struct pt_regs *regs, long err);
|
||||
|
||||
/* mm/fault.c */
|
||||
void do_page_fault(struct pt_regs *regs, unsigned long code,
|
||||
unsigned long address);
|
||||
|
|
|
@ -324,8 +324,9 @@ int init_per_cpu(int cpunum)
|
|||
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
|
||||
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
|
||||
|
||||
printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
|
||||
cpunum, coproc_cfg.revision, coproc_cfg.model);
|
||||
if (cpunum == 0)
|
||||
printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
|
||||
cpunum, coproc_cfg.revision, coproc_cfg.model);
|
||||
|
||||
/*
|
||||
** store status register to stack (hopefully aligned)
|
||||
|
|
|
@ -309,11 +309,6 @@ void __init time_init(void)
|
|||
clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
|
||||
NSEC_PER_MSEC, 0);
|
||||
|
||||
#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
|
||||
/* At bootup only one 64bit CPU is online and cr16 is "stable" */
|
||||
set_sched_clock_stable();
|
||||
#endif
|
||||
|
||||
start_cpu_itimer(); /* get CPU 0 started */
|
||||
|
||||
/* register at clocksource framework */
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include <linux/ratelimit.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/hardirq.h>
|
||||
#include <asm/traps.h>
|
||||
|
||||
/* #define DEBUG_UNALIGNED 1 */
|
||||
|
||||
|
@ -130,8 +131,6 @@
|
|||
|
||||
int unaligned_enabled __read_mostly = 1;
|
||||
|
||||
void die_if_kernel (char *str, struct pt_regs *regs, long err);
|
||||
|
||||
static int emulate_ldh(struct pt_regs *regs, int toreg)
|
||||
{
|
||||
unsigned long saddr = regs->ior;
|
||||
|
@ -666,7 +665,7 @@ void handle_unaligned(struct pt_regs *regs)
|
|||
break;
|
||||
}
|
||||
|
||||
if (modify && R1(regs->iir))
|
||||
if (ret == 0 && modify && R1(regs->iir))
|
||||
regs->gr[R1(regs->iir)] = newbase;
|
||||
|
||||
|
||||
|
@ -677,6 +676,14 @@ void handle_unaligned(struct pt_regs *regs)
|
|||
|
||||
if (ret)
|
||||
{
|
||||
/*
|
||||
* The unaligned handler failed.
|
||||
* If we were called by __get_user() or __put_user() jump
|
||||
* to it's exception fixup handler instead of crashing.
|
||||
*/
|
||||
if (!user_mode(regs) && fixup_exception(regs))
|
||||
return;
|
||||
|
||||
printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
|
||||
die_if_kernel("Unaligned data reference", regs, 28);
|
||||
|
||||
|
|
|
@ -75,7 +75,10 @@ find_unwind_entry(unsigned long addr)
|
|||
if (addr >= kernel_unwind_table.start &&
|
||||
addr <= kernel_unwind_table.end)
|
||||
e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
|
||||
else
|
||||
else {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&unwind_lock, flags);
|
||||
list_for_each_entry(table, &unwind_tables, list) {
|
||||
if (addr >= table->start &&
|
||||
addr <= table->end)
|
||||
|
@ -86,6 +89,8 @@ find_unwind_entry(unsigned long addr)
|
|||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&unwind_lock, flags);
|
||||
}
|
||||
|
||||
return e;
|
||||
}
|
||||
|
@ -303,18 +308,16 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
|
|||
|
||||
insn = *(unsigned int *)npc;
|
||||
|
||||
if ((insn & 0xffffc000) == 0x37de0000 ||
|
||||
(insn & 0xffe00000) == 0x6fc00000) {
|
||||
if ((insn & 0xffffc001) == 0x37de0000 ||
|
||||
(insn & 0xffe00001) == 0x6fc00000) {
|
||||
/* ldo X(sp), sp, or stwm X,D(sp) */
|
||||
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
|
||||
((insn & 0x3fff) >> 1);
|
||||
frame_size += (insn & 0x3fff) >> 1;
|
||||
dbg("analyzing func @ %lx, insn=%08x @ "
|
||||
"%lx, frame_size = %ld\n", info->ip,
|
||||
insn, npc, frame_size);
|
||||
} else if ((insn & 0xffe00008) == 0x73c00008) {
|
||||
} else if ((insn & 0xffe00009) == 0x73c00008) {
|
||||
/* std,ma X,D(sp) */
|
||||
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
|
||||
(((insn >> 4) & 0x3ff) << 3);
|
||||
frame_size += ((insn >> 4) & 0x3ff) << 3;
|
||||
dbg("analyzing func @ %lx, insn=%08x @ "
|
||||
"%lx, frame_size = %ld\n", info->ip,
|
||||
insn, npc, frame_size);
|
||||
|
@ -333,6 +336,9 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
|
|||
}
|
||||
}
|
||||
|
||||
if (frame_size > e->Total_frame_size << 3)
|
||||
frame_size = e->Total_frame_size << 3;
|
||||
|
||||
if (!unwind_special(info, e->region_start, frame_size)) {
|
||||
info->prev_sp = info->sp - frame_size;
|
||||
if (e->Millicode)
|
||||
|
|
|
@ -717,7 +717,7 @@
|
|||
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
|
||||
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
|
||||
#define SPRN_MMCR1 798
|
||||
#define SPRN_MMCR2 769
|
||||
#define SPRN_MMCR2 785
|
||||
#define SPRN_MMCRA 0x312
|
||||
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
|
||||
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
|
||||
|
@ -754,13 +754,13 @@
|
|||
#define SPRN_PMC6 792
|
||||
#define SPRN_PMC7 793
|
||||
#define SPRN_PMC8 794
|
||||
#define SPRN_SIAR 780
|
||||
#define SPRN_SDAR 781
|
||||
#define SPRN_SIER 784
|
||||
#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
|
||||
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
|
||||
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
|
||||
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
|
||||
#define SPRN_SIAR 796
|
||||
#define SPRN_SDAR 797
|
||||
#define SPRN_TACR 888
|
||||
#define SPRN_TCSCR 889
|
||||
#define SPRN_CSIGR 890
|
||||
|
|
|
@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = {
|
|||
W(0xffff0000), W(0x003e0000), /* POWER6 */
|
||||
W(0xffff0000), W(0x003f0000), /* POWER7 */
|
||||
W(0xffff0000), W(0x004b0000), /* POWER8E */
|
||||
W(0xffff0000), W(0x004c0000), /* POWER8NVL */
|
||||
W(0xffff0000), W(0x004d0000), /* POWER8 */
|
||||
W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
|
||||
W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
|
||||
|
|
|
@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
|
|||
},
|
||||
};
|
||||
|
||||
/*
|
||||
* 'R' and 'C' update notes:
|
||||
* - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
|
||||
* create writeable HPTEs without C set, because the hcall H_PROTECT
|
||||
* that we use in that case will not update C
|
||||
* - The above is however not a problem, because we also don't do that
|
||||
* fancy "no flush" variant of eviction and we use H_REMOVE which will
|
||||
* do the right thing and thus we don't have the race I described earlier
|
||||
*
|
||||
* - Under bare metal, we do have the race, so we need R and C set
|
||||
* - We make sure R is always set and never lost
|
||||
* - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
|
||||
*/
|
||||
unsigned long htab_convert_pte_flags(unsigned long pteflags)
|
||||
{
|
||||
unsigned long rflags = 0;
|
||||
|
@ -186,9 +199,14 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
|
|||
rflags |= 0x1;
|
||||
}
|
||||
/*
|
||||
* Always add "C" bit for perf. Memory coherence is always enabled
|
||||
* We can't allow hardware to update hpte bits. Hence always
|
||||
* set 'R' bit and set 'C' if it is a write fault
|
||||
* Memory coherence is always enabled
|
||||
*/
|
||||
rflags |= HPTE_R_C | HPTE_R_M;
|
||||
rflags |= HPTE_R_R | HPTE_R_M;
|
||||
|
||||
if (pteflags & _PAGE_DIRTY)
|
||||
rflags |= HPTE_R_C;
|
||||
/*
|
||||
* Add in WIG bits
|
||||
*/
|
||||
|
|
|
@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
|
|||
changed = !pmd_same(*(pmdp), entry);
|
||||
if (changed) {
|
||||
__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
|
||||
/*
|
||||
* Since we are not supporting SW TLB systems, we don't
|
||||
* have any thing similar to flush_tlb_page_nohash()
|
||||
*/
|
||||
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
|
||||
}
|
||||
return changed;
|
||||
}
|
||||
|
|
|
@ -296,11 +296,6 @@ found:
|
|||
void __init radix__early_init_mmu(void)
|
||||
{
|
||||
unsigned long lpcr;
|
||||
/*
|
||||
* setup LPCR UPRT based on mmu_features
|
||||
*/
|
||||
lpcr = mfspr(SPRN_LPCR);
|
||||
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
|
||||
|
||||
#ifdef CONFIG_PPC_64K_PAGES
|
||||
/* PAGE_SIZE mappings */
|
||||
|
@ -343,8 +338,11 @@ void __init radix__early_init_mmu(void)
|
|||
__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
|
||||
|
||||
radix_init_page_sizes();
|
||||
if (!firmware_has_feature(FW_FEATURE_LPAR))
|
||||
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
|
||||
lpcr = mfspr(SPRN_LPCR);
|
||||
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
|
||||
radix_init_partition_table();
|
||||
}
|
||||
|
||||
radix_init_pgtable();
|
||||
}
|
||||
|
@ -353,16 +351,15 @@ void radix__early_init_mmu_secondary(void)
|
|||
{
|
||||
unsigned long lpcr;
|
||||
/*
|
||||
* setup LPCR UPRT based on mmu_features
|
||||
* update partition table control register and UPRT
|
||||
*/
|
||||
lpcr = mfspr(SPRN_LPCR);
|
||||
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
|
||||
/*
|
||||
* update partition table control register, 64 K size.
|
||||
*/
|
||||
if (!firmware_has_feature(FW_FEATURE_LPAR))
|
||||
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
|
||||
lpcr = mfspr(SPRN_LPCR);
|
||||
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
|
||||
|
||||
mtspr(SPRN_PTCR,
|
||||
__pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
|
||||
}
|
||||
}
|
||||
|
||||
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
|
||||
|
|
|
@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2;
|
|||
static int ibm_slot_error_detail;
|
||||
static int ibm_get_config_addr_info;
|
||||
static int ibm_get_config_addr_info2;
|
||||
static int ibm_configure_bridge;
|
||||
static int ibm_configure_pe;
|
||||
|
||||
/*
|
||||
|
@ -81,7 +80,14 @@ static int pseries_eeh_init(void)
|
|||
ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
|
||||
ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
|
||||
ibm_configure_pe = rtas_token("ibm,configure-pe");
|
||||
ibm_configure_bridge = rtas_token("ibm,configure-bridge");
|
||||
|
||||
/*
|
||||
* ibm,configure-pe and ibm,configure-bridge have the same semantics,
|
||||
* however ibm,configure-pe can be faster. If we can't find
|
||||
* ibm,configure-pe then fall back to using ibm,configure-bridge.
|
||||
*/
|
||||
if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
|
||||
ibm_configure_pe = rtas_token("ibm,configure-bridge");
|
||||
|
||||
/*
|
||||
* Necessary sanity check. We needn't check "get-config-addr-info"
|
||||
|
@ -93,8 +99,7 @@ static int pseries_eeh_init(void)
|
|||
(ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
|
||||
ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
|
||||
ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
|
||||
(ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
|
||||
ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
|
||||
ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
|
||||
pr_info("EEH functionality not supported\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -615,29 +620,41 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
|
|||
{
|
||||
int config_addr;
|
||||
int ret;
|
||||
/* Waiting 0.2s maximum before skipping configuration */
|
||||
int max_wait = 200;
|
||||
|
||||
/* Figure out the PE address */
|
||||
config_addr = pe->config_addr;
|
||||
if (pe->addr)
|
||||
config_addr = pe->addr;
|
||||
|
||||
/* Use new configure-pe function, if supported */
|
||||
if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
|
||||
while (max_wait > 0) {
|
||||
ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
|
||||
config_addr, BUID_HI(pe->phb->buid),
|
||||
BUID_LO(pe->phb->buid));
|
||||
} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
|
||||
ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
|
||||
config_addr, BUID_HI(pe->phb->buid),
|
||||
BUID_LO(pe->phb->buid));
|
||||
} else {
|
||||
return -EFAULT;
|
||||
|
||||
if (!ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* If RTAS returns a delay value that's above 100ms, cut it
|
||||
* down to 100ms in case firmware made a mistake. For more
|
||||
* on how these delay values work see rtas_busy_delay_time
|
||||
*/
|
||||
if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
|
||||
ret <= RTAS_EXTENDED_DELAY_MAX)
|
||||
ret = RTAS_EXTENDED_DELAY_MIN+2;
|
||||
|
||||
max_wait -= rtas_busy_delay_time(ret);
|
||||
|
||||
if (max_wait < 0)
|
||||
break;
|
||||
|
||||
rtas_busy_delay(ret);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
|
||||
__func__, pe->phb->global_number, pe->addr, ret);
|
||||
|
||||
pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
|
||||
__func__, pe->phb->global_number, pe->addr, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,8 +1,7 @@
|
|||
CONFIG_SYSVIPC=y
|
||||
CONFIG_POSIX_MQUEUE=y
|
||||
CONFIG_FHANDLE=y
|
||||
CONFIG_AUDIT=y
|
||||
CONFIG_NO_HZ=y
|
||||
CONFIG_NO_HZ_IDLE=y
|
||||
CONFIG_HIGH_RES_TIMERS=y
|
||||
CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_BSD_PROCESS_ACCT_V3=y
|
||||
|
@ -13,19 +12,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
|
|||
CONFIG_IKCONFIG=y
|
||||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_NUMA_BALANCING=y
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CGROUP_PIDS=y
|
||||
CONFIG_CGROUP_DEVICE=y
|
||||
CONFIG_CPUSETS=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_MEMCG=y
|
||||
CONFIG_MEMCG_SWAP=y
|
||||
CONFIG_MEMCG_KMEM=y
|
||||
CONFIG_CGROUP_HUGETLB=y
|
||||
CONFIG_CGROUP_PERF=y
|
||||
CONFIG_BLK_CGROUP=y
|
||||
CONFIG_CFS_BANDWIDTH=y
|
||||
CONFIG_RT_GROUP_SCHED=y
|
||||
CONFIG_BLK_CGROUP=y
|
||||
CONFIG_CGROUP_PIDS=y
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CGROUP_HUGETLB=y
|
||||
CONFIG_CPUSETS=y
|
||||
CONFIG_CGROUP_DEVICE=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_CGROUP_PERF=y
|
||||
CONFIG_CHECKPOINT_RESTORE=y
|
||||
CONFIG_NAMESPACES=y
|
||||
CONFIG_USER_NS=y
|
||||
CONFIG_SCHED_AUTOGROUP=y
|
||||
|
@ -55,7 +54,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
|
|||
CONFIG_CFQ_GROUP_IOSCHED=y
|
||||
CONFIG_DEFAULT_DEADLINE=y
|
||||
CONFIG_LIVEPATCH=y
|
||||
CONFIG_MARCH_Z196=y
|
||||
CONFIG_TUNE_ZEC12=y
|
||||
CONFIG_NR_CPUS=256
|
||||
CONFIG_NUMA=y
|
||||
|
@ -65,6 +63,15 @@ CONFIG_MEMORY_HOTPLUG=y
|
|||
CONFIG_MEMORY_HOTREMOVE=y
|
||||
CONFIG_KSM=y
|
||||
CONFIG_TRANSPARENT_HUGEPAGE=y
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_FRONTSWAP=y
|
||||
CONFIG_CMA=y
|
||||
CONFIG_MEM_SOFT_DIRTY=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_ZBUD=m
|
||||
CONFIG_ZSMALLOC=m
|
||||
CONFIG_ZSMALLOC_STAT=y
|
||||
CONFIG_IDLE_PAGE_TRACKING=y
|
||||
CONFIG_PCI=y
|
||||
CONFIG_PCI_DEBUG=y
|
||||
CONFIG_HOTPLUG_PCI=y
|
||||
|
@ -452,6 +459,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
|
|||
CONFIG_RAW_DRIVER=m
|
||||
CONFIG_HANGCHECK_TIMER=m
|
||||
CONFIG_TN3270_FS=y
|
||||
# CONFIG_HWMON is not set
|
||||
CONFIG_WATCHDOG=y
|
||||
CONFIG_WATCHDOG_NOWAYOUT=y
|
||||
CONFIG_SOFT_WATCHDOG=m
|
||||
|
@ -537,6 +545,8 @@ CONFIG_DLM=m
|
|||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF4=y
|
||||
CONFIG_GDB_SCRIPTS=y
|
||||
CONFIG_FRAME_WARN=1024
|
||||
CONFIG_READABLE_ASM=y
|
||||
CONFIG_UNUSED_SYMBOLS=y
|
||||
|
@ -555,13 +565,17 @@ CONFIG_SLUB_DEBUG_ON=y
|
|||
CONFIG_SLUB_STATS=y
|
||||
CONFIG_DEBUG_STACK_USAGE=y
|
||||
CONFIG_DEBUG_VM=y
|
||||
CONFIG_DEBUG_VM_VMACACHE=y
|
||||
CONFIG_DEBUG_VM_RB=y
|
||||
CONFIG_DEBUG_VM_PGFLAGS=y
|
||||
CONFIG_DEBUG_MEMORY_INIT=y
|
||||
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
|
||||
CONFIG_DEBUG_PER_CPU_MAPS=y
|
||||
CONFIG_DEBUG_SHIRQ=y
|
||||
CONFIG_DETECT_HUNG_TASK=y
|
||||
CONFIG_WQ_WATCHDOG=y
|
||||
CONFIG_PANIC_ON_OOPS=y
|
||||
CONFIG_DEBUG_TIMEKEEPING=y
|
||||
CONFIG_TIMER_STATS=y
|
||||
CONFIG_DEBUG_RT_MUTEXES=y
|
||||
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
|
||||
|
@ -596,6 +610,8 @@ CONFIG_FTRACE_SYSCALLS=y
|
|||
CONFIG_STACK_TRACER=y
|
||||
CONFIG_BLK_DEV_IO_TRACE=y
|
||||
CONFIG_UPROBE_EVENT=y
|
||||
CONFIG_FUNCTION_PROFILER=y
|
||||
CONFIG_TRACE_ENUM_MAP_FILE=y
|
||||
CONFIG_LKDTM=m
|
||||
CONFIG_TEST_LIST_SORT=y
|
||||
CONFIG_KPROBES_SANITY_TEST=y
|
||||
|
@ -607,7 +623,6 @@ CONFIG_TEST_STRING_HELPERS=y
|
|||
CONFIG_TEST_KSTRTOX=y
|
||||
CONFIG_DMA_API_DEBUG=y
|
||||
CONFIG_TEST_BPF=m
|
||||
# CONFIG_STRICT_DEVMEM is not set
|
||||
CONFIG_S390_PTDUMP=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_SECURITY=y
|
||||
|
@ -651,7 +666,6 @@ CONFIG_CRYPTO_SEED=m
|
|||
CONFIG_CRYPTO_SERPENT=m
|
||||
CONFIG_CRYPTO_TEA=m
|
||||
CONFIG_CRYPTO_TWOFISH=m
|
||||
CONFIG_CRYPTO_ZLIB=y
|
||||
CONFIG_CRYPTO_LZO=m
|
||||
CONFIG_CRYPTO_LZ4=m
|
||||
CONFIG_CRYPTO_LZ4HC=m
|
||||
|
@ -664,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m
|
|||
CONFIG_CRYPTO_DES_S390=m
|
||||
CONFIG_CRYPTO_AES_S390=m
|
||||
CONFIG_CRYPTO_GHASH_S390=m
|
||||
CONFIG_ASYMMETRIC_KEY_TYPE=m
|
||||
CONFIG_ASYMMETRIC_KEY_TYPE=y
|
||||
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
|
||||
CONFIG_X509_CERTIFICATE_PARSER=m
|
||||
CONFIG_CRC7=m
|
||||
|
|
|
@ -1,8 +1,7 @@
|
|||
CONFIG_SYSVIPC=y
|
||||
CONFIG_POSIX_MQUEUE=y
|
||||
CONFIG_FHANDLE=y
|
||||
CONFIG_AUDIT=y
|
||||
CONFIG_NO_HZ=y
|
||||
CONFIG_NO_HZ_IDLE=y
|
||||
CONFIG_HIGH_RES_TIMERS=y
|
||||
CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_BSD_PROCESS_ACCT_V3=y
|
||||
|
@ -13,17 +12,17 @@ CONFIG_TASK_IO_ACCOUNTING=y
|
|||
CONFIG_IKCONFIG=y
|
||||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_NUMA_BALANCING=y
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CGROUP_PIDS=y
|
||||
CONFIG_CGROUP_DEVICE=y
|
||||
CONFIG_CPUSETS=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_MEMCG=y
|
||||
CONFIG_MEMCG_SWAP=y
|
||||
CONFIG_MEMCG_KMEM=y
|
||||
CONFIG_CGROUP_HUGETLB=y
|
||||
CONFIG_CGROUP_PERF=y
|
||||
CONFIG_BLK_CGROUP=y
|
||||
CONFIG_CGROUP_PIDS=y
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CGROUP_HUGETLB=y
|
||||
CONFIG_CPUSETS=y
|
||||
CONFIG_CGROUP_DEVICE=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_CGROUP_PERF=y
|
||||
CONFIG_CHECKPOINT_RESTORE=y
|
||||
CONFIG_NAMESPACES=y
|
||||
CONFIG_USER_NS=y
|
||||
CONFIG_SCHED_AUTOGROUP=y
|
||||
|
@ -53,7 +52,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
|
|||
CONFIG_UNIXWARE_DISKLABEL=y
|
||||
CONFIG_CFQ_GROUP_IOSCHED=y
|
||||
CONFIG_DEFAULT_DEADLINE=y
|
||||
CONFIG_MARCH_Z196=y
|
||||
CONFIG_TUNE_ZEC12=y
|
||||
CONFIG_NR_CPUS=256
|
||||
CONFIG_NUMA=y
|
||||
|
@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
|
|||
CONFIG_MEMORY_HOTREMOVE=y
|
||||
CONFIG_KSM=y
|
||||
CONFIG_TRANSPARENT_HUGEPAGE=y
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_FRONTSWAP=y
|
||||
CONFIG_CMA=y
|
||||
CONFIG_ZSWAP=y
|
||||
CONFIG_ZBUD=m
|
||||
CONFIG_ZSMALLOC=m
|
||||
CONFIG_ZSMALLOC_STAT=y
|
||||
CONFIG_IDLE_PAGE_TRACKING=y
|
||||
CONFIG_PCI=y
|
||||
CONFIG_HOTPLUG_PCI=y
|
||||
CONFIG_HOTPLUG_PCI_S390=y
|
||||
|
@ -530,6 +536,8 @@ CONFIG_NLS_UTF8=m
|
|||
CONFIG_DLM=m
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF4=y
|
||||
CONFIG_GDB_SCRIPTS=y
|
||||
# CONFIG_ENABLE_MUST_CHECK is not set
|
||||
CONFIG_FRAME_WARN=1024
|
||||
CONFIG_UNUSED_SYMBOLS=y
|
||||
|
@ -547,13 +555,13 @@ CONFIG_LATENCYTOP=y
|
|||
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
|
||||
CONFIG_BLK_DEV_IO_TRACE=y
|
||||
# CONFIG_KPROBE_EVENT is not set
|
||||
CONFIG_TRACE_ENUM_MAP_FILE=y
|
||||
CONFIG_LKDTM=m
|
||||
CONFIG_RBTREE_TEST=m
|
||||
CONFIG_INTERVAL_TREE_TEST=m
|
||||
CONFIG_PERCPU_TEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=y
|
||||
CONFIG_TEST_BPF=m
|
||||
# CONFIG_STRICT_DEVMEM is not set
|
||||
CONFIG_S390_PTDUMP=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_SECURITY=y
|
||||
|
@ -597,8 +605,6 @@ CONFIG_CRYPTO_SEED=m
|
|||
CONFIG_CRYPTO_SERPENT=m
|
||||
CONFIG_CRYPTO_TEA=m
|
||||
CONFIG_CRYPTO_TWOFISH=m
|
||||
CONFIG_CRYPTO_ZLIB=y
|
||||
CONFIG_CRYPTO_LZO=m
|
||||
CONFIG_CRYPTO_LZ4=m
|
||||
CONFIG_CRYPTO_LZ4HC=m
|
||||
CONFIG_CRYPTO_USER_API_HASH=m
|
||||
|
@ -610,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m
|
|||
CONFIG_CRYPTO_DES_S390=m
|
||||
CONFIG_CRYPTO_AES_S390=m
|
||||
CONFIG_CRYPTO_GHASH_S390=m
|
||||
CONFIG_ASYMMETRIC_KEY_TYPE=m
|
||||
CONFIG_ASYMMETRIC_KEY_TYPE=y
|
||||
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
|
||||
CONFIG_X509_CERTIFICATE_PARSER=m
|
||||
CONFIG_CRC7=m
|
||||
|
|
|
@ -1,8 +1,7 @@
|
|||
CONFIG_SYSVIPC=y
|
||||
CONFIG_POSIX_MQUEUE=y
|
||||
CONFIG_FHANDLE=y
|
||||
CONFIG_AUDIT=y
|
||||
CONFIG_NO_HZ=y
|
||||
CONFIG_NO_HZ_IDLE=y
|
||||
CONFIG_HIGH_RES_TIMERS=y
|
||||
CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_BSD_PROCESS_ACCT_V3=y
|
||||
|
@ -14,17 +13,17 @@ CONFIG_IKCONFIG=y
|
|||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_NUMA_BALANCING=y
|
||||
# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CGROUP_PIDS=y
|
||||
CONFIG_CGROUP_DEVICE=y
|
||||
CONFIG_CPUSETS=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_MEMCG=y
|
||||
CONFIG_MEMCG_SWAP=y
|
||||
CONFIG_MEMCG_KMEM=y
|
||||
CONFIG_CGROUP_HUGETLB=y
|
||||
CONFIG_CGROUP_PERF=y
|
||||
CONFIG_BLK_CGROUP=y
|
||||
CONFIG_CGROUP_PIDS=y
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CGROUP_HUGETLB=y
|
||||
CONFIG_CPUSETS=y
|
||||
CONFIG_CGROUP_DEVICE=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_CGROUP_PERF=y
|
||||
CONFIG_CHECKPOINT_RESTORE=y
|
||||
CONFIG_NAMESPACES=y
|
||||
CONFIG_USER_NS=y
|
||||
CONFIG_SCHED_AUTOGROUP=y
|
||||
|
@ -53,7 +52,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
|
|||
CONFIG_CFQ_GROUP_IOSCHED=y
|
||||
CONFIG_DEFAULT_DEADLINE=y
|
||||
CONFIG_LIVEPATCH=y
|
||||
CONFIG_MARCH_Z196=y
|
||||
CONFIG_TUNE_ZEC12=y
|
||||
CONFIG_NR_CPUS=512
|
||||
CONFIG_NUMA=y
|
||||
|
@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
|
|||
CONFIG_MEMORY_HOTREMOVE=y
|
||||
CONFIG_KSM=y
|
||||
CONFIG_TRANSPARENT_HUGEPAGE=y
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_FRONTSWAP=y
|
||||
CONFIG_CMA=y
|
||||
CONFIG_ZSWAP=y
|
||||
CONFIG_ZBUD=m
|
||||
CONFIG_ZSMALLOC=m
|
||||
CONFIG_ZSMALLOC_STAT=y
|
||||
CONFIG_IDLE_PAGE_TRACKING=y
|
||||
CONFIG_PCI=y
|
||||
CONFIG_HOTPLUG_PCI=y
|
||||
CONFIG_HOTPLUG_PCI_S390=y
|
||||
|
@ -447,6 +453,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
|
|||
CONFIG_RAW_DRIVER=m
|
||||
CONFIG_HANGCHECK_TIMER=m
|
||||
CONFIG_TN3270_FS=y
|
||||
# CONFIG_HWMON is not set
|
||||
CONFIG_WATCHDOG=y
|
||||
CONFIG_WATCHDOG_NOWAYOUT=y
|
||||
CONFIG_SOFT_WATCHDOG=m
|
||||
|
@ -530,6 +537,8 @@ CONFIG_NLS_UTF8=m
|
|||
CONFIG_DLM=m
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF4=y
|
||||
CONFIG_GDB_SCRIPTS=y
|
||||
# CONFIG_ENABLE_MUST_CHECK is not set
|
||||
CONFIG_FRAME_WARN=1024
|
||||
CONFIG_UNUSED_SYMBOLS=y
|
||||
|
@ -546,11 +555,12 @@ CONFIG_FTRACE_SYSCALLS=y
|
|||
CONFIG_STACK_TRACER=y
|
||||
CONFIG_BLK_DEV_IO_TRACE=y
|
||||
CONFIG_UPROBE_EVENT=y
|
||||
CONFIG_FUNCTION_PROFILER=y
|
||||
CONFIG_TRACE_ENUM_MAP_FILE=y
|
||||
CONFIG_LKDTM=m
|
||||
CONFIG_PERCPU_TEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=y
|
||||
CONFIG_TEST_BPF=m
|
||||
# CONFIG_STRICT_DEVMEM is not set
|
||||
CONFIG_S390_PTDUMP=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_SECURITY=y
|
||||
|
@ -594,8 +604,6 @@ CONFIG_CRYPTO_SEED=m
|
|||
CONFIG_CRYPTO_SERPENT=m
|
||||
CONFIG_CRYPTO_TEA=m
|
||||
CONFIG_CRYPTO_TWOFISH=m
|
||||
CONFIG_CRYPTO_ZLIB=y
|
||||
CONFIG_CRYPTO_LZO=m
|
||||
CONFIG_CRYPTO_LZ4=m
|
||||
CONFIG_CRYPTO_LZ4HC=m
|
||||
CONFIG_CRYPTO_USER_API_HASH=m
|
||||
|
@ -607,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
|
|||
CONFIG_CRYPTO_DES_S390=m
|
||||
CONFIG_CRYPTO_AES_S390=m
|
||||
CONFIG_CRYPTO_GHASH_S390=m
|
||||
CONFIG_ASYMMETRIC_KEY_TYPE=m
|
||||
CONFIG_ASYMMETRIC_KEY_TYPE=y
|
||||
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
|
||||
CONFIG_X509_CERTIFICATE_PARSER=m
|
||||
CONFIG_CRC7=m
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# CONFIG_SWAP is not set
|
||||
CONFIG_NO_HZ=y
|
||||
CONFIG_NO_HZ_IDLE=y
|
||||
CONFIG_HIGH_RES_TIMERS=y
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
|
||||
|
@ -7,7 +7,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
|
|||
CONFIG_PARTITION_ADVANCED=y
|
||||
CONFIG_IBM_PARTITION=y
|
||||
CONFIG_DEFAULT_DEADLINE=y
|
||||
CONFIG_MARCH_Z196=y
|
||||
CONFIG_TUNE_ZEC12=y
|
||||
# CONFIG_COMPAT is not set
|
||||
CONFIG_NR_CPUS=2
|
||||
|
@ -64,7 +63,6 @@ CONFIG_PANIC_ON_OOPS=y
|
|||
# CONFIG_SCHED_DEBUG is not set
|
||||
CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
||||
# CONFIG_FTRACE is not set
|
||||
# CONFIG_STRICT_DEVMEM is not set
|
||||
# CONFIG_PFAULT is not set
|
||||
# CONFIG_S390_HYPFS_FS is not set
|
||||
# CONFIG_VIRTUALIZATION is not set
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
CONFIG_SYSVIPC=y
|
||||
CONFIG_POSIX_MQUEUE=y
|
||||
CONFIG_FHANDLE=y
|
||||
CONFIG_USELIB=y
|
||||
CONFIG_AUDIT=y
|
||||
CONFIG_NO_HZ=y
|
||||
CONFIG_NO_HZ_IDLE=y
|
||||
CONFIG_HIGH_RES_TIMERS=y
|
||||
CONFIG_TASKSTATS=y
|
||||
CONFIG_TASK_DELAY_ACCT=y
|
||||
|
@ -11,19 +11,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
|
|||
CONFIG_IKCONFIG=y
|
||||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_CGROUPS=y
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CGROUP_PIDS=y
|
||||
CONFIG_CGROUP_DEVICE=y
|
||||
CONFIG_CPUSETS=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_MEMCG=y
|
||||
CONFIG_MEMCG_SWAP=y
|
||||
CONFIG_MEMCG_KMEM=y
|
||||
CONFIG_CGROUP_HUGETLB=y
|
||||
CONFIG_CGROUP_PERF=y
|
||||
CONFIG_BLK_CGROUP=y
|
||||
CONFIG_CGROUP_SCHED=y
|
||||
CONFIG_RT_GROUP_SCHED=y
|
||||
CONFIG_BLK_CGROUP=y
|
||||
CONFIG_CGROUP_PIDS=y
|
||||
CONFIG_CGROUP_FREEZER=y
|
||||
CONFIG_CGROUP_HUGETLB=y
|
||||
CONFIG_CPUSETS=y
|
||||
CONFIG_CGROUP_DEVICE=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_CGROUP_PERF=y
|
||||
CONFIG_CHECKPOINT_RESTORE=y
|
||||
CONFIG_NAMESPACES=y
|
||||
CONFIG_USER_NS=y
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
|
@ -44,7 +44,6 @@ CONFIG_PARTITION_ADVANCED=y
|
|||
CONFIG_IBM_PARTITION=y
|
||||
CONFIG_DEFAULT_DEADLINE=y
|
||||
CONFIG_LIVEPATCH=y
|
||||
CONFIG_MARCH_Z196=y
|
||||
CONFIG_NR_CPUS=256
|
||||
CONFIG_NUMA=y
|
||||
CONFIG_HZ_100=y
|
||||
|
@ -52,6 +51,14 @@ CONFIG_MEMORY_HOTPLUG=y
|
|||
CONFIG_MEMORY_HOTREMOVE=y
|
||||
CONFIG_KSM=y
|
||||
CONFIG_TRANSPARENT_HUGEPAGE=y
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_FRONTSWAP=y
|
||||
CONFIG_CMA=y
|
||||
CONFIG_ZSWAP=y
|
||||
CONFIG_ZBUD=m
|
||||
CONFIG_ZSMALLOC=m
|
||||
CONFIG_ZSMALLOC_STAT=y
|
||||
CONFIG_IDLE_PAGE_TRACKING=y
|
||||
CONFIG_CRASH_DUMP=y
|
||||
CONFIG_BINFMT_MISC=m
|
||||
CONFIG_HIBERNATION=y
|
||||
|
@ -61,7 +68,6 @@ CONFIG_UNIX=y
|
|||
CONFIG_NET_KEY=y
|
||||
CONFIG_INET=y
|
||||
CONFIG_IP_MULTICAST=y
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_L2TP=m
|
||||
CONFIG_L2TP_DEBUGFS=m
|
||||
CONFIG_VLAN_8021Q=y
|
||||
|
@ -144,6 +150,9 @@ CONFIG_TMPFS=y
|
|||
CONFIG_TMPFS_POSIX_ACL=y
|
||||
CONFIG_HUGETLBFS=y
|
||||
# CONFIG_NETWORK_FILESYSTEMS is not set
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF4=y
|
||||
CONFIG_GDB_SCRIPTS=y
|
||||
CONFIG_UNUSED_SYMBOLS=y
|
||||
CONFIG_DEBUG_SECTION_MISMATCH=y
|
||||
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
|
||||
|
@ -158,20 +167,21 @@ CONFIG_LOCK_STAT=y
|
|||
CONFIG_DEBUG_LOCKDEP=y
|
||||
CONFIG_DEBUG_ATOMIC_SLEEP=y
|
||||
CONFIG_DEBUG_LIST=y
|
||||
CONFIG_DEBUG_PI_LIST=y
|
||||
CONFIG_DEBUG_SG=y
|
||||
CONFIG_DEBUG_NOTIFIERS=y
|
||||
CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
||||
CONFIG_RCU_TRACE=y
|
||||
CONFIG_LATENCYTOP=y
|
||||
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
|
||||
CONFIG_TRACER_SNAPSHOT=y
|
||||
CONFIG_SCHED_TRACER=y
|
||||
CONFIG_FTRACE_SYSCALLS=y
|
||||
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
|
||||
CONFIG_STACK_TRACER=y
|
||||
CONFIG_BLK_DEV_IO_TRACE=y
|
||||
CONFIG_UPROBE_EVENT=y
|
||||
CONFIG_FUNCTION_PROFILER=y
|
||||
CONFIG_TRACE_ENUM_MAP_FILE=y
|
||||
CONFIG_KPROBES_SANITY_TEST=y
|
||||
# CONFIG_STRICT_DEVMEM is not set
|
||||
CONFIG_S390_PTDUMP=y
|
||||
CONFIG_CRYPTO_CRYPTD=m
|
||||
CONFIG_CRYPTO_AUTHENC=m
|
||||
|
@ -212,8 +222,6 @@ CONFIG_CRYPTO_SERPENT=m
|
|||
CONFIG_CRYPTO_TEA=m
|
||||
CONFIG_CRYPTO_TWOFISH=m
|
||||
CONFIG_CRYPTO_DEFLATE=m
|
||||
CONFIG_CRYPTO_ZLIB=m
|
||||
CONFIG_CRYPTO_LZO=m
|
||||
CONFIG_CRYPTO_LZ4=m
|
||||
CONFIG_CRYPTO_LZ4HC=m
|
||||
CONFIG_CRYPTO_ANSI_CPRNG=m
|
||||
|
|
|
@ -250,6 +250,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
|
|||
|
||||
report_user_fault(regs, SIGSEGV, 1);
|
||||
si.si_signo = SIGSEGV;
|
||||
si.si_errno = 0;
|
||||
si.si_code = si_code;
|
||||
si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
|
||||
force_sig_info(SIGSEGV, &si, current);
|
||||
|
|
|
@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
|
|||
* | | |
|
||||
* +---------------+ |
|
||||
* | 8 byte skbp | |
|
||||
* R15+170 -> +---------------+ |
|
||||
* R15+176 -> +---------------+ |
|
||||
* | 8 byte hlen | |
|
||||
* R15+168 -> +---------------+ |
|
||||
* | 4 byte align | |
|
||||
|
@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
|
|||
#define STK_OFF (STK_SPACE - STK_160_UNUSED)
|
||||
#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
|
||||
#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
|
||||
#define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */
|
||||
#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */
|
||||
|
||||
#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
|
||||
#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
|
||||
|
|
|
@ -45,7 +45,7 @@ struct bpf_jit {
|
|||
int labels[1]; /* Labels for local jumps */
|
||||
};
|
||||
|
||||
#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */
|
||||
#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */
|
||||
|
||||
#define SEEN_SKB 1 /* skb access */
|
||||
#define SEEN_MEM 2 /* use mem[] for temporary storage */
|
||||
|
@ -450,7 +450,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
|
|||
emit_load_skb_data_hlen(jit);
|
||||
if (jit->seen & SEEN_SKB_CHANGE)
|
||||
/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
|
||||
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
|
||||
EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
|
||||
STK_OFF_SKBP);
|
||||
}
|
||||
|
||||
|
|
|
@ -15,6 +15,10 @@
|
|||
|
||||
#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
|
||||
|
||||
#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
|
||||
#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
|
||||
#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
|
||||
|
||||
#define __CHEETAH_ID 0x003e0014
|
||||
#define __JALAPENO_ID 0x003e0016
|
||||
#define __SERRANO_ID 0x003e0022
|
||||
|
|
|
@ -589,8 +589,8 @@ user_rtt_fill_64bit: \
|
|||
restored; \
|
||||
nop; nop; nop; nop; nop; nop; \
|
||||
nop; nop; nop; nop; nop; \
|
||||
ba,a,pt %xcc, user_rtt_fill_fixup; \
|
||||
ba,a,pt %xcc, user_rtt_fill_fixup; \
|
||||
ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
|
||||
ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
|
||||
ba,a,pt %xcc, user_rtt_fill_fixup;
|
||||
|
||||
|
||||
|
@ -652,8 +652,8 @@ user_rtt_fill_32bit: \
|
|||
restored; \
|
||||
nop; nop; nop; nop; nop; \
|
||||
nop; nop; nop; \
|
||||
ba,a,pt %xcc, user_rtt_fill_fixup; \
|
||||
ba,a,pt %xcc, user_rtt_fill_fixup; \
|
||||
ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
|
||||
ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
|
||||
ba,a,pt %xcc, user_rtt_fill_fixup;
|
||||
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
|
|||
CFLAGS_REMOVE_pcr.o := -pg
|
||||
endif
|
||||
|
||||
obj-$(CONFIG_SPARC64) += urtt_fill.o
|
||||
obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
|
||||
obj-$(CONFIG_SPARC32) += etrap_32.o
|
||||
obj-$(CONFIG_SPARC32) += rtrap_32.o
|
||||
|
|
|
@ -14,10 +14,6 @@
|
|||
#include <asm/visasm.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
|
||||
#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
|
||||
#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
|
||||
|
||||
#ifdef CONFIG_CONTEXT_TRACKING
|
||||
# define SCHEDULE_USER schedule_user
|
||||
#else
|
||||
|
@ -242,52 +238,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
|
|||
wrpr %g1, %cwp
|
||||
ba,a,pt %xcc, user_rtt_fill_64bit
|
||||
|
||||
user_rtt_fill_fixup_dax:
|
||||
ba,pt %xcc, user_rtt_fill_fixup_common
|
||||
mov 1, %g3
|
||||
|
||||
user_rtt_fill_fixup_mna:
|
||||
ba,pt %xcc, user_rtt_fill_fixup_common
|
||||
mov 2, %g3
|
||||
|
||||
user_rtt_fill_fixup:
|
||||
rdpr %cwp, %g1
|
||||
add %g1, 1, %g1
|
||||
wrpr %g1, 0x0, %cwp
|
||||
|
||||
rdpr %wstate, %g2
|
||||
sll %g2, 3, %g2
|
||||
wrpr %g2, 0x0, %wstate
|
||||
|
||||
/* We know %canrestore and %otherwin are both zero. */
|
||||
|
||||
sethi %hi(sparc64_kern_pri_context), %g2
|
||||
ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
|
||||
mov PRIMARY_CONTEXT, %g1
|
||||
|
||||
661: stxa %g2, [%g1] ASI_DMMU
|
||||
.section .sun4v_1insn_patch, "ax"
|
||||
.word 661b
|
||||
stxa %g2, [%g1] ASI_MMU
|
||||
.previous
|
||||
|
||||
sethi %hi(KERNBASE), %g1
|
||||
flush %g1
|
||||
|
||||
or %g4, FAULT_CODE_WINFIXUP, %g4
|
||||
stb %g4, [%g6 + TI_FAULT_CODE]
|
||||
stx %g5, [%g6 + TI_FAULT_ADDR]
|
||||
|
||||
mov %g6, %l1
|
||||
wrpr %g0, 0x0, %tl
|
||||
|
||||
661: nop
|
||||
.section .sun4v_1insn_patch, "ax"
|
||||
.word 661b
|
||||
SET_GL(0)
|
||||
.previous
|
||||
|
||||
wrpr %g0, RTRAP_PSTATE, %pstate
|
||||
|
||||
mov %l1, %g6
|
||||
ldx [%g6 + TI_TASK], %g4
|
||||
LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
|
||||
call do_sparc64_fault
|
||||
add %sp, PTREGS_OFF, %o0
|
||||
ba,pt %xcc, rtrap
|
||||
nop
|
||||
ba,pt %xcc, user_rtt_fill_fixup_common
|
||||
clr %g3
|
||||
|
||||
user_rtt_pre_restore:
|
||||
add %g1, 1, %g1
|
||||
|
|
|
@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Checks if the fp is valid. We always build signal frames which are
|
||||
* 16-byte aligned, therefore we can always enforce that the restore
|
||||
* frame has that property as well.
|
||||
*/
|
||||
static bool invalid_frame_pointer(void __user *fp, int fplen)
|
||||
{
|
||||
if ((((unsigned long) fp) & 15) ||
|
||||
((unsigned long)fp) > 0x100000000ULL - fplen)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
void do_sigreturn32(struct pt_regs *regs)
|
||||
{
|
||||
struct signal_frame32 __user *sf;
|
||||
compat_uptr_t fpu_save;
|
||||
compat_uptr_t rwin_save;
|
||||
unsigned int psr;
|
||||
unsigned int psr, ufp;
|
||||
unsigned int pc, npc;
|
||||
sigset_t set;
|
||||
compat_sigset_t seta;
|
||||
|
@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
|
|||
sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
|
||||
|
||||
/* 1. Make sure we are not getting garbage from the user */
|
||||
if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
|
||||
(((unsigned long) sf) & 3))
|
||||
if (invalid_frame_pointer(sf, sizeof(*sf)))
|
||||
goto segv;
|
||||
|
||||
if (get_user(pc, &sf->info.si_regs.pc) ||
|
||||
if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
|
||||
goto segv;
|
||||
|
||||
if (ufp & 0x7)
|
||||
goto segv;
|
||||
|
||||
if (__get_user(pc, &sf->info.si_regs.pc) ||
|
||||
__get_user(npc, &sf->info.si_regs.npc))
|
||||
goto segv;
|
||||
|
||||
|
@ -227,7 +244,7 @@ segv:
|
|||
asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
|
||||
{
|
||||
struct rt_signal_frame32 __user *sf;
|
||||
unsigned int psr, pc, npc;
|
||||
unsigned int psr, pc, npc, ufp;
|
||||
compat_uptr_t fpu_save;
|
||||
compat_uptr_t rwin_save;
|
||||
sigset_t set;
|
||||
|
@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
|
|||
sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
|
||||
|
||||
/* 1. Make sure we are not getting garbage from the user */
|
||||
if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
|
||||
(((unsigned long) sf) & 3))
|
||||
if (invalid_frame_pointer(sf, sizeof(*sf)))
|
||||
goto segv;
|
||||
|
||||
if (get_user(pc, &sf->regs.pc) ||
|
||||
if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
|
||||
goto segv;
|
||||
|
||||
if (ufp & 0x7)
|
||||
goto segv;
|
||||
|
||||
if (__get_user(pc, &sf->regs.pc) ||
|
||||
__get_user(npc, &sf->regs.npc))
|
||||
goto segv;
|
||||
|
||||
|
@ -307,14 +329,6 @@ segv:
|
|||
force_sig(SIGSEGV, current);
|
||||
}
|
||||
|
||||
/* Checks if the fp is valid */
|
||||
static int invalid_frame_pointer(void __user *fp, int fplen)
|
||||
{
|
||||
if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
|
||||
{
|
||||
unsigned long sp;
|
||||
|
|
|
@@ -60,10 +60,22 @@ struct rt_signal_frame {
#define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))

/* Checks if the fp is valid. We always build signal frames which are
 * 16-byte aligned, therefore we can always enforce that the restore
 * frame has that property as well.
 */
static inline bool invalid_frame_pointer(void __user *fp, int fplen)
{
if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
return true;

return false;
}

asmlinkage void do_sigreturn(struct pt_regs *regs)
{
unsigned long up_psr, pc, npc, ufp;
struct signal_frame __user *sf;
unsigned long up_psr, pc, npc;
sigset_t set;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;

@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];

/* 1. Make sure we are not getting garbage from the user */
if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
if (!invalid_frame_pointer(sf, sizeof(*sf)))
goto segv_and_exit;

if (((unsigned long) sf) & 3)
if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
goto segv_and_exit;

if (ufp & 0x7)
goto segv_and_exit;

err = __get_user(pc, &sf->info.si_regs.pc);

@@ -127,7 +142,7 @@ segv_and_exit:
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
struct rt_signal_frame __user *sf;
unsigned int psr, pc, npc;
unsigned int psr, pc, npc, ufp;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
sigset_t set;

@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
synchronize_user_stack();
sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
(((unsigned long) sf) & 0x03))
if (!invalid_frame_pointer(sf, sizeof(*sf)))
goto segv;

if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
goto segv;

if (ufp & 0x7)
goto segv;

err = __get_user(pc, &sf->regs.pc);

@@ -178,15 +198,6 @@ segv:
force_sig(SIGSEGV, current);
}

/* Checks if the fp is valid */
static inline int invalid_frame_pointer(void __user *fp, int fplen)
{
if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
return 1;

return 0;
}

static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp = regs->u_regs[UREG_FP];

@@ -234,6 +234,17 @@ do_sigsegv:
goto out;
}

/* Checks if the fp is valid. We always build rt signal frames which
 * are 16-byte aligned, therefore we can always enforce that the
 * restore frame has that property as well.
 */
static bool invalid_frame_pointer(void __user *fp)
{
if (((unsigned long) fp) & 15)
return true;
return false;
}

struct rt_signal_frame {
struct sparc_stackf ss;
siginfo_t info;

@@ -246,8 +257,8 @@ struct rt_signal_frame {
void do_rt_sigreturn(struct pt_regs *regs)
{
unsigned long tpc, tnpc, tstate, ufp;
struct rt_signal_frame __user *sf;
unsigned long tpc, tnpc, tstate;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
sigset_t set;

@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
(regs->u_regs [UREG_FP] + STACK_BIAS);

/* 1. Make sure we are not getting garbage from the user */
if (((unsigned long) sf) & 3)
if (invalid_frame_pointer(sf))
goto segv;

err = get_user(tpc, &sf->regs.tpc);
if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
goto segv;

if ((ufp + STACK_BIAS) & 0x7)
goto segv;

err = __get_user(tpc, &sf->regs.tpc);
err |= __get_user(tnpc, &sf->regs.tnpc);
if (test_thread_flag(TIF_32BIT)) {
tpc &= 0xffffffff;

@@ -308,14 +325,6 @@ segv:
force_sig(SIGSEGV, current);
}

/* Checks if the fp is valid */
static int invalid_frame_pointer(void __user *fp)
{
if (((unsigned long) fp) & 15)
return 1;
return 0;
}

static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;

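The hunks above replace ad-hoc access_ok()/low-bit checks with a single helper that rejects any user frame pointer that is not 16-byte aligned (and, in the 32-bit variants, not fully inside the user address range). A minimal, self-contained sketch of the same idea follows; the address-space limit and all names here are assumptions for illustration, not the kernel's actual __access_ok() logic.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Assumed upper bound of the user address range for this sketch only. */
#define USER_SPACE_LIMIT 0x100000000ULL

/* Illustrative stand-in for the kernel helper: the signal frame must be
 * 16-byte aligned and must fit entirely below the assumed limit. */
static bool frame_pointer_invalid(const void *fp, size_t fplen)
{
	uintptr_t addr = (uintptr_t)fp;

	if (addr & 15)                         /* not 16-byte aligned */
		return true;
	if (addr > USER_SPACE_LIMIT - fplen)   /* frame would overrun the limit */
		return true;
	return false;
}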
@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
int err;

if (((unsigned long) fpu) & 3)
return -EFAULT;

#ifdef CONFIG_SMP
if (test_tsk_thread_flag(current, TIF_USEDFPU))
regs->psr &= ~PSR_EF;

@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
struct thread_info *t = current_thread_info();
int i, wsaved, err;

__get_user(wsaved, &rp->wsaved);
if (((unsigned long) rp) & 3)
return -EFAULT;

get_user(wsaved, &rp->wsaved);
if (wsaved > NSWINS)
return -EFAULT;

@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
unsigned long fprs;
int err;

err = __get_user(fprs, &fpu->si_fprs);
if (((unsigned long) fpu) & 7)
return -EFAULT;

err = get_user(fprs, &fpu->si_fprs);
fprs_write(0);
regs->tstate &= ~TSTATE_PEF;
if (fprs & FPRS_DL)

@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
struct thread_info *t = current_thread_info();
int i, wsaved, err;

__get_user(wsaved, &rp->wsaved);
if (((unsigned long) rp) & 7)
return -EFAULT;

get_user(wsaved, &rp->wsaved);
if (wsaved > NSWINS)
return -EFAULT;

@@ -0,0 +1,98 @@
#include <asm/thread_info.h>
#include <asm/trap_block.h>
#include <asm/spitfire.h>
#include <asm/ptrace.h>
#include <asm/head.h>

.text
.align 8
.globl user_rtt_fill_fixup_common
user_rtt_fill_fixup_common:
rdpr %cwp, %g1
add %g1, 1, %g1
wrpr %g1, 0x0, %cwp

rdpr %wstate, %g2
sll %g2, 3, %g2
wrpr %g2, 0x0, %wstate

/* We know %canrestore and %otherwin are both zero. */

sethi %hi(sparc64_kern_pri_context), %g2
ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
mov PRIMARY_CONTEXT, %g1

661: stxa %g2, [%g1] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g1] ASI_MMU
.previous

sethi %hi(KERNBASE), %g1
flush %g1

mov %g4, %l4
mov %g5, %l5
brnz,pn %g3, 1f
mov %g3, %l3

or %g4, FAULT_CODE_WINFIXUP, %g4
stb %g4, [%g6 + TI_FAULT_CODE]
stx %g5, [%g6 + TI_FAULT_ADDR]
1:
mov %g6, %l1
wrpr %g0, 0x0, %tl

661: nop
.section .sun4v_1insn_patch, "ax"
.word 661b
SET_GL(0)
.previous

wrpr %g0, RTRAP_PSTATE, %pstate

mov %l1, %g6
ldx [%g6 + TI_TASK], %g4
LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)

brnz,pn %l3, 1f
nop

call do_sparc64_fault
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop

1: cmp %g3, 2
bne,pn %xcc, 2f
nop

sethi %hi(tlb_type), %g1
lduw [%g1 + %lo(tlb_type)], %g1
cmp %g1, 3
bne,pt %icc, 1f
add %sp, PTREGS_OFF, %o0
mov %l4, %o2
call sun4v_do_mna
mov %l5, %o1
ba,a,pt %xcc, rtrap
1: mov %l4, %o1
mov %l5, %o2
call mem_address_unaligned
nop
ba,a,pt %xcc, rtrap

2: sethi %hi(tlb_type), %g1
mov %l4, %o1
lduw [%g1 + %lo(tlb_type)], %g1
mov %l5, %o2
cmp %g1, 3
bne,pt %icc, 1f
add %sp, PTREGS_OFF, %o0
call sun4v_data_access_exception
nop
ba,a,pt %xcc, rtrap

1: call spitfire_data_access_exception
nop
ba,a,pt %xcc, rtrap

@@ -2824,9 +2824,10 @@ void hugetlb_setup(struct pt_regs *regs)
 * the Data-TLB for huge pages.
 */
if (tlb_type == cheetah_plus) {
bool need_context_reload = false;
unsigned long ctx;

spin_lock(&ctx_alloc_lock);
spin_lock_irq(&ctx_alloc_lock);
ctx = mm->context.sparc64_ctx_val;
ctx &= ~CTX_PGSZ_MASK;
ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;

@@ -2845,9 +2846,12 @@ void hugetlb_setup(struct pt_regs *regs)
 * also executing in this address space.
 */
mm->context.sparc64_ctx_val = ctx;
on_each_cpu(context_reload, mm, 0);
need_context_reload = true;
}
spin_unlock(&ctx_alloc_lock);
spin_unlock_irq(&ctx_alloc_lock);

if (need_context_reload)
on_each_cpu(context_reload, mm, 0);
}
}
#endif

@@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid_entry __user *entries)
{
int r, i;
struct kvm_cpuid_entry *cpuid_entries;
struct kvm_cpuid_entry *cpuid_entries = NULL;

r = -E2BIG;
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
goto out;
r = -ENOMEM;
cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
if (!cpuid_entries)
goto out;
r = -EFAULT;
if (copy_from_user(cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry)))
goto out_free;
if (cpuid->nent) {
cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
cpuid->nent);
if (!cpuid_entries)
goto out;
r = -EFAULT;
if (copy_from_user(cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry)))
goto out;
}
for (i = 0; i < cpuid->nent; i++) {
vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;

@@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
kvm_x86_ops->cpuid_update(vcpu);
r = kvm_update_cpuid(vcpu);

out_free:
vfree(cpuid_entries);
out:
vfree(cpuid_entries);
return r;
}

@@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte)
#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
*sptep = spte;
WRITE_ONCE(*sptep, spte);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
*sptep = spte;
WRITE_ONCE(*sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)

@@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte)
 */
smp_wmb();

ssptep->spte_low = sspte.spte_low;
WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)

@@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte)
ssptep = (union split_spte *)sptep;
sspte = (union split_spte)spte;

ssptep->spte_low = sspte.spte_low;
WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

/*
 * If we map the spte from present to nonpresent, we should clear

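The SPTE hunks above switch from plain assignments to WRITE_ONCE() so the compiler cannot tear, duplicate, or reorder the store while lockless readers walk the shadow page tables. A rough, user-space approximation of the compiler-level guarantee is sketched below; the real macro lives in the kernel's <linux/compiler.h>, and the names here are illustrative only.

#include <stdint.h>

/* Illustrative only: force a single store through a volatile lvalue,
 * roughly what the kernel's WRITE_ONCE() does for word-sized types. */
#define WRITE_ONCE_SKETCH(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static void set_spte_sketch(uint64_t *sptep, uint64_t spte)
{
	/* One untorn store; the compiler may not split it into partial writes. */
	WRITE_ONCE_SKETCH(*sptep, spte);
}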
@@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_NB_CFG:
case MSR_FAM10H_MMIO_CONF_BASE:
case MSR_AMD64_BU_CFG2:
case MSR_IA32_PERF_CTL:
msr_info->data = 0;
break;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:

@@ -2972,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
| KVM_VCPUEVENT_VALID_SMM))
return -EINVAL;

if (events->exception.injected &&
(events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
return -EINVAL;

process_nmi(vcpu);
vcpu->arch.exception.pending = events->exception.injected;
vcpu->arch.exception.nr = events->exception.nr;

@@ -3036,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
if (dbgregs->flags)
return -EINVAL;

if (dbgregs->dr6 & ~0xffffffffull)
return -EINVAL;
if (dbgregs->dr7 & ~0xffffffffull)
return -EINVAL;

memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = dbgregs->dr6;

@@ -7815,7 +7825,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
slot = id_to_memslot(slots, id);
if (size) {
if (WARN_ON(slot->npages))
if (slot->npages)
return -EEXIST;

/*

@@ -13,6 +13,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
tristate "Asymmetric public-key crypto algorithm subtype"
select MPILIB
select CRYPTO_HASH_INFO
select CRYPTO_AKCIPHER
help
This option provides support for asymmetric public key type handling.
If signature generation and/or verification are to be used,

@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

pr->pblk = object.processor.pblk_address;

/*
 * We don't care about error returns - we just try to mark
 * these reserved so that nobody else is confused into thinking
 * that this region might be unused..
 *
 * (In particular, allocating the IO range for Cardbus)
 */
request_region(pr->throttling.address, 6, "ACPI CPU throttle");
}

/*

@@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
}

int acpi_video_get_levels(struct acpi_device *device,
struct acpi_video_device_brightness **dev_br)
struct acpi_video_device_brightness **dev_br,
int *pmax_level)
{
union acpi_object *obj = NULL;
int i, max_level = 0, count = 0, level_ac_battery = 0;

@@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device,
br->count = count;
*dev_br = br;
if (pmax_level)
*pmax_level = max_level;

out:
kfree(obj);

@@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
struct acpi_video_device_brightness *br = NULL;
int result = -EINVAL;

result = acpi_video_get_levels(device->dev, &br);
result = acpi_video_get_levels(device->dev, &br, &max_level);
if (result)
return result;
device->brightness = br;

@@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry) {
if (!acpi_video_device_lcd_query_levels(dev, &levels))
if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
kfree(levels);
}
mutex_unlock(&video->device_list_lock);

@@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value,
static u8
acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
{
u64 address;

if (!reg->access_width) {
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
max_bit_width = 32;
}

/*
 * Detect old register descriptors where only the bit_width field
 * makes senses. The target address is copied to handle possible
 * alignment issues.
 * makes senses.
 */
ACPI_MOVE_64_TO_64(&address, &reg->address);
if (!reg->bit_offset && reg->bit_width &&
if (reg->bit_width < max_bit_width &&
!reg->bit_offset && reg->bit_width &&
ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
ACPI_IS_ALIGNED(reg->bit_width, 8) &&
ACPI_IS_ALIGNED(address, reg->bit_width)) {
ACPI_IS_ALIGNED(reg->bit_width, 8)) {
return (reg->bit_width);
} else {
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
return (32);
} else {
return (max_bit_width);
}
}
return (max_bit_width);
} else {
return (1 << (reg->access_width + 2));
}

@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
if (!pr->flags.throttling)
return -ENODEV;

/*
 * We don't care about error returns - we just try to mark
 * these reserved so that nobody else is confused into thinking
 * that this region might be unused..
 *
 * (In particular, allocating the IO range for Cardbus)
 */
request_region(pr->throttling.address, 6, "ACPI CPU throttle");

pr->throttling.state = 0;

duty_mask = pr->throttling.state_count - 1;

@@ -181,13 +181,17 @@ static char *res_strings[] = {
"reserved 27",
"reserved 28",
"reserved 29",
"reserved 30",
"reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
"reassembly abort: no buffers",
"receive buffer overflow",
"change in GFC",
"receive buffer full",
"low priority discard - no receive descriptor",
"low priority discard - missing end of packet",
"reserved 37",
"reserved 38",
"reserved 39",
"reseverd 40",
"reserved 41",
"reserved 42",
"reserved 43",

@@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev)
/* make the ptr point to the corresponding buffer desc entry */
buf_desc_ptr += desc;
if (!desc || (desc > iadev->num_rx_desc) ||
((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
free_desc(dev, desc);
IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
return -1;

@@ -1832,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
clamp_val(target_freq, policy->min, policy->max);
target_freq = clamp_val(target_freq, policy->min, policy->max);

return cpufreq_driver->fast_switch(policy, target_freq);
}

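The fix above works because clamp_val() returns the bounded value rather than modifying its argument, so the result must be assigned back; calling it on its own discards the clamped frequency. A small self-contained illustration of the same pattern, using a plain helper function rather than the kernel macro (names are illustrative):

/* Mimics the return-a-value semantics of the kernel's clamp_val(). */
static unsigned int clamp_uint(unsigned int val, unsigned int lo, unsigned int hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

static unsigned int pick_target_freq(unsigned int requested,
				     unsigned int policy_min,
				     unsigned int policy_max)
{
	/* Assigning the return value is the point of the fix; a bare
	 * clamp_uint(requested, ...) call would have no effect. */
	requested = clamp_uint(requested, policy_min, policy_max);
	return requested;
}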
@@ -449,7 +449,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
cpu->acpi_perf_data.states[0].core_frequency =
policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true;
pr_info("_PPC limits will be enforced\n");
pr_debug("_PPC limits will be enforced\n");

return;

@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
unsigned int unit;
u32 unit_size;
int ret;

if (!ctx->u.aes.key_len)

@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!req->info)
return -EINVAL;

for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
if (!(req->nbytes & (unit_size_map[unit].size - 1)))
break;
unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
if (req->nbytes <= unit_size_map[0].size) {
for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
unit_size = unit_size_map[unit].value;
break;
}
}
}

if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
(ctx->u.aes.key_len != AES_KEYSIZE_128)) {
/* Use the fallback to process the request for any
 * unsupported unit sizes or key sizes

@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
: CCP_AES_ACTION_DECRYPT;
rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
rctx->cmd.u.xts.unit_size = unit_size;
rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
rctx->cmd.u.xts.iv = &rctx->iv_sg;

@@ -1986,7 +1986,7 @@ err_algs:
&dd->pdata->algs_info[i].algs_list[j]);
err_pm:
pm_runtime_disable(dev);
if (dd->polling_mode)
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
data_err:
dev_err(dev, "initialization failed.\n");

@@ -33,6 +33,7 @@
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>

#include <uapi/linux/dma-buf.h>

@@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
dmabuf = file->private_data;

/* check for overflowing the buffer's size */
if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
if (vma->vm_pgoff + vma_pages(vma) >
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;

@@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;

/* check for offset overflow */
if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
if (pgoff + vma_pages(vma) < pgoff)
return -EOVERFLOW;

/* check for overflowing the buffer's size */
if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
if (pgoff + vma_pages(vma) >
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;

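The conversion above is a readability change: vma_pages(vma) expands to the same (vma->vm_end - vma->vm_start) >> PAGE_SHIFT computation that was previously open-coded, so the overflow and size checks behave identically. A stripped-down sketch of those checks, with plain integers standing in for the kernel structures and an assumed 4 KiB page size:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT_SKETCH 12 /* assumption: 4 KiB pages */

/* Hypothetical stand-ins for vma->vm_pgoff, the vma extent and dmabuf->size. */
static bool mmap_fits_buffer(uint64_t pgoff, uint64_t vm_start, uint64_t vm_end,
			     uint64_t buf_size)
{
	uint64_t nr_pages = (vm_end - vm_start) >> PAGE_SHIFT_SKETCH; /* vma_pages() */

	if (pgoff + nr_pages < pgoff)                            /* offset overflow */
		return false;
	if (pgoff + nr_pages > (buf_size >> PAGE_SHIFT_SKETCH))  /* past end of buffer */
		return false;
	return true;
}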
@@ -35,6 +35,17 @@
#include <linux/reservation.h>
#include <linux/export.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */

DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class);
const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
/*
 * Reserve space to add a shared fence to a reservation_object,
 * must be called with obj->lock held.

/**
 * reservation_object_reserve_shared - Reserve space to add a shared
 * fence to a reservation_object.
 * @obj: reservation object
 *
 * Should be called before reservation_object_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int reservation_object_reserve_shared(struct reservation_object *obj)
{

@@ -180,7 +199,11 @@ done:
fence_put(old_fence);
}

/*
/**
 * reservation_object_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, obj->lock must be held, and
 * reservation_object_reserve_shared_fence has been called.
 */

@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);

/**
 * reservation_object_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 */
void reservation_object_add_excl_fence(struct reservation_object *obj,
struct fence *fence)
{

@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);

/**
 * reservation_object_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * RETURNS
 * Zero or -errno
 */
int reservation_object_get_fences_rcu(struct reservation_object *obj,
struct fence **pfence_excl,
unsigned *pshared_count,

@@ -319,6 +361,18 @@ unlock:
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);

/**
 * reservation_object_wait_timeout_rcu - Wait on reservation's objects
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zer on success.
 */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)

@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
return ret;
}

/**
 * reservation_object_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{

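The new kerneldoc above spells out the write-side calling convention: reserve a shared slot first, then add the fence, all while obj->lock is held. A condensed sketch of that sequence follows; it assumes the fence already exists, trims error handling, and uses a plain ww_mutex_lock with a NULL acquire context purely for illustration, so treat it as an outline rather than a reference implementation.

#include <linux/reservation.h>
#include <linux/fence.h>
#include <linux/ww_mutex.h>

/* Minimal write-side sketch: attach one shared fence to a reservation
 * object in the order the documentation describes. */
static int attach_shared_fence_sketch(struct reservation_object *obj,
				      struct fence *fence)
{
	int ret;

	ww_mutex_lock(&obj->lock, NULL);              /* obj->lock must be held */

	ret = reservation_object_reserve_shared(obj); /* make room first */
	if (!ret)
		reservation_object_add_shared_fence(obj, fence);

	ww_mutex_unlock(&obj->lock);
	return ret;
}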
@@ -29,7 +29,6 @@
#include <mach/hardware.h>
#include <mach/platform.h>
#include <mach/irqs.h>

#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)

@@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
{
return IRQ_LPC32XX_P0_P1_IRQ;
}

static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
IRQ_LPC32XX_GPIO_00,
IRQ_LPC32XX_GPIO_01,
IRQ_LPC32XX_GPIO_02,
IRQ_LPC32XX_GPIO_03,
IRQ_LPC32XX_GPIO_04,
IRQ_LPC32XX_GPIO_05,
};

static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
{
if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
return -ENXIO;
}

static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
IRQ_LPC32XX_GPI_00,
IRQ_LPC32XX_GPI_01,
IRQ_LPC32XX_GPI_02,
IRQ_LPC32XX_GPI_03,
IRQ_LPC32XX_GPI_04,
IRQ_LPC32XX_GPI_05,
IRQ_LPC32XX_GPI_06,
IRQ_LPC32XX_GPI_07,
IRQ_LPC32XX_GPI_08,
IRQ_LPC32XX_GPI_09,
-ENXIO, /* 10 */
-ENXIO, /* 11 */
-ENXIO, /* 12 */
-ENXIO, /* 13 */
-ENXIO, /* 14 */
-ENXIO, /* 15 */
-ENXIO, /* 16 */
-ENXIO, /* 17 */
-ENXIO, /* 18 */
IRQ_LPC32XX_GPI_19,
-ENXIO, /* 20 */
-ENXIO, /* 21 */
-ENXIO, /* 22 */
-ENXIO, /* 23 */
-ENXIO, /* 24 */
-ENXIO, /* 25 */
-ENXIO, /* 26 */
-ENXIO, /* 27 */
IRQ_LPC32XX_GPI_28,
};
static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
{
return -ENXIO;
}

static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
{
if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
return -ENXIO;
}

Some files were not shown because too many files changed in this diff.