Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Merge net into net-next because some upcoming net-next changes build on top of bug fixes that went into net.

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit e6ff4c75f9
@@ -191,9 +191,11 @@ Linux it will look something like this:
 	};

 The bootargs property contains the kernel arguments, and the initrd-*
-properties define the address and size of an initrd blob.  The
-chosen node may also optionally contain an arbitrary number of
-additional properties for platform-specific configuration data.
+properties define the address and size of an initrd blob.  Note that
+initrd-end is the first address after the initrd image, so this doesn't
+match the usual semantic of struct resource.  The chosen node may also
+optionally contain an arbitrary number of additional properties for
+platform-specific configuration data.

 During early boot, the architecture setup code calls of_scan_flat_dt()
 several times with different helper callbacks to parse device tree
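As an aside on the properties this hunk documents: on a running DT-based
system the chosen node is exposed through procfs, so the values can be
inspected directly.  A minimal sketch (assuming /proc/device-tree is
available and the firmware used the common "linux,initrd-*" property
names):

	# Kernel arguments passed via the chosen node:
	cat /proc/device-tree/chosen/bootargs

	# Raw cell values of the initrd boundaries (big-endian byte dumps):
	hexdump -C "/proc/device-tree/chosen/linux,initrd-start"
	hexdump -C "/proc/device-tree/chosen/linux,initrd-end"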
@@ -3005,6 +3005,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Force threading of all interrupt handlers except those
 			marked explicitly IRQF_NO_THREAD.

+	tmem		[KNL,XEN]
+			Enable the Transcendent memory driver if built-in.
+
+	tmem.cleancache=0|1 [KNL, XEN]
+			Default is on (1). Disable the usage of the cleancache
+			API to send anonymous pages to the hypervisor.
+
+	tmem.frontswap=0|1 [KNL, XEN]
+			Default is on (1). Disable the usage of the frontswap
+			API to send swap pages to the hypervisor. If disabled
+			the selfballooning and selfshrinking are force disabled.
+
+	tmem.selfballooning=0|1 [KNL, XEN]
+			Default is on (1). Disable the driving of swap pages
+			to the hypervisor.
+
+	tmem.selfshrinking=0|1 [KNL, XEN]
+			Default is on (1). Partial swapoff that immediately
+			transfers pages from Xen hypervisor back to the
+			kernel based on different criteria.
+
 	topology=	[S390]
 			Format: {off | on}
 			Specify if the kernel should make use of the cpu
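As an illustration of how these switches are passed: they go on the kernel
command line.  The GRUB file below is an assumed example, not part of the
patch:

	# e.g. in /etc/default/grub (illustrative), then regenerate grub.cfg:
	GRUB_CMDLINE_LINUX="tmem tmem.frontswap=0 tmem.selfshrinking=0"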
@@ -0,0 +1,202 @@
REDUCING OS JITTER DUE TO PER-CPU KTHREADS

This document lists per-CPU kthreads in the Linux kernel and presents
options to control their OS jitter.  Note that non-per-CPU kthreads are
not listed here.  To reduce OS jitter from non-per-CPU kthreads, bind
them to a "housekeeping" CPU dedicated to such work.


REFERENCES

o	Documentation/IRQ-affinity.txt:  Binding interrupts to sets of CPUs.

o	Documentation/cgroups:  Using cgroups to bind tasks to sets of CPUs.

o	man taskset:  Using the taskset command to bind tasks to sets
	of CPUs.

o	man sched_setaffinity:  Using the sched_setaffinity() system
	call to bind tasks to sets of CPUs.

o	/sys/devices/system/cpu/cpuN/online:  Control CPU N's hotplug state,
	writing "0" to offline and "1" to online.

o	In order to locate kernel-generated OS jitter on CPU N:

		cd /sys/kernel/debug/tracing
		echo 1 > max_graph_depth # Increase the "1" for more detail
		echo function_graph > current_tracer
		# run workload
		cat per_cpu/cpuN/trace

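Tying the references above together, a hypothetical housekeeping setup
might look like this (PID 1234 and IRQ 42 are illustrative; CPU 0 is the
housekeeping CPU):

	taskset -cp 0 1234                   # keep task 1234 on CPU 0
	echo 1 > /proc/irq/42/smp_affinity   # route IRQ 42 to CPU 0 (hex CPU mask)
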
KTHREADS

Name: ehca_comp/%u
Purpose: Periodically process Infiniband-related work.
To reduce its OS jitter, do any of the following:
1.	Don't use eHCA Infiniband hardware, instead choosing hardware
	that does not require per-CPU kthreads.  This will prevent these
	kthreads from being created in the first place.  (This will
	work for most people, as this hardware, though important, is
	relatively old and is produced in relatively low unit volumes.)
2.	Do all eHCA-Infiniband-related work on other CPUs, including
	interrupts.
3.	Rework the eHCA driver so that its per-CPU kthreads are
	provisioned only on selected CPUs.

Name: irq/%d-%s
Purpose: Handle threaded interrupts.
To reduce its OS jitter, do the following:
1.	Use irq affinity to force the irq threads to execute on
	some other CPU.

Name: kcmtpd_ctr_%d
Purpose: Handle Bluetooth work.
To reduce its OS jitter, do one of the following:
1.	Don't use Bluetooth, in which case these kthreads won't be
	created in the first place.
2.	Use irq affinity to force Bluetooth-related interrupts to
	occur on some other CPU and furthermore initiate all
	Bluetooth activity on some other CPU.

Name: ksoftirqd/%u
Purpose: Execute softirq handlers when threaded or when under heavy load.
To reduce its OS jitter, each softirq vector must be handled
separately as follows:
TIMER_SOFTIRQ:  Do all of the following:
1.	To the extent possible, keep the CPU out of the kernel when it
	is non-idle, for example, by avoiding system calls and by forcing
	both kernel threads and interrupts to execute elsewhere.
2.	Build with CONFIG_HOTPLUG_CPU=y.  After boot completes, force
	the CPU offline, then bring it back online (sketched after this
	section).  This forces recurring timers to migrate elsewhere.
	If you are concerned with multiple CPUs, force them all offline
	before bringing the first one back online.  Once you have onlined
	the CPUs in question, do not offline any other CPUs, because doing
	so could force the timer back onto one of the CPUs in question.
NET_TX_SOFTIRQ and NET_RX_SOFTIRQ:  Do all of the following:
1.	Force networking interrupts onto other CPUs.
2.	Initiate any network I/O on other CPUs.
3.	Once your application has started, prevent CPU-hotplug operations
	from being initiated from tasks that might run on the CPU to
	be de-jittered.  (It is OK to force this CPU offline and then
	bring it back online before you start your application.)
BLOCK_SOFTIRQ:  Do all of the following:
1.	Force block-device interrupts onto some other CPU.
2.	Initiate any block I/O on other CPUs.
3.	Once your application has started, prevent CPU-hotplug operations
	from being initiated from tasks that might run on the CPU to
	be de-jittered.  (It is OK to force this CPU offline and then
	bring it back online before you start your application.)
BLOCK_IOPOLL_SOFTIRQ:  Do all of the following:
1.	Force block-device interrupts onto some other CPU.
2.	Initiate any block I/O and block-I/O polling on other CPUs.
3.	Once your application has started, prevent CPU-hotplug operations
	from being initiated from tasks that might run on the CPU to
	be de-jittered.  (It is OK to force this CPU offline and then
	bring it back online before you start your application.)
TASKLET_SOFTIRQ:  Do one or more of the following:
1.	Avoid use of drivers that use tasklets.  (Such drivers will contain
	calls to things like tasklet_schedule().)
2.	Convert all drivers that you must use from tasklets to workqueues.
3.	Force interrupts for drivers using tasklets onto other CPUs,
	and also do I/O involving these drivers on other CPUs.
SCHED_SOFTIRQ:  Do all of the following:
1.	Avoid sending scheduler IPIs to the CPU to be de-jittered,
	for example, ensure that at most one runnable kthread is present
	on that CPU.  If a thread that expects to run on the de-jittered
	CPU awakens, the scheduler will send an IPI that can result in
	a subsequent SCHED_SOFTIRQ.
2.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
	CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
	to be de-jittered is marked as an adaptive-ticks CPU using the
	"nohz_full=" boot parameter.  This reduces the number of
	scheduler-clock interrupts that the de-jittered CPU receives,
	minimizing its chances of being selected to do the load balancing
	work that runs in SCHED_SOFTIRQ context.
3.	To the extent possible, keep the CPU out of the kernel when it
	is non-idle, for example, by avoiding system calls and by
	forcing both kernel threads and interrupts to execute elsewhere.
	This further reduces the number of scheduler-clock interrupts
	received by the de-jittered CPU.
HRTIMER_SOFTIRQ:  Do all of the following:
1.	To the extent possible, keep the CPU out of the kernel when it
	is non-idle.  For example, avoid system calls and force both
	kernel threads and interrupts to execute elsewhere.
2.	Build with CONFIG_HOTPLUG_CPU=y.  Once boot completes, force the
	CPU offline, then bring it back online.  This forces recurring
	timers to migrate elsewhere.  If you are concerned with multiple
	CPUs, force them all offline before bringing the first one
	back online.  Once you have onlined the CPUs in question, do not
	offline any other CPUs, because doing so could force the timer
	back onto one of the CPUs in question.
RCU_SOFTIRQ:  Do at least one of the following:
1.	Offload callbacks and keep the CPU in either dyntick-idle or
	adaptive-ticks state by doing all of the following:
	a.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
		CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
		to be de-jittered is marked as an adaptive-ticks CPU using
		the "nohz_full=" boot parameter.  Bind the rcuo kthreads
		to housekeeping CPUs, which can tolerate OS jitter.
	b.	To the extent possible, keep the CPU out of the kernel
		when it is non-idle, for example, by avoiding system
		calls and by forcing both kernel threads and interrupts
		to execute elsewhere.
2.	Enable RCU to do its processing remotely via dyntick-idle by
	doing all of the following:
	a.	Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y.
	b.	Ensure that the CPU goes idle frequently, allowing other
		CPUs to detect that it has passed through an RCU quiescent
		state.  If the kernel is built with CONFIG_NO_HZ_FULL=y,
		userspace execution also allows other CPUs to detect that
		the CPU in question has passed through a quiescent state.
	c.	To the extent possible, keep the CPU out of the kernel
		when it is non-idle, for example, by avoiding system
		calls and by forcing both kernel threads and interrupts
		to execute elsewhere.

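A sketch of the offline/online cycle recommended above for TIMER_SOFTIRQ
and HRTIMER_SOFTIRQ (CPU 3 is an illustrative choice and
CONFIG_HOTPLUG_CPU=y is assumed; the adaptive-ticks advice additionally
needs "nohz_full=3" on the boot command line):

	echo 0 > /sys/devices/system/cpu/cpu3/online  # force CPU 3 offline
	echo 1 > /sys/devices/system/cpu/cpu3/online  # bring it back; recurring
	                                              # timers have migrated away
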
Name: rcuc/%u
Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels.
To reduce its OS jitter, do at least one of the following:
1.	Build the kernel with CONFIG_PREEMPT=n.  This prevents these
	kthreads from being created in the first place, and also obviates
	the need for RCU priority boosting.  This approach is feasible
	for workloads that do not require high degrees of responsiveness.
2.	Build the kernel with CONFIG_RCU_BOOST=n.  This prevents these
	kthreads from being created in the first place.  This approach
	is feasible only if your workload never requires RCU priority
	boosting, for example, if you ensure frequent idle time on all
	CPUs that might execute within the kernel.
3.	Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y,
	which offloads all RCU callbacks to kthreads that can be moved
	off of CPUs susceptible to OS jitter.  This approach prevents the
	rcuc/%u kthreads from having any work to do, so that they are
	never awakened.
4.	Ensure that the CPU never enters the kernel, and, in particular,
	avoid initiating any CPU hotplug operations on this CPU.  This is
	another way of preventing any callbacks from being queued on the
	CPU, again preventing the rcuc/%u kthreads from having any work
	to do.

Name: rcuob/%d, rcuop/%d, and rcuos/%d
Purpose: Offload RCU callbacks from the corresponding CPU.
To reduce its OS jitter, do at least one of the following:
1.	Use affinity, cgroups, or other mechanism to force these kthreads
	to execute on some other CPU.
2.	Build with CONFIG_RCU_NOCB_CPUS=n, which will prevent these
	kthreads from being created in the first place.  However, please
	note that this will not eliminate OS jitter, but will instead
	shift it to RCU_SOFTIRQ.

Name: watchdog/%u
Purpose: Detect software lockups on each CPU.
To reduce its OS jitter, do at least one of the following:
1.	Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these
	kthreads from being created in the first place.
2.	Echo a zero to /proc/sys/kernel/watchdog to disable the
	watchdog timer.
3.	Echo a large number to /proc/sys/kernel/watchdog_thresh in
	order to reduce the frequency of OS jitter due to the watchdog
	timer down to a level that is acceptable for your workload.
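Options 2 and 3 above as commands (the threshold value is illustrative):

	echo 0 > /proc/sys/kernel/watchdog           # option 2: disable the watchdog
	echo 60 > /proc/sys/kernel/watchdog_thresh   # option 3: make it fire less often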
@@ -268,7 +268,7 @@ situations.
 System Power Management Phases
 ------------------------------
 Suspending or resuming the system is done in several phases.  Different phases
-are used for standby or memory sleep states ("suspend-to-RAM") and the
+are used for freeze, standby, and memory sleep states ("suspend-to-RAM") and the
 hibernation state ("suspend-to-disk").  Each phase involves executing callbacks
 for every device before the next phase begins.  Not all busses or classes
 support all these callbacks and not all drivers use all the callbacks.  The

@@ -309,7 +309,8 @@ execute the corresponding method from dev->driver->pm instead if there is one.

 Entering System Suspend
 -----------------------
-When the system goes into the standby or memory sleep state, the phases are:
+When the system goes into the freeze, standby or memory sleep state,
+the phases are:

 	prepare, suspend, suspend_late, suspend_noirq.

@@ -368,7 +369,7 @@ the devices that were suspended.

 Leaving System Suspend
 ----------------------
-When resuming from standby or memory sleep, the phases are:
+When resuming from freeze, standby or memory sleep, the phases are:

 	resume_noirq, resume_early, resume, complete.

@@ -433,8 +434,8 @@ the system log.

 Entering Hibernation
 --------------------
-Hibernating the system is more complicated than putting it into the standby or
-memory sleep state, because it involves creating and saving a system image.
+Hibernating the system is more complicated than putting it into the other
+sleep states, because it involves creating and saving a system image.
 Therefore there are more phases for hibernation, with a different set of
 callbacks.  These phases always run after tasks have been frozen and memory has
 been freed.

@@ -485,8 +486,8 @@ image forms an atomic snapshot of the system state.

 At this point the system image is saved, and the devices then need to be
 prepared for the upcoming system shutdown.  This is much like suspending them
-before putting the system into the standby or memory sleep state, and the phases
-are similar.
+before putting the system into the freeze, standby or memory sleep state,
+and the phases are similar.

 	9.	The prepare phase is discussed above.

@@ -7,8 +7,8 @@ running. The interface exists in /sys/power/ directory (assuming sysfs
 is mounted at /sys).

 /sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'standby'
-(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
+returns what states are supported, which is hard-coded to 'freeze',
+'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
 (Suspend-to-Disk).

 Writing to this file one of those strings causes the system to
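A sketch of this interface in use (run as root; which strings actually
appear depends on platform support):

	cat /sys/power/state             # e.g. "freeze standby mem disk"
	echo freeze > /sys/power/state   # enter the new suspend-to-idle state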
@@ -15,8 +15,10 @@ A suspend/hibernation notifier may be used for this purpose.
 The subsystems or drivers having such needs can register suspend notifiers that
 will be called upon the following events by the PM core:

-PM_HIBERNATION_PREPARE	The system is going to hibernate or suspend, tasks will
-			be frozen immediately.
+PM_HIBERNATION_PREPARE	The system is going to hibernate, tasks will be frozen
+			immediately. This is different from PM_SUSPEND_PREPARE
+			below because here we do additional work between notifiers
+			and drivers freezing.

 PM_POST_HIBERNATION	The system memory state has been restored from a
 			hibernation image or an error occurred during
@@ -2,12 +2,26 @@
 			System Power Management States


-The kernel supports three power management states generically, though
-each is dependent on platform support code to implement the low-level
-details for each state. This file describes each state, what they are
+The kernel supports four power management states generically, though
+one is generic and the other three are dependent on platform support
+code to implement the low-level details for each state.
+This file describes each state, what they are
 commonly called, what ACPI state they map to, and what string to write
 to /sys/power/state to enter that state.

+State:		Freeze / Low-Power Idle
+ACPI State:	S0
+String:		"freeze"
+
+This state is a generic, pure software, light-weight, low-power state.
+It allows more energy to be saved relative to idle by freezing user
+space and putting all I/O devices into low-power states (possibly
+lower-power than available at run time), such that the processors can
+spend more time in their idle states.
+This state can be used for platforms without Standby/Suspend-to-RAM
+support, or it can be used in addition to Suspend-to-RAM (memory sleep)
+to provide reduced resume latency.
+

 State:		Standby / Power-On Suspend
 ACPI State:	S1

@@ -22,9 +36,6 @@ We try to put devices in a low-power state equivalent to D1, which
 also offers low power savings, but low resume latency. Not all devices
 support D1, and those that don't are left on.

-A transition from Standby to the On state should take about 1-2
-seconds.


 State:		Suspend-to-RAM
 ACPI State:	S3

@@ -42,9 +53,6 @@ transition back to the On state.
 For at least ACPI, STR requires some minimal boot-strapping code to
 resume the system from STR. This may be true on other platforms.

-A transition from Suspend-to-RAM to the On state should take about
-3-5 seconds.


 State:		Suspend-to-disk
 ACPI State:	S4

@@ -74,7 +82,3 @@ low-power state (like ACPI S4), or it may simply power down. Powering
 down offers greater savings, and allows this mechanism to work on any
 system. However, entering a real low-power state allows the user to
 trigger wake up events (e.g. pressing a key or opening a laptop lid).
-
-A transition from Suspend-to-Disk to the On state should take about 30
-seconds, though it's typically a bit more with the current
-implementation.
MAINTAINERS
@@ -3865,9 +3865,16 @@ M:	K. Y. Srinivasan <kys@microsoft.com>
 M:	Haiyang Zhang <haiyangz@microsoft.com>
 L:	devel@linuxdriverproject.org
 S:	Maintained
-F:	drivers/hv/
+F:	arch/x86/include/asm/mshyperv.h
+F:	arch/x86/include/uapi/asm/hyperv.h
+F:	arch/x86/kernel/cpu/mshyperv.c
+F:	drivers/hid/hid-hyperv.c
+F:	drivers/hv/
+F:	drivers/net/hyperv/
+F:	drivers/scsi/storvsc_drv.c
+F:	drivers/video/hyperv_fb.c
+F:	include/linux/hyperv.h
+F:	tools/hv/

 I2C OVER PARALLEL PORT
 M:	Jean Delvare <khali@linux-fr.org>

@@ -4641,12 +4648,13 @@ F:	include/linux/sunrpc/
 F:	include/uapi/linux/sunrpc/

 KERNEL VIRTUAL MACHINE (KVM)
-M:	Marcelo Tosatti <mtosatti@redhat.com>
 M:	Gleb Natapov <gleb@redhat.com>
+M:	Paolo Bonzini <pbonzini@redhat.com>
 L:	kvm@vger.kernel.org
-W:	http://kvm.qumranet.com
+W:	http://linux-kvm.org
 S:	Supported
-F:	Documentation/*/kvm.txt
+F:	Documentation/*/kvm*.txt
+F:	Documentation/virtual/kvm/
 F:	arch/*/kvm/
 F:	arch/*/include/asm/kvm*
 F:	include/linux/kvm*

@@ -4976,6 +4984,13 @@ S:	Maintained
 F:	Documentation/hwmon/lm90
 F:	drivers/hwmon/lm90.c

+LM95234 HARDWARE MONITOR DRIVER
+M:	Guenter Roeck <linux@roeck-us.net>
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+F:	Documentation/hwmon/lm95234
+F:	drivers/hwmon/lm95234.c
+
 LME2510 MEDIA DRIVER
 M:	Malcolm Priestley <tvboxspy@gmail.com>
 L:	linux-media@vger.kernel.org

@@ -5509,18 +5524,18 @@ F:	Documentation/networking/s2io.txt
 F:	Documentation/networking/vxge.txt
 F:	drivers/net/ethernet/neterion/

-NETFILTER/IPTABLES/IPCHAINS
-P:	Harald Welte
-P:	Jozsef Kadlecsik
+NETFILTER/IPTABLES
 M:	Pablo Neira Ayuso <pablo@netfilter.org>
 M:	Patrick McHardy <kaber@trash.net>
+M:	Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 L:	netfilter-devel@vger.kernel.org
 L:	netfilter@vger.kernel.org
 L:	coreteam@netfilter.org
 W:	http://www.netfilter.org/
 W:	http://www.iptables.org/
-T:	git git://1984.lsi.us.es/nf
-T:	git git://1984.lsi.us.es/nf-next
+Q:	http://patchwork.ozlabs.org/project/netfilter-devel/list/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
 S:	Supported
 F:	include/linux/netfilter*
 F:	include/linux/netfilter/

@@ -7854,7 +7869,7 @@ L:	linux-scsi@vger.kernel.org
 L:	target-devel@vger.kernel.org
 L:	http://groups.google.com/group/linux-iscsi-target-dev
 W:	http://www.linux-iscsi.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 S:	Supported
 F:	drivers/target/
 F:	include/target/

@@ -8182,6 +8197,13 @@ F:	drivers/mmc/host/sh_mobile_sdhi.c
 F:	include/linux/mmc/tmio.h
 F:	include/linux/mmc/sh_mobile_sdhi.h

+TMP401 HARDWARE MONITOR DRIVER
+M:	Guenter Roeck <linux@roeck-us.net>
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+F:	Documentation/hwmon/tmp401
+F:	drivers/hwmon/tmp401.c
+
 TMPFS (SHMEM FILESYSTEM)
 M:	Hugh Dickins <hughd@google.com>
 L:	linux-mm@kvack.org
Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Unicycling Gorilla

 # *DOCUMENTATION*
@@ -213,6 +213,9 @@ config USE_GENERIC_SMP_HELPERS
 config GENERIC_SMP_IDLE_THREAD
 	bool

+config GENERIC_IDLE_POLL_SETUP
+	bool
+
 # Select if arch init_task initializer is different to init/init_task.c
 config ARCH_INIT_TASK
 	bool
@@ -38,6 +38,7 @@ config ARM
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_IDE if PCI || ISA || PCMCIA
+	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO

@@ -488,7 +489,7 @@ config ARCH_IXP4XX
 config ARCH_DOVE
 	bool "Marvell Dove"
 	select ARCH_REQUIRE_GPIOLIB
-	select CPU_V7
+	select CPU_PJ4
 	select GENERIC_CLOCKEVENTS
 	select MIGHT_HAVE_PCI
 	select PINCTRL
@@ -15,8 +15,6 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>

-#include <linux/irqchip/arm-gic.h>
-
 #include <asm/mcpm.h>
 #include <asm/smp.h>
 #include <asm/smp_plat.h>

@@ -49,7 +47,6 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i
 static void __cpuinit mcpm_secondary_init(unsigned int cpu)
 {
 	mcpm_cpu_powered_up();
-	gic_secondary_init(0);
 }

 #ifdef CONFIG_HOTPLUG_CPU
@@ -199,7 +199,6 @@ CONFIG_USB_PHY=y
 CONFIG_USB_DEBUG=y
 CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=y

@@ -204,7 +204,6 @@ CONFIG_USB=y
 CONFIG_USB_DEBUG=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_DEVICEFS=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=y
 CONFIG_USB_WDM=y
 CONFIG_USB_STORAGE=y
@@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
						atomic64_t,		\
						counter),		\
-					      (unsigned long)(o),	\
-					      (unsigned long)(n)))
+					      (unsigned long long)(o),	\
+					      (unsigned long long)(n)))

 #define cmpxchg64_local(ptr, o, n)					\
 	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
					     local64_t,			\
					     a),			\
-				    (unsigned long)(o),			\
-				    (unsigned long)(n)))
+				    (unsigned long long)(o),		\
+				    (unsigned long long)(n)))

 #endif	/* __LINUX_ARM_ARCH__ >= 6 */

@@ -251,7 +251,7 @@ void __ref cpu_die(void)
 	 * this returns, power and/or clocks can be removed at any point
 	 * from this CPU and its cache by platform_cpu_kill().
 	 */
-	RCU_NONIDLE(complete(&cpu_died));
+	complete(&cpu_died);

 	/*
 	 * Ensure that the cache lines associated with that completion are
@@ -307,11 +307,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
 	}

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "missing register base\n");
-		return -ENOMEM;
-	}
-
 	emc_regbase = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(emc_regbase))
 		return PTR_ERR(emc_regbase);

@@ -381,11 +381,6 @@ static int s3c_adc_probe(struct platform_device *pdev)
 	}

 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!regs) {
-		dev_err(dev, "failed to find registers\n");
-		return -ENXIO;
-	}
-
 	adc->regs = devm_ioremap_resource(dev, regs);
 	if (IS_ERR(adc->regs))
 		return PTR_ERR(adc->regs);
@@ -152,11 +152,12 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

-static int __init xen_secondary_init(unsigned int cpu)
+static void __init xen_percpu_init(void *unused)
 {
 	struct vcpu_register_vcpu_info info;
 	struct vcpu_info *vcpup;
 	int err;
+	int cpu = get_cpu();

 	pr_info("Xen: initializing cpu%d\n", cpu);
 	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

@@ -165,14 +166,10 @@ static int __init xen_secondary_init(unsigned int cpu)
 	info.offset = offset_in_page(vcpup);

 	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
-	if (err) {
-		pr_debug("register_vcpu_info failed: err=%d\n", err);
-	} else {
-		/* This cpu is using the registered vcpu info, even if
-		   later ones fail to. */
-		per_cpu(xen_vcpu, cpu) = vcpup;
-	}
-	return 0;
+	BUG_ON(err);
+	per_cpu(xen_vcpu, cpu) = vcpup;
+
+	enable_percpu_irq(xen_events_irq, 0);
 }

 static void xen_restart(char str, const char *cmd)

@@ -208,7 +205,6 @@ static int __init xen_guest_init(void)
 	const char *version = NULL;
 	const char *xen_prefix = "xen,xen-";
 	struct resource res;
-	int i;

 	node = of_find_compatible_node(NULL, NULL, "xen,xen");
 	if (!node) {

@@ -265,19 +261,23 @@ static int __init xen_guest_init(void)
 					       sizeof(struct vcpu_info));
 	if (xen_vcpu_info == NULL)
 		return -ENOMEM;
-	for_each_online_cpu(i)
-		xen_secondary_init(i);

 	gnttab_init();
 	if (!xen_initial_domain())
 		xenbus_probe(NULL);

+	return 0;
+}
+core_initcall(xen_guest_init);
+
+static int __init xen_pm_init(void)
+{
 	pm_power_off = xen_power_off;
 	arm_pm_restart = xen_restart;

 	return 0;
 }
-core_initcall(xen_guest_init);
+subsys_initcall(xen_pm_init);

 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {

@@ -285,11 +285,6 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
 	return IRQ_HANDLED;
 }

-static __init void xen_percpu_enable_events(void *unused)
-{
-	enable_percpu_irq(xen_events_irq, 0);
-}
-
 static int __init xen_init_events(void)
 {
 	if (!xen_domain() || xen_events_irq < 0)

@@ -303,7 +298,7 @@ static int __init xen_init_events(void)
 		return -EINVAL;
 	}

-	on_each_cpu(xen_percpu_enable_events, NULL, 0);
+	on_each_cpu(xen_percpu_init, NULL, 0);

 	return 0;
 }
@@ -122,8 +122,6 @@ endmenu

 menu "Kernel Features"

-source "kernel/time/Kconfig"
-
 config ARM64_64K_PAGES
 	bool "Enable 64KB pages support"
 	help
@@ -82,7 +82,7 @@

 	.macro	enable_dbg_if_not_stepping, tmp
 	mrs	\tmp, mdscr_el1
-	tbnz	\tmp, #1, 9990f
+	tbnz	\tmp, #0, 9990f
 	enable_dbg
 9990:
 	.endm
@@ -136,8 +136,6 @@ void disable_debug_monitors(enum debug_el el)
 */
 static void clear_os_lock(void *unused)
 {
-	asm volatile("msr mdscr_el1, %0" : : "r" (0));
-	isb();
 	asm volatile("msr oslar_el1, %0" : : "r" (0));
 	isb();
 }
@@ -95,7 +95,7 @@ static void early_write(struct console *con, const char *s, unsigned n)
 	}
 }

-static struct console early_console = {
+static struct console early_console_dev = {
 	.name =		"earlycon",
 	.write =	early_write,
 	.flags =	CON_PRINTBUFFER | CON_BOOT,

@@ -145,7 +145,8 @@ static int __init setup_early_printk(char *buf)
 		early_base = early_io_map(paddr, EARLYCON_IOBASE);

 	printch = match->printch;
-	register_console(&early_console);
+	early_console = &early_console_dev;
+	register_console(&early_console_dev);

 	return 0;
 }
@@ -282,12 +282,13 @@ void __init setup_arch(char **cmdline_p)
 #endif
 }

-static int __init arm64_of_clk_init(void)
+static int __init arm64_device_init(void)
 {
 	of_clk_init(NULL);
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 	return 0;
 }
-arch_initcall(arm64_of_clk_init);
+arch_initcall(arm64_device_init);

 static DEFINE_PER_CPU(struct cpu, cpu_data);

@@ -305,13 +306,6 @@ static int __init topology_init(void)
 }
 subsys_initcall(topology_init);

-static int __init arm64_device_probe(void)
-{
-	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-	return 0;
-}
-device_initcall(arm64_device_probe);
-
 static const char *hwcap_str[] = {
 	"fp",
 	"asimd",
@@ -52,7 +52,7 @@ loop1:
 	add	x2, x2, #4			// add 4 (line length offset)
 	mov	x4, #0x3ff
 	and	x4, x4, x1, lsr #3		// find maximum number on the way size
-	clz	x5, x4				// find bit position of way size increment
+	clz	w5, w4				// find bit position of way size increment
 	mov	x7, #0x7fff
 	and	x7, x7, x1, lsr #13		// extract max number of the index size
 loop2:

@@ -119,8 +119,7 @@ ENTRY(__cpu_setup)

 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
-	mov	x0, #1
-	msr	oslar_el1, x0			// Set the debug OS lock
+	msr	mdscr_el1, xzr			// Reset mdscr_el1
 	tlbi	vmalle1is			// invalidate I + D TLBs
 	/*
 	 * Memory region attributes for LPAE:
@@ -205,6 +205,11 @@ config ARCH_DISCONTIGMEM_ENABLE
 config ARCH_SPARSEMEM_ENABLE
 	def_bool n

+config NODES_SHIFT
+	int
+	default "2"
+	depends on NEED_MULTIPLE_NODES
+
 source "mm/Kconfig"

 config OWNERSHIP_TRACE
@@ -2,3 +2,4 @@
 generic-y	+= clkdev.h
 generic-y	+= exec.h
 generic-y	+= trace_clock.h
+generic-y	+= param.h

@@ -1,7 +0,0 @@
-#ifndef __ASM_AVR32_NUMNODES_H
-#define __ASM_AVR32_NUMNODES_H
-
-/* Max 4 nodes */
-#define NODES_SHIFT	2
-
-#endif /* __ASM_AVR32_NUMNODES_H */

@@ -1,9 +0,0 @@
-#ifndef __ASM_AVR32_PARAM_H
-#define __ASM_AVR32_PARAM_H
-
-#include <uapi/asm/param.h>
-
-# define HZ		CONFIG_HZ
-# define USER_HZ	100		/* User interfaces are in "ticks" */
-# define CLOCKS_PER_SEC	(USER_HZ)	/* frequency at which times() counts */
-#endif /* __ASM_AVR32_PARAM_H */

@@ -33,3 +33,4 @@ header-y += termbits.h
 header-y += termios.h
 header-y += types.h
 header-y += unistd.h
+generic-y += param.h

@@ -1,18 +0,0 @@
-#ifndef _UAPI__ASM_AVR32_PARAM_H
-#define _UAPI__ASM_AVR32_PARAM_H
-
-
-#ifndef HZ
-# define HZ		100
-#endif
-
-/* TODO: Should be configurable */
-#define EXEC_PAGESIZE	4096
-
-#ifndef NOGROUP
-# define NOGROUP	(-1)
-#endif
-
-#define MAXHOSTNAMELEN	64
-
-#endif /* _UAPI__ASM_AVR32_PARAM_H */
@@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 		break;
 	case R_AVR32_GOT18SW:
 		if ((relocation & 0xfffe0003) != 0
-		    && (relocation & 0xfffc0003) != 0xffff0000)
+		    && (relocation & 0xfffc0000) != 0xfffc0000)
 			return reloc_overflow(module, "R_AVR32_GOT18SW",
 					      relocation);
 		relocation >>= 2;
@@ -31,6 +31,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-gpio.h>
 #include <asm/bootinfo.h>
+#include <asm/idle.h>
 #include <asm/reboot.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <prom.h>

@@ -36,6 +36,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>

+#include <asm/idle.h>
 #include <asm/processor.h>
 #include <asm/time.h>
 #include <asm/mach-au1x00/au1000.h>

@@ -19,6 +19,7 @@
 #include <linux/clk.h>

 #include <asm/bootinfo.h>
+#include <asm/idle.h>
 #include <asm/time.h>		/* for mips_hpt_frequency */
 #include <asm/reboot.h>		/* for _machine_{restart,halt} */
 #include <asm/mips_machine.h>

@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/leds.h>

+#include <asm/idle.h>
 #include <asm/processor.h>

 #include <cobalt.h>
@@ -228,7 +228,6 @@ CONFIG_HIDRAW=y
 CONFIG_USB_HID=y
 CONFIG_USB_SUPPORT=y
 CONFIG_USB=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_TT_NEWSCHED=y

@@ -344,7 +344,6 @@ CONFIG_UHID=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
 CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y

@@ -300,7 +300,6 @@ CONFIG_USB=y
 CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_OTG_WHITELIST=y
 CONFIG_USB_MON=y
 CONFIG_USB_EHCI_HCD=y
@@ -6,8 +6,6 @@
 #include <linux/seq_file.h>
 #include <linux/clk.h>

-extern void (*cpu_wait) (void);
-
 struct clk;

 struct clk_ops {
@@ -0,0 +1,23 @@
#ifndef __ASM_IDLE_H
#define __ASM_IDLE_H

#include <linux/linkage.h>

extern void (*cpu_wait)(void);
extern void r4k_wait(void);
extern asmlinkage void __r4k_wait(void);
extern void r4k_wait_irqoff(void);
extern void __pastwait(void);

static inline int using_rollback_handler(void)
{
	return cpu_wait == r4k_wait;
}

static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
{
	return addr >= (unsigned long)r4k_wait_irqoff &&
	       addr < (unsigned long)__pastwait;
}

#endif /* __ASM_IDLE_H */
@@ -118,7 +118,7 @@ static inline void set_io_port_base(unsigned long base)
 */
 static inline unsigned long virt_to_phys(volatile const void *address)
 {
-	return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
+	return __pa(address);
 }

 /*
@@ -336,7 +336,7 @@ enum emulation_result {
 #define VPN2_MASK		0xffffe000
 #define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
-#define TLB_ASID(x)		(ASID_MASK((x).tlb_hi))
+#define TLB_ASID(x)		((x).tlb_hi & ASID_MASK)
 #define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))

 struct kvm_mips_tlb {
@@ -67,68 +67,45 @@ extern unsigned long pgd_current[];
 	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

-#define ASID_INC(asid)						\
-({								\
-	unsigned long __asid = asid;				\
-	__asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t"		\
-	".section\t__asid_inc,\"a\"\n\t"			\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid)						\
-	:"0" (__asid));						\
-	__asid;							\
-})
-#define ASID_MASK(asid)						\
-({								\
-	unsigned long __asid = asid;				\
-	__asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t"	\
-	".section\t__asid_mask,\"a\"\n\t"			\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid)						\
-	:"r" (__asid));						\
-	__asid;							\
-})
-#define ASID_VERSION_MASK					\
-({								\
-	unsigned long __asid;					\
-	__asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t"	\
-	".section\t__asid_version_mask,\"a\"\n\t"		\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid));					\
-	__asid;							\
-})
-#define ASID_FIRST_VERSION					\
-({								\
-	unsigned long __asid = asid;				\
-	__asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t"		\
-	".section\t__asid_first_version,\"a\"\n\t"		\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid));					\
-	__asid;							\
-})
+#define ASID_INC	0x40
+#define ASID_MASK	0xfc0

-#define ASID_FIRST_VERSION_R3000	0x1000
-#define ASID_FIRST_VERSION_R4000	0x100
-#define ASID_FIRST_VERSION_R8000	0x1000
-#define ASID_FIRST_VERSION_RM9000	0x1000
+#elif defined(CONFIG_CPU_R8000)
+
+#define ASID_INC	0x10
+#define ASID_MASK	0xff0
+
+#elif defined(CONFIG_MIPS_MT_SMTC)
+
+#define ASID_INC	0x1
+extern unsigned long smtc_asid_mask;
+#define ASID_MASK	(smtc_asid_mask)
+#define HW_ASID_MASK	0xff
+/* End SMTC/34K debug hack */
+#else /* FIXME: not correct for R6000 */
+
+#define ASID_INC	0x1
+#define ASID_MASK	0xff

-#ifdef CONFIG_MIPS_MT_SMTC
-#define SMTC_HW_ASID_MASK	0xff
-extern unsigned int smtc_asid_mask;
 #endif

 #define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)	ASID_MASK(cpu_context((cpu), (mm)))
+#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }

+/*
+ * All unused by hardware upper bits will be considered
+ * as a software asid extension.
+ */
+#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
+#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
+
 #ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void

@@ -137,7 +114,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	extern void kvm_local_flush_tlb_all(void);
 	unsigned long asid = asid_cache(cpu);

-	if (!ASID_MASK((asid = ASID_INC(asid)))) {
+	if (! ((asid += ASID_INC) & ASID_MASK) ) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 #ifdef CONFIG_VIRTUALIZATION

@@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		 * free up the ASID value for use and flush any old
 		 * instances of it from the TLB.
 		 */
-		oldasid = ASID_MASK(read_c0_entryhi());
+		oldasid = (read_c0_entryhi() & ASID_MASK);
 		if(smtc_live_asid[mytlb][oldasid]) {
 			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 			if(smtc_live_asid[mytlb][oldasid] == 0)

@@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		 * having ASID_MASK smaller than the hardware maximum,
 		 * make sure no "soft" bits become "hard"...
 		 */
-		write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
				 cpu_asid(cpu, next));
		ehb(); /* Make sure it propagates to TCStatus */
		evpe(mtflags);

@@ -264,14 +241,14 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 #ifdef CONFIG_MIPS_MT_SMTC
	/* See comments for similar code above */
	mtflags = dvpe();
-	oldasid = ASID_MASK(read_c0_entryhi());
+	oldasid = read_c0_entryhi() & ASID_MASK;
	if(smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if(smtc_live_asid[mytlb][oldasid] == 0)
			 smtc_flush_tlb_asid(oldasid);
	}
	/* See comments for similar code above */
-	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 cpu_asid(cpu, next));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);

@@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 #ifdef CONFIG_MIPS_MT_SMTC
		/* See comments for similar code above */
		prevvpe = dvpe();
-		oldasid = ASID_MASK(read_c0_entryhi());
+		oldasid = (read_c0_entryhi() & ASID_MASK);
		if (smtc_live_asid[mytlb][oldasid]) {
			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
			if(smtc_live_asid[mytlb][oldasid] == 0)
				smtc_flush_tlb_asid(oldasid);
		}
		/* See comments for similar code above */
-		write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
+		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
				| cpu_asid(cpu, mm));
		ehb(); /* Make sure it propagates to TCStatus */
		evpe(prevvpe);
@@ -46,7 +46,6 @@
 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

 #include <linux/pfn.h>
-#include <asm/io.h>

 extern void build_clear_page(void);
 extern void build_copy_page(void);

@@ -151,6 +150,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 	((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
 #endif
 #define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+#include <asm/io.h>

 /*
  * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad

@@ -171,14 +171,13 @@ typedef struct { unsigned long pgprot; } pgprot_t;

 #ifdef CONFIG_FLATMEM

-#define pfn_valid(pfn)						\
-({								\
-	unsigned long __pfn = (pfn);				\
-	/* avoid <linux/bootmem.h> include hell */		\
-	extern unsigned long min_low_pfn;			\
-								\
-	__pfn >= min_low_pfn && __pfn < max_mapnr;		\
-})
+static inline int pfn_valid(unsigned long pfn)
+{
+	/* avoid <linux/mm.h> include hell */
+	extern unsigned long max_mapnr;
+
+	return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
+}

 #elif defined(CONFIG_SPARSEMEM)

@@ -28,7 +28,6 @@
 /*
  * System setup and hardware flags..
  */
-extern void (*cpu_wait)(void);

 extern unsigned int vced_count, vcei_count;

@@ -694,16 +694,17 @@
 #define __NR_process_vm_writev		(__NR_Linux + 305)
 #define __NR_kcmp			(__NR_Linux + 306)
 #define __NR_finit_module		(__NR_Linux + 307)
+#define __NR_getdents64			(__NR_Linux + 308)

 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls		307
+#define __NR_Linux_syscalls		308

 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */

 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		307
+#define __NR_64_Linux_syscalls		308

 #if _MIPS_SIM == _MIPS_SIM_NABI32

@@ -4,7 +4,7 @@

 extra-y		:= head.o vmlinux.lds

-obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
+obj-y		+= cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
		   prom.o ptrace.o reset.o setup.o signal.o syscall.o \
		   time.o topology.o traps.o unaligned.o watch.o vdso.o

@@ -27,105 +27,6 @@
 #include <asm/spram.h>
 #include <asm/uaccess.h>

-/*
- * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
- * the implementation of the "wait" feature differs between CPU families. This
- * points to the function that implements CPU specific wait.
- * The wait instruction stops the pipeline and reduces the power consumption of
- * the CPU very much.
- */
-void (*cpu_wait)(void);
-EXPORT_SYMBOL(cpu_wait);
-
-static void r3081_wait(void)
-{
-	unsigned long cfg = read_c0_conf();
-	write_c0_conf(cfg | R30XX_CONF_HALT);
-}
-
-static void r39xx_wait(void)
-{
-	local_irq_disable();
-	if (!need_resched())
-		write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
-	local_irq_enable();
-}
-
-extern void r4k_wait(void);
-
-/*
- * This variant is preferable as it allows testing need_resched and going to
- * sleep depending on the outcome atomically.  Unfortunately the "It is
- * implementation-dependent whether the pipeline restarts when a non-enabled
- * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
- * using this version a gamble.
- */
-void r4k_wait_irqoff(void)
-{
-	local_irq_disable();
-	if (!need_resched())
-		__asm__("	.set	push		\n"
-			"	.set	mips3		\n"
-			"	wait			\n"
-			"	.set	pop		\n");
-	local_irq_enable();
-	__asm__("	.globl __pastwait	\n"
-		"__pastwait:			\n");
-}
-
-/*
- * The RM7000 variant has to handle erratum 38.  The workaround is to not
- * have any pending stores when the WAIT instruction is executed.
- */
-static void rm7k_wait_irqoff(void)
-{
-	local_irq_disable();
-	if (!need_resched())
-		__asm__(
-		"	.set	push					\n"
-		"	.set	mips3					\n"
-		"	.set	noat					\n"
-		"	mfc0	$1, $12					\n"
-		"	sync						\n"
-		"	mtc0	$1, $12		# stalls until W stage	\n"
-		"	wait						\n"
-		"	mtc0	$1, $12		# stalls until W stage	\n"
-		"	.set	pop					\n");
-	local_irq_enable();
-}
-
-/*
- * The Au1xxx wait is available only if using 32khz counter or
- * external timer source, but specifically not CP0 Counter.
- * alchemy/common/time.c may override cpu_wait!
- */
-static void au1k_wait(void)
-{
-	__asm__("	.set	mips3			\n"
-		"	cache	0x14, 0(%0)		\n"
-		"	cache	0x14, 32(%0)		\n"
-		"	sync				\n"
-		"	nop				\n"
-		"	wait				\n"
-		"	nop				\n"
-		"	nop				\n"
-		"	nop				\n"
-		"	nop				\n"
-		"	.set	mips0			\n"
-		: : "r" (au1k_wait));
-}
-
-static int __initdata nowait;
-
-static int __init wait_disable(char *s)
-{
-	nowait = 1;
-
-	return 1;
-}
-
-__setup("nowait", wait_disable);
-
 static int __cpuinitdata mips_fpu_disabled;

 static int __init fpu_disable(char *s)

@@ -150,105 +51,6 @@ static int __init dsp_disable(char *s)

 __setup("nodsp", dsp_disable);

-void __init check_wait(void)
-{
-	struct cpuinfo_mips *c = &current_cpu_data;
-
-	if (nowait) {
-		printk("Wait instruction disabled.\n");
-		return;
-	}
-
-	switch (c->cputype) {
-	case CPU_R3081:
-	case CPU_R3081E:
-		cpu_wait = r3081_wait;
-		break;
-	case CPU_TX3927:
-		cpu_wait = r39xx_wait;
-		break;
-	case CPU_R4200:
-/*	case CPU_R4300: */
-	case CPU_R4600:
-	case CPU_R4640:
-	case CPU_R4650:
-	case CPU_R4700:
-	case CPU_R5000:
-	case CPU_R5500:
-	case CPU_NEVADA:
-	case CPU_4KC:
-	case CPU_4KEC:
-	case CPU_4KSC:
-	case CPU_5KC:
-	case CPU_25KF:
-	case CPU_PR4450:
-	case CPU_BMIPS3300:
-	case CPU_BMIPS4350:
-	case CPU_BMIPS4380:
-	case CPU_BMIPS5000:
-	case CPU_CAVIUM_OCTEON:
-	case CPU_CAVIUM_OCTEON_PLUS:
-	case CPU_CAVIUM_OCTEON2:
-	case CPU_JZRISC:
-	case CPU_LOONGSON1:
-	case CPU_XLR:
-	case CPU_XLP:
-		cpu_wait = r4k_wait;
-		break;
-
-	case CPU_RM7000:
-		cpu_wait = rm7k_wait_irqoff;
-		break;
-
-	case CPU_M14KC:
-	case CPU_M14KEC:
-	case CPU_24K:
-	case CPU_34K:
-	case CPU_1004K:
-		cpu_wait = r4k_wait;
-		if (read_c0_config7() & MIPS_CONF7_WII)
-			cpu_wait = r4k_wait_irqoff;
-		break;
-
-	case CPU_74K:
-		cpu_wait = r4k_wait;
-		if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
-			cpu_wait = r4k_wait_irqoff;
-		break;
-
-	case CPU_TX49XX:
-		cpu_wait = r4k_wait_irqoff;
-		break;
-	case CPU_ALCHEMY:
-		cpu_wait = au1k_wait;
-		break;
-	case CPU_20KC:
-		/*
-		 * WAIT on Rev1.0 has E1, E2, E3 and E16.
-		 * WAIT on Rev2.0 and Rev3.0 has E16.
-		 * Rev3.1 WAIT is nop, why bother
-		 */
-		if ((c->processor_id & 0xff) <= 0x64)
-			break;
-
-		/*
-		 * Another rev is incrementing c0_count at a reduced clock
-		 * rate while in WAIT mode.  So we basically have the choice
-		 * between using the cp0 timer as clocksource or avoiding
-		 * the WAIT instruction.  Until more details are known,
-		 * disable the use of WAIT for 20Kc entirely.
-		   cpu_wait = r4k_wait;
-		 */
-		break;
-	case CPU_RM9000:
-		if ((c->processor_id & 0x00ff) >= 0x40)
-			cpu_wait = r4k_wait;
-		break;
-	default:
-		break;
-	}
-}
-
 static inline void check_errata(void)
 {
	struct cpuinfo_mips *c = &current_cpu_data;
@@ -2,6 +2,7 @@
 #include <linux/bootmem.h>
 #include <linux/crash_dump.h>
 #include <asm/uaccess.h>
+#include <linux/slab.h>

 static int __init parse_savemaxmem(char *p)
 {
@@ -122,7 +122,7 @@ handle_vcei:
	__FINIT

	.align	5	/* 32 byte rollback region */
-LEAF(r4k_wait)
+LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */

@@ -146,14 +146,14 @@ LEAF(r4k_wait)
	jr	ra
	nop
	.set	pop
-	END(r4k_wait)
+	END(__r4k_wait)

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
-	PTR_LA	k1, r4k_wait
+	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, 9f

@@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
	.set	noreorder
	/* check if TLB contains a entry for EPC */
	MFC0	k1, CP0_ENTRYHI
-	andi	k1, 0xff	/* ASID_MASK patched at run-time!! */
+	andi	k1, 0xff	/* ASID_MASK */
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
@@ -0,0 +1,244 @@
/*
 * MIPS idle loop and WAIT instruction support.
 *
 * Copyright (C) xxxx the Anonymous
 * Copyright (C) 1994 - 2006 Ralf Baechle
 * Copyright (C) 2003, 2004 Maciej W. Rozycki
 * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>

/*
 * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
 * the implementation of the "wait" feature differs between CPU families. This
 * points to the function that implements CPU specific wait.
 * The wait instruction stops the pipeline and reduces the power consumption of
 * the CPU very much.
 */
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);

static void r3081_wait(void)
{
	unsigned long cfg = read_c0_conf();
	write_c0_conf(cfg | R30XX_CONF_HALT);
	local_irq_enable();
}

static void r39xx_wait(void)
{
	if (!need_resched())
		write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
	local_irq_enable();
}

void r4k_wait(void)
{
	local_irq_enable();
	__r4k_wait();
}

/*
 * This variant is preferable as it allows testing need_resched and going to
 * sleep depending on the outcome atomically. Unfortunately the "It is
 * implementation-dependent whether the pipeline restarts when a non-enabled
 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
 * using this version a gamble.
 */
void r4k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push		\n"
		"	.set	mips3		\n"
		"	wait			\n"
		"	.set	pop		\n");
	local_irq_enable();
	__asm__(
	"	.globl __pastwait	\n"
	"__pastwait:			\n");
}

/*
 * The RM7000 variant has to handle erratum 38. The workaround is to not
 * have any pending stores when the WAIT instruction is executed.
 */
static void rm7k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push					\n"
		"	.set	mips3					\n"
		"	.set	noat					\n"
		"	mfc0	$1, $12					\n"
		"	sync						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	wait						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	.set	pop					\n");
	local_irq_enable();
}

/*
 * The Au1xxx wait is available only if using 32khz counter or
 * external timer source, but specifically not CP0 Counter.
 * alchemy/common/time.c may override cpu_wait!
 */
static void au1k_wait(void)
{
	__asm__(
	"	.set	mips3			\n"
	"	cache	0x14, 0(%0)		\n"
	"	cache	0x14, 32(%0)		\n"
	"	sync				\n"
	"	nop				\n"
	"	wait				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	.set	mips0			\n"
	: : "r" (au1k_wait));
	local_irq_enable();
}

static int __initdata nowait;

static int __init wait_disable(char *s)
{
	nowait = 1;

	return 1;
}

__setup("nowait", wait_disable);

void __init check_wait(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	if (nowait) {
		printk("Wait instruction disabled.\n");
		return;
	}

	switch (c->cputype) {
	case CPU_R3081:
	case CPU_R3081E:
		cpu_wait = r3081_wait;
		break;
	case CPU_TX3927:
		cpu_wait = r39xx_wait;
		break;
	case CPU_R4200:
/*	case CPU_R4300: */
	case CPU_R4600:
	case CPU_R4640:
	case CPU_R4650:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5500:
	case CPU_NEVADA:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_4KSC:
	case CPU_5KC:
	case CPU_25KF:
	case CPU_PR4450:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_BMIPS5000:
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_JZRISC:
	case CPU_LOONGSON1:
	case CPU_XLR:
	case CPU_XLP:
		cpu_wait = r4k_wait;
		break;

	case CPU_RM7000:
		cpu_wait = rm7k_wait_irqoff;
		break;

	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
		cpu_wait = r4k_wait;
		if (read_c0_config7() & MIPS_CONF7_WII)
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_74K:
		cpu_wait = r4k_wait;
		if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_TX49XX:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_ALCHEMY:
		cpu_wait = au1k_wait;
		break;
	case CPU_20KC:
		/*
		 * WAIT on Rev1.0 has E1, E2, E3 and E16.
		 * WAIT on Rev2.0 and Rev3.0 has E16.
		 * Rev3.1 WAIT is nop, why bother
		 */
		if ((c->processor_id & 0xff) <= 0x64)
			break;

		/*
		 * Another rev is incrementing c0_count at a reduced clock
		 * rate while in WAIT mode. So we basically have the choice
		 * between using the cp0 timer as clocksource or avoiding
		 * the WAIT instruction. Until more details are known,
		 * disable the use of WAIT for 20Kc entirely.
		   cpu_wait = r4k_wait;
		 */
		break;
	case CPU_RM9000:
		if ((c->processor_id & 0x00ff) >= 0x40)
			cpu_wait = r4k_wait;
		break;
	default:
		break;
	}
}

static void smtc_idle_hook(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_idle_loop_hook(void);

	smtc_idle_loop_hook();
#endif
}

void arch_cpu_idle(void)
{
	smtc_idle_hook();
	if (cpu_wait)
		cpu_wait();
	else
		local_irq_enable();
}

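The comment above r4k_wait_irqoff() is the heart of this new file: testing need_resched() with interrupts enabled leaves a window in which a reschedule interrupt can arrive between the test and the WAIT, putting the CPU to sleep with work pending. A sketch of the two orderings, with stand-in stubs so it compiles on its own (wait_insn and the irq helpers are hypothetical, not kernel API):

#include <stdbool.h>

static volatile bool resched_pending;	/* set from an "IPI" */

static bool need_resched(void) { return resched_pending; }
static void wait_insn(void) { /* stand-in for the MIPS WAIT instruction */ }
static void local_irq_enable(void) { /* stand-in */ }

static void racy_idle(void)
{
	if (!need_resched())	/* an interrupt can land right here ... */
		wait_insn();	/* ... and we sleep with work pending */
}

static void atomic_idle(void)
{
	/* entered with interrupts disabled: the test and the WAIT are
	 * atomic with respect to the reschedule interrupt, but this
	 * relies on WAIT restarting on a non-enabled interrupt - the
	 * very behaviour the comment above calls a gamble */
	if (!need_resched())
		wait_insn();
	local_irq_enable();
}
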
@@ -207,7 +207,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void save_previous_kprobe(struct kprobe_ctlblk *kcb)

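The added NULL check makes the teardown idempotent: freeing only a still-set pointer and clearing it afterwards means a second call, or a call for a probe whose slot was never allocated, is a no-op instead of a double free. The same defensive pattern in isolation (free_slot is a hypothetical stand-in for free_insn_slot):

#include <stdlib.h>

static void free_slot(void *slot) { free(slot); }	/* stand-in */

static void release_once(void **slot)
{
	if (*slot) {
		free_slot(*slot);
		*slot = NULL;	/* poison so a repeat call is harmless */
	}
}
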
@@ -10,6 +10,7 @@
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/prom.h>

@@ -51,19 +51,6 @@ void arch_cpu_idle_dead(void)
}
#endif

void arch_cpu_idle(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	extern void smtc_idle_loop_hook(void);

	smtc_idle_loop_hook();
#endif
	if (cpu_wait)
		(*cpu_wait)();
	else
		local_irq_enable();
}

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

@@ -224,6 +211,9 @@ struct mips_frame_info {
	int		pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS

@@ -264,7 +254,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
#endif
}

static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*

@@ -288,6 +278,8 @@ static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
		return 0;
	return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)

@@ -350,7 +342,7 @@ static int get_frame_info(struct mips_frame_info *info)

	for (i = 0; i < max_insns; i++, ip++) {

		if (is_jal_jalr_jr_ins(ip))
		if (is_jump_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip))

@@ -393,15 +385,42 @@ err:

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;

	kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
#endif
	schedule_mfi.func = schedule;
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

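The !CONFIG_KALLSYMS fallback above leans on a MIPS encoding detail: schedule() is expected to begin with (or quickly reach) a j instruction to __schedule, and J_TARGET() reassembles that jump's absolute target from the 26-bit index field plus the high bits of the jump's own address. A standalone sketch of that decode, assuming the classic MIPS32 j encoding:

#include <stdint.h>

static uintptr_t j_target(uintptr_t pc, uint32_t insn)
{
	uint32_t index = insn & 0x03ffffffu;	/* 26-bit instr_index field */

	/* target = 256MB region of the jump itself | index scaled to bytes */
	return (pc & 0xf0000000u) | ((uintptr_t)index << 2);
}
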
@@ -423,4 +423,5 @@ sys_call_table:
	PTR	sys_process_vm_writev		/* 5305 */
	PTR	sys_kcmp
	PTR	sys_finit_module
	PTR	sys_getdents64
	.size	sys_call_table,.-sys_call_table

@@ -37,6 +37,7 @@
#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>

@@ -34,6 +34,7 @@
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mmu_context.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>

@@ -111,7 +112,7 @@ static int vpe0limit;
static int ipibuffers;
static int nostlb;
static int asidmask;
unsigned int smtc_asid_mask = 0xff;
unsigned long smtc_asid_mask = 0xff;

static int __init vpe0tcs(char *str)
{

@@ -858,7 +859,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
	unsigned long flags;
	int mtflags;
	unsigned long tcrestart;
	extern void r4k_wait_irqoff(void), __pastwait(void);
	int set_resched_flag = (type == LINUX_SMP_IPI &&
				action == SMP_RESCHEDULE_YOURSELF);

@@ -914,8 +914,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
	 */
	if (cpu_wait == r4k_wait_irqoff) {
		tcrestart = read_tc_c0_tcrestart();
		if (tcrestart >= (unsigned long)r4k_wait_irqoff
		    && tcrestart < (unsigned long)__pastwait) {
		if (address_is_in_r4k_wait_irqoff(tcrestart)) {
			write_tc_c0_tcrestart(__pastwait);
			tcstatus &= ~TCSTATUS_IXMT;
			write_tc_c0_tcstatus(tcstatus);

@@ -1395,7 +1394,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
	asid = asid_cache(cpu);

	do {
		if (!ASID_MASK(ASID_INC(asid))) {
		if (!((asid += ASID_INC) & ASID_MASK) ) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */

@@ -1414,7 +1413,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
				mips_ihb();
			}
			tcstat = read_tc_c0_tcstatus();
			smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
			smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
			if (!prevhalt)
				write_tc_c0_tchalt(0);
		}

@@ -1423,7 +1422,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
			asid = ASID_FIRST_VERSION;
		local_flush_tlb_all();	/* start new asid cycle */
	}
	} while (smtc_live_asid[tlb][ASID_MASK(asid)]);
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.

@@ -1461,7 +1460,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if (ASID_MASK(ehi) == asid) {
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * makiing sure all entries differ.

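Most of the churn in this file, and in the KVM and TLB files below, is a single mechanical conversion: ASID_MASK used to be a function-like macro whose value could be patched at run time, and it is now a plain constant again, so every ASID_MASK(x) becomes x & ASID_MASK and the increment folds into the expression. In sketch form (mask and increment values assumed to be the common R4000-style ones):

#define ASID_INC	0x1
#define ASID_MASK	0xff	/* assumed: low 8 bits of c0_entryhi */

static unsigned long next_asid(unsigned long asid)
{
	asid += ASID_INC;
	if (!(asid & ASID_MASK))	/* wrapped: the caller must flush
					 * and start a new ASID cycle */
		asid = 0;
	return asid;
}
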
@@ -41,6 +41,7 @@
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>

@@ -57,7 +58,6 @@
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];

@@ -1542,7 +1542,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
	extern char except_vec_vi, except_vec_vi_lui;
	extern char except_vec_vi_ori, except_vec_vi_end;
	extern char rollback_except_vec_vi;
	char *vec_start = (cpu_wait == r4k_wait) ?
	char *vec_start = using_rollback_handler() ?
		&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
	/*

@@ -1656,7 +1656,6 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;
	unsigned int hwrena = cpu_hwrena_impl_bits;
	unsigned long asid = 0;
#ifdef CONFIG_MIPS_MT_SMTC
	int secondaryTC = 0;
	int bootTC = (cpu == 0);

@@ -1740,9 +1739,8 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
	}
#endif /* CONFIG_MIPS_MT_SMTC */

	asid = ASID_FIRST_VERSION;
	cpu_data[cpu].asid_cache = asid;
	TLBMISS_HANDLER_SETUP();
	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

@@ -1814,10 +1812,8 @@ void __init trap_init(void)
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i;
	int rollback;

	check_wait();
	rollback = (cpu_wait == r4k_wait);

#if defined(CONFIG_KGDB)
	if (kgdb_early_setup)

@@ -1894,7 +1890,8 @@ void __init trap_init(void)
	if (board_be_init)
		board_be_init();

	set_except_vector(0, rollback ? rollback_handle_int : handle_int);
	set_except_vector(0, using_rollback_handler() ? rollback_handle_int
						      : handle_int);
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

@@ -525,16 +525,18 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
				       kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
				uint32_t nasid =
				    vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
				    &&
				    (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
				      != nasid)) {
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {

					kvm_debug
					    ("MTCz, change ASID from %#lx to %#lx\n",
					     ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
					     ASID_MASK(vcpu->arch.gprs[rt]));
					     kvm_read_c0_guest_entryhi(cop0) &
					     ASID_MASK,
					     vcpu->arch.gprs[rt] & ASID_MASK);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);

@@ -986,7 +988,8 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
		 * resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
						  (kvm_read_c0_guest_entryhi
						   (cop0) & ASID_MASK));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);

@@ -1151,7 +1154,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */

@@ -1198,7 +1201,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */

@@ -1243,7 +1246,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */

@@ -1287,7 +1290,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */

@@ -1356,7 +1359,7 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

@@ -1783,8 +1786,8 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu,
						  (va & VPN2_MASK) |
						  ASID_MASK(kvm_read_c0_guest_entryhi
							    (vcpu->arch.cop0)));
						  (kvm_read_c0_guest_entryhi
						   (vcpu->arch.cop0) & ASID_MASK));
		if (index < 0) {
			if (exccode == T_TLB_LD_MISS) {
				er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);

@@ -17,6 +17,8 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>

@@ -51,13 +53,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn);

uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}


uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)

@@ -84,7 +86,7 @@ void kvm_mips_dump_host_tlbs(void)
	old_pagemask = read_c0_pagemask();

	printk("HOST TLBs:\n");
	printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);

@@ -169,21 +171,27 @@ void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
	}
}

static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return;
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
	return;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */

@@ -207,7 +215,10 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
			gva);
		return KVM_INVALID_PAGE;
	}
	kvm_mips_map_page(vcpu->kvm, gfn);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

@@ -310,8 +321,11 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	kvm_mips_map_page(vcpu->kvm, gfn);
	kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];

@@ -389,8 +403,11 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
		pfn0 = 0;
		pfn1 = 0;
	} else {
		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];

@@ -428,7 +445,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
			(TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
			(TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
			index = i;
			break;
		}

@@ -626,7 +643,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
{
	unsigned long asid = asid_cache(cpu);

	if (!(ASID_MASK(ASID_INC(asid)))) {
	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache) {
			flush_icache_all();
		}

@@ -804,7 +821,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	if (!newasid) {
		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
			write_c0_entryhi(vcpu->arch.
					 preempt_entryhi & ASID_MASK);
			ehb();
		}
	} else {

@@ -816,11 +834,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(ASID_MASK(vcpu->arch.
							   guest_kernel_asid[cpu]));
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(ASID_MASK(vcpu->arch.
							   guest_user_asid[cpu]));
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

@@ -879,7 +899,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
		    kvm_mips_guest_tlb_lookup(vcpu,
					      ((unsigned long) opc & VPN2_MASK)
					      |
					      ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
					      (kvm_read_c0_guest_entryhi
					       (cop0) & ASID_MASK));
		if (index < 0) {
			kvm_err
			    ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",

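The kvm_mips_map_page() change above is a classic panic-to-error conversion: the helper now reports failure instead of taking the host down, and every caller gained an explicit < 0 check. The shape of it, reduced to a self-contained sketch (lookup_pfn is a toy stand-in for kvm_mips_gfn_to_pfn):

#include <stdio.h>

#define INVALID_PFN ((unsigned long)-1)

static unsigned long lookup_pfn(unsigned long gfn)
{
	return gfn < 16 ? 0x1000 + gfn : INVALID_PFN;	/* toy backing map */
}

static int map_page(unsigned long *pmap, unsigned long gfn)
{
	unsigned long pfn;

	if (pmap[gfn] != INVALID_PFN)
		return 0;			/* already mapped */

	pfn = lookup_pfn(gfn);
	if (pfn == INVALID_PFN) {
		fprintf(stderr, "couldn't get pfn for gfn %#lx\n", gfn);
		return -1;			/* report, don't panic */
	}

	pmap[gfn] = pfn;
	return 0;
}
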
@@ -144,10 +144,6 @@ static int gptu_probe(struct platform_device *pdev)
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get resource\n");
		return -ENOMEM;
	}

	/* remap gptu register range */
	gptu_membase = devm_ioremap_resource(&pdev->dev, res);

@@ -169,6 +165,8 @@ static int gptu_probe(struct platform_device *pdev)
	if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
		dev_err(&pdev->dev, "Failed to find magic\n");
		gptu_hwexit();
		clk_disable(clk);
		clk_put(clk);
		return -ENAVAIL;
	}

@@ -11,7 +11,6 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
#include <asm/mmu_context.h>

static inline const char *msk2str(unsigned int mask)
{

@@ -56,7 +55,7 @@ static void dump_tlb(int first, int last)
	s_pagemask = read_c0_pagemask();
	s_entryhi = read_c0_entryhi();
	s_index = read_c0_index();
	asid = ASID_MASK(s_entryhi);
	asid = s_entryhi & 0xff;

	for (i = first; i <= last; i++) {
		write_c0_index(i);

@@ -86,7 +85,7 @@ static void dump_tlb(int first, int last)

			printk("va=%0*lx asid=%02lx\n",
			       width, (entryhi & ~0x1fffUL),
			       ASID_MASK(entryhi));
			       entryhi & 0xff);
			printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
			       width,
			       (entrylo0 << 6) & PAGE_MASK, c0,

@@ -9,7 +9,6 @@
#include <linux/mm.h>

#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>

@@ -22,7 +21,7 @@ static void dump_tlb(int first, int last)
	unsigned int asid;
	unsigned long entryhi, entrylo0;

	asid = ASID_MASK(read_c0_entryhi());
	asid = read_c0_entryhi() & 0xfc0;

	for (i = first; i <= last; i++) {
		write_c0_index(i<<8);

@@ -36,7 +35,7 @@ static void dump_tlb(int first, int last)

		/* Unused entries have a virtual address of KSEG0.  */
		if ((entryhi & 0xffffe000) != 0x80000000
		    && (ASID_MASK(entryhi) == asid)) {
		    && (entryhi & 0xfc0) == asid) {
			/*
			 * Only print entries in use
			 */

@@ -45,7 +44,7 @@ static void dump_tlb(int first, int last)
			printk("va=%08lx asid=%08lx"
			       "  [pa=%06lx n=%d d=%d v=%d g=%d]",
			       (entryhi & 0xffffe000),
			       ASID_MASK(entryhi),
			       entryhi & 0xfc0,
			       entrylo0 & PAGE_MASK,
			       (entrylo0 & (1 << 11)) ? 1 : 0,
			       (entrylo0 & (1 << 10)) ? 1 : 0,

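Note the constants the two dump_tlb() variants above now use in place of the old ASID_MASK() macro: R4000-style TLBs keep the ASID in EntryHi bits 7:0, while R3000-style TLBs keep it in bits 11:6, which is why the r3k code masks with 0xfc0 and leaves the value left-shifted. Side by side (a sketch, not kernel API):

#include <stdint.h>

static unsigned int r4k_asid(uint64_t entryhi)
{
	return entryhi & 0xff;		/* bits 7:0 */
}

static unsigned int r3k_asid(uint32_t entryhi)
{
	return entryhi & 0xfc0;		/* bits 11:6, still shifted by 6 */
}
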
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/pm.h>

#include <asm/idle.h>
#include <asm/reboot.h>

#include <loongson.h>

@@ -9,6 +9,7 @@

#include <linux/io.h>
#include <linux/pm.h>
#include <asm/idle.h>
#include <asm/reboot.h>

#include <loongson1.h>

@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
#endif

	local_irq_save(flags);
	old_ctx = ASID_MASK(read_c0_entryhi());
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entrylo0(0);
	entry = r3k_have_wired_reg ? read_c0_wired() : 8;
	for (; entry < current_cpu_data.tlbsize; entry++) {

@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,

#ifdef DEBUG_TLB
	printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
		ASID_MASK(cpu_context(cpu, mm)), start, end);
		cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= current_cpu_data.tlbsize) {
		int oldpid = ASID_MASK(read_c0_entryhi());
		int newpid = ASID_MASK(cpu_context(cpu, mm));
		int oldpid = read_c0_entryhi() & ASID_MASK;
		int newpid = cpu_context(cpu, mm) & ASID_MASK;

		start &= PAGE_MASK;
		end += PAGE_SIZE - 1;

@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
#ifdef DEBUG_TLB
	printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
	newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
	newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
	page &= PAGE_MASK;
	local_irq_save(flags);
	oldpid = ASID_MASK(read_c0_entryhi());
	oldpid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(page | newpid);
	BARRIER;
	tlb_probe();

@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
	if (current->active_mm != vma->vm_mm)
		return;

	pid = ASID_MASK(read_c0_entryhi());
	pid = read_c0_entryhi() & ASID_MASK;

#ifdef DEBUG_TLB
	if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
	if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
		       (cpu_context(cpu, vma->vm_mm)), pid);
	}

@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,

		local_irq_save(flags);
		/* Save old context and create impossible VPN2 value */
		old_ctx = ASID_MASK(read_c0_entryhi());
		old_ctx = read_c0_entryhi() & ASID_MASK;
		old_pagemask = read_c0_pagemask();
		w = read_c0_wired();
		write_c0_wired(w + 1);

@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#endif

		local_irq_save(flags);
		old_ctx = ASID_MASK(read_c0_entryhi());
		old_ctx = read_c0_entryhi() & ASID_MASK;
		write_c0_entrylo0(entrylo0);
		write_c0_entryhi(entryhi);
		write_c0_index(wired);

@@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)

	ENTER_CRITICAL(flags);

	pid = ASID_MASK(read_c0_entryhi());
	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);

@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
	if (current->active_mm != vma->vm_mm)
		return;

	pid = ASID_MASK(read_c0_entryhi());
	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	address &= PAGE_MASK;

@@ -29,7 +29,6 @@
#include <linux/init.h>
#include <linux/cache.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/war.h>

@@ -306,78 +305,6 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
static int check_for_high_segbits __cpuinitdata;
#endif

static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
					unsigned int i_const)
{
	unsigned int **p;

	for (p = start; p < stop; p++) {
#ifndef CONFIG_CPU_MICROMIPS
		unsigned int *ip;

		ip = *p;
		*ip = (*ip & 0xffff0000) | i_const;
#else
		unsigned short *ip;

		ip = ((unsigned short *)((unsigned int)*p - 1));
		if ((*ip & 0xf000) == 0x4000) {
			*ip &= 0xfff1;
			*ip |= (i_const << 1);
		} else if ((*ip & 0xf000) == 0x6000) {
			*ip &= 0xfff1;
			*ip |= ((i_const >> 2) << 1);
		} else {
			ip++;
			*ip = i_const;
		}
#endif
		local_flush_icache_range((unsigned long)ip,
					 (unsigned long)ip + sizeof(*ip));
	}
}

#define asid_insn_fixup(section, const)					\
	do {								\
		extern unsigned int *__start_ ## section;		\
		extern unsigned int *__stop_ ## section;		\
		insn_fixup(&__start_ ## section, &__stop_ ## section, const); \
	} while(0)

/*
 * Caller is assumed to flush the caches before the first context switch.
 */
static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
				 unsigned int version_mask,
				 unsigned int first_version)
{
	extern asmlinkage void handle_ri_rdhwr_vivt(void);
	unsigned long *vivt_exc;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Worst case optimised microMIPS addiu instructions support
	 * only a 3-bit immediate value.
	 */
	if(inc > 7)
		panic("Invalid ASID increment value!");
#endif
	asid_insn_fixup(__asid_inc, inc);
	asid_insn_fixup(__asid_mask, mask);
	asid_insn_fixup(__asid_version_mask, version_mask);
	asid_insn_fixup(__asid_first_version, first_version);

	/* Patch up the 'handle_ri_rdhwr_vivt' handler. */
	vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
#ifdef CONFIG_CPU_MICROMIPS
	vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
#endif
	vivt_exc++;
	*vivt_exc = (*vivt_exc & ~mask) | mask;

	current_cpu_data.asid_cache = first_version;
}

static int check_for_high_segbits __cpuinitdata;

static unsigned int kscratch_used_mask __cpuinitdata;

@@ -2256,7 +2183,6 @@ void __cpuinit build_tlb_refill_handler(void)
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
		if (cpu_has_local_ebase)
			build_r3000_tlb_refill_handler();
		if (!run_once) {

@@ -2282,11 +2208,6 @@ void __cpuinit build_tlb_refill_handler(void)
		break;

	default:
#ifndef CONFIG_MIPS_MT_SMTC
		setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
#else
		setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
#endif
		if (!run_once) {
			scratch_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT

@@ -37,6 +37,7 @@
#include <linux/pm.h>
#include <linux/bootmem.h>

#include <asm/idle.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <asm/bootinfo.h>

@@ -36,6 +36,7 @@
#include <linux/serial_8250.h>
#include <linux/pm.h>

#include <asm/idle.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <asm/bootinfo.h>

@@ -83,7 +83,7 @@ static inline unsigned char str2hexnum(unsigned char c)
	return 0; /* foo */
}

static inline int str2eaddr(unsigned char *ea, unsigned char *str)
int str2eaddr(unsigned char *ea, unsigned char *str)
{
	int index = 0;
	unsigned char num = 0;

@@ -12,6 +12,7 @@

#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/idle.h>
#include <asm/r4kcache.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>

@@ -55,4 +55,14 @@
			reg-shift = <2>;
		};
	};

	usb@101c0000 {
		compatible = "ralink,rt3050-usb", "snps,dwc2";
		reg = <0x101c0000 40000>;

		interrupt-parent = <&intc>;
		interrupts = <18>;

		status = "disabled";
	};
};

@@ -43,4 +43,8 @@
			reg = <0x50000 0x7b0000>;
		};
	};

	usb@101c0000 {
		status = "ok";
	};
};

@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/time.h>
#include <asm/reboot.h>
#include <asm/r4kcache.h>

@@ -27,6 +27,7 @@

#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/reboot.h>

@@ -9,6 +9,7 @@
#include <linux/kernel.h>

#include <asm/cacheflush.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>

@@ -245,7 +245,7 @@ config SMP

config IRQSTACKS
	bool "Use separate kernel stacks when processing interrupts"
	default n
	default y
	help
	  If you say Y here the kernel will use separate kernel stacks
	  for handling hard and soft interrupts.  This can help avoid

@@ -23,23 +23,20 @@ NM		= sh $(srctree)/arch/parisc/nm
CHECKFLAGS	+= -D__hppa__=1
LIBGCC		= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)

MACHINE		:= $(shell uname -m)
NATIVE		:= $(if $(filter parisc%,$(MACHINE)),1,0)

ifdef CONFIG_64BIT
UTS_MACHINE	:= parisc64
CHECKFLAGS	+= -D__LP64__=1 -m64
WIDTH		:= 64
CC_ARCHES	= hppa64
else # 32-bit
WIDTH		:=
CC_ARCHES	= hppa hppa2.0 hppa1.1
endif

# attempt to help out folks who are cross-compiling
ifeq ($(NATIVE),1)
CROSS_COMPILE	:= hppa$(WIDTH)-linux-
else
 ifneq ($(SUBARCH),$(UTS_MACHINE))
  ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE	:= hppa$(WIDTH)-linux-gnu-
CC_SUFFIXES	= linux linux-gnu unknown-linux-gnu
CROSS_COMPILE	:= $(call cc-cross-prefix, \
	$(foreach a,$(CC_ARCHES), \
	$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
  endif
 endif

@@ -11,10 +11,18 @@
#include <linux/threads.h>
#include <linux/irq.h>

#ifdef CONFIG_IRQSTACKS
#define __ARCH_HAS_DO_SOFTIRQ
#endif

typedef struct {
	unsigned int __softirq_pending;
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	unsigned int kernel_stack_usage;
#ifdef CONFIG_IRQSTACKS
	unsigned int irq_stack_usage;
	unsigned int irq_stack_counter;
#endif
#endif
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;

@@ -28,6 +36,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
#define __inc_irq_stat(member)	__this_cpu_inc(irq_stat.member)
#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING

@@ -63,10 +63,13 @@
 */
#ifdef __KERNEL__

#include <linux/spinlock_types.h>

#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */

union irq_stack_union {
	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
	raw_spinlock_t lock;
};

DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);

@@ -452,9 +452,41 @@
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

	/* Acquire pa_dbit_lock lock. */
	.macro		dbit_lock	spc,tmp,tmp1
#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,\spc,2f
	load32		PA(pa_dbit_lock),\tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
2:
#endif
	.endm

	/* Release pa_dbit_lock lock without reloading lock address. */
	.macro		dbit_unlock0	spc,tmp
#ifdef CONFIG_SMP
	or,COND(=)	%r0,\spc,%r0
	stw		\spc,0(\tmp)
#endif
	.endm

	/* Release pa_dbit_lock lock. */
	.macro		dbit_unlock1	spc,tmp
#ifdef CONFIG_SMP
	load32		PA(pa_dbit_lock),\tmp
	dbit_unlock0	\spc,\tmp
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_ptep	ptep,pte,tmp,tmp1
	.macro		update_ptep	spc,ptep,pte,tmp,tmp1
#ifdef CONFIG_SMP
	or,COND(=)	%r0,\spc,%r0
	LDREG		0(\ptep),\pte
#endif
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0

@@ -463,7 +495,11 @@

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptep,pte,tmp
	.macro		update_dirty	spc,ptep,pte,tmp
#ifdef CONFIG_SMP
	or,COND(=)	%r0,\spc,%r0
	LDREG		0(\ptep),\pte
#endif
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptep)

@@ -1111,11 +1147,13 @@ dtlb_miss_20w:

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1135,11 +1173,13 @@ nadtlb_miss_20w:

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1161,7 +1201,8 @@ dtlb_miss_11:

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

@@ -1172,6 +1213,7 @@ dtlb_miss_11:
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1192,7 +1234,8 @@ nadtlb_miss_11:

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

@@ -1204,6 +1247,7 @@ nadtlb_miss_11:
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1224,13 +1268,15 @@ dtlb_miss_20:

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt		pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1250,13 +1296,15 @@ nadtlb_miss_20:

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt		pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1357,11 +1405,13 @@ itlb_miss_20w:

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt		pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1379,11 +1429,13 @@ naitlb_miss_20w:

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt		pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1405,7 +1457,8 @@ itlb_miss_11:

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

@@ -1416,6 +1469,7 @@ itlb_miss_11:
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1427,7 +1481,8 @@ naitlb_miss_11:

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

@@ -1438,6 +1493,7 @@ naitlb_miss_11:
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1459,13 +1515,15 @@ itlb_miss_20:

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt		pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1477,13 +1535,15 @@ naitlb_miss_20:

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	update_ptep	ptp,pte,t0,t1
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt		pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

@@ -1507,29 +1567,13 @@ dbit_trap_20w:

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nolock_20w
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	LDCW		0(t0),t1
	cmpib,COND(=)	0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1
	dbit_lock	spc,t0,t1
	update_dirty	spc,ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot
#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nounlock_20w
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_20w:
#endif
	dbit_unlock0	spc,t0

	rfir
	nop

@@ -1543,18 +1587,8 @@ dbit_trap_11:

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nolock_11
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	LDCW		0(t0),t1
	cmpib,=		0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1
	dbit_lock	spc,t0,t1
	update_dirty	spc,ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

@@ -1565,13 +1599,7 @@ dbit_nolock_11:
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */
#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nounlock_11
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_11:
#endif
	dbit_unlock0	spc,t0

	rfir
	nop

@@ -1583,32 +1611,15 @@ dbit_trap_20:

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nolock_20
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	LDCW		0(t0),t1
	cmpib,=		0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1
	dbit_lock	spc,t0,t1
	update_dirty	spc,ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

	idtlbt		pte,prot

#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nounlock_20
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_20:
#endif
	dbit_unlock0	spc,t0

	rfir
	nop

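The dbit_lock/dbit_unlock0 macros that replace the open-coded spinning above are built around PA-RISC's LDCW, which atomically loads a word and zeroes it; the lock is free while the word is non-zero, and unlocking is a plain store of a non-zero value. A C11 rendering of that protocol (a sketch of the semantics, not the kernel's lock):

#include <stdatomic.h>

static atomic_uint pa_dbit_lock = 1;		/* non-zero == free */

static void dbit_lock(void)
{
	/* LDCW-style acquire: swap in 0, loop while the old value was 0 */
	while (atomic_exchange(&pa_dbit_lock, 0) == 0)
		;
}

static void dbit_unlock(void)
{
	atomic_store(&pa_dbit_lock, 1);		/* plain non-zero store */
}
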
@@ -166,22 +166,32 @@ int arch_show_interrupts(struct seq_file *p, int prec)
	seq_printf(p, "%*s: ", prec, "STK");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
	seq_printf(p, "  Kernel stack usage\n");
	seq_puts(p, "  Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
	seq_printf(p, "%*s: ", prec, "IST");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
	seq_puts(p, "  Interrupt stack usage\n");
	seq_printf(p, "%*s: ", prec, "ISC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter);
	seq_puts(p, "  Interrupt stack usage counter\n");
# endif
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_puts(p, "  Function call interrupts\n");
#endif
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
	seq_puts(p, "  TLB shootdowns\n");
	return 0;
}

@@ -378,6 +388,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
	unsigned long sp = regs->gr[30];
	unsigned long stack_usage;
	unsigned int *last_usage;
	int cpu = smp_processor_id();

	/* if sr7 != 0, we interrupted a userspace process which we do not want
	 * to check for stack overflow. We will only check the kernel stack. */

@@ -386,7 +397,31 @@ static inline void stack_overflow_check(struct pt_regs *regs)

	/* calculate kernel stack usage */
	stack_usage = sp - stack_start;
	last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
#ifdef CONFIG_IRQSTACKS
	if (likely(stack_usage <= THREAD_SIZE))
		goto check_kernel_stack; /* found kernel stack */

	/* check irq stack usage */
	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
	stack_usage = sp - stack_start;

	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow irq stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
	goto panic_check;

check_kernel_stack:
#endif

	/* check kernel stack usage */
	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

@@ -398,31 +433,69 @@ static inline void stack_overflow_check(struct pt_regs *regs)
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start, stack_start + THREAD_SIZE);

#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
#endif
}

#ifdef CONFIG_IRQSTACKS
DEFINE_PER_CPU(union irq_stack_union, irq_stack_union);
DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock)
	};

static void execute_on_irq_stack(void *func, unsigned long param1)
{
	unsigned long *irq_stack_start;
	union irq_stack_union *union_ptr;
	unsigned long irq_stack;
	int cpu = smp_processor_id();
	raw_spinlock_t *irq_stack_in_use;

	irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0];
	irq_stack = (unsigned long) irq_stack_start;
	irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */
	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
	irq_stack = (unsigned long) &union_ptr->stack;
	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock),
			 64); /* align for stack frame usage */

	BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */
	*irq_stack_start = 1;
	/* We may be called recursive. If we are already using the irq stack,
	 * just continue to use it. Use spinlocks to serialize
	 * the irq stack usage.
	 */
	irq_stack_in_use = &union_ptr->lock;
	if (!raw_spin_trylock(irq_stack_in_use)) {
		void (*direct_call)(unsigned long p1) = func;

		/* We are using the IRQ stack already.
		 * Do direct call on current stack. */
		direct_call(param1);
		return;
	}

	/* This is where we switch to the IRQ stack. */
	call_on_stack(param1, func, irq_stack);

	*irq_stack_start = 0;
	__inc_irq_stat(irq_stack_counter);

	/* free up irq stack usage. */
	do_raw_spin_unlock(irq_stack_in_use);
}

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		execute_on_irq_stack(__do_softirq, 0);

	local_irq_restore(flags);
}
#endif /* CONFIG_IRQSTACKS */

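The execute_on_irq_stack() rewrite above replaces a BUG_ON for recursion with a graceful path: raw_spin_trylock() doubles as both the "stack in use" flag and the nesting detector, and a nested caller simply runs the handler on the stack it is already on. The control flow in miniature, as a self-contained sketch (the real code additionally swaps the stack pointer, which plain C cannot do):

#include <stdatomic.h>

static atomic_flag irq_stack_in_use = ATOMIC_FLAG_INIT;

static void run_on_irq_stack(void (*func)(unsigned long), unsigned long arg)
{
	if (atomic_flag_test_and_set(&irq_stack_in_use)) {
		func(arg);	/* nested: stay on the current stack */
		return;
	}

	/* the real code switches the stack pointer here (call_on_stack) */
	func(arg);

	atomic_flag_clear(&irq_stack_in_use);
}
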
@@ -1069,7 +1069,7 @@ void flush_tlb_all(void)
{
	int do_recycle;

	inc_irq_stat(irq_tlb_count);
	__inc_irq_stat(irq_tlb_count);
	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {

@@ -1090,7 +1090,7 @@ void flush_tlb_all(void)
#else
void flush_tlb_all(void)
{
	inc_irq_stat(irq_tlb_count);
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();

@@ -262,8 +262,31 @@ config PPC_EARLY_DEBUG_OPAL_HVSI
	  Select this to enable early debugging for the PowerNV platform
	  using an "hvsi" console

config PPC_EARLY_DEBUG_MEMCONS
	bool "In memory console"
	help
	  Select this to enable early debugging using an in memory console.
	  This console provides input and output buffers stored within the
	  kernel BSS and should be safe to select on any system. A debugger
	  can then be used to read kernel output or send input to the console.
endchoice

config PPC_MEMCONS_OUTPUT_SIZE
	int "In memory console output buffer size"
	depends on PPC_EARLY_DEBUG_MEMCONS
	default 4096
	help
	  Selects the size of the output buffer (in bytes) of the in memory
	  console.

config PPC_MEMCONS_INPUT_SIZE
	int "In memory console input buffer size"
	depends on PPC_EARLY_DEBUG_MEMCONS
	default 128
	help
	  Selects the size of the input buffer (in bytes) of the in memory
	  console.

config PPC_EARLY_DEBUG_OPAL
	def_bool y
	depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI

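The new PPC_EARLY_DEBUG_MEMCONS option describes a console that is nothing more than fixed buffers in kernel BSS that a debugger peeks and pokes. The idea fits in a few lines; sizes below mirror the Kconfig defaults, but the code itself is an illustrative sketch, not the powerpc implementation:

#define MEMCONS_OUTPUT_SIZE	4096	/* PPC_MEMCONS_OUTPUT_SIZE default */
#define MEMCONS_INPUT_SIZE	128	/* PPC_MEMCONS_INPUT_SIZE default */

static char memcons_output[MEMCONS_OUTPUT_SIZE];	/* lives in BSS */
static char memcons_input[MEMCONS_INPUT_SIZE];		/* debugger writes here */
static unsigned long memcons_out_pos;

static void memcons_putc(char c)
{
	/* wrap around: a debugger reading the buffer sees the tail */
	memcons_output[memcons_out_pos++ % MEMCONS_OUTPUT_SIZE] = c;
}
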
@@ -136,7 +136,6 @@ CONFIG_HID_SMARTJOYPLUS=m
CONFIG_USB_HIDDEV=y
CONFIG_USB=m
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_SUSPEND=y
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_HCD_PPC_OF is not set

@@ -0,0 +1,10 @@
#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
#define _ASM_POWERPC_CONTEXT_TRACKING_H

#ifdef CONFIG_CONTEXT_TRACKING
#define SCHEDULE_USER bl	.schedule_user
#else
#define SCHEDULE_USER bl	.schedule
#endif

#endif

@@ -52,6 +52,7 @@
#define FW_FEATURE_BEST_ENERGY	ASM_CONST(0x0000000080000000)
#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
#define FW_FEATURE_PRRN		ASM_CONST(0x0000000200000000)
#define FW_FEATURE_OPALv3	ASM_CONST(0x0000000400000000)

#ifndef __ASSEMBLY__

@@ -69,7 +70,8 @@ enum {
		FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
		FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
	FW_FEATURE_PSERIES_ALWAYS = 0,
	FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
	FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
		FW_FEATURE_OPALv3,
	FW_FEATURE_POWERNV_ALWAYS = 0,
	FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
	FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,

@@ -96,11 +96,12 @@ static inline bool arch_irqs_disabled(void)
#endif

#define hard_irq_disable()	do {			\
	u8 _was_enabled = get_paca()->soft_enabled;	\
	__hard_irq_disable();				\
	if (local_paca->soft_enabled)			\
		trace_hardirqs_off();			\
	get_paca()->soft_enabled = 0;			\
	get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;	\
	if (_was_enabled)				\
		trace_hardirqs_off();			\
} while(0)

static inline bool lazy_irq_pending(void)

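The hard_irq_disable() change above is about ordering around the tracepoint: the enabled/disabled snapshot is taken first, the PACA state transition completes, and only then is trace_hardirqs_off() called, so nothing the tracer does can observe a half-updated soft-enable state. The shape of the fix, with hypothetical stand-ins for the PACA accessors:

#include <stdbool.h>

static bool soft_enabled;			/* stand-in for PACA state */
static void arch_hard_disable(void) { }		/* stand-in */
static void trace_hardirqs_off(void) { }	/* stand-in */

#define hard_irq_disable_sketch() do {			\
	bool was_enabled = soft_enabled;	/* 1: snapshot */	\
	arch_hard_disable();			/* 2: disable  */	\
	soft_enabled = false;			/* 3: bookkeep */	\
	if (was_enabled)			/* 4: trace last */	\
		trace_hardirqs_off();				\
} while (0)
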
@@ -243,7 +243,8 @@ enum OpalMCE_TlbErrorType {

enum OpalThreadStatus {
	OPAL_THREAD_INACTIVE = 0x0,
	OPAL_THREAD_STARTED = 0x1
	OPAL_THREAD_STARTED = 0x1,
	OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
};

enum OpalPciBusCompare {

@@ -563,6 +564,8 @@ extern void opal_nvram_init(void);

extern int opal_machine_check(struct pt_regs *regs);

extern void opal_shutdown(void);

#endif /* __ASSEMBLY__ */

#endif /* __OPAL_H */

@@ -174,6 +174,8 @@ struct pci_dn {
/* Get the pointer to a device_node's pci_dn */
#define PCI_DN(dn)	((struct pci_dn *) (dn)->data)

extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);

extern void * update_dn_pci_info(struct device_node *dn, void *data);

static inline int pci_device_from_OF_node(struct device_node *np,

@@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE);
	return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,

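The one-liner above fixes how the page-table pointer is recovered from a pmd: masking with -(sizeof(pte_t) * PTRS_PER_PTE) only works when the table is a full, naturally aligned page, while ~PMD_MASKED_BITS strips exactly the low flag bits however finely the tables are packed. A worked example with made-up values (PMD_MASKED_BITS == 0x3f is an assumption for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pmd_val = 0xdead5f81ULL;	/* table at ...5f80 | low flag bit */
	uint64_t by_size = pmd_val & (uint64_t)-(8 * 512);	/* == & ~0xfff */
	uint64_t by_mask = pmd_val & ~0x3fULL;	/* assumed PMD_MASKED_BITS */

	/* prints 0xdead5000 vs 0xdead5f80: the size-based mask destroys the
	 * sub-page part of the pointer, the explicit mask preserves it */
	printf("%#llx vs %#llx\n",
	       (unsigned long long)by_size, (unsigned long long)by_mask);
	return 0;
}
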
Some files were not shown because too many files changed in this diff.