commit 0fb2c41f99

Merge 5.10-rc4 into here.

We need the USB/Thunderbolt fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

 .mailmap | 5
@@ -82,7 +82,10 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
 <dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
-Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+Dmitry Baryshkov <dbaryshkov@gmail.com>
+Dmitry Baryshkov <dbaryshkov@gmail.com> <[dbaryshkov@gmail.com]>
+Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_baryshkov@mentor.com>
+Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_eremin@mentor.com>
 Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
 Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
 Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>

@@ -57,7 +57,7 @@ examples:
    };

    can@53fc8000 {
-       compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+       compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
        reg = <0x53fc8000 0x4000>;
        interrupts = <82>;
        clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>, <&clks IMX5_CLK_CAN1_SERIAL_GATE>;

@@ -20,14 +20,17 @@ properties:
           - fsl,imx8qm-flexcan
           - fsl,imx8mp-flexcan
           - fsl,imx6q-flexcan
-          - fsl,imx53-flexcan
-          - fsl,imx35-flexcan
           - fsl,imx28-flexcan
           - fsl,imx25-flexcan
           - fsl,p1010-flexcan
           - fsl,vf610-flexcan
           - fsl,ls1021ar2-flexcan
           - fsl,lx2160ar1-flexcan
+      - items:
+          - enum:
+              - fsl,imx53-flexcan
+              - fsl,imx35-flexcan
+          - const: fsl,imx25-flexcan
       - items:
           - enum:
               - fsl,imx7d-flexcan
@@ -80,6 +83,7 @@ properties:
       req_gpr is the gpr register offset of CAN stop request.
       req_bit is the bit offset of CAN stop request.
+    $ref: /schemas/types.yaml#/definitions/phandle-array
     items:
       items:
         - description: The 'gpr' is the phandle to general purpose register node.
        - description: The 'req_gpr' is the gpr register offset of CAN stop request.

@@ -256,6 +256,10 @@ which is 1024 bytes long:
      - s\_padding2
      -
    * - 0x54
+     - \_\_be32
+     - s\_num\_fc\_blocks
+     - Number of fast commit blocks in the journal.
+   * - 0x58
      - \_\_u32
      - s\_padding[42]
      -
@@ -310,6 +314,8 @@ The journal incompat features are any combination of the following:
      - This journal uses v3 of the checksum on-disk format. This is the same as
        v2, but the journal block tag size is fixed regardless of the size of
        block numbers. (JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3)
+   * - 0x20
+     - Journal has fast commit blocks. (JBD2\_FEATURE\_INCOMPAT\_FAST\_COMMIT)

.. _jbd2_checksum_type:


@@ -596,6 +596,13 @@ following:
      - Sparse Super Block, v2. If this flag is set, the SB field s\_backup\_bgs
        points to the two block groups that contain backup superblocks
        (COMPAT\_SPARSE\_SUPER2).
+   * - 0x400
+     - Fast commits supported. Although fast commits blocks are
+       backward incompatible, fast commit blocks are not always
+       present in the journal. If fast commit blocks are present in
+       the journal, JBD2 incompat feature
+       (JBD2\_FEATURE\_INCOMPAT\_FAST\_COMMIT) gets
+       set (COMPAT\_FAST\_COMMIT).

.. _super_incompat:


@@ -136,10 +136,8 @@ Fast commits
 ~~~~~~~~~~~~

 JBD2 to also allows you to perform file-system specific delta commits known as
-fast commits. In order to use fast commits, you first need to call
-:c:func:`jbd2_fc_init` and tell how many blocks at the end of journal
-area should be reserved for fast commits. Along with that, you will also need
-to set following callbacks that perform correspodning work:
+fast commits. In order to use fast commits, you will need to set following
+callbacks that perform correspodning work:

 `journal->j_fc_cleanup_cb`: Cleanup function called after every full commit and
 fast commit.

@@ -19,9 +19,9 @@ report the "current" state of the lid as either "opened" or "closed".

 For most platforms, both the _LID method and the lid notifications are
 reliable. However, there are exceptions. In order to work with these
-exceptional buggy platforms, special restrictions and expections should be
+exceptional buggy platforms, special restrictions and exceptions should be
 taken into account. This document describes the restrictions and the
-expections of the Linux ACPI lid device driver.
+exceptions of the Linux ACPI lid device driver.


 Restrictions of the returning value of the _LID control method
@@ -46,7 +46,7 @@ state is changed to "closed". The "closed" notification is normally used to
 trigger some system power saving operations on Windows. Since it is fully
 tested, it is reliable from all AML tables.

-Expections for the userspace users of the ACPI lid device driver
+Exceptions for the userspace users of the ACPI lid device driver
 ================================================================

 The ACPI button driver exports the lid state to the userspace via the
@@ -100,7 +100,7 @@ use the following kernel parameter:
 C. button.lid_init_state=ignore:
    When this option is specified, the ACPI button driver never reports the
    initial lid state and there is a compensation mechanism implemented to
-   ensure that the reliable "closed" notifications can always be delievered
+   ensure that the reliable "closed" notifications can always be delivered
    to the userspace by always pairing "closed" input events with complement
    "opened" input events. But there is still no guarantee that the "opened"
    notifications can be delivered to the userspace when the lid is actually

@@ -20,9 +20,9 @@ index, like the ASL example below shows::

    Name (_CRS, ResourceTemplate ()
    {
-       GpioIo (Exclusive, PullUp, 0, 0, IoRestrictionInputOnly,
+       GpioIo (Exclusive, PullUp, 0, 0, IoRestrictionOutputOnly,
                "\\_SB.GPO0", 0, ResourceConsumer) {15}
-       GpioIo (Exclusive, PullUp, 0, 0, IoRestrictionInputOnly,
+       GpioIo (Exclusive, PullUp, 0, 0, IoRestrictionOutputOnly,
                "\\_SB.GPO0", 0, ResourceConsumer) {27, 31}
    })

@@ -49,15 +49,41 @@ index
 pin
   Pin in the GpioIo()/GpioInt() resource. Typically this is zero.
 active_low
-  If 1 the GPIO is marked as active_low.
+  If 1, the GPIO is marked as active_low.

 Since ACPI GpioIo() resource does not have a field saying whether it is
 active low or high, the "active_low" argument can be used here. Setting
 it to 1 marks the GPIO as active low.

+Note, active_low in _DSD does not make sense for GpioInt() resource and
+must be 0. GpioInt() resource has its own means of defining it.
+
 In our Bluetooth example the "reset-gpios" refers to the second GpioIo()
 resource, second pin in that resource with the GPIO number of 31.

+The GpioIo() resource unfortunately doesn't explicitly provide an initial
+state of the output pin which driver should use during its initialization.
+
+Linux tries to use common sense here and derives the state from the bias
+and polarity settings. The table below shows the expectations:
+
+=========  =============  ==============
+Pull Bias     Polarity     Requested...
+=========  =============  ==============
+Implicit     x            AS IS (assumed firmware configured for us)
+Explicit     x (no _DSD)  as Pull Bias (Up == High, Down == Low),
+                          assuming non-active (Polarity = !Pull Bias)
+Down         Low          as low, assuming active
+Down         High         as low, assuming non-active
+Up           Low          as high, assuming non-active
+Up           High         as high, assuming active
+=========  =============  ==============
+
+That said, for our above example the both GPIOs, since the bias setting
+is explicit and _DSD is present, will be treated as active with a high
+polarity and Linux will configure the pins in this state until a driver
+reprograms them differently.
+
 It is possible to leave holes in the array of GPIOs. This is useful in
 cases like with SPI host controllers where some chip selects may be
 implemented as GPIOs and some as native signals. For example a SPI host
@@ -112,8 +138,8 @@ Example::
      Package () {
          "gpio-line-names",
          Package () {
-             "SPI0_CS_N", "EXP2_INT", "MUX6_IO", "UART0_RXD", "MUX7_IO",
-             "LVL_C_A1", "MUX0_IO", "SPI1_MISO"
+             "SPI0_CS_N", "EXP2_INT", "MUX6_IO", "UART0_RXD",
+             "MUX7_IO", "LVL_C_A1", "MUX0_IO", "SPI1_MISO",
          }
      }

@@ -137,7 +163,7 @@ to the GPIO lines it is going to use and provide the GPIO subsystem with a
 mapping between those names and the ACPI GPIO resources corresponding to them.

 To do that, the driver needs to define a mapping table as a NULL-terminated
-array of struct acpi_gpio_mapping objects that each contain a name, a pointer
+array of struct acpi_gpio_mapping objects that each contains a name, a pointer
 to an array of line data (struct acpi_gpio_params) objects and the size of that
 array. Each struct acpi_gpio_params object consists of three fields,
 crs_entry_index, line_index, active_low, representing the index of the target
@@ -154,13 +180,14 @@ question would look like this::
  static const struct acpi_gpio_mapping bluetooth_acpi_gpios[] = {
    { "reset-gpios", &reset_gpio, 1 },
    { "shutdown-gpios", &shutdown_gpio, 1 },
-   { },
+   { }
  };

 Next, the mapping table needs to be passed as the second argument to
-acpi_dev_add_driver_gpios() that will register it with the ACPI device object
-pointed to by its first argument. That should be done in the driver's .probe()
-routine. On removal, the driver should unregister its GPIO mapping table by
+acpi_dev_add_driver_gpios() or its managed analogue that will
+register it with the ACPI device object pointed to by its first
+argument. That should be done in the driver's .probe() routine.
+On removal, the driver should unregister its GPIO mapping table by
 calling acpi_dev_remove_driver_gpios() on the ACPI device object where that
 table was previously registered.

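
Aside, a minimal sketch of the registration flow this hunk describes, assuming
a hypothetical platform driver (acpi_dev_add_driver_gpios() and
acpi_dev_remove_driver_gpios() are the calls named in the text above):

	static int example_bt_probe(struct platform_device *pdev)
	{
		struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
		int ret;

		/* Register the mapping table before requesting any GPIOs. */
		ret = acpi_dev_add_driver_gpios(adev, bluetooth_acpi_gpios);
		if (ret)
			return ret;

		/* ... gpiod_get(&pdev->dev, "reset", ...) etc. ... */
		return 0;
	}

	static int example_bt_remove(struct platform_device *pdev)
	{
		acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
		return 0;
	}
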
@@ -191,12 +218,12 @@ The driver might expect to get the right GPIO when it does::
 but since there is no way to know the mapping between "reset" and
 the GpioIo() in _CRS desc will hold ERR_PTR(-ENOENT).

-The driver author can solve this by passing the mapping explictly
-(the recommended way and documented in the above chapter).
+The driver author can solve this by passing the mapping explicitly
+(this is the recommended way and it's documented in the above chapter).

 The ACPI GPIO mapping tables should not contaminate drivers that are not
 knowing about which exact device they are servicing on. It implies that
-the ACPI GPIO mapping tables are hardly linked to ACPI ID and certain
+the ACPI GPIO mapping tables are hardly linked to an ACPI ID and certain
 objects, as listed in the above chapter, of the device in question.

 Getting GPIO descriptor
@@ -229,5 +256,5 @@ Case 2 explicitly tells GPIO core to look for resources in _CRS.
 Be aware that gpiod_get_index() in cases 1 and 2, assuming that there
 are two versions of ACPI device description provided and no mapping is
 present in the driver, will return different resources. That's why a
-certain driver has to handle them carefully as explained in previous
+certain driver has to handle them carefully as explained in the previous
 chapter.

@@ -98,7 +98,7 @@ subject to change::
    [    0.188903]   exdebug-0398 ex_trace_point      : Method End [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.

 Developers can utilize these special log entries to track the AML
-interpretion, thus can aid issue debugging and performance tuning. Note
+interpretation, thus can aid issue debugging and performance tuning. Note
 that, as the "AML tracer" logs are implemented via ACPI_DEBUG_PRINT()
 macro, CONFIG_ACPI_DEBUG is also required to be enabled for enabling
 "AML tracer" logs.

@@ -110,7 +110,7 @@ Q: I sent a patch and I'm wondering what happened to it?
 Q: How can I tell whether it got merged?
 A: Start by looking at the main patchworks queue for netdev:

-  http://patchwork.ozlabs.org/project/netdev/list/
+  https://patchwork.kernel.org/project/netdevbpf/list/

 The "State" field will tell you exactly where things are at with your
 patch.
@@ -152,7 +152,7 @@ networking subsystem, and then hands them off to Greg.

 There is a patchworks queue that you can see here:

-  http://patchwork.ozlabs.org/bundle/davem/stable/?state=*
+  https://patchwork.kernel.org/bundle/netdev/stable/?state=*

 It contains the patches which Dave has selected, but not yet handed off
 to Greg. If Greg already has the patch, then it will be here:

@@ -247,8 +247,8 @@ Some of the interface modes are described below:
     speeds (see below.)

 ``PHY_INTERFACE_MODE_2500BASEX``
-    This defines a variant of 1000BASE-X which is clocked 2.5 times faster,
-    than the 802.3 standard giving a fixed bit rate of 3.125Gbaud.
+    This defines a variant of 1000BASE-X which is clocked 2.5 times as fast
+    as the 802.3 standard, giving a fixed bit rate of 3.125Gbaud.

 ``PHY_INTERFACE_MODE_SGMII``
     This is used for Cisco SGMII, which is a modification of 1000BASE-X

@@ -39,7 +39,7 @@ Procedure for submitting patches to the -stable tree
    submission guidelines as described in
    :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
    after first checking the stable networking queue at
-   https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive=
+   https://patchwork.kernel.org/bundle/netdev/stable/?state=*
    to ensure the requested patch is not already queued up.
 - Security patches should not be handled (solely) by the -stable review
   process but should follow the procedures in

@@ -46,7 +46,7 @@ Procedura per sottomettere patch per i sorgenti -stable
    :ref:`Documentation/translations/it_IT/networking/netdev-FAQ.rst <it_netdev-FAQ>`;
    ma solo dopo aver verificato al seguente indirizzo che la patch non sia
    già in coda:
-   https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive=
+   https://patchwork.kernel.org/bundle/netdev/stable/?state=*
 - Una patch di sicurezza non dovrebbero essere gestite (solamente) dal processo
   di revisione -stable, ma dovrebbe seguire le procedure descritte in
   :ref:`Documentation/translations/it_IT/admin-guide/security-bugs.rst <it_securitybugs>`.

@@ -6367,7 +6367,7 @@ accesses that would usually trigger a #GP by KVM into the guest will
 instead get bounced to user space through the KVM_EXIT_X86_RDMSR and
 KVM_EXIT_X86_WRMSR exit notifications.

-8.25 KVM_X86_SET_MSR_FILTER
+8.27 KVM_X86_SET_MSR_FILTER
 ---------------------------

 :Architectures: x86
@@ -6381,8 +6381,7 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to
 trap and emulate MSRs that are outside of the scope of KVM as well as
 limit the attack surface on KVM's MSR emulation code.

-
-8.26 KVM_CAP_ENFORCE_PV_CPUID
+8.28 KVM_CAP_ENFORCE_PV_CPUID
 -----------------------------

 Architectures: x86

 MAINTAINERS | 36
@@ -1279,7 +1279,7 @@ M:	Igor Russkikh <irusskikh@marvell.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	https://www.marvell.com/
-Q:	http://patchwork.ozlabs.org/project/netdev/list/
+Q:	https://patchwork.kernel.org/project/netdevbpf/list/
 F:	Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst
 F:	drivers/net/ethernet/aquantia/atlantic/

@@ -6622,6 +6622,7 @@ Q:	http://patchwork.ozlabs.org/project/linux-ext4/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4.git
 F:	Documentation/filesystems/ext4/
 F:	fs/ext4/
+F:	include/trace/events/ext4.h

 Extended Verification Module (EVM)
 M:	Mimi Zohar <zohar@linux.ibm.com>
@@ -8837,8 +8838,8 @@ S:	Supported
 W:	http://www.intel.com/support/feedback.htm
 W:	http://e1000.sourceforge.net/
 Q:	http://patchwork.ozlabs.org/project/intel-wired-lan/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue.git
 F:	Documentation/networking/device_drivers/ethernet/intel/
 F:	drivers/net/ethernet/intel/
 F:	drivers/net/ethernet/intel/*/
@@ -11181,7 +11182,7 @@ M:	Tariq Toukan <tariqt@nvidia.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
-Q:	http://patchwork.ozlabs.org/project/netdev/list/
+Q:	https://patchwork.kernel.org/project/netdevbpf/list/
 F:	drivers/net/ethernet/mellanox/mlx4/en_*

 MELLANOX ETHERNET DRIVER (mlx5e)
@@ -11189,7 +11190,7 @@ M:	Saeed Mahameed <saeedm@nvidia.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
-Q:	http://patchwork.ozlabs.org/project/netdev/list/
+Q:	https://patchwork.kernel.org/project/netdevbpf/list/
 F:	drivers/net/ethernet/mellanox/mlx5/core/en_*

 MELLANOX ETHERNET INNOVA DRIVERS
@@ -11197,7 +11198,7 @@ R:	Boris Pismenny <borisp@nvidia.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
-Q:	http://patchwork.ozlabs.org/project/netdev/list/
+Q:	https://patchwork.kernel.org/project/netdevbpf/list/
 F:	drivers/net/ethernet/mellanox/mlx5/core/accel/*
 F:	drivers/net/ethernet/mellanox/mlx5/core/en_accel/*
 F:	drivers/net/ethernet/mellanox/mlx5/core/fpga/*
@@ -11209,7 +11210,7 @@ M:	Ido Schimmel <idosch@nvidia.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
-Q:	http://patchwork.ozlabs.org/project/netdev/list/
+Q:	https://patchwork.kernel.org/project/netdevbpf/list/
 F:	drivers/net/ethernet/mellanox/mlxsw/
 F:	tools/testing/selftests/drivers/net/mlxsw/

@@ -11218,7 +11219,7 @@ M:	mlxsw@nvidia.com
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
-Q:	http://patchwork.ozlabs.org/project/netdev/list/
+Q:	https://patchwork.kernel.org/project/netdevbpf/list/
 F:	drivers/net/ethernet/mellanox/mlxfw/

 MELLANOX HARDWARE PLATFORM SUPPORT
@@ -11237,7 +11238,7 @@ L:	netdev@vger.kernel.org
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
-Q:	http://patchwork.ozlabs.org/project/netdev/list/
+Q:	https://patchwork.kernel.org/project/netdevbpf/list/
 F:	drivers/net/ethernet/mellanox/mlx4/
 F:	include/linux/mlx4/

@@ -11258,7 +11259,7 @@ L:	netdev@vger.kernel.org
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
-Q:	http://patchwork.ozlabs.org/project/netdev/list/
+Q:	https://patchwork.kernel.org/project/netdevbpf/list/
 F:	Documentation/networking/device_drivers/ethernet/mellanox/
 F:	drivers/net/ethernet/mellanox/mlx5/core/
 F:	include/linux/mlx5/
@@ -12138,7 +12139,7 @@ M:	Jakub Kicinski <kuba@kernel.org>
 L:	netdev@vger.kernel.org
 S:	Maintained
 W:	http://www.linuxfoundation.org/en/Net
-Q:	http://patchwork.ozlabs.org/project/netdev/list/
+Q:	https://patchwork.kernel.org/project/netdevbpf/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 F:	Documentation/devicetree/bindings/net/
@@ -12183,7 +12184,7 @@ M:	Jakub Kicinski <kuba@kernel.org>
 L:	netdev@vger.kernel.org
 S:	Maintained
 W:	http://www.linuxfoundation.org/en/Net
-Q:	http://patchwork.ozlabs.org/project/netdev/list/
+Q:	https://patchwork.kernel.org/project/netdevbpf/list/
 B:	mailto:netdev@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
@@ -15254,7 +15255,6 @@ F:	drivers/iommu/s390-iommu.c
 S390 IUCV NETWORK LAYER
 M:	Julian Wiedmann <jwi@linux.ibm.com>
 M:	Karsten Graul <kgraul@linux.ibm.com>
-M:	Ursula Braun <ubraun@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
 W:	http://www.ibm.com/developerworks/linux/linux390/
@@ -15265,7 +15265,6 @@ F:	net/iucv/
 S390 NETWORK DRIVERS
 M:	Julian Wiedmann <jwi@linux.ibm.com>
 M:	Karsten Graul <kgraul@linux.ibm.com>
-M:	Ursula Braun <ubraun@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
 W:	http://www.ibm.com/developerworks/linux/linux390/
@@ -15836,7 +15835,6 @@ S:	Maintained
 F:	drivers/misc/sgi-xp/

 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
-M:	Ursula Braun <ubraun@linux.ibm.com>
 M:	Karsten Graul <kgraul@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
@@ -18183,6 +18181,14 @@ L:	linux-usb@vger.kernel.org
 S:	Supported
 F:	drivers/usb/class/usblp.c

+USB RAW GADGET DRIVER
+R:	Andrey Konovalov <andreyknvl@gmail.com>
+L:	linux-usb@vger.kernel.org
+S:	Maintained
+F:	Documentation/usb/raw-gadget.rst
+F:	drivers/usb/gadget/legacy/raw_gadget.c
+F:	include/uapi/linux/usb/raw_gadget.h
+
 USB QMI WWAN NETWORK DRIVER
 M:	Bjørn Mork <bjorn@mork.no>
 L:	netdev@vger.kernel.org

 Makefile | 2
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Kleptomaniac Octopus

 # *DOCUMENTATION*

@@ -44,20 +44,20 @@ int kprobe_exceptions_notify(struct notifier_block *self,
 			     unsigned long val, void *data);

 /* optinsn template addresses */
-extern __visible kprobe_opcode_t optprobe_template_entry;
-extern __visible kprobe_opcode_t optprobe_template_val;
-extern __visible kprobe_opcode_t optprobe_template_call;
-extern __visible kprobe_opcode_t optprobe_template_end;
-extern __visible kprobe_opcode_t optprobe_template_sub_sp;
-extern __visible kprobe_opcode_t optprobe_template_add_sp;
-extern __visible kprobe_opcode_t optprobe_template_restore_begin;
-extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
-extern __visible kprobe_opcode_t optprobe_template_restore_end;
+extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_val[];
+extern __visible kprobe_opcode_t optprobe_template_call[];
+extern __visible kprobe_opcode_t optprobe_template_end[];
+extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
+extern __visible kprobe_opcode_t optprobe_template_add_sp[];
+extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
+extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
+extern __visible kprobe_opcode_t optprobe_template_restore_end[];

 #define MAX_OPTIMIZED_LENGTH	4
 #define MAX_OPTINSN_SIZE				\
-	((unsigned long)&optprobe_template_end -	\
-	 (unsigned long)&optprobe_template_entry)
+	((unsigned long)optprobe_template_end -	\
+	 (unsigned long)optprobe_template_entry)
 #define RELATIVEJUMP_SIZE	4

 struct arch_optimized_insn {
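
Aside, why the switch from scalar declarations to incomplete arrays matters
(a sketch under plain C rules, with hypothetical marker names; not part of
the patch): with "extern kprobe_opcode_t sym;" the compiler may treat &sym
as a pointer to a single object, so subtracting two such addresses is
formally out-of-object arithmetic that newer compilers and CFI checkers can
reject; an incomplete array type only promises an address:

	extern char tmpl_entry[];	/* asm-defined template markers */
	extern char tmpl_end[];

	static unsigned long tmpl_size(void)
	{
		/* Arrays decay to addresses; no single-object assumption. */
		return (unsigned long)tmpl_end - (unsigned long)tmpl_entry;
	}
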

@@ -32,8 +32,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }

 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = perf_reg_abi(current);
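
The same two-argument signature change recurs in the arm64, csky, powerpc,
riscv and s390 hunks below; the prototype implied by these hunks (a sketch,
not quoted from a header) is:

	/* The regs_user_copy scratch argument is dropped; implementations
	 * now point regs_user at current's saved registers directly. */
	void perf_get_regs_user(struct perf_regs *regs_user,
				struct pt_regs *regs);
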

@@ -85,21 +85,21 @@ asm (
 			"optprobe_template_end:\n");

 #define TMPL_VAL_IDX \
-	((unsigned long *)&optprobe_template_val - (unsigned long *)&optprobe_template_entry)
+	((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
 #define TMPL_CALL_IDX \
-	((unsigned long *)&optprobe_template_call - (unsigned long *)&optprobe_template_entry)
+	((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
 #define TMPL_END_IDX \
-	((unsigned long *)&optprobe_template_end - (unsigned long *)&optprobe_template_entry)
+	((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
 #define TMPL_ADD_SP \
-	((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry)
+	((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
 #define TMPL_SUB_SP \
-	((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry)
+	((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
 #define TMPL_RESTORE_BEGIN \
-	((unsigned long *)&optprobe_template_restore_begin - (unsigned long *)&optprobe_template_entry)
+	((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
 #define TMPL_RESTORE_ORIGN_INSN \
-	((unsigned long *)&optprobe_template_restore_orig_insn - (unsigned long *)&optprobe_template_entry)
+	((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
 #define TMPL_RESTORE_END \
-	((unsigned long *)&optprobe_template_restore_end - (unsigned long *)&optprobe_template_entry)
+	((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)

 /*
  * ARM can always optimize an instruction when using ARM ISA, except
@@ -234,7 +234,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
 	}

 	/* Copy arch-dep-instance from template. */
-	memcpy(code, (unsigned long *)&optprobe_template_entry,
+	memcpy(code, (unsigned long *)optprobe_template_entry,
 	       TMPL_END_IDX * sizeof(kprobe_opcode_t));

 	/* Adjust buffer according to instruction. */

@@ -75,6 +75,7 @@
 &enetc_port0 {
 	phy-handle = <&phy0>;
 	phy-connection-type = "sgmii";
+	managed = "in-band-status";
 	status = "okay";

 	mdio {

@@ -268,6 +268,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 /*
  * CPU feature detected at boot time based on feature of one or more CPUs.
  * All possible conflicts for a late CPU are ignored.
+ * NOTE: this means that a late CPU with the feature will *not* cause the
+ * capability to be advertised by cpus_have_*cap()!
  */
 #define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
 	(ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\

@@ -86,6 +86,8 @@
 #define QCOM_CPU_PART_FALKOR_V1		0x800
 #define QCOM_CPU_PART_FALKOR		0xC00
 #define QCOM_CPU_PART_KRYO		0x200
+#define QCOM_CPU_PART_KRYO_2XX_GOLD	0x800
+#define QCOM_CPU_PART_KRYO_2XX_SILVER	0x801
 #define QCOM_CPU_PART_KRYO_3XX_SILVER	0x803
 #define QCOM_CPU_PART_KRYO_4XX_GOLD	0x804
 #define QCOM_CPU_PART_KRYO_4XX_SILVER	0x805
@@ -116,6 +118,8 @@
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
+#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
 #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
 #define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
 #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
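
For orientation, MIDR_CPU_MODEL() packs the standard MIDR_EL1 layout
(implementer in bits [31:24], part number in bits [15:4]); a sketch using
the constants added above (illustrative, not from the patch):

	/* ARM_CPU_IMP_QCOM is 0x51 ('Q'); the part number lands in [15:4]. */
	u32 kryo_2xx_gold = MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM,
					   QCOM_CPU_PART_KRYO_2XX_GOLD);
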

@@ -118,6 +118,8 @@ struct kvm_arch {
 	 */
 	unsigned long *pmu_filter;
 	unsigned int pmuver;
+
+	u8 pfr0_csv2;
 };

 struct kvm_vcpu_fault_info {

@@ -372,6 +372,8 @@
 #define SYS_CONTEXTIDR_EL1		sys_reg(3, 0, 13, 0, 1)
 #define SYS_TPIDR_EL1			sys_reg(3, 0, 13, 0, 4)

+#define SYS_SCXTNUM_EL1			sys_reg(3, 0, 13, 0, 7)
+
 #define SYS_CNTKCTL_EL1			sys_reg(3, 0, 14, 1, 0)

 #define SYS_CCSIDR_EL1			sys_reg(3, 1, 0, 0, 0)
@@ -404,6 +406,8 @@
 #define SYS_TPIDR_EL0			sys_reg(3, 3, 13, 0, 2)
 #define SYS_TPIDRRO_EL0			sys_reg(3, 3, 13, 0, 3)

+#define SYS_SCXTNUM_EL0			sys_reg(3, 3, 13, 0, 7)
+
 /* Definitions for system register interface to AMU for ARMv8.4 onwards */
 #define SYS_AM_EL0(crm, op2)		sys_reg(3, 3, 13, (crm), (op2))
 #define SYS_AMCR_EL0			SYS_AM_EL0(2, 0)

@@ -299,6 +299,8 @@ static const struct midr_range erratum_845719_list[] = {
 	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
 	/* Brahma-B53 r0p[0] */
 	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+	/* Kryo2XX Silver rAp4 */
+	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
 	{},
 };
 #endif

@@ -1337,6 +1337,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
 		MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD),
+		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
 		{ /* sentinel */ }

@@ -127,7 +127,7 @@ static void *image_load(struct kimage *image,
 			      kernel_segment->mem, kbuf.bufsz,
 			      kernel_segment->memsz);

-	return 0;
+	return NULL;
 }

 #ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG

@@ -73,8 +73,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }

 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = perf_reg_abi(current);

@@ -522,14 +522,13 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
 	bool prev32, next32;
 	u64 val;

-	if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
-	      cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
+	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
 		return;

 	prev32 = is_compat_thread(task_thread_info(prev));
 	next32 = is_compat_thread(task_thread_info(next));

-	if (prev32 == next32)
+	if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
 		return;

 	val = read_sysreg(cntkctl_el1);

@@ -118,6 +118,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
 		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
 		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
 		{ /* sentinel */ }

@@ -66,7 +66,6 @@ static int cpu_psci_cpu_disable(unsigned int cpu)

 static void cpu_psci_cpu_die(unsigned int cpu)
 {
-	int ret;
 	/*
 	 * There are no known implementations of PSCI actually using the
 	 * power state field, pass a sensible default for now.
@@ -74,9 +73,7 @@ static void cpu_psci_cpu_die(unsigned int cpu)
 	u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
 		    PSCI_0_2_POWER_STATE_TYPE_SHIFT;

-	ret = psci_ops.cpu_off(state);
-
-	pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
+	psci_ops.cpu_off(state);
 }

 static int cpu_psci_cpu_kill(unsigned int cpu)

@@ -413,6 +413,7 @@ void cpu_die_early(void)

 	/* Mark this CPU absent */
 	set_cpu_present(cpu, 0);
+	rcu_report_dead(cpu);

 	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
 		update_cpu_boot_status(CPU_KILL_ME);

@@ -102,6 +102,20 @@ static int kvm_arm_default_max_vcpus(void)
 	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
 }

+static void set_default_csv2(struct kvm *kvm)
+{
+	/*
+	 * The default is to expose CSV2 == 1 if the HW isn't affected.
+	 * Although this is a per-CPU feature, we make it global because
+	 * asymmetric systems are just a nuisance.
+	 *
+	 * Userspace can override this as long as it doesn't promise
+	 * the impossible.
+	 */
+	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+		kvm->arch.pfr0_csv2 = 1;
+}
+
 /**
  * kvm_arch_init_vm - initializes a VM data structure
  * @kvm: pointer to the KVM struct
@@ -127,6 +141,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	/* The maximum number of VCPUs is limited by the host's GIC model */
 	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

+	set_default_csv2(kvm);
+
 	return ret;
 out_free_stage2_pgd:
 	kvm_free_stage2_pgd(&kvm->arch.mmu);

@@ -788,10 +788,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}

 	switch (vma_shift) {
+#ifndef __PAGETABLE_PMD_FOLDED
 	case PUD_SHIFT:
 		if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
 			break;
 		fallthrough;
+#endif
 	case CONT_PMD_SHIFT:
 		vma_shift = PMD_SHIFT;
 		fallthrough;

@@ -1038,7 +1038,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
 	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

-static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			     const struct sys_reg_desc *r)
 {
 	kvm_inject_undefined(vcpu);
@@ -1047,33 +1047,25 @@ static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 }

 /* Macro to expand the AMU counter and type registers*/
-#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), access_amu }
-#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), access_amu }
-#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), access_amu }
-#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), access_amu }
-
-static bool trap_ptrauth(struct kvm_vcpu *vcpu,
-			 struct sys_reg_params *p,
-			 const struct sys_reg_desc *rd)
-{
-	/*
-	 * If we land here, that is because we didn't fixup the access on exit
-	 * by allowing the PtrAuth sysregs. The only way this happens is when
-	 * the guest does not have PtrAuth support enabled.
-	 */
-	kvm_inject_undefined(vcpu);
-
-	return false;
-}
+#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
+#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
+#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
+#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
 			const struct sys_reg_desc *rd)
 {
-	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
+	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
 }

+/*
+ * If we land here on a PtrAuth access, that is because we didn't
+ * fixup the access on exit by allowing the PtrAuth sysregs. The only
+ * way this happens is when the guest does not have PtrAuth support
+ * enabled.
+ */
 #define __PTRAUTH_KEY(k)						\
-	{ SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k,		\
+	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

 #define PTRAUTH_KEY(k)							\
@@ -1128,9 +1120,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		if (!vcpu_has_sve(vcpu))
 			val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
 		val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
-		if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
-		    arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
-			val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
+		val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT);
+		val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT);
 	} else if (id == SYS_ID_AA64PFR1_EL1) {
 		val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
 	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
@@ -1153,6 +1144,22 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 	return val;
 }

+static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
+				  const struct sys_reg_desc *r)
+{
+	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
+			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+
+	switch (id) {
+	case SYS_ID_AA64ZFR0_EL1:
+		if (!vcpu_has_sve(vcpu))
+			return REG_RAZ;
+		break;
+	}
+
+	return 0;
+}
+
 /* cpufeature ID register access trap handlers */

 static bool __access_id_reg(struct kvm_vcpu *vcpu,
@@ -1171,7 +1178,9 @@ static bool access_id_reg(struct kvm_vcpu *vcpu,
 			  struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
-	return __access_id_reg(vcpu, p, r, false);
+	bool raz = sysreg_visible_as_raz(vcpu, r);
+
+	return __access_id_reg(vcpu, p, r, raz);
 }

 static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
@@ -1192,71 +1201,40 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
 	if (vcpu_has_sve(vcpu))
 		return 0;

-	return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
+	return REG_HIDDEN;
 }

-/* Visibility overrides for SVE-specific ID registers */
-static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
-				      const struct sys_reg_desc *rd)
-{
-	if (vcpu_has_sve(vcpu))
-		return 0;
-
-	return REG_HIDDEN_USER;
-}
-
-/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
-static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
-{
-	if (!vcpu_has_sve(vcpu))
-		return 0;
-
-	return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
-}
-
-static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
-				   struct sys_reg_params *p,
-				   const struct sys_reg_desc *rd)
-{
-	if (p->is_write)
-		return write_to_read_only(vcpu, p, rd);
-
-	p->regval = guest_id_aa64zfr0_el1(vcpu);
-	return true;
-}
-
-static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
-			       const struct sys_reg_desc *rd,
-			       const struct kvm_one_reg *reg, void __user *uaddr)
-{
-	u64 val;
-
-	if (WARN_ON(!vcpu_has_sve(vcpu)))
-		return -ENOENT;
-
-	val = guest_id_aa64zfr0_el1(vcpu);
-	return reg_to_user(uaddr, &val, reg->id);
-}
-
-static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
+static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 			       const struct sys_reg_desc *rd,
 			       const struct kvm_one_reg *reg, void __user *uaddr)
 {
 	const u64 id = sys_reg_to_index(rd);
 	int err;
 	u64 val;
-
-	if (WARN_ON(!vcpu_has_sve(vcpu)))
-		return -ENOENT;
+	u8 csv2;

 	err = reg_from_user(&val, uaddr, id);
 	if (err)
 		return err;

-	/* This is what we mean by invariant: you can't change it. */
-	if (val != guest_id_aa64zfr0_el1(vcpu))
+	/*
+	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
+	 * it doesn't promise more than what is actually provided (the
+	 * guest could otherwise be covered in ectoplasmic residue).
+	 */
+	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
+	if (csv2 > 1 ||
+	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
 		return -EINVAL;

+	/* We can only differ with CSV2, and anything else is an error */
+	val ^= read_id_reg(vcpu, rd, false);
+	val &= ~(0xFUL << ID_AA64PFR0_CSV2_SHIFT);
+	if (val)
+		return -EINVAL;
+
+	vcpu->kvm->arch.pfr0_csv2 = csv2;
+
 	return 0;
 }

@@ -1299,13 +1277,17 @@ static int __set_id_reg(const struct kvm_vcpu *vcpu,
 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		      const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	return __get_id_reg(vcpu, rd, uaddr, false);
+	bool raz = sysreg_visible_as_raz(vcpu, rd);
+
+	return __get_id_reg(vcpu, rd, uaddr, raz);
 }

 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		      const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	return __set_id_reg(vcpu, rd, uaddr, false);
+	bool raz = sysreg_visible_as_raz(vcpu, rd);
+
+	return __set_id_reg(vcpu, rd, uaddr, raz);
 }

 static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
@@ -1384,19 +1366,13 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }

-static bool access_mte_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			    const struct sys_reg_desc *r)
-{
-	kvm_inject_undefined(vcpu);
-	return false;
-}
-
 /* sys_reg_desc initialiser for known cpufeature ID registers */
 #define ID_SANITISED(name) {			\
 	SYS_DESC(SYS_##name),			\
 	.access	= access_id_reg,		\
 	.get_user = get_id_reg,			\
 	.set_user = set_id_reg,			\
+	.visibility = id_visibility,		\
 }

 /*
@@ -1514,11 +1490,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {

 	/* AArch64 ID registers */
 	/* CRm=4 */
-	ID_SANITISED(ID_AA64PFR0_EL1),
+	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
+	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
 	ID_SANITISED(ID_AA64PFR1_EL1),
 	ID_UNALLOCATED(4,2),
 	ID_UNALLOCATED(4,3),
-	{ SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
+	ID_SANITISED(ID_AA64ZFR0_EL1),
 	ID_UNALLOCATED(4,5),
 	ID_UNALLOCATED(4,6),
 	ID_UNALLOCATED(4,7),
@@ -1557,8 +1534,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
 	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

-	{ SYS_DESC(SYS_RGSR_EL1), access_mte_regs },
-	{ SYS_DESC(SYS_GCR_EL1), access_mte_regs },
+	{ SYS_DESC(SYS_RGSR_EL1), undef_access },
+	{ SYS_DESC(SYS_GCR_EL1), undef_access },

 	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
 	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
@@ -1584,8 +1561,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
 	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

-	{ SYS_DESC(SYS_TFSR_EL1), access_mte_regs },
-	{ SYS_DESC(SYS_TFSRE0_EL1), access_mte_regs },
+	{ SYS_DESC(SYS_TFSR_EL1), undef_access },
+	{ SYS_DESC(SYS_TFSRE0_EL1), undef_access },

 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
@@ -1621,6 +1598,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

+	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
+
 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

 	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
@@ -1649,14 +1628,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

-	{ SYS_DESC(SYS_AMCR_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCFGR_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCGCR_EL0), access_amu },
-	{ SYS_DESC(SYS_AMUSERENR_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCNTENSET0_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCNTENSET1_EL0), access_amu },
+	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
+
+	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
+	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
 	AMU_AMEVCNTR0_EL0(0),
 	AMU_AMEVCNTR0_EL0(1),
 	AMU_AMEVCNTR0_EL0(2),
@@ -2185,7 +2166,7 @@ static void perform_access(struct kvm_vcpu *vcpu,
 	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

 	/* Check for regs disabled by runtime config */
-	if (sysreg_hidden_from_guest(vcpu, r)) {
+	if (sysreg_hidden(vcpu, r)) {
 		kvm_inject_undefined(vcpu);
 		return;
 	}
@@ -2684,7 +2665,7 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
 		return get_invariant_sys_reg(reg->id, uaddr);

 	/* Check for regs disabled by runtime config */
-	if (sysreg_hidden_from_user(vcpu, r))
+	if (sysreg_hidden(vcpu, r))
 		return -ENOENT;

 	if (r->get_user)
@@ -2709,7 +2690,7 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
 		return set_invariant_sys_reg(reg->id, uaddr);

 	/* Check for regs disabled by runtime config */
-	if (sysreg_hidden_from_user(vcpu, r))
+	if (sysreg_hidden(vcpu, r))
 		return -ENOENT;

 	if (r->set_user)
@@ -2780,7 +2761,7 @@ static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
 	if (!(rd->reg || rd->get_user))
 		return 0;

-	if (sysreg_hidden_from_user(vcpu, rd))
+	if (sysreg_hidden(vcpu, rd))
 		return 0;

 	if (!copy_reg_to_user(rd, uind))

@@ -59,8 +59,8 @@ struct sys_reg_desc {
 				  const struct sys_reg_desc *rd);
 };

-#define REG_HIDDEN_USER		(1 << 0) /* hidden from userspace ioctls */
-#define REG_HIDDEN_GUEST	(1 << 1) /* hidden from guest */
+#define REG_HIDDEN		(1 << 0) /* hidden from userspace and guest */
+#define REG_RAZ			(1 << 1) /* RAZ from userspace and guest */

 static __printf(2, 3)
 inline void print_sys_reg_msg(const struct sys_reg_params *p,
@@ -111,22 +111,22 @@ static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r
 	__vcpu_sys_reg(vcpu, r->reg) = r->val;
 }

-static inline bool sysreg_hidden_from_guest(const struct kvm_vcpu *vcpu,
+static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu,
 				 const struct sys_reg_desc *r)
 {
 	if (likely(!r->visibility))
 		return false;

-	return r->visibility(vcpu, r) & REG_HIDDEN_GUEST;
+	return r->visibility(vcpu, r) & REG_HIDDEN;
 }

-static inline bool sysreg_hidden_from_user(const struct kvm_vcpu *vcpu,
+static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu,
 					 const struct sys_reg_desc *r)
 {
 	if (likely(!r->visibility))
 		return false;

-	return r->visibility(vcpu, r) & REG_HIDDEN_USER;
+	return r->visibility(vcpu, r) & REG_RAZ;
 }

 static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
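
A sketch of how a .visibility callback uses the two renamed flags (a
hypothetical example consistent with the helpers above, not code from the
patch):

	static unsigned int example_visibility(const struct kvm_vcpu *vcpu,
					       const struct sys_reg_desc *rd)
	{
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;		/* accessible, reads as zero */

		return 0;			/* fully visible */
	}
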

@@ -1444,11 +1444,28 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
 }

+static bool inside_linear_region(u64 start, u64 size)
+{
+	/*
+	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
+	 * accommodating both its ends but excluding PAGE_END. Max physical
+	 * range which can be mapped inside this linear mapping range, must
+	 * also be derived from its end points.
+	 */
+	return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
+	       (start + size - 1) <= __pa(PAGE_END - 1);
+}
+
 int arch_add_memory(int nid, u64 start, u64 size,
 		    struct mhp_params *params)
 {
 	int ret, flags = 0;

+	if (!inside_linear_region(start, size)) {
+		pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
+		return -EINVAL;
+	}
+
 	if (rodata_full || debug_pagealloc_enabled())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;


@@ -32,8 +32,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }

 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = perf_reg_abi(current);

@@ -1336,7 +1336,7 @@ static void dump_trace_imc_data(struct perf_event *event)
 			/* If this is a valid record, create the sample */
 			struct perf_output_handle handle;

-			if (perf_output_begin(&handle, event, header.size))
+			if (perf_output_begin(&handle, &data, event, header.size))
 				return;

 			perf_output_sample(&handle, &header, &data, event);

@@ -144,8 +144,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }

 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :

@@ -36,8 +36,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }

 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = perf_reg_abi(current);

@@ -672,7 +672,7 @@ static void cpumsf_output_event_pid(struct perf_event *event,
 	rcu_read_lock();

 	perf_prepare_sample(&header, data, event, regs);
-	if (perf_output_begin(&handle, event, header.size))
+	if (perf_output_begin(&handle, data, event, header.size))
 		goto out;

 	/* Update the process ID (see also kernel/events/core.c) */

@@ -53,8 +53,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }

 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	/*
 	 * Use the regs from the first interruption and let

@@ -33,7 +33,13 @@ do {								\
 } while (0)

 #ifdef CONFIG_3_LEVEL_PGTABLES
-#define __pmd_free_tlb(tlb,x, address)   tlb_remove_page((tlb),virt_to_page(x))
+
+#define __pmd_free_tlb(tlb, pmd, address)		\
+do {							\
+	pgtable_pmd_page_dtor(virt_to_page(pmd));	\
+	tlb_remove_page((tlb),virt_to_page(pmd));	\
+} while (0)						\
+
 #endif

 #endif
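
The destructor added above has to pair with the constructor on the
allocation side; a sketch mirroring the generic mm API (illustrative, not
part of the patch):

	static pmd_t *example_pmd_alloc(struct mm_struct *mm, unsigned long addr)
	{
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!page)
			return NULL;
		if (!pgtable_pmd_page_ctor(page)) {	/* pairs with the dtor */
			__free_page(page);
			return NULL;
		}
		return (pmd_t *)page_address(page);
	}
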

@@ -2630,7 +2630,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 		u64 pebs_enabled = cpuc->pebs_enabled;

 		handled++;
-		x86_pmu.drain_pebs(regs);
+		x86_pmu.drain_pebs(regs, &data);
 		status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;

 		/*
@@ -4987,6 +4987,12 @@ __init int intel_pmu_init(void)

 	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

+	if (version >= 5) {
+		x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
+		if (x86_pmu.intel_cap.anythread_deprecated)
+			pr_cont(" AnyThread deprecated, ");
+	}
+
 	/*
 	 * Install the hw-cache-events table:
 	 */
@@ -5512,6 +5518,10 @@ __init int intel_pmu_init(void)
 		x86_pmu.intel_ctrl |=
 			((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

+	/* AnyThread may be deprecated on arch perfmon v5 or later */
+	if (x86_pmu.intel_cap.anythread_deprecated)
+		x86_pmu.format_attrs = intel_arch_formats_attr;
+
 	if (x86_pmu.event_constraints) {
 		/*
 		 * event on fixed counter2 (REF_CYCLES) only works on this

@@ -642,8 +642,8 @@ int intel_pmu_drain_bts_buffer(void)
 	rcu_read_lock();
 	perf_prepare_sample(&header, &data, event, &regs);

-	if (perf_output_begin(&handle, event, header.size *
-			      (top - base - skip)))
+	if (perf_output_begin(&handle, &data, event,
+			      header.size * (top - base - skip)))
 		goto unlock;

 	for (at = base; at < top; at++) {
@@ -670,7 +670,9 @@ unlock:

 static inline void intel_pmu_drain_pebs_buffer(void)
 {
-	x86_pmu.drain_pebs(NULL);
+	struct perf_sample_data data;
+
+	x86_pmu.drain_pebs(NULL, &data);
 }

 /*
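
The extra parameter threaded through these hunks suggests the rationale:
struct perf_sample_data is large, so the drain path now allocates one
instance and passes it down instead of each callee keeping its own on the
stack. A minimal sketch of the calling pattern (assumed, not from the
patch):

	static void example_drain(struct pt_regs *iregs)
	{
		struct perf_sample_data data;	/* one instance per drain */

		x86_pmu.drain_pebs(iregs, &data);
	}
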
@@ -1719,8 +1721,10 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
 	return 0;
 }

-static void __intel_pmu_pebs_event(struct perf_event *event,
+static __always_inline void
+__intel_pmu_pebs_event(struct perf_event *event,
 				   struct pt_regs *iregs,
+				   struct perf_sample_data *data,
 				   void *base, void *top,
 				   int bit, int count,
 				   void (*setup_sample)(struct perf_event *,
@@ -1731,11 +1735,10 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	struct perf_sample_data data;
 	struct x86_perf_regs perf_regs;
 	struct pt_regs *regs = &perf_regs.regs;
 	void *at = get_next_pebs_record_by_bit(base, top, bit);
-	struct pt_regs dummy_iregs;
+	static struct pt_regs dummy_iregs;

 	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
 		/*
@ -1752,14 +1755,14 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
|
|||
iregs = &dummy_iregs;
|
||||
|
||||
while (count > 1) {
|
||||
setup_sample(event, iregs, at, &data, regs);
|
||||
perf_event_output(event, &data, regs);
|
||||
setup_sample(event, iregs, at, data, regs);
|
||||
perf_event_output(event, data, regs);
|
||||
at += cpuc->pebs_record_size;
|
||||
at = get_next_pebs_record_by_bit(at, top, bit);
|
||||
count--;
|
||||
}
|
||||
|
||||
setup_sample(event, iregs, at, &data, regs);
|
||||
setup_sample(event, iregs, at, data, regs);
|
||||
if (iregs == &dummy_iregs) {
|
||||
/*
|
||||
* The PEBS records may be drained in the non-overflow context,
|
||||
|
@ -1767,18 +1770,18 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
|
|||
* last record the same as other PEBS records, and doesn't
|
||||
* invoke the generic overflow handler.
|
||||
*/
|
||||
perf_event_output(event, &data, regs);
|
||||
perf_event_output(event, data, regs);
|
||||
} else {
|
||||
/*
|
||||
* All but the last records are processed.
|
||||
* The last one is left to be able to call the overflow handler.
|
||||
*/
|
||||
if (perf_event_overflow(event, &data, regs))
|
||||
if (perf_event_overflow(event, data, regs))
|
||||
x86_pmu_stop(event, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
|
||||
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
|
||||
{
|
||||
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
struct debug_store *ds = cpuc->ds;
|
||||
|
@ -1812,7 +1815,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
|
|||
return;
|
||||
}
|
||||
|
||||
__intel_pmu_pebs_event(event, iregs, at, top, 0, n,
|
||||
__intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
|
||||
setup_pebs_fixed_sample_data);
|
||||
}
|
||||
|
||||
|
@ -1835,7 +1838,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int
|
|||
}
|
||||
}
|
||||
|
||||
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
|
||||
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
|
||||
{
|
||||
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
struct debug_store *ds = cpuc->ds;
|
||||
|
@ -1942,14 +1945,14 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
|
|||
}
|
||||
|
||||
if (counts[bit]) {
|
||||
__intel_pmu_pebs_event(event, iregs, base,
|
||||
__intel_pmu_pebs_event(event, iregs, data, base,
|
||||
top, bit, counts[bit],
|
||||
setup_pebs_fixed_sample_data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
|
||||
static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
|
||||
{
|
||||
short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
|
||||
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
|
@ -1997,7 +2000,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
|
|||
if (WARN_ON_ONCE(!event->attr.precise_ip))
|
||||
continue;
|
||||
|
||||
__intel_pmu_pebs_event(event, iregs, base,
|
||||
__intel_pmu_pebs_event(event, iregs, data, base,
|
||||
top, bit, counts[bit],
|
||||
setup_pebs_adaptive_sample_data);
|
||||
}
|
||||
|
|
|
@@ -475,7 +475,7 @@ enum perf_snb_uncore_imc_freerunning_types {
 static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
							0x0, 0x0, 1, 32 },
-	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
+	[SNB_PCI_UNCORE_IMC_DATA_WRITES]	= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_GT_REQUESTS]	= { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },

@@ -585,6 +585,7 @@ union perf_capabilities {
		u64	pebs_baseline:1;
		u64	perf_metrics:1;
		u64	pebs_output_pt_available:1;
+		u64	anythread_deprecated:1;
	};
	u64	capabilities;
 };
@@ -727,7 +728,7 @@ struct x86_pmu {
	int		pebs_record_size;
	int		pebs_buffer_size;
	int		max_pebs_events;
-	void		(*drain_pebs)(struct pt_regs *regs);
+	void		(*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	unsigned long	large_pebs_flags;

@@ -639,6 +639,7 @@ struct kvm_vcpu_arch {
	int cpuid_nent;
	struct kvm_cpuid_entry2 *cpuid_entries;

+	unsigned long cr3_lm_rsvd_bits;
	int maxphyaddr;
	int max_tdp_level;

@@ -137,7 +137,9 @@ union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
-		unsigned int reserved:19;
+		unsigned int reserved1:2;
+		unsigned int anythread_deprecated:1;
+		unsigned int reserved2:16;
	} split;
	unsigned int full;
 };

@@ -2,14 +2,8 @@
 #ifndef _ASM_X86_UV_UV_H
 #define _ASM_X86_UV_UV_H

-#include <asm/tlbflush.h>
-
 enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC};

-struct cpumask;
-struct mm_struct;
-struct flush_tlb_info;
-
 #ifdef CONFIG_X86_UV
 #include <linux/efi.h>

@@ -44,10 +38,6 @@ static inline int is_uv_system(void)	{ return 0; }
 static inline int is_uv_hubbed(int uv)	{ return 0; }
 static inline void uv_cpu_init(void)	{ }
 static inline void uv_system_init(void)	{ }
-static inline const struct cpumask *
-uv_flush_tlb_others(const struct cpumask *cpumask,
-		    const struct flush_tlb_info *info)
-{ return cpumask; }

 #endif	/* X86_UV */

@@ -33,7 +33,7 @@ static union uvh_apicid uvh_apicid;
 static int uv_node_id;

 /* Unpack AT/OEM/TABLE ID's to be NULL terminated strings */
-static u8 uv_archtype[UV_AT_SIZE];
+static u8 uv_archtype[UV_AT_SIZE + 1];
 static u8 oem_id[ACPI_OEM_ID_SIZE + 1];
 static u8 oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
@@ -320,7 +320,7 @@ static int __init decode_arch_type(unsigned long ptr)

	if (n > 0 && n < sizeof(uv_ate->archtype)) {
		pr_info("UV: UVarchtype received from BIOS\n");
-		uv_stringify(UV_AT_SIZE, uv_archtype, uv_ate->archtype);
+		uv_stringify(sizeof(uv_archtype), uv_archtype, uv_ate->archtype);
		return 1;
	}
	return 0;
@@ -378,7 +378,7 @@ static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id)
	if (!early_get_arch_type())

		/* If not use OEM ID for UVarchtype */
-		uv_stringify(UV_AT_SIZE, uv_archtype, _oem_id);
+		uv_stringify(sizeof(uv_archtype), uv_archtype, oem_id);

	/* Check if not hubbed */
	if (strncmp(uv_archtype, "SGI", 3) != 0) {

@@ -101,8 +101,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }

 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
@@ -129,12 +128,20 @@ u64 perf_reg_abi(struct task_struct *task)
	return PERF_SAMPLE_REGS_ABI_64;
 }

+static DEFINE_PER_CPU(struct pt_regs, nmi_user_regs);
+
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
+	struct pt_regs *regs_user_copy = this_cpu_ptr(&nmi_user_regs);
	struct pt_regs *user_regs = task_pt_regs(current);

+	if (!in_nmi()) {
+		regs_user->regs = user_regs;
+		regs_user->abi = perf_reg_abi(current);
+		return;
+	}
+
	/*
	 * If we're in an NMI that interrupted task_pt_regs setup, then
	 * we can't sample user regs at all. This check isn't really

@@ -90,6 +90,20 @@ static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
	return 0;
 }

+void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
+
+	/*
+	 * save the feature bitmap to avoid cpuid lookup for every PV
+	 * operation
+	 */
+	if (best)
+		vcpu->arch.pv_cpuid.features = best->eax;
+}
+
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
 {
	struct kvm_cpuid_entry2 *best;
@@ -124,13 +138,6 @@ void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
		    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
			best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

-	/*
-	 * save the feature bitmap to avoid cpuid lookup for every PV
-	 * operation
-	 */
-	if (best)
-		vcpu->arch.pv_cpuid.features = best->eax;
-
	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
		if (best)
@@ -162,6 +169,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) & supported_xcr0;

+	kvm_update_pv_runtime(vcpu);
+
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	kvm_mmu_reset_context(vcpu);

@@ -169,6 +178,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
	vcpu->arch.cr4_guest_rsvd_bits =
	    __cr4_reserved_bits(guest_cpuid_has, vcpu);

+	vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
+
	/* Invoke the vendor callback only after the above state is updated. */
	kvm_x86_ops.vcpu_after_set_cpuid(vcpu);
 }
@@ -672,7 +683,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)

		edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
		edx.split.bit_width_fixed = cap.bit_width_fixed;
-		edx.split.reserved = 0;
+		edx.split.anythread_deprecated = 1;
+		edx.split.reserved1 = 0;
+		edx.split.reserved2 = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;

@@ -11,6 +11,7 @@ extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
 void kvm_set_cpu_caps(void);

 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
+void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,

@@ -4046,6 +4046,12 @@ static int em_clflush(struct x86_emulate_ctxt *ctxt)
	return X86EMUL_CONTINUE;
 }

+static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
+{
+	/* emulating clflushopt regardless of cpuid */
+	return X86EMUL_CONTINUE;
+}
+
 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
 {
	ctxt->dst.val = (s32) ctxt->src.val;
@@ -4585,7 +4591,7 @@ static const struct opcode group11[] = {
 };

 static const struct gprefix pfx_0f_ae_7 = {
-	I(SrcMem | ByteOp, em_clflush), N, N, N,
+	I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
 };

 static const struct group_dual group15 = { {

@@ -856,13 +856,15 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
	} else {
		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
-		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
-			desc = desc->more;
+		while (desc->sptes[PTE_LIST_EXT-1]) {
			count += PTE_LIST_EXT;
-		}
-		if (desc->sptes[PTE_LIST_EXT-1]) {
-			desc->more = mmu_alloc_pte_list_desc(vcpu);
-			desc = desc->more;
+
+			if (!desc->more) {
+				desc->more = mmu_alloc_pte_list_desc(vcpu);
+				desc = desc->more;
+				break;
+			}
+			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; ++i)
			++count;

@@ -49,7 +49,14 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
 {
	struct kvm_mmu_page *sp;

+	if (!kvm->arch.tdp_mmu_enabled)
+		return false;
+	if (WARN_ON(!VALID_PAGE(hpa)))
+		return false;
+
	sp = to_shadow_page(hpa);
+	if (WARN_ON(!sp))
+		return false;

	return sp->tdp_mmu_page && sp->root_count;
 }

@@ -3741,6 +3741,7 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
	struct vcpu_svm *svm = to_svm(vcpu);
+	struct kvm_cpuid_entry2 *best;

	vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
				    boot_cpu_has(X86_FEATURE_XSAVE) &&
@@ -3753,6 +3754,13 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
	/* Check again if INVPCID interception if required */
	svm_check_invpcid(svm);

+	/* For sev guests, the memory encryption bit is not reserved in CR3. */
+	if (sev_guest(vcpu->kvm)) {
+		best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
+		if (best)
+			vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f));
+	}
+
	if (!kvm_vcpu_apicv_active(vcpu))
		return;

@@ -255,10 +255,9 @@ static struct kmem_cache *x86_emulator_cache;

 /*
  * When called, it means the previous get/set msr reached an invalid msr.
- * Return 0 if we want to ignore/silent this failed msr access, or 1 if we want
- * to fail the caller.
+ * Return true if we want to ignore/silent this failed msr access.
  */
-static int kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
+static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
				  u64 data, bool write)
 {
	const char *op = write ? "wrmsr" : "rdmsr";
@@ -268,11 +267,11 @@ static int kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
			kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		/* Mask the error */
-		return 0;
+		return true;
	} else {
		kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
				      op, msr, data);
-		return -ENOENT;
+		return false;
	}
 }

@@ -1042,7 +1041,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
	}

	if (is_long_mode(vcpu) &&
-	    (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
+	    (cr3 & vcpu->arch.cr3_lm_rsvd_bits))
		return 1;
	else if (is_pae_paging(vcpu) &&
		 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
@@ -1416,7 +1415,8 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
		if (r == KVM_MSR_RET_INVALID) {
			/* Unconditionally clear the output for simplicity */
			*data = 0;
-			r = kvm_msr_ignored_check(vcpu, index, 0, false);
+			if (kvm_msr_ignored_check(vcpu, index, 0, false))
+				r = 0;
		}

	if (r)
@@ -1540,7 +1540,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
	struct msr_data msr;

	if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
-		return -EPERM;
+		return KVM_MSR_RET_FILTERED;

	switch (index) {
	case MSR_FS_BASE:
@@ -1581,7 +1581,8 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
	int ret = __kvm_set_msr(vcpu, index, data, host_initiated);

	if (ret == KVM_MSR_RET_INVALID)
-		ret = kvm_msr_ignored_check(vcpu, index, data, true);
+		if (kvm_msr_ignored_check(vcpu, index, data, true))
+			ret = 0;

	return ret;
 }
@@ -1599,7 +1600,7 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
	int ret;

	if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
-		return -EPERM;
+		return KVM_MSR_RET_FILTERED;

	msr.index = index;
	msr.host_initiated = host_initiated;
@@ -1618,7 +1619,8 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
	if (ret == KVM_MSR_RET_INVALID) {
		/* Unconditionally clear *data for simplicity */
		*data = 0;
-		ret = kvm_msr_ignored_check(vcpu, index, 0, false);
+		if (kvm_msr_ignored_check(vcpu, index, 0, false))
+			ret = 0;
	}

	return ret;
@@ -1662,9 +1664,9 @@ static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu)
 static u64 kvm_msr_reason(int r)
 {
	switch (r) {
-	case -ENOENT:
+	case KVM_MSR_RET_INVALID:
		return KVM_MSR_EXIT_REASON_UNKNOWN;
-	case -EPERM:
+	case KVM_MSR_RET_FILTERED:
		return KVM_MSR_EXIT_REASON_FILTER;
	default:
		return KVM_MSR_EXIT_REASON_INVAL;
@@ -1965,7 +1967,7 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
	struct kvm_arch *ka = &vcpu->kvm->arch;

	if (vcpu->vcpu_id == 0 && !host_initiated) {
-		if (ka->boot_vcpu_runs_old_kvmclock && old_msr)
+		if (ka->boot_vcpu_runs_old_kvmclock != old_msr)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);

		ka->boot_vcpu_runs_old_kvmclock = old_msr;
@@ -3063,7 +3065,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
-		}
+		} else if (report_ignored_msrs)
			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
				    __func__, data);
		break;
@@ -3463,29 +3465,63 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		msr_info->data = vcpu->arch.efer;
		break;
	case MSR_KVM_WALL_CLOCK:
+		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+			return 1;
+
+		msr_info->data = vcpu->kvm->arch.wall_clock;
+		break;
	case MSR_KVM_WALL_CLOCK_NEW:
+		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+			return 1;
+
		msr_info->data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
+		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+			return 1;
+
+		msr_info->data = vcpu->arch.time;
+		break;
	case MSR_KVM_SYSTEM_TIME_NEW:
+		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+			return 1;
+
		msr_info->data = vcpu->arch.time;
		break;
	case MSR_KVM_ASYNC_PF_EN:
+		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+			return 1;
+
		msr_info->data = vcpu->arch.apf.msr_en_val;
		break;
	case MSR_KVM_ASYNC_PF_INT:
+		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
+			return 1;
+
		msr_info->data = vcpu->arch.apf.msr_int_val;
		break;
	case MSR_KVM_ASYNC_PF_ACK:
+		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+			return 1;
+
		msr_info->data = 0;
		break;
	case MSR_KVM_STEAL_TIME:
+		if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
+			return 1;
+
		msr_info->data = vcpu->arch.st.msr_val;
		break;
	case MSR_KVM_PV_EOI_EN:
+		if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
+			return 1;
+
		msr_info->data = vcpu->arch.pv_eoi.msr_val;
		break;
	case MSR_KVM_POLL_CONTROL:
+		if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
+			return 1;
+
		msr_info->data = vcpu->arch.msr_kvm_poll_control;
		break;
	case MSR_IA32_P5_MC_ADDR:
@@ -4575,6 +4611,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,

	case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
		vcpu->arch.pv_cpuid.enforce = cap->args[0];
+		if (vcpu->arch.pv_cpuid.enforce)
+			kvm_update_pv_runtime(vcpu);

		return 0;

@@ -376,7 +376,13 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

-#define  KVM_MSR_RET_INVALID  2
+/*
+ * Internal error codes that are used to indicate that MSR emulation encountered
+ * an error that should result in #GP in the guest, unless userspace
+ * handles it.
+ */
+#define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
+#define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

 #define __cr4_reserved_bits(__cpu_has, __c)             \
 ({                                                      \

@@ -49,7 +49,7 @@ static void disk_release_events(struct gendisk *disk);
  * Set disk capacity and notify if the size is not currently
  * zero and will not be set to zero
  */
-void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
+bool set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
					bool update_bdev)
 {
	sector_t capacity = get_capacity(disk);
@@ -62,7 +62,10 @@ void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
		char *envp[] = { "RESIZE=1", NULL };

		kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
+		return true;
	}
+
+	return false;
 }

 EXPORT_SYMBOL_GPL(set_capacity_revalidate_and_notify);

@@ -357,7 +357,6 @@ static void speakup_cut(struct vc_data *vc)
	mark_cut_flag = 0;
	synth_printf("%s\n", spk_msg_get(MSG_CUT));

-	speakup_clear_selection();
	ret = speakup_set_selection(tty);

	switch (ret) {

@@ -22,13 +22,6 @@ struct speakup_selection_work {
	struct tty_struct *tty;
 };

-void speakup_clear_selection(void)
-{
-	console_lock();
-	clear_selection();
-	console_unlock();
-}
-
 static void __speakup_set_selection(struct work_struct *work)
 {
	struct speakup_selection_work *ssw =
@@ -51,6 +44,10 @@ static void __speakup_set_selection(struct work_struct *work)
		goto unref;
	}

+	console_lock();
+	clear_selection();
+	console_unlock();
+
	set_selection_kernel(&sel, tty);

 unref:

@@ -70,7 +70,6 @@ void spk_do_flush(void);
 void speakup_start_ttys(void);
 void synth_buffer_add(u16 ch);
 void synth_buffer_clear(void);
-void speakup_clear_selection(void);
 int speakup_set_selection(struct tty_struct *tty);
 void speakup_cancel_selection(void);
 int speakup_paste_selection(struct tty_struct *tty);

@@ -298,9 +298,11 @@ static unsigned char ttyio_in(int timeout)
	struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data;
	char rv;

-	if (wait_for_completion_timeout(&ldisc_data->completion,
+	if (!timeout) {
+		if (!try_wait_for_completion(&ldisc_data->completion))
+			return 0xff;
+	} else if (wait_for_completion_timeout(&ldisc_data->completion,
				usecs_to_jiffies(timeout)) == 0) {
-		if (timeout)
		pr_warn("spk_ttyio: timeout (%d) while waiting for input\n",
			timeout);
		return 0xff;

@@ -32,6 +32,10 @@ enum {
	E_NEW_DEFAULT,
 };

+/*
+ * Note: add new members at the end, speakupmap.h depends on the values of the
+ * enum starting from SPELL_DELAY (see inc_dec_var)
+ */
 enum var_id_t {
	VERSION = 0, SYNTH, SILENT, SYNTH_DIRECT,
	KEYMAP, CHARS,
@@ -42,9 +46,9 @@ enum var_id_t {
	SAY_CONTROL, SAY_WORD_CTL, NO_INTERRUPT, KEY_ECHO,
	SPELL_DELAY, PUNC_LEVEL, READING_PUNC,
	ATTRIB_BLEEP, BLEEPS,
-	RATE, PITCH, INFLECTION, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG,
+	RATE, PITCH, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG,
	DIRECT, PAUSE,
-	CAPS_START, CAPS_STOP, CHARTAB,
+	CAPS_START, CAPS_STOP, CHARTAB, INFLECTION,
	MAXVARS
 };

@@ -89,7 +89,18 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "E2215T"),
+		},
+		.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+	},
+	{
+		/*
+		 * Medion Akoya E2228T, notification of the LID device only
+		 * happens on close, not on open and _LID always returns closed.
+		 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "E2228T"),
		},
		.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
	},

@@ -106,6 +106,7 @@ static int pch_fivr_remove(struct platform_device *pdev)

 static const struct acpi_device_id pch_fivr_device_ids[] = {
	{"INTC1045", 0},
+	{"INTC1049", 0},
	{"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);

@@ -229,6 +229,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
	{"INT3532", 0},
	{"INTC1047", 0},
	{"INTC1050", 0},
+	{"INTC1060", 0},
+	{"INTC1061", 0},
	{"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, int3407_device_ids);

@@ -25,10 +25,16 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
	{"INT340A"},
	{"INT340B"},
	{"INTC1040"},
+	{"INTC1041"},
	{"INTC1043"},
	{"INTC1044"},
	{"INTC1045"},
+	{"INTC1046"},
	{"INTC1047"},
+	{"INTC1048"},
+	{"INTC1049"},
+	{"INTC1060"},
+	{"INTC1061"},
	{""},
 };

@@ -101,7 +101,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,

	switch (gsi) {
	case 0 ... 255:
-		sprintf(ev_name, "_%c%02hhX",
+		sprintf(ev_name, "_%c%02X",
			trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);

		if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))

@@ -27,6 +27,7 @@ static const struct acpi_device_id fan_device_ids[] = {
	{"PNP0C0B", 0},
	{"INT3404", 0},
	{"INTC1044", 0},
+	{"INTC1048", 0},
	{"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, fan_device_ids);

@@ -24,9 +24,9 @@ enum acpi_sbs_device_addr {
 typedef void (*smbus_alarm_callback)(void *context);

 extern int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address,
-			   u8 command, u8 * data);
+			   u8 command, u8 *data);
 extern int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 slave_address,
-			    u8 command, u8 * data, u8 length);
+			    u8 command, u8 *data, u8 length);
 extern int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
					smbus_alarm_callback callback, void *context);
 extern int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc);

@@ -1453,7 +1453,7 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
 }

 /**
- * acpi_dma_configure - Set-up DMA configuration for the device.
+ * acpi_dma_configure_id - Set-up DMA configuration for the device.
  * @dev: The pointer to the device
  * @attr: device dma attributes
  * @input_id: input device id const value pointer

@@ -255,7 +255,8 @@ static void loop_set_size(struct loop_device *lo, loff_t size)

	bd_set_nr_sectors(bdev, size);

-	set_capacity_revalidate_and_notify(lo->lo_disk, size, false);
+	if (!set_capacity_revalidate_and_notify(lo->lo_disk, size, false))
+		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 }

 static inline int

@@ -1518,6 +1518,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
			bdev->bd_openers == 0)
		nbd_disconnect_and_put(nbd);
+	bdput(bdev);

	nbd_config_put(nbd);
	nbd_put(nbd);

@@ -435,12 +435,12 @@ static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size
		/*
		 * Allocate DMA memory from ancestor. When a virtio
		 * device is created by remoteproc, the DMA memory is
-		 * associated with the grandparent device:
-		 * vdev => rproc => platform-dev.
+		 * associated with the parent device:
+		 * virtioY => remoteprocX#vdevYbuffer.
		 */
-		if (!vdev->dev.parent || !vdev->dev.parent->parent)
+		buf->dev = vdev->dev.parent;
+		if (!buf->dev)
			goto free_buf;
-		buf->dev = vdev->dev.parent->parent;

		/* Increase device refcnt to avoid freeing it */
		get_device(buf->dev);

@@ -443,9 +443,9 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
	hws[IMX8MM_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mm_a53_core_sels, ARRAY_SIZE(imx8mm_a53_core_sels));

	/* BUS */
-	hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
+	hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
	hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
-	hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
+	hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
	hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
	hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
	hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
@@ -453,11 +453,11 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
	hws[IMX8MM_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
	hws[IMX8MM_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
	hws[IMX8MM_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
-	hws[IMX8MM_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
-	hws[IMX8MM_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
+	hws[IMX8MM_CLK_NOC] = imx8m_clk_hw_composite_bus_critical("noc", imx8mm_noc_sels, base + 0x8d00);
+	hws[IMX8MM_CLK_NOC_APB] = imx8m_clk_hw_composite_bus_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);

	/* AHB */
-	hws[IMX8MM_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
+	hws[IMX8MM_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
	hws[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);

	/* IPG */

@@ -431,7 +431,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
	hws[IMX8MN_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mn_a53_core_sels, ARRAY_SIZE(imx8mn_a53_core_sels));

	/* BUS */
-	hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
+	hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
	hws[IMX8MN_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
	hws[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
	hws[IMX8MN_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
@@ -439,9 +439,9 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
	hws[IMX8MN_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
	hws[IMX8MN_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
	hws[IMX8MN_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
-	hws[IMX8MN_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00);
+	hws[IMX8MN_CLK_NOC] = imx8m_clk_hw_composite_bus_critical("noc", imx8mn_noc_sels, base + 0x8d00);

-	hws[IMX8MN_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
+	hws[IMX8MN_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
	hws[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
	hws[IMX8MN_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
	hws[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);

@@ -557,9 +557,9 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
	/* CORE SEL */
	hws[IMX8MP_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", ccm_base + 0x9880, 24, 1, imx8mp_a53_core_sels, ARRAY_SIZE(imx8mp_a53_core_sels));

-	hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
+	hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
	hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
-	hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
+	hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
	hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980);
	hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite_bus("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
	hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite_bus("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
@@ -567,12 +567,12 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
	hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite_bus("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80);
	hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00);
	hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80);
-	hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00);
-	hws[IMX8MP_CLK_NOC_IO] = imx8m_clk_hw_composite_critical("noc_io", imx8mp_noc_io_sels, ccm_base + 0x8d80);
+	hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_bus_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00);
+	hws[IMX8MP_CLK_NOC_IO] = imx8m_clk_hw_composite_bus_critical("noc_io", imx8mp_noc_io_sels, ccm_base + 0x8d80);
	hws[IMX8MP_CLK_ML_AXI] = imx8m_clk_hw_composite_bus("ml_axi", imx8mp_ml_axi_sels, ccm_base + 0x8e00);
	hws[IMX8MP_CLK_ML_AHB] = imx8m_clk_hw_composite_bus("ml_ahb", imx8mp_ml_ahb_sels, ccm_base + 0x8e80);

-	hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
+	hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
	hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
	hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);

@@ -431,7 +431,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
	hws[IMX8MQ_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mq_a53_core_sels, ARRAY_SIZE(imx8mq_a53_core_sels));

	/* BUS */
-	hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
+	hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
	hws[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
	hws[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
	hws[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
@@ -441,12 +441,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
	hws[IMX8MQ_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
	hws[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
	hws[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
-	hws[IMX8MQ_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00);
-	hws[IMX8MQ_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
+	hws[IMX8MQ_CLK_NOC] = imx8m_clk_hw_composite_bus_critical("noc", imx8mq_noc_sels, base + 0x8d00);
+	hws[IMX8MQ_CLK_NOC_APB] = imx8m_clk_hw_composite_bus_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);

	/* AHB */
	/* AHB clock is used by the AHB bus therefore marked as critical */
-	hws[IMX8MQ_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
+	hws[IMX8MQ_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
	hws[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);

	/* IPG */

@@ -549,6 +549,11 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
			IMX_COMPOSITE_BUS, \
			CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)

+#define imx8m_clk_hw_composite_bus_critical(name, parent_names, reg)	\
+	imx8m_clk_hw_composite_flags(name, parent_names, ARRAY_SIZE(parent_names), reg, \
+			IMX_COMPOSITE_BUS, \
+			CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE | CLK_IS_CRITICAL)
+
 #define imx8m_clk_hw_composite_core(name, parent_names, reg)	\
	imx8m_clk_hw_composite_flags(name, parent_names, \
			ARRAY_SIZE(parent_names), reg, \

@@ -26,7 +26,10 @@ struct clk_regmap {
	void		*data;
 };

-#define to_clk_regmap(_hw) container_of(_hw, struct clk_regmap, hw)
+static inline struct clk_regmap *to_clk_regmap(struct clk_hw *hw)
+{
+	return container_of(hw, struct clk_regmap, hw);
+}

 /**
  * struct clk_regmap_gate_data - regmap backed gate specific data

@@ -24,7 +24,11 @@ struct clk_regmap {
	unsigned int enable_mask;
	bool enable_is_inverted;
 };
-#define to_clk_regmap(_hw) container_of(_hw, struct clk_regmap, hw)
+
+static inline struct clk_regmap *to_clk_regmap(struct clk_hw *hw)
+{
+	return container_of(hw, struct clk_regmap, hw);
+}

 int clk_is_enabled_regmap(struct clk_hw *hw);
 int clk_enable_regmap(struct clk_hw *hw);

@@ -2254,7 +2254,7 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
-	if (policy->governor->dynamic_switching &&
+	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

@@ -2280,6 +2280,8 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
		}
	}

+	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
+
	return 0;
 }

@@ -156,7 +156,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
 #define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_)			\
	{								\
		.name = _name_,						\
-		.dynamic_switching = true,				\
+		.flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,			\
		.owner = THIS_MODULE,					\
		.init = cpufreq_dbs_governor_init,			\
		.exit = cpufreq_dbs_governor_exit,			\

@@ -20,6 +20,7 @@ static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
 static struct cpufreq_governor cpufreq_gov_performance = {
	.name		= "performance",
	.owner		= THIS_MODULE,
+	.flags		= CPUFREQ_GOV_STRICT_TARGET,
	.limits		= cpufreq_gov_performance_limits,
 };

@@ -21,6 +21,7 @@ static struct cpufreq_governor cpufreq_gov_powersave = {
	.name		= "powersave",
	.limits		= cpufreq_gov_powersave_limits,
	.owner		= THIS_MODULE,
+	.flags		= CPUFREQ_GOV_STRICT_TARGET,
 };

 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");

@@ -2527,7 +2527,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
 }

 static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
-				     bool fast_switch)
+				     bool strict, bool fast_switch)
 {
	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;

@@ -2539,7 +2539,7 @@ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
	 * field in it, so opportunistically update the max too if needed.
	 */
	value &= ~HWP_MAX_PERF(~0L);
-	value |= HWP_MAX_PERF(cpu->max_perf_ratio);
+	value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);

	if (value == prev)
		return;
@@ -2562,14 +2562,16 @@ static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
			    pstate_funcs.get_val(cpu, target_pstate));
 }

-static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
-				       bool fast_switch)
+static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
+				       int target_pstate, bool fast_switch)
 {
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int old_pstate = cpu->pstate.current_pstate;

	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (hwp_active) {
-		intel_cpufreq_adjust_hwp(cpu, target_pstate, fast_switch);
+		intel_cpufreq_adjust_hwp(cpu, target_pstate,
+					 policy->strict_target, fast_switch);
		cpu->pstate.current_pstate = target_pstate;
	} else if (target_pstate != old_pstate) {
		intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
@@ -2609,7 +2611,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
		break;
	}

-	target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
+	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);

	freqs.new = target_pstate * cpu->pstate.scaling;

@@ -2628,7 +2630,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,

	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);

-	target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
+	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);

	return target_pstate * cpu->pstate.scaling;
 }

@@ -147,6 +147,9 @@ static int zynqmp_pm_feature(u32 api_id)
		return 0;

	/* Return value if feature is already checked */
+	if (api_id > ARRAY_SIZE(zynqmp_pm_features))
+		return PM_FEATURE_INVALID;
+
	if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
		return zynqmp_pm_features[api_id];

@@ -1114,6 +1114,7 @@ static const struct aspeed_gpio_config ast2500_config =

 static const struct aspeed_bank_props ast2600_bank_props[] = {
	/*     input	  output   */
+	{4, 0xffffffff, 0x00ffffff}, /* Q/R/S/T */
	{5, 0xffffffff, 0xffffff00}, /* U/V/W/X */
	{6, 0x0000ffff, 0x0000ffff}, /* Y/Z */
	{ },

@@ -343,8 +343,8 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
 #ifdef CONFIG_PM_SLEEP
 static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
 {
-	struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
-	struct dwapb_gpio *gpio = igc->private;
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
	struct dwapb_context *ctx = gpio->ports[0].ctx;
	irq_hw_number_t bit = irqd_to_hwirq(d);

@@ -1114,13 +1114,23 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
 {
	struct device *dev = bank->chip.parent;
	void __iomem *base = bank->base;
-	u32 nowake;
+	u32 mask, nowake;

	bank->saved_datain = readl_relaxed(base + bank->regs->datain);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

+	/* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
+	mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
+	mask &= ~bank->context.risingdetect;
+	bank->saved_datain |= mask;
+
+	/* Check for pending EDGE_RISING, ignore EDGE_BOTH */
+	mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
+	mask &= ~bank->context.fallingdetect;
+	bank->saved_datain &= ~mask;
+
	if (!may_lose_context)
		goto update_gpio_context_count;

@@ -28,6 +28,47 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>

+/*
+ * PLX PEX8311 PCI LCS_INTCSR Interrupt Control/Status
+ *
+ * Bit: Description
+ *   0: Enable Interrupt Sources (Bit 0)
+ *   1: Enable Interrupt Sources (Bit 1)
+ *   2: Generate Internal PCI Bus Internal SERR# Interrupt
+ *   3: Mailbox Interrupt Enable
+ *   4: Power Management Interrupt Enable
+ *   5: Power Management Interrupt
+ *   6: Slave Read Local Data Parity Check Error Enable
+ *   7: Slave Read Local Data Parity Check Error Status
+ *   8: Internal PCI Wire Interrupt Enable
+ *   9: PCI Express Doorbell Interrupt Enable
+ *  10: PCI Abort Interrupt Enable
+ *  11: Local Interrupt Input Enable
+ *  12: Retry Abort Enable
+ *  13: PCI Express Doorbell Interrupt Active
+ *  14: PCI Abort Interrupt Active
+ *  15: Local Interrupt Input Active
+ *  16: Local Interrupt Output Enable
+ *  17: Local Doorbell Interrupt Enable
+ *  18: DMA Channel 0 Interrupt Enable
+ *  19: DMA Channel 1 Interrupt Enable
+ *  20: Local Doorbell Interrupt Active
+ *  21: DMA Channel 0 Interrupt Active
+ *  22: DMA Channel 1 Interrupt Active
+ *  23: Built-In Self-Test (BIST) Interrupt Active
+ *  24: Direct Master was the Bus Master during a Master or Target Abort
+ *  25: DMA Channel 0 was the Bus Master during a Master or Target Abort
+ *  26: DMA Channel 1 was the Bus Master during a Master or Target Abort
+ *  27: Target Abort after internal 256 consecutive Master Retrys
+ *  28: PCI Bus wrote data to LCS_MBOX0
+ *  29: PCI Bus wrote data to LCS_MBOX1
+ *  30: PCI Bus wrote data to LCS_MBOX2
+ *  31: PCI Bus wrote data to LCS_MBOX3
+ */
+#define PLX_PEX8311_PCI_LCS_INTCSR  0x68
+#define INTCSR_INTERNAL_PCI_WIRE    BIT(8)
+#define INTCSR_LOCAL_INPUT          BIT(11)
+
 /**
  * struct idio_24_gpio_reg - GPIO device registers structure
  * @out0_7:	Read: FET Outputs 0-7
@@ -92,6 +133,7 @@ struct idio_24_gpio_reg {
 struct idio_24_gpio {
	struct gpio_chip chip;
	raw_spinlock_t lock;
+	__u8 __iomem *plx;
	struct idio_24_gpio_reg __iomem *reg;
	unsigned long irq_mask;
 };
@@ -334,13 +376,13 @@ static void idio_24_irq_mask(struct irq_data *data)
	unsigned long flags;
	const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
	unsigned char new_irq_mask;
-	const unsigned long bank_offset = bit_offset/8 * 8;
+	const unsigned long bank_offset = bit_offset / 8;
	unsigned char cos_enable_state;

	raw_spin_lock_irqsave(&idio24gpio->lock, flags);

-	idio24gpio->irq_mask &= BIT(bit_offset);
-	new_irq_mask = idio24gpio->irq_mask >> bank_offset;
+	idio24gpio->irq_mask &= ~BIT(bit_offset);
+	new_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;

	if (!new_irq_mask) {
		cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
@@ -363,12 +405,12 @@ static void idio_24_irq_unmask(struct irq_data *data)
	unsigned long flags;
	unsigned char prev_irq_mask;
	const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
-	const unsigned long bank_offset = bit_offset/8 * 8;
+	const unsigned long bank_offset = bit_offset / 8;
	unsigned char cos_enable_state;

	raw_spin_lock_irqsave(&idio24gpio->lock, flags);

-	prev_irq_mask = idio24gpio->irq_mask >> bank_offset;
+	prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
	idio24gpio->irq_mask |= BIT(bit_offset);

	if (!prev_irq_mask) {
@@ -455,6 +497,7 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	struct device *const dev = &pdev->dev;
	struct idio_24_gpio *idio24gpio;
	int err;
+	const size_t pci_plx_bar_index = 1;
	const size_t pci_bar_index = 2;
	const char *const name = pci_name(pdev);
	struct gpio_irq_chip *girq;
@@ -469,12 +512,13 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		return err;
	}

-	err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
+	err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name);
	if (err) {
		dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
		return err;
	}

+	idio24gpio->plx = pcim_iomap_table(pdev)[pci_plx_bar_index];
	idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];

	idio24gpio->chip.label = name;
@@ -504,6 +548,12 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)

	/* Software board reset */
	iowrite8(0, &idio24gpio->reg->soft_reset);
+	/*
+	 * enable PLX PEX8311 internal PCI wire interrupt and local interrupt
+	 * input
+	 */
+	iowrite8((INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT) >> 8,
+		 idio24gpio->plx + PLX_PEX8311_PCI_LCS_INTCSR + 1);

	err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
	if (err) {

@@ -183,7 +183,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
		return PTR_ERR(chip->regs);

	ngpio = of_irq_count(node);
-	if (ngpio >= SIFIVE_GPIO_MAX) {
+	if (ngpio > SIFIVE_GPIO_MAX) {
		dev_err(dev, "Too many GPIO interrupts (max=%d)\n",
			SIFIVE_GPIO_MAX);
		return -ENXIO;

@@ -7,22 +7,7 @@

 struct gpio_device;

-#ifdef CONFIG_GPIO_CDEV
-
 int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt);
 void gpiolib_cdev_unregister(struct gpio_device *gdev);

-#else
-
-static inline int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
-{
-	return 0;
-}
-
-static inline void gpiolib_cdev_unregister(struct gpio_device *gdev)
-{
-}
-
-#endif /* CONFIG_GPIO_CDEV */
-
 #endif /* GPIOLIB_CDEV_H */

@@ -480,11 +480,23 @@ static void gpiodevice_release(struct device *dev)
	kfree(gdev);
 }

+#ifdef CONFIG_GPIO_CDEV
+#define gcdev_register(gdev, devt)	gpiolib_cdev_register((gdev), (devt))
+#define gcdev_unregister(gdev)		gpiolib_cdev_unregister((gdev))
+#else
+/*
+ * gpiolib_cdev_register() indirectly calls device_add(), which is still
+ * required even when cdev is not selected.
+ */
+#define gcdev_register(gdev, devt)	device_add(&(gdev)->dev)
+#define gcdev_unregister(gdev)		device_del(&(gdev)->dev)
+#endif
+
 static int gpiochip_setup_dev(struct gpio_device *gdev)
 {
	int ret;

-	ret = gpiolib_cdev_register(gdev, gpio_devt);
+	ret = gcdev_register(gdev, gpio_devt);
	if (ret)
		return ret;

@@ -500,7 +512,7 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
	return 0;

 err_remove_device:
-	gpiolib_cdev_unregister(gdev);
+	gcdev_unregister(gdev);
	return ret;
 }

@@ -825,7 +837,7 @@ void gpiochip_remove(struct gpio_chip *gc)
	 * be removed, else it will be dangling until the last user is
	 * gone.
	 */
-	gpiolib_cdev_unregister(gdev);
+	gcdev_unregister(gdev);
	put_device(&gdev->dev);
 }
 EXPORT_SYMBOL_GPL(gpiochip_remove);

@@ -492,8 +492,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
	if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
		amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
-	else if (amdgpu_device_has_dc_support(adev) &&
-		 !nv_is_headless_sku(adev->pdev))
+	else if (amdgpu_device_has_dc_support(adev))
		amdgpu_device_ip_block_add(adev, &dm_ip_block);
 #endif
	amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);

@@ -40,6 +40,7 @@
 MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
 MODULE_FIRMWARE("amdgpu/renoir_ta.bin");
 MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_ta.bin");

 /* address block */
 #define smnMP1_FIRMWARE_FLAGS 0x3010024