Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
include/linux/net.h
  a5ef058dc4 ("net: introduce and use custom sockopt socket flag")
  e993ffe3da ("net: flag sockets supporting msghdr originated zerocopy")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Commit 96917bb3a3
.mailmap | 4
@@ -104,6 +104,7 @@ Christoph Hellwig <hch@lst.de>
Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
Corey Minyard <minyard@acm.org>
Damian Hobson-Garcia <dhobsong@igel.co.jp>
Dan Carpenter <error27@gmail.com> <dan.carpenter@oracle.com>
Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
Daniel Borkmann <daniel@iogearbox.net> <danborkmann@iogearbox.net>
Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>

@@ -353,7 +354,8 @@ Peter Oruba <peter@oruba.de>
Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>

@@ -9,7 +9,6 @@ the Linux ACPI support.
   :maxdepth: 1

   initrd_table_override
   dsdt-override
   ssdt-overlays
   cppc_sysfs
   fan_performance_states
@@ -144,6 +144,42 @@ managing and controlling ublk devices with help of several control commands:

  For retrieving device info via ``ublksrv_ctrl_dev_info``. It is the server's
  responsibility to save IO target specific info in userspace.

- ``UBLK_CMD_START_USER_RECOVERY``

  This command is valid if the ``UBLK_F_USER_RECOVERY`` feature is enabled. It
  is accepted after the old process has exited, the ublk device is quiesced
  and ``/dev/ublkc*`` is released. The user should send this command before
  starting a new process which re-opens ``/dev/ublkc*``. When this command
  returns, the ublk device is ready for the new process.

- ``UBLK_CMD_END_USER_RECOVERY``

  This command is valid if the ``UBLK_F_USER_RECOVERY`` feature is enabled. It
  is accepted after the ublk device is quiesced and a new process has opened
  ``/dev/ublkc*`` and got all ublk queues ready. When this command returns,
  the ublk device is unquiesced and new I/O requests are passed to the new
  process.

- user recovery feature description

  Two new features are added for user recovery: ``UBLK_F_USER_RECOVERY`` and
  ``UBLK_F_USER_RECOVERY_REISSUE``.

  With ``UBLK_F_USER_RECOVERY`` set, after one ubq_daemon (the ublk server's
  io handler) is dying, ublk does not delete ``/dev/ublkb*`` during the whole
  recovery stage and the ublk device ID is kept. It is the ublk server's
  responsibility to recover the device context by its own knowledge.
  Requests which have not been issued to userspace are requeued. Requests
  which have been issued to userspace are aborted.

  With ``UBLK_F_USER_RECOVERY_REISSUE`` set, after one ubq_daemon (the ublk
  server's io handler) is dying, contrary to ``UBLK_F_USER_RECOVERY``,
  requests which have been issued to userspace are requeued and will be
  re-issued to the new process after handling ``UBLK_CMD_END_USER_RECOVERY``.
  ``UBLK_F_USER_RECOVERY_REISSUE`` is designed for backends that tolerate
  double writes, since the driver may issue the same I/O request twice. It
might be useful to a read-only FS or a VM backend.
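To make the recovery flow concrete, below is a minimal userspace sketch of
driving the two control commands. It is an assumption-laden illustration, not
part of the kernel or ublksrv API: it assumes liburing, a ring created with
IORING_SETUP_SQE128 (ublk control commands carry their payload in the extended
SQE area), and the UAPI definitions from <linux/ublk_cmd.h>; the helper name is
hypothetical and error handling is abbreviated.

    #include <errno.h>
    #include <string.h>
    #include <liburing.h>
    #include <linux/ublk_cmd.h>

    /* Hypothetical helper: issue one ublk control command and wait for it. */
    static int ublk_ctrl_cmd(struct io_uring *ring, int ctrl_fd,
                             unsigned int cmd_op, unsigned int dev_id)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        struct ublksrv_ctrl_cmd *cmd;
        int ret;

        if (!sqe)
            return -ENOMEM;

        /* SQE128 ring: zero the SQE header plus the extended command area. */
        memset(sqe, 0, 2 * sizeof(*sqe));
        sqe->opcode = IORING_OP_URING_CMD;
        sqe->fd = ctrl_fd;              /* open("/dev/ublk-control", O_RDWR) */
        sqe->cmd_op = cmd_op;

        cmd = (struct ublksrv_ctrl_cmd *)sqe->cmd;
        cmd->dev_id = dev_id;

        ret = io_uring_submit(ring);
        if (ret < 0)
            return ret;
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
            return ret;
        ret = cqe->res;
        io_uring_cqe_seen(ring, cqe);
        return ret;
    }

    /*
     * Sketch of the sequence for device 0:
     *   ublk_ctrl_cmd(&ring, ctrl_fd, UBLK_CMD_START_USER_RECOVERY, 0);
     *   ...new process re-opens /dev/ublkc0 and fetches all queue I/Os...
     *   ublk_ctrl_cmd(&ring, ctrl_fd, UBLK_CMD_END_USER_RECOVERY, 0);
     */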
Data plane
----------

@@ -1,9 +0,0 @@
-Dongwoon Anatech DW9714 camera voice coil lens driver
-
-DW9174 is a 10-bit DAC with current sink capability. It is intended
-for driving voice coil lenses in camera modules.
-
-Mandatory properties:
-
-- compatible: "dongwoon,dw9714"
-- reg: I²C slave address
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/dongwoon,dw9714.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Dongwoon Anatech DW9714 camera voice coil lens driver
+
+maintainers:
+  - Krzysztof Kozlowski <krzk@kernel.org>
+
+description:
+  DW9174 is a 10-bit DAC with current sink capability. It is intended for
+  driving voice coil lenses in camera modules.
+
+properties:
+  compatible:
+    const: dongwoon,dw9714
+
+  reg:
+    maxItems: 1
+
+  powerdown-gpios:
+    description:
+      XSD pin for shutdown (active low)
+
+  vcc-supply:
+    description: VDD power supply
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        camera-lens@c {
+            compatible = "dongwoon,dw9714";
+            reg = <0x0c>;
+            vcc-supply = <&reg_csi_1v8>;
+        };
+    };
@@ -274,10 +274,6 @@ patternProperties:
      slew-rate:
        enum: [0, 1]

      output-enable:
        description:
          This will internally disable the tri-state for MIO pins.

      drive-strength:
        description:
          Selects the drive strength for MIO pins, in mA.
@@ -214,18 +214,29 @@ Link properties can be modified at runtime by calling
Pipelines and media streams
^^^^^^^^^^^^^^^^^^^^^^^^^^^

A media stream is a stream of pixels or metadata originating from one or more
source devices (such as sensors) and flowing through media entity pads
towards the final sinks. The stream can be modified on the route by the
devices (e.g. scaling or pixel format conversions), or it can be split into
multiple branches, or multiple branches can be merged.

A media pipeline is a set of media streams which are interdependent. This
interdependency can be caused by the hardware (e.g. configuration of a second
stream cannot be changed if the first stream has been enabled) or by the driver
due to the software design. Most commonly a media pipeline consists of a single
stream which does not branch.

When starting streaming, drivers must notify all entities in the pipeline to
prevent link states from being modified during streaming by calling
:c:func:`media_pipeline_start()`.

-The function will mark all entities connected to the given entity through
-enabled links, either directly or indirectly, as streaming.
+The function will mark all the pads which are part of the pipeline as streaming.

The struct media_pipeline instance pointed to by
-the pipe argument will be stored in every entity in the pipeline.
+the pipe argument will be stored in every pad in the pipeline.
Drivers should embed the struct media_pipeline
in higher-level pipeline structures and can then access the
-pipeline through the struct media_entity
+pipeline through the struct media_pad
pipe field.

Calls to :c:func:`media_pipeline_start()` can be nested.
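As a rough illustration of the pad-based API described above (the driver
structure and hardware-start helper below are hypothetical, not taken from any
real driver):

    #include <media/media-entity.h>
    #include <media/v4l2-dev.h>

    struct mydrv {
        struct video_device vdev;
        struct media_pipeline pipe;   /* embedded, as recommended above */
    };

    static int mydrv_hw_start(struct mydrv *drv);   /* hypothetical */

    static int mydrv_start_streaming(struct mydrv *drv)
    {
        struct media_pad *pad = &drv->vdev.entity.pads[0];
        int ret;

        /*
         * Marks every pad in the pipeline as streaming and stores
         * &drv->pipe in each of them; link state changes are rejected
         * until the matching media_pipeline_stop() call.
         */
        ret = media_pipeline_start(pad, &drv->pipe);
        if (ret)
            return ret;

        ret = mydrv_hw_start(drv);
        if (ret)
            media_pipeline_stop(pad);

        return ret;
    }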
@@ -19,6 +19,8 @@ Supported devices:

  Corsair HX1200i

  Corsair HX1500i

  Corsair RM550i

  Corsair RM650i
@@ -319,3 +319,13 @@ unpatched tree to confirm infrastructure didn't mangle it.

Finally, go back and read
:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
to be sure you are not repeating some common mistake documented there.

My company uses peer feedback in employee performance reviews. Can I ask netdev maintainers for feedback?
---------------------------------------------------------------------------------------------------------

Yes, especially if you spend a significant amount of time reviewing code
and go out of your way to improve shared infrastructure.

The feedback must be requested by you, the contributor, and will always
be shared with you (even if you request that it be submitted to your
manager).
@@ -239,6 +239,7 @@ ignore define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL
ignore define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE
ignore define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX
ignore define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX
+ignore define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_VOLUME_LEVEL

ignore define CEC_MSG_GIVE_FEATURES

@@ -487,6 +488,7 @@ ignore define CEC_OP_SYS_AUD_STATUS_ON

ignore define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST
ignore define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS
+ignore define CEC_MSG_SET_AUDIO_VOLUME_LEVEL

ignore define CEC_OP_AUD_FMT_ID_CEA861
ignore define CEC_OP_AUD_FMT_ID_CEA861_CXT
@@ -136,9 +136,9 @@ V4L2 functions

  operates like the :c:func:`read()` function.

-.. c:function:: void v4l2_mmap(void *start, size_t length, int prot, int flags, int fd, int64_t offset);
+.. c:function:: void *v4l2_mmap(void *start, size_t length, int prot, int flags, int fd, int64_t offset);

-  operates like the :c:func:`munmap()` function.
+  operates like the :c:func:`mmap()` function.

.. c:function:: int v4l2_munmap(void *_start, size_t length);
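A short usage sketch of the corrected prototype (the descriptor and buffer are
assumed to come from v4l2_open() and VIDIOC_QUERYBUF; the helper itself is
illustrative only):

    #include <libv4l2.h>
    #include <sys/mman.h>
    #include <linux/videodev2.h>

    /* Map one driver buffer through libv4l2; mirrors mmap(2)/munmap(2). */
    static void *map_buffer(int fd, const struct v4l2_buffer *buf)
    {
        /* Unmap later with v4l2_munmap(start, buf->length). */
        void *start = v4l2_mmap(NULL, buf->length, PROT_READ | PROT_WRITE,
                                MAP_SHARED, fd, buf->m.offset);

        return start == MAP_FAILED ? NULL : start;
    }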
MAINTAINERS | 17
@@ -6285,7 +6285,7 @@ M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
-F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt
+F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml
F: drivers/media/i2c/dw9714.c

DONGWOON DW9768 LENS VOICE COIL DRIVER

@@ -14715,6 +14715,12 @@ F: drivers/nvme/target/auth.c
F: drivers/nvme/target/fabrics-cmd-auth.c
F: include/linux/nvme-auth.h

+NVM EXPRESS HARDWARE MONITORING SUPPORT
+M: Guenter Roeck <linux@roeck-us.net>
+L: linux-nvme@lists.infradead.org
+S: Supported
+F: drivers/nvme/host/hwmon.c

NVM EXPRESS FC TRANSPORT DRIVERS
M: James Smart <james.smart@broadcom.com>
L: linux-nvme@lists.infradead.org

@@ -15845,7 +15851,7 @@ F: Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml
F: drivers/pci/controller/dwc/*designware*

PCI DRIVER FOR TI DRA7XX/J721E
M: Kishon Vijay Abraham I <kishon@ti.com>
M: Vignesh Raghavendra <vigneshr@ti.com>
L: linux-omap@vger.kernel.org
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)

@@ -15862,10 +15868,10 @@ F: Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
F: drivers/pci/controller/pci-v3-semi.c

PCI ENDPOINT SUBSYSTEM
-M: Kishon Vijay Abraham I <kishon@ti.com>
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
R: Krzysztof Wilczyński <kw@linux.com>
R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+R: Kishon Vijay Abraham I <kishon@kernel.org>
L: linux-pci@vger.kernel.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-pci/list/

@@ -16671,6 +16677,7 @@ F: Documentation/driver-api/ptp.rst
F: drivers/net/phy/dp83640*
F: drivers/ptp/*
F: include/linux/ptp_cl*
+K: (?:\b|_)ptp(?:\b|_)

PTP VIRTUAL CLOCK SUPPORT
M: Yangbo Lu <yangbo.lu@nxp.com>

@@ -18137,7 +18144,6 @@ L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
F: drivers/staging/media/deprecated/saa7146/
F: include/media/drv-intf/saa7146*

SAFESETID SECURITY MODULE
M: Micah Morton <mortonm@chromium.org>

@@ -22126,6 +22132,7 @@ F: Documentation/watchdog/
F: drivers/watchdog/
F: include/linux/watchdog.h
F: include/uapi/linux/watchdog.h
+F: include/trace/events/watchdog.h

WHISKEYCOVE PMIC GPIO DRIVER
M: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>

@@ -22766,7 +22773,7 @@ S: Maintained
W: http://mjpeg.sourceforge.net/driver-zoran/
Q: https://patchwork.linuxtv.org/project/linux-media/list/
F: Documentation/driver-api/media/drivers/zoran.rst
-F: drivers/staging/media/zoran/
+F: drivers/media/pci/zoran/

ZRAM COMPRESSED RAM BLOCK DEVICE DRIVER
M: Minchan Kim <minchan@kernel.org>
Makefile | 2
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*
@@ -13,6 +13,18 @@

#define KVM_PGTABLE_MAX_LEVELS 4U

+/*
+ * The largest supported block sizes for KVM (no 52-bit PA support):
+ *  - 4K (level 1): 1GB
+ *  - 16K (level 2): 32MB
+ *  - 64K (level 2): 512MB
+ */
+#ifdef CONFIG_ARM64_4K_PAGES
+#define KVM_PGTABLE_MIN_BLOCK_LEVEL 1U
+#else
+#define KVM_PGTABLE_MIN_BLOCK_LEVEL 2U
+#endif

static inline u64 kvm_get_parange(u64 mmfr0)
{
    u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,

@@ -58,11 +70,7 @@ static inline u64 kvm_granule_size(u32 level)

static inline bool kvm_level_supports_block_mapping(u32 level)
{
-    /*
-     * Reject invalid block mappings and don't bother with 4TB mappings for
-     * 52-bit PAs.
-     */
-    return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
+    return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

/**
@@ -10,13 +10,6 @@

#include <linux/pgtable.h>

-/*
- * PGDIR_SHIFT determines the size a top-level page table entry can map
- * and depends on the number of levels in the page table. Compute the
- * PGDIR_SHIFT for a given number of levels.
- */
-#define pt_levels_pgdir_shift(lvls) ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))

/*
 * The hardware supports concatenation of up to 16 tables at stage2 entry
 * level and we use the feature whenever possible, which means we resolve 4

@@ -30,11 +23,6 @@
#define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr)

-/* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */
-#define stage2_pgdir_shift(kvm) pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
-#define stage2_pgdir_size(kvm) (1ULL << stage2_pgdir_shift(kvm))
-#define stage2_pgdir_mask(kvm) ~(stage2_pgdir_size(kvm) - 1)

/*
 * kvm_mmu_cache_min_pages() is the number of pages required to install
 * a stage-2 translation. We pre-allocate the entry level page table at

@@ -42,12 +30,4 @@
 */
#define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1)

-static inline phys_addr_t
-stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
-{
-    phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);
-
-    return (boundary - 1 < end - 1) ? boundary : end;
-}

#endif /* __ARM64_S2_PGTABLE_H_ */
@@ -7,6 +7,7 @@
 */

#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>

@@ -294,10 +295,14 @@ SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

-SYM_FUNC_START(ftrace_stub)
+SYM_TYPED_FUNC_START(ftrace_stub)
    ret
SYM_FUNC_END(ftrace_stub)

+SYM_TYPED_FUNC_START(ftrace_stub_graph)
+    ret
+SYM_FUNC_END(ftrace_stub_graph)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void return_to_handler(void)
@@ -5,9 +5,6 @@

incdir := $(srctree)/$(src)/include
subdir-asflags-y := -I$(incdir)
-subdir-ccflags-y := -I$(incdir) \
-                    -fno-stack-protector \
-                    -DDISABLE_BRANCH_PROFILING \
-                    $(DISABLE_STACKLEAK_PLUGIN)
+subdir-ccflags-y := -I$(incdir)

obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
@@ -10,6 +10,9 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
# will explode instantly (Words of Marc Zyngier). So introduce a generic flag
# __DISABLE_TRACE_MMIO__ to disable MMIO tracing for nVHE KVM.
ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__
+ccflags-y += -fno-stack-protector \
+             -DDISABLE_BRANCH_PROFILING \
+             $(DISABLE_STACKLEAK_PLUGIN)

hostprogs := gen-hyprel
HOST_EXTRACFLAGS += -I$(objtree)/include

@@ -89,6 +92,10 @@ quiet_cmd_hypcopy = HYPCOPY $@
# Remove ftrace, Shadow Call Stack, and CFI CFLAGS.
# This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations.
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
+# Starting from 13.0.0 llvm emits SHT_REL section '.llvm.call-graph-profile'
+# when profile optimization is applied. gen-hyprel does not support SHT_REL and
+# causes a build failure. Remove profile optimization flags.
+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%, $(KBUILD_CFLAGS))

# KVM nVHE code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
@@ -31,6 +31,13 @@ static phys_addr_t hyp_idmap_vector;

static unsigned long io_map_base;

+static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+    phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
+    phys_addr_t boundary = ALIGN_DOWN(addr + size, size);
+
+    return (boundary - 1 < end - 1) ? boundary : end;
+}

/*
 * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,

@@ -52,7 +59,7 @@ static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
        if (!pgt)
            return -EINVAL;

-       next = stage2_pgd_addr_end(kvm, addr, end);
+       next = stage2_range_addr_end(addr, end);
        ret = fn(pgt, addr, next - addr);
        if (ret)
            break;
@@ -2149,7 +2149,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,

    memset(entry, 0, esz);

-   while (len > 0) {
+   while (true) {
        int next_offset;
        size_t byte_offset;

@@ -2162,6 +2162,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
            return next_offset;

        byte_offset = next_offset * esz;
+       if (byte_offset >= len)
+           break;

        id += next_offset;
        gpa += byte_offset;
        len -= byte_offset;
@@ -42,16 +42,8 @@ void flush_icache_mm(struct mm_struct *mm, bool local);

#endif /* CONFIG_SMP */

-/*
- * The T-Head CMO errata internally probe the CBOM block size, but otherwise
- * don't depend on Zicbom.
- */
extern unsigned int riscv_cbom_block_size;
-#ifdef CONFIG_RISCV_ISA_ZICBOM
void riscv_init_cbom_blocksize(void);
-#else
-static inline void riscv_init_cbom_blocksize(void) { }
-#endif

#ifdef CONFIG_RISCV_DMA_NONCOHERENT
void riscv_noncoherent_supported(void);
@@ -45,6 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);

@@ -708,6 +708,9 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
            clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
        }
    }
+
+   /* Sync-up timer CSRs */
+   kvm_riscv_vcpu_timer_sync(vcpu);
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)

@@ -320,6 +320,21 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
    kvm_riscv_vcpu_timer_unblocking(vcpu);
}

+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
+{
+   struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+   if (!t->sstc_enabled)
+       return;
+
+#if defined(CONFIG_32BIT)
+   t->next_cycles = csr_read(CSR_VSTIMECMP);
+   t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
+#else
+   t->next_cycles = csr_read(CSR_VSTIMECMP);
+#endif
+}

void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
    struct kvm_vcpu_timer *t = &vcpu->arch.timer;

@@ -327,13 +342,11 @@ void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
    if (!t->sstc_enabled)
        return;

-   t = &vcpu->arch.timer;
-#if defined(CONFIG_32BIT)
-   t->next_cycles = csr_read(CSR_VSTIMECMP);
-   t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
-#else
-   t->next_cycles = csr_read(CSR_VSTIMECMP);
-#endif
+   /*
+    * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
+    * upon every VM exit so no need to save here.
+    */

    /* timer should be enabled for the remaining operations */
    if (unlikely(!t->init_done))
        return;
@@ -3,6 +3,7 @@
 * Copyright (C) 2017 SiFive
 */

+#include <linux/of.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

@@ -86,3 +87,40 @@ void flush_icache_pte(pte_t pte)
    flush_icache_all();
}
#endif /* CONFIG_MMU */

+unsigned int riscv_cbom_block_size;
+EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
+
+void riscv_init_cbom_blocksize(void)
+{
+   struct device_node *node;
+   unsigned long cbom_hartid;
+   u32 val, probed_block_size;
+   int ret;
+
+   probed_block_size = 0;
+   for_each_of_cpu_node(node) {
+       unsigned long hartid;
+
+       ret = riscv_of_processor_hartid(node, &hartid);
+       if (ret)
+           continue;
+
+       /* set block-size for cbom extension if available */
+       ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
+       if (ret)
+           continue;
+
+       if (!probed_block_size) {
+           probed_block_size = val;
+           cbom_hartid = hartid;
+       } else {
+           if (probed_block_size != val)
+               pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
+                       cbom_hartid, hartid);
+       }
+   }
+
+   if (probed_block_size)
+       riscv_cbom_block_size = probed_block_size;
+}
@@ -8,13 +8,8 @@
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <asm/cacheflush.h>

-unsigned int riscv_cbom_block_size;
-EXPORT_SYMBOL_GPL(riscv_cbom_block_size);

static bool noncoherent_supported;

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,

@@ -77,42 +72,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
    dev->dma_coherent = coherent;
}

-#ifdef CONFIG_RISCV_ISA_ZICBOM
-void riscv_init_cbom_blocksize(void)
-{
-   struct device_node *node;
-   unsigned long cbom_hartid;
-   u32 val, probed_block_size;
-   int ret;
-
-   probed_block_size = 0;
-   for_each_of_cpu_node(node) {
-       unsigned long hartid;
-
-       ret = riscv_of_processor_hartid(node, &hartid);
-       if (ret)
-           continue;
-
-       /* set block-size for cbom extension if available */
-       ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
-       if (ret)
-           continue;
-
-       if (!probed_block_size) {
-           probed_block_size = val;
-           cbom_hartid = hartid;
-       } else {
-           if (probed_block_size != val)
-               pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
-                       cbom_hartid, hartid);
-       }
-   }
-
-   if (probed_block_size)
-       riscv_cbom_block_size = probed_block_size;
-}
-#endif

void riscv_noncoherent_supported(void)
{
    WARN(!riscv_cbom_block_size,
@@ -1973,7 +1973,6 @@ config EFI
config EFI_STUB
    bool "EFI stub support"
    depends on EFI
-   depends on $(cc-option,-mabi=ms) || X86_32
    select RELOCATABLE
    help
      This kernel feature allows a bzImage to be loaded directly
@@ -1596,7 +1596,7 @@ void __init intel_pmu_arch_lbr_init(void)
    return;

clear_arch_lbr:
-   clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR);
+   setup_clear_cpu_cap(X86_FEATURE_ARCH_LBR);
}

/**
@@ -25,8 +25,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
{
    u64 start = rmrr->base_address;
    u64 end = rmrr->end_address + 1;
+   int entry_type;

-   if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
+   entry_type = e820__get_entry_type(start, end);
+   if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS)
        return 0;

    pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
@@ -440,7 +440,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
        return ret;

    native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-   if (rev >= mc->hdr.patch_id)
+
+   /*
+    * Allow application of the same revision to pick up SMT-specific
+    * changes even if the revision of the other SMT thread is already
+    * up-to-date.
+    */
+   if (rev > mc->hdr.patch_id)
        return ret;

    if (!__apply_microcode_amd(mc)) {

@@ -528,8 +534,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)

    native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

-   /* Check whether we have saved a new patch already: */
-   if (*new_rev && rev < mc->hdr.patch_id) {
+   /*
+    * Check whether a new patch has been saved already. Also, allow application of
+    * the same revision in order to pick up SMT-thread-specific configuration even
+    * if the sibling SMT thread already has an up-to-date revision.
+    */
+   if (*new_rev && rev <= mc->hdr.patch_id) {
        if (!__apply_microcode_amd(mc)) {
            *new_rev = mc->hdr.patch_id;
            return;
@@ -66,9 +66,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
            .rid = RDT_RESOURCE_L3,
            .name = "L3",
            .cache_level = 3,
-           .cache = {
-               .min_cbm_bits = 1,
-           },
            .domains = domain_init(RDT_RESOURCE_L3),
            .parse_ctrlval = parse_cbm,
            .format_str = "%d=%0*x",

@@ -83,9 +80,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
            .rid = RDT_RESOURCE_L2,
            .name = "L2",
            .cache_level = 2,
-           .cache = {
-               .min_cbm_bits = 1,
-           },
            .domains = domain_init(RDT_RESOURCE_L2),
            .parse_ctrlval = parse_cbm,
            .format_str = "%d=%0*x",

@@ -836,6 +830,7 @@ static __init void rdt_init_res_defs_intel(void)
            r->cache.arch_has_sparse_bitmaps = false;
            r->cache.arch_has_empty_bitmaps = false;
            r->cache.arch_has_per_cpu_cfg = false;
+           r->cache.min_cbm_bits = 1;
        } else if (r->rid == RDT_RESOURCE_MBA) {
            hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
            hw_res->msr_update = mba_wrmsr_intel;

@@ -856,6 +851,7 @@ static __init void rdt_init_res_defs_amd(void)
            r->cache.arch_has_sparse_bitmaps = true;
            r->cache.arch_has_empty_bitmaps = true;
            r->cache.arch_has_per_cpu_cfg = true;
+           r->cache.min_cbm_bits = 0;
        } else if (r->rid == RDT_RESOURCE_MBA) {
            hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
            hw_res->msr_update = mba_wrmsr_amd;
@@ -96,6 +96,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
    unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
    unsigned int core_select_mask, core_level_siblings;
    unsigned int die_select_mask, die_level_siblings;
+   unsigned int pkg_mask_width;
    bool die_level_present = false;
    int leaf;

@@ -111,10 +112,10 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
    core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
    core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
    die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
-   die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+   pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);

    sub_index = 1;
-   do {
+   while (true) {
        cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx);

        /*

@@ -132,10 +133,15 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
            die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
        }

-       sub_index++;
-   } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
+       if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE)
+           pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+       else
+           break;

-   core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
+       sub_index++;
+   }
+
+   core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width;
    die_select_mask = (~(-1 << die_plus_mask_width)) >>
                core_plus_mask_width;

@@ -148,7 +154,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
    }

    c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
-               die_plus_mask_width);
+               pkg_mask_width);
    /*
     * Reinit the apicid, now that we have extended initial_apicid.
     */
@@ -210,13 +210,6 @@ static void __init fpu__init_system_xstate_size_legacy(void)
    fpstate_reset(&current->thread.fpu);
}

-static void __init fpu__init_init_fpstate(void)
-{
-   /* Bring init_fpstate size and features up to date */
-   init_fpstate.size = fpu_kernel_cfg.max_size;
-   init_fpstate.xfeatures = fpu_kernel_cfg.max_features;
-}

/*
 * Called on the boot CPU once per system bootup, to set up the initial
 * FPU state that is later cloned into all processes:

@@ -236,5 +229,4 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
    fpu__init_system_xstate_size_legacy();
    fpu__init_system_xstate(fpu_kernel_cfg.max_size);
    fpu__init_task_struct_size();
-   fpu__init_init_fpstate();
}
@@ -360,7 +360,7 @@ static void __init setup_init_fpu_buf(void)

    print_xstate_features();

-   xstate_init_xcomp_bv(&init_fpstate.regs.xsave, fpu_kernel_cfg.max_features);
+   xstate_init_xcomp_bv(&init_fpstate.regs.xsave, init_fpstate.xfeatures);

    /*
     * Init all the features state with header.xfeatures being 0x0

@@ -678,20 +678,6 @@ static unsigned int __init get_xsave_size_user(void)
    return ebx;
}

-/*
- * Will the runtime-enumerated 'xstate_size' fit in the init
- * task's statically-allocated buffer?
- */
-static bool __init is_supported_xstate_size(unsigned int test_xstate_size)
-{
-   if (test_xstate_size <= sizeof(init_fpstate.regs))
-       return true;
-
-   pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n",
-       sizeof(init_fpstate.regs), test_xstate_size);
-   return false;
-}

static int __init init_xstate_size(void)
{
    /* Recompute the context size for enabled features: */

@@ -717,10 +703,6 @@ static int __init init_xstate_size(void)
    kernel_default_size =
        xstate_calculate_size(fpu_kernel_cfg.default_features, compacted);

-   /* Ensure we have the space to store all default enabled features. */
-   if (!is_supported_xstate_size(kernel_default_size))
-       return -EINVAL;

    if (!paranoid_xstate_size_valid(kernel_size))
        return -EINVAL;

@@ -875,6 +857,19 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
    update_regset_xstate_info(fpu_user_cfg.max_size,
                  fpu_user_cfg.max_features);

+   /*
+    * init_fpstate excludes dynamic states as they are large but init
+    * state is zero.
+    */
+   init_fpstate.size = fpu_kernel_cfg.default_size;
+   init_fpstate.xfeatures = fpu_kernel_cfg.default_features;
+
+   if (init_fpstate.size > sizeof(init_fpstate.regs)) {
+       pr_warn("x86/fpu: init_fpstate buffer too small (%zu < %d), disabling XSAVE\n",
+           sizeof(init_fpstate.regs), init_fpstate.size);
+       goto out_disable;
+   }

    setup_init_fpu_buf();

    /*

@@ -1130,6 +1125,15 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
     */
    mask = fpstate->user_xfeatures;

+   /*
+    * Dynamic features are not present in init_fpstate. When they are
+    * in an all zeros init state, remove those from 'mask' to zero
+    * those features in the user buffer instead of retrieving them
+    * from init_fpstate.
+    */
+   if (fpu_state_size_dynamic())
+       mask &= (header.xfeatures | xinit->header.xcomp_bv);

    for_each_extended_xfeature(i, mask) {
        /*
         * If there was a feature or alignment gap, zero the space
@@ -4,6 +4,7 @@
 */

#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/export.h>

@@ -129,6 +130,14 @@

.endm

+SYM_TYPED_FUNC_START(ftrace_stub)
+   RET
+SYM_FUNC_END(ftrace_stub)
+
+SYM_TYPED_FUNC_START(ftrace_stub_graph)
+   RET
+SYM_FUNC_END(ftrace_stub_graph)

#ifdef CONFIG_DYNAMIC_FTRACE

SYM_FUNC_START(__fentry__)

@@ -172,21 +181,10 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 */
SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
    ANNOTATE_NOENDBR
-   jmp ftrace_epilogue
+   RET
SYM_FUNC_END(ftrace_caller);
STACK_FRAME_NON_STANDARD_FP(ftrace_caller)

-SYM_FUNC_START(ftrace_epilogue)
-/*
- * This is weak to keep gas from relaxing the jumps.
- */
-SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
-   UNWIND_HINT_FUNC
-   ENDBR
-   RET
-SYM_FUNC_END(ftrace_epilogue)

SYM_FUNC_START(ftrace_regs_caller)
    /* Save the current flags before any operations that can change them */
    pushfq

@@ -262,14 +260,11 @@ SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL)
    popfq

    /*
-    * As this jmp to ftrace_epilogue can be a short jump
-    * it must not be copied into the trampoline.
-    * The trampoline will add the code to jump
-    * to the return.
+    * The trampoline will add the return.
     */
SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
    ANNOTATE_NOENDBR
-   jmp ftrace_epilogue
+   RET

    /* Swap the flags with orig_rax */
1:  movq MCOUNT_REG_SIZE(%rsp), %rdi

@@ -280,7 +275,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
    /* Restore flags */
    popfq
    UNWIND_HINT_FUNC
-   jmp ftrace_epilogue
+   RET

SYM_FUNC_END(ftrace_regs_caller)
STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)

@@ -291,9 +286,6 @@ STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
SYM_FUNC_START(__fentry__)
    cmpq $ftrace_stub, ftrace_trace_function
    jnz trace

-SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
-   ENDBR
    RET

trace:
@@ -713,7 +713,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
    /* Otherwise, skip ahead to the user-specified starting frame: */
    while (!unwind_done(state) &&
           (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
-          state->sp < (unsigned long)first_frame))
+          state->sp <= (unsigned long)first_frame))
        unwind_next_frame(state);

    return;
@@ -6442,26 +6442,22 @@ static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
    return 0;
}

-static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
+                                       struct kvm_msr_filter *filter)
{
-   struct kvm_msr_filter __user *user_msr_filter = argp;
    struct kvm_x86_msr_filter *new_filter, *old_filter;
-   struct kvm_msr_filter filter;
    bool default_allow;
    bool empty = true;
    int r = 0;
    u32 i;

-   if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
-       return -EFAULT;
-
-   if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
+   if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
        return -EINVAL;

-   for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
-       empty &= !filter.ranges[i].nmsrs;
+   for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
+       empty &= !filter->ranges[i].nmsrs;

-   default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY);
+   default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
    if (empty && !default_allow)
        return -EINVAL;

@@ -6469,8 +6465,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
    if (!new_filter)
        return -ENOMEM;

-   for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
-       r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
+   for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
+       r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
        if (r) {
            kvm_free_msr_filter(new_filter);
            return r;

@@ -6493,6 +6489,62 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
    return 0;
}

+#ifdef CONFIG_KVM_COMPAT
+/* for KVM_X86_SET_MSR_FILTER */
+struct kvm_msr_filter_range_compat {
+   __u32 flags;
+   __u32 nmsrs;
+   __u32 base;
+   __u32 bitmap;
+};
+
+struct kvm_msr_filter_compat {
+   __u32 flags;
+   struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];
+};
+
+#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)
+
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+                  unsigned long arg)
+{
+   void __user *argp = (void __user *)arg;
+   struct kvm *kvm = filp->private_data;
+   long r = -ENOTTY;
+
+   switch (ioctl) {
+   case KVM_X86_SET_MSR_FILTER_COMPAT: {
+       struct kvm_msr_filter __user *user_msr_filter = argp;
+       struct kvm_msr_filter_compat filter_compat;
+       struct kvm_msr_filter filter;
+       int i;
+
+       if (copy_from_user(&filter_compat, user_msr_filter,
+                  sizeof(filter_compat)))
+           return -EFAULT;
+
+       filter.flags = filter_compat.flags;
+       for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
+           struct kvm_msr_filter_range_compat *cr;
+
+           cr = &filter_compat.ranges[i];
+           filter.ranges[i] = (struct kvm_msr_filter_range) {
+               .flags = cr->flags,
+               .nmsrs = cr->nmsrs,
+               .base = cr->base,
+               .bitmap = (__u8 *)(ulong)cr->bitmap,
+           };
+       }
+
+       r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
+       break;
+   }
+   }
+
+   return r;
+}
+#endif

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_arch_suspend_notifier(struct kvm *kvm)
{

@@ -6915,9 +6967,16 @@ set_pit2_out:
    case KVM_SET_PMU_EVENT_FILTER:
        r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
        break;
-   case KVM_X86_SET_MSR_FILTER:
-       r = kvm_vm_ioctl_set_msr_filter(kvm, argp);
+   case KVM_X86_SET_MSR_FILTER: {
+       struct kvm_msr_filter __user *user_msr_filter = argp;
+       struct kvm_msr_filter filter;
+
+       if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
+           return -EFAULT;
+
+       r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
        break;
+   }
    default:
        r = -ENOTTY;
    }
@@ -11,6 +11,7 @@
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
+#include <linux/init.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>

@@ -388,6 +389,18 @@ out:
    return ret;
}

+int __init bpf_arch_init_dispatcher_early(void *ip)
+{
+   const u8 *nop_insn = x86_nops[5];
+
+   if (is_endbr(*(u32 *)ip))
+       ip += ENDBR_INSN_SIZE;
+
+   if (memcmp(ip, nop_insn, X86_PATCH_SIZE))
+       text_poke_early(ip, nop_insn, X86_PATCH_SIZE);
+   return 0;
+}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
               void *old_addr, void *new_addr)
{
@@ -369,12 +369,8 @@ struct bfq_queue {
    unsigned long split_time; /* time of last split */

    unsigned long first_IO_time; /* time of first I/O for this queue */

    unsigned long creation_time; /* when this queue is created */

-   /* max service rate measured so far */
-   u32 max_service_rate;

    /*
     * Pointer to the waker queue for this queue, i.e., to the
     * queue Q such that this queue happens to get new I/O right
@@ -741,7 +741,7 @@ void bio_put(struct bio *bio)
            return;
    }

-   if (bio->bi_opf & REQ_ALLOC_CACHE) {
+   if ((bio->bi_opf & REQ_ALLOC_CACHE) && !WARN_ON_ONCE(in_interrupt())) {
        struct bio_alloc_cache *cache;

        bio_uninit(bio);
@@ -3112,8 +3112,11 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
    struct page *page;
    unsigned long flags;

-   /* There is no need to clear a driver tags own mapping */
-   if (drv_tags == tags)
+   /*
+    * There is no need to clear mapping if driver tags is not initialized
+    * or the mapping belongs to the driver tags.
+    */
+   if (!drv_tags || drv_tags == tags)
        return;

    list_for_each_entry(page, &tags->page_list, lru) {
@@ -12,6 +12,7 @@
#include <linux/ratelimit.h>
#include <linux/edac.h>
#include <linux/ras.h>
+#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>

@@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
    int cpu = mce->extcpu;
    struct acpi_hest_generic_status *estatus, *tmp;
    struct acpi_hest_generic_data *gdata;
-   const guid_t *fru_id = &guid_null;
-   char *fru_text = "";
+   const guid_t *fru_id;
+   char *fru_text;
    guid_t *sec_type;
    static u32 err_seq;

@@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,

    /* log event via trace */
    err_seq++;
-   gdata = (struct acpi_hest_generic_data *)(tmp + 1);
-   if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
-       fru_id = (guid_t *)gdata->fru_id;
-   if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
-       fru_text = gdata->fru_text;
-   sec_type = (guid_t *)gdata->section_type;
-   if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
-       struct cper_sec_mem_err *mem = (void *)(gdata + 1);
-       if (gdata->error_data_length >= sizeof(*mem))
-           trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
-                          (u8)gdata->error_severity);
+   apei_estatus_for_each_section(tmp, gdata) {
+       if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+           fru_id = (guid_t *)gdata->fru_id;
+       else
+           fru_id = &guid_null;
+       if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+           fru_text = gdata->fru_text;
+       else
+           fru_text = "";
+       sec_type = (guid_t *)gdata->section_type;
+       if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+           struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+
+           if (gdata->error_data_length >= sizeof(*mem))
+               trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
+                              (u8)gdata->error_severity);
+       }
    }

out:
@@ -163,7 +163,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
    clear_fixmap(fixmap_idx);
}

-int ghes_estatus_pool_init(int num_ghes)
+int ghes_estatus_pool_init(unsigned int num_ghes)
{
    unsigned long addr, len;
    int rc;
@@ -1142,7 +1142,8 @@ static void iort_iommu_msi_get_resv_regions(struct device *dev,
        struct iommu_resv_region *region;

        region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
-                        prot, IOMMU_RESV_MSI);
+                        prot, IOMMU_RESV_MSI,
+                        GFP_KERNEL);
        if (region)
            list_add_tail(&region->list, head);
    }
@@ -323,6 +323,7 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle)

    list_for_each_entry(pn, &adev->physical_node_list, node) {
        if (dev_is_pci(pn->dev)) {
+           get_device(pn->dev);
            pci_dev = to_pci_dev(pn->dev);
            break;
        }
@@ -428,17 +428,31 @@ static const struct dmi_system_id asus_laptop[] = {
    { }
};

+static const struct dmi_system_id lenovo_82ra[] = {
+   {
+       .ident = "LENOVO IdeaPad Flex 5 16ALC7",
+       .matches = {
+           DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+           DMI_MATCH(DMI_PRODUCT_NAME, "82RA"),
+       },
+   },
+   { }
+};

struct irq_override_cmp {
    const struct dmi_system_id *system;
    unsigned char irq;
    unsigned char triggering;
    unsigned char polarity;
    unsigned char shareable;
+   bool override;
};

-static const struct irq_override_cmp skip_override_table[] = {
-   { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
-   { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
+static const struct irq_override_cmp override_table[] = {
+   { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+   { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+   { lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+   { lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
};

static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,

@@ -446,6 +460,17 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
{
    int i;

+   for (i = 0; i < ARRAY_SIZE(override_table); i++) {
+       const struct irq_override_cmp *entry = &override_table[i];
+
+       if (dmi_check_system(entry->system) &&
+           entry->irq == gsi &&
+           entry->triggering == triggering &&
+           entry->polarity == polarity &&
+           entry->shareable == shareable)
+           return entry->override;
+   }

#ifdef CONFIG_X86
    /*
     * IRQ override isn't needed on modern AMD Zen systems and

@@ -456,17 +481,6 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
        return false;
#endif

-   for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
-       const struct irq_override_cmp *entry = &skip_override_table[i];
-
-       if (dmi_check_system(entry->system) &&
-           entry->irq == gsi &&
-           entry->triggering == triggering &&
-           entry->polarity == polarity &&
-           entry->shareable == shareable)
-           return false;
-   }

    return true;
}

@@ -498,8 +512,11 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
        u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;

        if (triggering != trig || polarity != pol) {
-           pr_warn("ACPI: IRQ %d override to %s, %s\n", gsi,
-               t ? "level" : "edge", p ? "low" : "high");
+           pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
+               t ? "level" : "edge",
+               trig == triggering ? "" : "(!)",
+               p ? "low" : "high",
+               pol == polarity ? "" : "(!)");
            triggering = trig;
            polarity = pol;
        }
|
@ -1509,9 +1509,12 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
|
|||
goto out;
|
||||
}
|
||||
|
||||
*map = r;
|
||||
|
||||
list_for_each_entry(rentry, &list, node) {
|
||||
if (rentry->res->start >= rentry->res->end) {
|
||||
kfree(r);
|
||||
kfree(*map);
|
||||
*map = NULL;
|
||||
ret = -EINVAL;
|
||||
dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
|
||||
goto out;
|
||||
|
@ -1523,8 +1526,6 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
|
|||
r->offset = rentry->offset;
|
||||
r++;
|
||||
}
|
||||
|
||||
*map = r;
|
||||
}
|
||||
out:
|
||||
acpi_dev_free_resource_list(&list);
|
||||
|
|
|
@ -30,11 +30,6 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
|
|||
return NULL;
|
||||
memset(req, 0, sizeof(*req));
|
||||
|
||||
req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src,
|
||||
GFP_NOIO, &drbd_io_bio_set);
|
||||
req->private_bio->bi_private = req;
|
||||
req->private_bio->bi_end_io = drbd_request_endio;
|
||||
|
||||
req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
|
||||
| (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
|
||||
| (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
|
||||
|
@ -1219,9 +1214,12 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio)
|
|||
/* Update disk stats */
|
||||
req->start_jif = bio_start_io_acct(req->master_bio);
|
||||
|
||||
if (!get_ldev(device)) {
|
||||
bio_put(req->private_bio);
|
||||
req->private_bio = NULL;
|
||||
if (get_ldev(device)) {
|
||||
req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
|
||||
bio, GFP_NOIO,
|
||||
&drbd_io_bio_set);
|
||||
req->private_bio->bi_private = req;
|
||||
req->private_bio->bi_end_io = drbd_request_endio;
|
||||
}
|
||||
|
||||
/* process discards always from our submitter thread */
|
||||
|
|
|
@ -124,7 +124,7 @@ struct ublk_queue {
|
|||
bool force_abort;
|
||||
unsigned short nr_io_ready; /* how many ios setup */
|
||||
struct ublk_device *dev;
|
||||
struct ublk_io ios[0];
|
||||
struct ublk_io ios[];
|
||||
};
|
||||
|
||||
#define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ)
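The [0] -> [] change above switches the trailing member to a C99 flexible
array, which pairs with overflow-checked allocation via struct_size(). A
hedged sketch of the idiomatic pairing (not the driver's actual allocation
code):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    static struct ublk_queue *ublk_alloc_queue(unsigned short nr_ios)
    {
        struct ublk_queue *q;

        /* sizeof(*q) + nr_ios * sizeof(q->ios[0]), with overflow checks */
        q = kzalloc(struct_size(q, ios, nr_ios), GFP_KERNEL);
        return q;
    }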
@@ -222,10 +222,8 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
    if (reg_name[0]) {
        priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name);
        if (priv->opp_token < 0) {
-           ret = priv->opp_token;
-           if (ret != -EPROBE_DEFER)
-               dev_err(cpu_dev, "failed to set regulators: %d\n",
-                   ret);
+           ret = dev_err_probe(cpu_dev, priv->opp_token,
+                       "failed to set regulators\n");
            goto free_cpumask;
        }
|
@@ -396,9 +396,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
        ret = imx6q_opp_check_speed_grading(cpu_dev);
    }
    if (ret) {
-       if (ret != -EPROBE_DEFER)
-           dev_err(cpu_dev, "failed to read ocotp: %d\n",
-               ret);
+       dev_err_probe(cpu_dev, ret, "failed to read ocotp\n");
        goto out_free_opp;
    }

@@ -64,7 +64,7 @@ static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;

static void get_krait_bin_format_a(struct device *cpu_dev,
                   int *speed, int *pvs, int *pvs_ver,
-                  struct nvmem_cell *pvs_nvmem, u8 *buf)
+                  u8 *buf)
{
    u32 pte_efuse;

@@ -95,7 +95,7 @@ static void get_krait_bin_format_a(struct device *cpu_dev,

static void get_krait_bin_format_b(struct device *cpu_dev,
                   int *speed, int *pvs, int *pvs_ver,
-                  struct nvmem_cell *pvs_nvmem, u8 *buf)
+                  u8 *buf)
{
    u32 pte_efuse, redundant_sel;

@@ -213,6 +213,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
    int speed = 0, pvs = 0, pvs_ver = 0;
    u8 *speedbin;
    size_t len;
+   int ret = 0;

    speedbin = nvmem_cell_read(speedbin_nvmem, &len);

@@ -222,15 +223,16 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
    switch (len) {
    case 4:
        get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver,
-                      speedbin_nvmem, speedbin);
+                      speedbin);
        break;
    case 8:
        get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver,
-                      speedbin_nvmem, speedbin);
+                      speedbin);
        break;
    default:
        dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n");
-       return -ENODEV;
+       ret = -ENODEV;
+       goto len_error;
    }

    snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d",

@@ -238,8 +240,9 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,

    drv->versions = (1 << speed);

+len_error:
    kfree(speedbin);
-   return 0;
+   return ret;
}

static const struct qcom_cpufreq_match_data match_data_kryo = {

@@ -262,7 +265,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
    struct nvmem_cell *speedbin_nvmem;
    struct device_node *np;
    struct device *cpu_dev;
-   char *pvs_name = "speedXX-pvsXX-vXX";
+   char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
+   char *pvs_name = pvs_name_buffer;
    unsigned cpu;
    const struct of_device_id *match;
    int ret;

@@ -295,11 +299,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
    if (drv->data->get_version) {
        speedbin_nvmem = of_nvmem_cell_get(np, NULL);
        if (IS_ERR(speedbin_nvmem)) {
-           if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
-               dev_err(cpu_dev,
-                   "Could not get nvmem cell: %ld\n",
-                   PTR_ERR(speedbin_nvmem));
-           ret = PTR_ERR(speedbin_nvmem);
+           ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
+                       "Could not get nvmem cell\n");
            goto free_drv;
        }

@@ -56,12 +56,9 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)

    speedbin_nvmem = of_nvmem_cell_get(np, NULL);
    of_node_put(np);
-   if (IS_ERR(speedbin_nvmem)) {
-       if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
-           pr_err("Could not get nvmem cell: %ld\n",
-                  PTR_ERR(speedbin_nvmem));
-       return PTR_ERR(speedbin_nvmem);
-   }
+   if (IS_ERR(speedbin_nvmem))
+       return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
+                    "Could not get nvmem cell\n");

    speedbin = nvmem_cell_read(speedbin_nvmem, &len);
    nvmem_cell_put(speedbin_nvmem);

@@ -589,6 +589,7 @@ static const struct of_device_id tegra194_cpufreq_of_match[] = {
    { .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },
    { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match);

static struct platform_driver tegra194_ccplex_driver = {
    .driver = {
@@ -124,28 +124,6 @@ config EFI_ZBOOT
      is supported by the encapsulated image. (The compression algorithm
      used is described in the zboot image header)

-config EFI_ZBOOT_SIGNED
-   def_bool y
-   depends on EFI_ZBOOT_SIGNING_CERT != ""
-   depends on EFI_ZBOOT_SIGNING_KEY != ""
-
-config EFI_ZBOOT_SIGNING
-   bool "Sign the EFI decompressor for UEFI secure boot"
-   depends on EFI_ZBOOT
-   help
-     Use the 'sbsign' command line tool (which must exist on the host
-     path) to sign both the EFI decompressor PE/COFF image, as well as the
-     encapsulated PE/COFF image, which is subsequently compressed and
-     wrapped by the former image.
-
-config EFI_ZBOOT_SIGNING_CERT
-   string "Certificate to use for signing the compressed EFI boot image"
-   depends on EFI_ZBOOT_SIGNING
-
-config EFI_ZBOOT_SIGNING_KEY
-   string "Private key to use for signing the compressed EFI boot image"
-   depends on EFI_ZBOOT_SIGNING

config EFI_ARMSTUB_DTB_LOADER
    bool "Enable the DTB loader"
    depends on EFI_GENERIC_STUB && !RISCV && !LOONGARCH
@@ -63,7 +63,7 @@ static bool __init efi_virtmap_init(void)
 
         if (!(md->attribute & EFI_MEMORY_RUNTIME))
             continue;
-        if (md->virt_addr == 0)
+        if (md->virt_addr == U64_MAX)
             return false;
 
         ret = efi_create_mapping(&efi_mm, md);
@@ -271,6 +271,8 @@ static __init int efivar_ssdt_load(void)
             acpi_status ret = acpi_load_table(data, NULL);
             if (ret)
                 pr_err("failed to load table: %u\n", ret);
+            else
+                continue;
         } else {
             pr_err("failed to get var data: 0x%lx\n", status);
         }
@@ -20,22 +20,11 @@ zboot-size-len-y := 4
 zboot-method-$(CONFIG_KERNEL_GZIP)    := gzip
 zboot-size-len-$(CONFIG_KERNEL_GZIP)  := 0
 
-quiet_cmd_sbsign = SBSIGN  $@
-      cmd_sbsign = sbsign --out $@ $< \
-           --key $(CONFIG_EFI_ZBOOT_SIGNING_KEY) \
-           --cert $(CONFIG_EFI_ZBOOT_SIGNING_CERT)
-
-$(obj)/$(EFI_ZBOOT_PAYLOAD).signed: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
-    $(call if_changed,sbsign)
-
-ZBOOT_PAYLOAD-y                          := $(EFI_ZBOOT_PAYLOAD)
-ZBOOT_PAYLOAD-$(CONFIG_EFI_ZBOOT_SIGNED) := $(EFI_ZBOOT_PAYLOAD).signed
-
-$(obj)/vmlinuz: $(obj)/$(ZBOOT_PAYLOAD-y) FORCE
+$(obj)/vmlinuz: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
     $(call if_changed,$(zboot-method-y))
 
 OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) \
-            --rename-section .data=.gzdata,load,alloc,readonly,contents
+              --rename-section .data=.gzdata,load,alloc,readonly,contents
 $(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE
     $(call if_changed,objcopy)
@@ -53,18 +42,8 @@ LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds
 $(obj)/vmlinuz.efi.elf: $(obj)/vmlinuz.o $(ZBOOT_DEPS) FORCE
     $(call if_changed,ld)
 
-ZBOOT_EFI-y                          := vmlinuz.efi
-ZBOOT_EFI-$(CONFIG_EFI_ZBOOT_SIGNED) := vmlinuz.efi.unsigned
-
-OBJCOPYFLAGS_$(ZBOOT_EFI-y) := -O binary
-$(obj)/$(ZBOOT_EFI-y): $(obj)/vmlinuz.efi.elf FORCE
+OBJCOPYFLAGS_vmlinuz.efi := -O binary
+$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.elf FORCE
     $(call if_changed,objcopy)
 
 targets += zboot-header.o vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
-
-ifneq ($(CONFIG_EFI_ZBOOT_SIGNED),)
-$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.unsigned FORCE
-    $(call if_changed,sbsign)
-endif
-
-targets += $(EFI_ZBOOT_PAYLOAD).signed vmlinuz.efi.unsigned
@@ -313,16 +313,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
 
         /*
          * Set the virtual address field of all
-         * EFI_MEMORY_RUNTIME entries to 0. This will signal
-         * the incoming kernel that no virtual translation has
-         * been installed.
+         * EFI_MEMORY_RUNTIME entries to U64_MAX. This will
+         * signal the incoming kernel that no virtual
+         * translation has been installed.
          */
         for (l = 0; l < priv.boot_memmap->map_size;
              l += priv.boot_memmap->desc_size) {
             p = (void *)priv.boot_memmap->map + l;
 
             if (p->attribute & EFI_MEMORY_RUNTIME)
-                p->virt_addr = 0;
+                p->virt_addr = U64_MAX;
         }
     }
     return EFI_SUCCESS;
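The 0-to-U64_MAX switch here, and in the two efi_virtmap_init() checks elsewhere in this diff, exists because 0 is a legal virtual address for a runtime region and so cannot double as a "no translation installed" marker, while all-ones can never be a valid mapping target. A compilable illustration of the corrected check, with simplified types of my own:

#include <stdbool.h>
#include <stdint.h>

#define EFI_MEMORY_RUNTIME  0x8000000000000000ULL
#define NO_TRANSLATION      UINT64_MAX  /* the new sentinel */

struct md_model {
    uint64_t attribute;
    uint64_t virt_addr;
};

/* Returns false when the stub left a runtime region unmapped, which is
 * exactly the condition the patched checks now detect unambiguously. */
static bool virtmap_installed(const struct md_model *md)
{
    if (!(md->attribute & EFI_MEMORY_RUNTIME))
        return true;    /* not a runtime region, nothing to map */
    return md->virt_addr != NO_TRANSLATION;
}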
@@ -765,9 +765,9 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
  * relocated by efi_relocate_kernel.
  * On failure, we exit to the firmware via efi_exit instead of returning.
  */
-unsigned long efi_main(efi_handle_t handle,
-               efi_system_table_t *sys_table_arg,
-               struct boot_params *boot_params)
+asmlinkage unsigned long efi_main(efi_handle_t handle,
+                  efi_system_table_t *sys_table_arg,
+                  struct boot_params *boot_params)
 {
     unsigned long bzimage_addr = (unsigned long)startup_32;
     unsigned long buffer_start, buffer_end;
@@ -38,7 +38,8 @@ SECTIONS
     }
 }
 
-PROVIDE(__efistub__gzdata_size = ABSOLUTE(. - __efistub__gzdata_start));
+PROVIDE(__efistub__gzdata_size =
+    ABSOLUTE(__efistub__gzdata_end - __efistub__gzdata_start));
 
 PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
 PROVIDE(__data_size = ABSOLUTE(_end - _etext));
@@ -41,7 +41,7 @@ static bool __init efi_virtmap_init(void)
 
         if (!(md->attribute & EFI_MEMORY_RUNTIME))
             continue;
-        if (md->virt_addr == 0)
+        if (md->virt_addr == U64_MAX)
             return false;
 
         ret = efi_create_mapping(&efi_mm, md);
@@ -7,6 +7,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/sizes.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -20,19 +21,19 @@ static struct efivars *__efivars;
 
 static DEFINE_SEMAPHORE(efivars_lock);
 
-efi_status_t check_var_size(u32 attributes, unsigned long size)
+static efi_status_t check_var_size(u32 attributes, unsigned long size)
 {
     const struct efivar_operations *fops;
 
     fops = __efivars->ops;
 
     if (!fops->query_variable_store)
-        return EFI_UNSUPPORTED;
+        return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES;
 
     return fops->query_variable_store(attributes, size, false);
 }
-EXPORT_SYMBOL_NS_GPL(check_var_size, EFIVAR);
 
 static
 efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size)
 {
     const struct efivar_operations *fops;
@@ -40,11 +41,10 @@ efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size)
     fops = __efivars->ops;
 
     if (!fops->query_variable_store)
-        return EFI_UNSUPPORTED;
+        return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES;
 
     return fops->query_variable_store(attributes, size, true);
 }
-EXPORT_SYMBOL_NS_GPL(check_var_size_nonblocking, EFIVAR);
 
 /**
  * efivars_kobject - get the kobject for the registered efivars
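The two efivar hunks above change the fallback taken when the backend has no query_variable_store hook: instead of refusing every write with EFI_UNSUPPORTED, anything up to 64 KiB is now accepted, matching the size ceiling efivarfs already imposes on variable data. A userspace rendition of the new decision, with illustrative status values (the real EFI_OUT_OF_RESOURCES carries the high error bit):

#define SZ_64K               0x10000UL
#define EFI_SUCCESS          0UL
#define EFI_OUT_OF_RESOURCES 9UL   /* illustrative value only */

static unsigned long check_var_size_model(unsigned long size,
                                          int firmware_can_query)
{
    if (!firmware_can_query)    /* no query_variable_store hook */
        return size <= SZ_64K ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES;
    /* otherwise the real code defers to fops->query_variable_store() */
    return EFI_SUCCESS;
}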
@@ -274,9 +274,6 @@ extern int amdgpu_vcnfw_log;
 #define AMDGPU_RESET_VCE            (1 << 13)
 #define AMDGPU_RESET_VCE1           (1 << 14)
 
-#define AMDGPU_RESET_LEVEL_SOFT_RECOVERY (1 << 0)
-#define AMDGPU_RESET_LEVEL_MODE2 (1 << 1)
-
 /* max cursor sizes (in pixels) */
 #define CIK_CURSOR_WIDTH 128
 #define CIK_CURSOR_HEIGHT 128
@@ -1065,7 +1062,6 @@ struct amdgpu_device {
 
     struct work_struct      reset_work;
 
-    uint32_t            amdgpu_reset_level_mask;
     bool                job_hang;
 };
@@ -134,7 +134,6 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
     reset_context.method = AMD_RESET_METHOD_NONE;
     reset_context.reset_req_dev = adev;
     clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-    clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
     amdgpu_device_gpu_recover(adev, NULL, &reset_context);
 }
@@ -111,7 +111,7 @@ static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id)
 
     lock_srbm(adev, mec, pipe, 0, 0);
 
-    WREG32(SOC15_REG_OFFSET(GC, 0, regCPC_INT_CNTL),
+    WREG32_SOC15(GC, 0, regCPC_INT_CNTL,
         CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
         CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
@@ -1954,8 +1954,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
         return PTR_ERR(ent);
     }
 
-    debugfs_create_u32("amdgpu_reset_level", 0600, root, &adev->amdgpu_reset_level_mask);
-
     /* Register debugfs entries for amdgpu_ttm */
     amdgpu_ttm_debugfs_init(adev);
     amdgpu_debugfs_pm_init(adev);
@@ -2928,6 +2928,14 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
     amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
     amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
+    /*
+     * Per PMFW team's suggestion, driver needs to handle gfxoff
+     * and df cstate features disablement for gpu reset(e.g. Mode1Reset)
+     * scenario. Add the missing df cstate disablement here.
+     */
+    if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+        dev_warn(adev->dev, "Failed to disallow df cstate");
+
     for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
         if (!adev->ip_blocks[i].status.valid)
             continue;
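This hunk is one half of a handshake that recurs through the SMU hunks later in this diff: before a reset the driver asks the PMFW to disallow DF C-states, while ASICs that have no such prerequisite (the Arcturus and Aldebaran hunks below) short-circuit their set_df_cstate hook during reset or suspend. A compact model of that guard, with invented names and a flag of my own standing in for the per-ASIC policy:

enum df_cstate_model { DF_CSTATE_DISALLOW_M, DF_CSTATE_ALLOW_M };

struct dev_model {
    int in_reset;
    int in_suspend;
    int needs_cstate_prereq;  /* false on Arcturus/Aldebaran-like parts */
};

static int set_df_cstate_model(struct dev_model *dev, enum df_cstate_model st)
{
    (void)st;
    /* Skip the firmware round trip when reset/suspend is already driving
     * the sequence and this ASIC has no cstate prerequisite. */
    if ((dev->in_reset || dev->in_suspend) && !dev->needs_cstate_prereq)
        return 0;
    return 0;  /* stand-in for smu_cmn_send_smc_msg_with_param() */
}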
@@ -5210,7 +5218,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
     reset_context->job = job;
     reset_context->hive = hive;
-
     /*
      * Build list of devices to reset.
      * In case we are in XGMI hive mode, resort the device list
@@ -5337,11 +5344,8 @@ retry:  /* Rest of adevs pre asic reset from XGMI hive. */
         amdgpu_ras_resume(adev);
     } else {
         r = amdgpu_do_asic_reset(device_list_handle, reset_context);
-        if (r && r == -EAGAIN) {
-            set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags);
-            adev->asic_reset_res = 0;
+        if (r && r == -EAGAIN)
             goto retry;
-        }
 
         if (!r && gpu_reset_for_dev_remove)
             goto recover_end;
@@ -5777,7 +5781,6 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
     reset_context.reset_req_dev = adev;
     set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
     set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
-    set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
     adev->no_hw_access = true;
     r = amdgpu_device_pre_asic_reset(adev, &reset_context);
@@ -72,7 +72,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
     reset_context.method = AMD_RESET_METHOD_NONE;
     reset_context.reset_req_dev = adev;
     clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-    clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
     r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
     if (r)
@@ -1950,7 +1950,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
     reset_context.method = AMD_RESET_METHOD_NONE;
     reset_context.reset_req_dev = adev;
     clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-    clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
     amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
 }
@@ -2268,6 +2267,25 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
 
 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
 {
+    if (amdgpu_sriov_vf(adev)) {
+        switch (adev->ip_versions[MP0_HWIP][0]) {
+        case IP_VERSION(13, 0, 2):
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    if (adev->asic_type == CHIP_IP_DISCOVERY) {
+        switch (adev->ip_versions[MP0_HWIP][0]) {
+        case IP_VERSION(13, 0, 0):
+        case IP_VERSION(13, 0, 10):
+            return true;
+        default:
+            return false;
+        }
+    }
+
     return adev->asic_type == CHIP_VEGA10 ||
            adev->asic_type == CHIP_VEGA20 ||
            adev->asic_type == CHIP_ARCTURUS ||
@@ -2311,11 +2329,6 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
         !amdgpu_ras_asic_supported(adev))
         return;
 
-    /* If driver run on sriov guest side, only enable ras for aldebaran */
-    if (amdgpu_sriov_vf(adev) &&
-        adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2))
-        return;
-
     if (!adev->gmc.xgmi.connected_to_cpu) {
         if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
             dev_info(adev->dev, "MEM ECC is active.\n");
@@ -37,8 +37,6 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
 {
     int ret = 0;
 
-    adev->amdgpu_reset_level_mask = 0x1;
-
     switch (adev->ip_versions[MP1_HWIP][0]) {
     case IP_VERSION(13, 0, 2):
         ret = aldebaran_reset_init(adev);
@@ -76,12 +74,6 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
 {
     struct amdgpu_reset_handler *reset_handler = NULL;
 
-    if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
-        return -ENOSYS;
-
-    if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
-        return -ENOSYS;
-
     if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
         reset_handler = adev->reset_cntl->get_reset_handler(
             adev->reset_cntl, reset_context);
@@ -98,12 +90,6 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
     int ret;
     struct amdgpu_reset_handler *reset_handler = NULL;
 
-    if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
-        return -ENOSYS;
-
-    if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
-        return -ENOSYS;
-
     if (adev->reset_cntl)
         reset_handler = adev->reset_cntl->get_reset_handler(
             adev->reset_cntl, reset_context);
@@ -30,8 +30,7 @@ enum AMDGPU_RESET_FLAGS {
 
     AMDGPU_NEED_FULL_RESET = 0,
     AMDGPU_SKIP_HW_RESET = 1,
-    AMDGPU_SKIP_MODE2_RESET = 2,
-    AMDGPU_RESET_FOR_DEVICE_REMOVE = 3,
+    AMDGPU_RESET_FOR_DEVICE_REMOVE = 2,
 };
 
 struct amdgpu_reset_context {
@@ -405,9 +405,6 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
 {
     ktime_t deadline = ktime_add_us(ktime_get(), 10000);
 
-    if (!(ring->adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_SOFT_RECOVERY))
-        return false;
-
     if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
         return false;
@@ -439,6 +439,9 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
     while (cursor.remaining) {
         amdgpu_res_next(&cursor, cursor.size);
 
+        if (!cursor.remaining)
+            break;
+
         /* ttm_resource_ioremap only supports contiguous memory */
         if (end != cursor.start)
             return false;
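The added early break fixes a walk-off-the-end bug: once amdgpu_res_next() consumes the final chunk, the cursor's start field is stale, so the contiguity comparison must not run on it. A self-contained model of the corrected loop, simplified to equal-sized chunks with field names of my own:

#include <stdbool.h>
#include <stdint.h>

struct cursor_model {
    uint64_t start, size, remaining;
};

static void res_next(struct cursor_model *c)
{
    c->remaining -= c->size;
    c->start += c->size;    /* meaningless once remaining hits 0 */
}

static bool chunks_contiguous(struct cursor_model c)
{
    while (c.remaining) {
        uint64_t end = c.start + c.size;

        res_next(&c);
        if (!c.remaining)
            break;          /* the fix: stop before reading a spent cursor */
        if (end != c.start)
            return false;   /* hole between chunks */
    }
    return true;
}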
@@ -726,6 +726,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
         adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
     }
 
+    if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+        /* VF MMIO access (except mailbox range) from CPU
+         * will be blocked during sriov runtime
+         */
+        adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
+
     /* we have the ability to check now */
     if (amdgpu_sriov_vf(adev)) {
         switch (adev->asic_type) {
@@ -31,6 +31,7 @@
 #define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
 #define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* thw whole GPU is pass through for VM */
 #define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */
+#define AMDGPU_VF_MMIO_ACCESS_PROTECT  (1 << 5) /* MMIO write access is not allowed in sriov runtime */
 
 /* flags for indirect register access path supported by rlcg for sriov */
 #define AMDGPU_RLCG_GC_WRITE_LEGACY    (0x8 << 28)
@@ -297,6 +298,9 @@ struct amdgpu_video_codec_info;
 #define amdgpu_passthrough(adev) \
 ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
 
+#define amdgpu_sriov_vf_mmio_access_protection(adev) \
+((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
+
 static inline bool is_virtual_machine(void)
 {
 #if defined(CONFIG_X86)
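These virt hunks follow amdgpu's usual pattern for introducing a capability: detection sets a bit in virt.caps, a macro tests it, and policy code (the vm_manager hunk next) consults the macro. The bit arithmetic, runnable standalone as a sketch:

#include <assert.h>
#include <stdint.h>

#define AMDGPU_SRIOV_CAPS_IS_VF       (1 << 2)
#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5)  /* the newly added cap */

struct virt_model { uint32_t caps; };

#define mmio_access_protected(v) ((v)->caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)

int main(void)
{
    struct virt_model v = { .caps = AMDGPU_SRIOV_CAPS_IS_VF };

    assert(!mmio_access_protected(&v));
    v.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;  /* Sienna Cichlid VF path */
    assert(mmio_access_protected(&v));
    return 0;
}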
@@ -2338,7 +2338,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
      */
 #ifdef CONFIG_X86_64
     if (amdgpu_vm_update_mode == -1) {
-        if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+        /* For asic with VF MMIO access protection
+         * avoid using CPU for VM table updates
+         */
+        if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+            !amdgpu_sriov_vf_mmio_access_protection(adev))
             adev->vm_manager.vm_update_mode =
                 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
         else
@@ -116,8 +116,15 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
                     DMA_RESV_USAGE_BOOKKEEP);
     }
 
-    if (fence && !p->immediate)
+    if (fence && !p->immediate) {
+        /*
+         * Most hw generations now have a separate queue for page table
+         * updates, but when the queue is shared with userspace we need
+         * the extra CPU round trip to correctly flush the TLB.
+         */
+        set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
         swap(*fence, f);
+    }
     dma_fence_put(f);
     return 0;
@@ -1571,7 +1571,7 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
         WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);
 
         /* Enable trap for each kfd vmid. */
-        data = RREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL));
+        data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
         data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
     }
     soc21_grbm_select(adev, 0, 0, 0, 0);
@@ -5076,6 +5076,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
     case IP_VERSION(11, 0, 0):
     case IP_VERSION(11, 0, 1):
     case IP_VERSION(11, 0, 2):
+    case IP_VERSION(11, 0, 3):
         gfx_v11_0_update_gfx_clock_gating(adev,
                 state == AMD_CG_STATE_GATE);
         break;
@@ -186,6 +186,10 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
     /* Use register 17 for GART */
     const unsigned eng = 17;
     unsigned int i;
+    unsigned char hub_ip = 0;
+
+    hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
+           GC_HWIP : MMHUB_HWIP;
 
     spin_lock(&adev->gmc.invalidate_lock);
     /*
@@ -199,8 +203,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
     if (use_semaphore) {
         for (i = 0; i < adev->usec_timeout; i++) {
             /* a read return value of 1 means semaphore acuqire */
-            tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
-                        hub->eng_distance * eng);
+            tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+                        hub->eng_distance * eng, hub_ip);
             if (tmp & 0x1)
                 break;
             udelay(1);
@@ -210,12 +214,12 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
             DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
     }
 
-    WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+    WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);
 
     /* Wait for ACK with a delay.*/
     for (i = 0; i < adev->usec_timeout; i++) {
-        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
-                    hub->eng_distance * eng);
+        tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
+                    hub->eng_distance * eng, hub_ip);
         tmp &= 1 << vmid;
         if (tmp)
             break;
@@ -229,8 +233,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
      * add semaphore release after invalidation,
      * write with 0 means semaphore release
      */
-    WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
-              hub->eng_distance * eng, 0);
+    WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+              hub->eng_distance * eng, 0, hub_ip);
 
     /* Issue additional private vm invalidation to MMHUB */
     if ((vmhub != AMDGPU_GFXHUB_0) &&
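The four gmc_v11 hunks serve a single change: the VM-invalidation registers are now touched through the RLC-aware accessors, which must know whether the target belongs to the GC or the MMHUB block, hence the hub_ip argument threaded through every call. A sketch of the selection step in isolation, with enum values that are placeholders of mine:

enum hwip_model { GC_HWIP_M, MMHUB_HWIP_M };
enum vmhub_model { GFXHUB_0_M, MMHUB_0_M };

/* Mirrors the added lookup: GFX-hub registers are owned by GC, every
 * other hub by MMHUB; the RLC-mediated accessor uses this to choose an
 * indirect path that remains legal when SR-IOV blocks direct MMIO. */
static enum hwip_model hub_ip_for(enum vmhub_model vmhub)
{
    return vmhub == GFXHUB_0_M ? GC_HWIP_M : MMHUB_HWIP_M;
}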
@@ -1156,6 +1156,42 @@ static int mes_v11_0_sw_fini(void *handle)
     return 0;
 }
 
+static void mes_v11_0_kiq_dequeue_sched(struct amdgpu_device *adev)
+{
+    uint32_t data;
+    int i;
+
+    mutex_lock(&adev->srbm_mutex);
+    soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0);
+
+    /* disable the queue if it's active */
+    if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
+        WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
+        for (i = 0; i < adev->usec_timeout; i++) {
+            if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+                break;
+            udelay(1);
+        }
+    }
+    data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+    data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
+                 DOORBELL_EN, 0);
+    data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
+                 DOORBELL_HIT, 1);
+    WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+    WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+    WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0);
+    WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0);
+    WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0);
+
+    soc21_grbm_select(adev, 0, 0, 0, 0);
+    mutex_unlock(&adev->srbm_mutex);
+
+    adev->mes.ring.sched.ready = false;
+}
+
 static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
 {
     uint32_t tmp;
|
|||
|
||||
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mes.ring.sched.ready)
|
||||
mes_v11_0_kiq_dequeue_sched(adev);
|
||||
|
||||
mes_v11_0_enable(adev, false);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1262,9 +1301,6 @@ failure:
|
|||
|
||||
static int mes_v11_0_hw_fini(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
adev->mes.ring.sched.ready = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -1296,7 +1332,8 @@ static int mes_v11_0_late_init(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-    if (!amdgpu_in_reset(adev))
+    if (!amdgpu_in_reset(adev) &&
+        (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
         amdgpu_mes_self_test(adev);
 
     return 0;
@@ -290,7 +290,6 @@ flr_done:
     reset_context.method = AMD_RESET_METHOD_NONE;
     reset_context.reset_req_dev = adev;
     clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-    clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
     amdgpu_device_gpu_recover(adev, NULL, &reset_context);
 }
@@ -317,7 +317,6 @@ flr_done:
     reset_context.method = AMD_RESET_METHOD_NONE;
     reset_context.reset_req_dev = adev;
     clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-    clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
     amdgpu_device_gpu_recover(adev, NULL, &reset_context);
 }
@@ -529,7 +529,6 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
     reset_context.method = AMD_RESET_METHOD_NONE;
     reset_context.reset_req_dev = adev;
     clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-    clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
     amdgpu_device_gpu_recover(adev, NULL, &reset_context);
 }
@@ -1417,11 +1417,6 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
         WREG32_SDMA(i, mmSDMA0_CNTL, temp);
 
         if (!amdgpu_sriov_vf(adev)) {
-            ring = &adev->sdma.instance[i].ring;
-            adev->nbio.funcs->sdma_doorbell_range(adev, i,
-                ring->use_doorbell, ring->doorbell_index,
-                adev->doorbell_index.sdma_doorbell_range);
-
             /* unhalt engine */
             temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
             temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
@@ -31,12 +31,23 @@
 #include "amdgpu_psp.h"
 #include "amdgpu_xgmi.h"
 
+static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_ctl)
+{
+#if 0
+    struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+    if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7) &&
+        adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev))
+        return true;
+#endif
+    return false;
+}
+
 static struct amdgpu_reset_handler *
 sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
                 struct amdgpu_reset_context *reset_context)
 {
     struct amdgpu_reset_handler *handler;
     struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
 
     if (reset_context->method != AMD_RESET_METHOD_NONE) {
         list_for_each_entry(handler, &reset_ctl->reset_handlers,
@@ -44,15 +55,13 @@ sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
             if (handler->reset_method == reset_context->method)
                 return handler;
         }
-    } else {
-        list_for_each_entry(handler, &reset_ctl->reset_handlers,
+    }
+
+    if (sienna_cichlid_is_mode2_default(reset_ctl)) {
+        list_for_each_entry (handler, &reset_ctl->reset_handlers,
                     handler_list) {
-            if (handler->reset_method == AMD_RESET_METHOD_MODE2 &&
-                adev->pm.fw_version >= 0x3a5500 &&
-                !amdgpu_sriov_vf(adev)) {
-                reset_context->method = AMD_RESET_METHOD_MODE2;
+            if (handler->reset_method == AMD_RESET_METHOD_MODE2)
                 return handler;
-            }
         }
     }
@@ -1211,6 +1211,20 @@ static int soc15_common_sw_fini(void *handle)
     return 0;
 }
 
+static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
+{
+    int i;
+
+    /* sdma doorbell range is programed by hypervisor */
+    if (!amdgpu_sriov_vf(adev)) {
+        for (i = 0; i < adev->sdma.num_instances; i++) {
+            adev->nbio.funcs->sdma_doorbell_range(adev, i,
+                true, adev->doorbell_index.sdma_engine[i] << 1,
+                adev->doorbell_index.sdma_doorbell_range);
+        }
+    }
+}
+
 static int soc15_common_hw_init(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1230,6 +1244,13 @@ static int soc15_common_hw_init(void *handle)
 
     /* enable the doorbell aperture */
     soc15_enable_doorbell_aperture(adev, true);
+    /* HW doorbell routing policy: doorbell writing not
+     * in SDMA/IH/MM/ACV range will be routed to CP. So
+     * we need to init SDMA doorbell range prior
+     * to CP ip block init and ring test. IH already
+     * happens before CP.
+     */
+    soc15_sdma_doorbell_range_init(adev);
 
     return 0;
 }
@@ -423,6 +423,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
     case IP_VERSION(11, 0, 0):
         return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
     case IP_VERSION(11, 0, 2):
+    case IP_VERSION(11, 0, 3):
         return false;
     default:
         return true;
@@ -636,7 +637,11 @@ static int soc21_common_early_init(void *handle)
         break;
     case IP_VERSION(11, 0, 3):
         adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
-            AMD_CG_SUPPORT_JPEG_MGCG;
+            AMD_CG_SUPPORT_JPEG_MGCG |
+            AMD_CG_SUPPORT_GFX_CGCG |
+            AMD_CG_SUPPORT_GFX_CGLS |
+            AMD_CG_SUPPORT_REPEATER_FGCG |
+            AMD_CG_SUPPORT_GFX_MGCG;
         adev->pg_flags = AMD_PG_SUPPORT_VCN |
             AMD_PG_SUPPORT_VCN_DPG |
             AMD_PG_SUPPORT_JPEG;
@@ -77,7 +77,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
@@ -262,8 +262,9 @@ struct kfd2kgd_calls {
             uint32_t queue_id);
 
     int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd,
-            uint32_t reset_type, unsigned int timeout,
-            uint32_t pipe_id, uint32_t queue_id);
+            enum kfd_preempt_type reset_type,
+            unsigned int timeout, uint32_t pipe_id,
+            uint32_t queue_id);
 
     bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd);
@@ -3362,11 +3362,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
     if (adev->pm.sysfs_initialized)
         return 0;
 
+    INIT_LIST_HEAD(&adev->pm.pm_attr_list);
+
     if (adev->pm.dpm_enabled == 0)
         return 0;
 
-    INIT_LIST_HEAD(&adev->pm.pm_attr_list);
-
     adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
                                DRIVER_NAME, adev,
                                hwmon_groups);
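Moving INIT_LIST_HEAD() above the dpm_enabled early return matters because teardown walks pm_attr_list unconditionally; leaving the head zeroed when DPM is off turns that walk into a NULL-pointer dereference. A minimal demonstration of the invariant, with model types of my own:

#include <assert.h>
#include <stddef.h>

struct list_head_model {
    struct list_head_model *next, *prev;
};

#define INIT_LIST_HEAD_M(h) ((h)->next = (h)->prev = (h))
#define list_empty_m(h)     ((h)->next == (h))

int main(void)
{
    struct list_head_model pm_attr_list = { NULL, NULL };

    /* After the fix the head is initialised before any early return, so
     * an unconditional traversal in teardown sees an empty list instead
     * of following a NULL next pointer. */
    INIT_LIST_HEAD_M(&pm_attr_list);
    assert(list_empty_m(&pm_attr_list));
    return 0;
}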
@@ -67,21 +67,22 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
 int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
         uint32_t *speed)
 {
-    struct amdgpu_device *adev = hwmgr->adev;
-    uint32_t duty100, duty;
-    uint64_t tmp64;
+    uint32_t current_rpm;
+    uint32_t percent = 0;
 
-    duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
-                CG_FDO_CTRL1, FMAX_DUTY100);
-    duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
-                CG_THERMAL_STATUS, FDO_PWM_DUTY);
+    if (hwmgr->thermal_controller.fanInfo.bNoFan)
+        return 0;
 
-    if (!duty100)
-        return -EINVAL;
+    if (vega10_get_current_rpm(hwmgr, &current_rpm))
+        return -1;
 
-    tmp64 = (uint64_t)duty * 255;
-    do_div(tmp64, duty100);
-    *speed = MIN((uint32_t)tmp64, 255);
+    if (hwmgr->thermal_controller.
+            advanceFanControlParameters.usMaxFanRPM != 0)
+        percent = current_rpm * 255 /
+            hwmgr->thermal_controller.
+            advanceFanControlParameters.usMaxFanRPM;
+
+    *speed = MIN(percent, 255);
 
     return 0;
 }
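The rewritten vega10 helper reports PWM derived from the measured fan RPM rather than the FDO duty registers. The new arithmetic, lifted into a standalone sketch with names of my own:

#include <assert.h>
#include <stdint.h>

/* pwm = rpm * 255 / max_rpm, clamped to the 0..255 PWM range; a zero
 * max_rpm (no fan table) yields 0 rather than dividing by zero. */
static uint32_t pwm_from_rpm(uint32_t current_rpm, uint32_t max_rpm)
{
    uint32_t percent = 0;

    if (max_rpm != 0)
        percent = current_rpm * 255 / max_rpm;
    return percent < 255 ? percent : 255;
}

int main(void)
{
    assert(pwm_from_rpm(1650, 3300) == 127);  /* ~50% duty */
    assert(pwm_from_rpm(5000, 3300) == 255);  /* clamped */
    assert(pwm_from_rpm(1650, 0) == 0);
    return 0;
}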
@@ -1314,8 +1314,8 @@ static int smu_smc_hw_setup(struct smu_context *smu)
 
     ret = smu_enable_thermal_alert(smu);
     if (ret) {
-      dev_err(adev->dev, "Failed to enable thermal alert!\n");
-      return ret;
+        dev_err(adev->dev, "Failed to enable thermal alert!\n");
+        return ret;
     }
 
     ret = smu_notify_display_change(smu);
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if
 // any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 5
+#define PMFW_DRIVER_IF_VERSION 7
 
 typedef struct {
   int32_t value;
@@ -163,8 +163,8 @@ typedef struct {
   uint16_t DclkFrequency;    //[MHz]
   uint16_t MemclkFrequency;  //[MHz]
   uint16_t spare;            //[centi]
-  uint16_t UvdActivity;      //[centi]
   uint16_t GfxActivity;      //[centi]
+  uint16_t UvdActivity;      //[centi]
 
   uint16_t Voltage[2];       //[mV] indices: VDDCR_VDD, VDDCR_SOC
   uint16_t Current[2];       //[mA] indices: VDDCR_VDD, VDDCR_SOC
@@ -199,6 +199,19 @@ typedef struct {
   uint16_t DeviceState;
   uint16_t CurTemp;                     //[centi-Celsius]
   uint16_t spare2;
+
+  uint16_t AverageGfxclkFrequency;
+  uint16_t AverageFclkFrequency;
+  uint16_t AverageGfxActivity;
+  uint16_t AverageSocclkFrequency;
+  uint16_t AverageVclkFrequency;
+  uint16_t AverageVcnActivity;
+  uint16_t AverageDRAMReads;          //Filtered DF Bandwidth::DRAM Reads
+  uint16_t AverageDRAMWrites;         //Filtered DF Bandwidth::DRAM Writes
+  uint16_t AverageSocketPower;        //Filtered value of CurrentSocketPower
+  uint16_t AverageCorePower;          //Filtered of [sum of CorePower[8]])
+  uint16_t AverageCoreC0Residency[8]; //Filtered of [average C0 residency % per core]
+  uint32_t MetricsCounter;            //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing.
 } SmuMetrics_t;
 
 typedef struct {
|
|
@ -28,7 +28,7 @@
|
|||
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
|
||||
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
|
||||
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
|
||||
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
|
||||
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
|
||||
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
|
||||
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
|
||||
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
|
||||
|
|
|
@@ -2242,9 +2242,17 @@ static void arcturus_get_unique_id(struct smu_context *smu)
 static int arcturus_set_df_cstate(struct smu_context *smu,
                   enum pp_df_cstate state)
 {
+    struct amdgpu_device *adev = smu->adev;
     uint32_t smu_version;
     int ret;
 
+    /*
+     * Arcturus does not need the cstate disablement
+     * prerequisite for gpu reset.
+     */
+    if (amdgpu_in_reset(adev) || adev->in_suspend)
+        return 0;
+
     ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
     if (ret) {
         dev_err(smu->adev->dev, "Failed to get smu version!\n");
@@ -1640,6 +1640,15 @@ static bool aldebaran_is_baco_supported(struct smu_context *smu)
 static int aldebaran_set_df_cstate(struct smu_context *smu,
                    enum pp_df_cstate state)
 {
+    struct amdgpu_device *adev = smu->adev;
+
+    /*
+     * Aldebaran does not need the cstate disablement
+     * prerequisite for gpu reset.
+     */
+    if (amdgpu_in_reset(adev) || adev->in_suspend)
+        return 0;
+
     return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
@@ -211,7 +211,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
         return 0;
 
     if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
-        (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)))
+        (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) ||
+        (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)))
         return 0;
 
     /* override pptable_id from driver parameter */
@@ -454,9 +455,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
         dev_info(adev->dev, "override pptable id %d\n", pptable_id);
     } else {
         pptable_id = smu->smu_table.boot_values.pp_table_id;
-
-        if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
-            pptable_id = 6666;
     }
 
     /* force using vbios pptable in sriov mode */
@@ -119,6 +119,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
     MSG_MAP(NotifyPowerSource,      PPSMC_MSG_NotifyPowerSource,           0),
     MSG_MAP(Mode1Reset,         PPSMC_MSG_Mode1Reset,                  0),
     MSG_MAP(PrepareMp1ForUnload,        PPSMC_MSG_PrepareMp1ForUnload,         0),
+    MSG_MAP(DFCstateControl,        PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -1753,6 +1754,15 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
     return ret;
 }
 
+static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
+                     enum pp_df_cstate state)
+{
+    return smu_cmn_send_smc_msg_with_param(smu,
+                           SMU_MSG_DFCstateControl,
+                           state,
+                           NULL);
+}
+
 static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
     .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
     .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -1822,6 +1832,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
     .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
     .mode1_reset = smu_v13_0_mode1_reset,
     .set_mp1_state = smu_v13_0_0_set_mp1_state,
+    .set_df_cstate = smu_v13_0_0_set_df_cstate,
 };
 
 void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
@@ -121,6 +121,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
     MSG_MAP(Mode1Reset,         PPSMC_MSG_Mode1Reset,                  0),
     MSG_MAP(PrepareMp1ForUnload,        PPSMC_MSG_PrepareMp1ForUnload,         0),
     MSG_MAP(SetMGpuFanBoostLimitRpm,    PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
+    MSG_MAP(DFCstateControl,        PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1587,6 +1588,16 @@ static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
 
     return true;
 }
 
+static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
+                     enum pp_df_cstate state)
+{
+    return smu_cmn_send_smc_msg_with_param(smu,
+                           SMU_MSG_DFCstateControl,
+                           state,
+                           NULL);
+}
+
 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
     .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
     .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1649,6 +1660,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
     .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
     .mode1_reset = smu_v13_0_mode1_reset,
     .set_mp1_state = smu_v13_0_7_set_mp1_state,
+    .set_df_cstate = smu_v13_0_7_set_df_cstate,
 };
 
 void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
Some files were not shown here because too many files changed in this merge.