Merge 5.3-rc5 into char-misc-next
We need the char/misc fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Commit e70c971d7d
@@ -19,7 +19,9 @@ quiet_cmd_mk_schema = SCHEMA  $@

 DT_DOCS = $(shell \
 	cd $(srctree)/$(src) && \
-	find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
+	find * \( -name '*.yaml' ! \
+		-name $(DT_TMP_SCHEMA) ! \
+		-name '*.example.dt.yaml' \) \
 	)

 DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
@@ -7,18 +7,6 @@ Required properties:
 - phy-mode : See ethernet.txt file in the same directory

 Optional properties:
-- phy-reset-gpios : Should specify the gpio for phy reset
-- phy-reset-duration : Reset duration in milliseconds.  Should present
-  only if property "phy-reset-gpios" is available.  Missing the property
-  will have the duration be 1 millisecond.  Numbers greater than 1000 are
-  invalid and 1 millisecond will be used instead.
-- phy-reset-active-high : If present then the reset sequence using the GPIO
-  specified in the "phy-reset-gpios" property is reversed (H=reset state,
-  L=operation state).
-- phy-reset-post-delay : Post reset delay in milliseconds. If present then
-  a delay of phy-reset-post-delay milliseconds will be observed after the
-  phy-reset-gpios has been toggled. Can be omitted thus no delay is
-  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
 - phy-supply : regulator that powers the Ethernet PHY.
 - phy-handle : phandle to the PHY device connected to this device.
 - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
@@ -47,11 +35,27 @@ Optional properties:
 For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse
 per second interrupt associated with 1588 precision time protocol(PTP).

 Optional subnodes:
 - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
   according to phy.txt in the same directory

+Deprecated optional properties:
+	To avoid these, create a phy node according to phy.txt in the same
+	directory, and point the fec's "phy-handle" property to it. Then use
+	the phy's reset binding, again described by phy.txt.
+- phy-reset-gpios : Should specify the gpio for phy reset
+- phy-reset-duration : Reset duration in milliseconds.  Should present
+  only if property "phy-reset-gpios" is available.  Missing the property
+  will have the duration be 1 millisecond.  Numbers greater than 1000 are
+  invalid and 1 millisecond will be used instead.
+- phy-reset-active-high : If present then the reset sequence using the GPIO
+  specified in the "phy-reset-gpios" property is reversed (H=reset state,
+  L=operation state).
+- phy-reset-post-delay : Post reset delay in milliseconds. If present then
+  a delay of phy-reset-post-delay milliseconds will be observed after the
+  phy-reset-gpios has been toggled. Can be omitted thus no delay is
+  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
+
 Example:

 ethernet@83fec000 {
@@ -37,7 +37,8 @@ properties:
   hwlocks: true

   st,syscfg:
-    $ref: "/schemas/types.yaml#/definitions/phandle-array"
+    allOf:
+      - $ref: "/schemas/types.yaml#/definitions/phandle-array"
     description: Should be phandle/offset/mask
     items:
       - description: Phandle to the syscon node which includes IRQ mux selection.
18	MAINTAINERS
@@ -6441,6 +6441,14 @@ S: Maintained
 F: drivers/perf/fsl_imx8_ddr_perf.c
 F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt

+FREESCALE IMX I2C DRIVER
+M: Oleksij Rempel <o.rempel@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: drivers/i2c/busses/i2c-imx.c
+F: Documentation/devicetree/bindings/i2c/i2c-imx.txt
+
 FREESCALE IMX LPI2C DRIVER
 M: Dong Aisheng <aisheng.dong@nxp.com>
 L: linux-i2c@vger.kernel.org
@@ -7452,7 +7460,7 @@ F: drivers/net/hyperv/
 F: drivers/scsi/storvsc_drv.c
 F: drivers/uio/uio_hv_generic.c
 F: drivers/video/fbdev/hyperv_fb.c
-F: drivers/iommu/hyperv_iommu.c
+F: drivers/iommu/hyperv-iommu.c
 F: net/vmw_vsock/hyperv_transport.c
 F: include/clocksource/hyperv_timer.h
 F: include/linux/hyperv.h
@@ -8064,6 +8072,13 @@ T: git git://git.code.sf.net/p/intel-sas/isci
 S: Supported
 F: drivers/scsi/isci/

+INTEL CPU family model numbers
+M: Tony Luck <tony.luck@intel.com>
+M: x86@kernel.org
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: arch/x86/include/asm/intel-family.h
+
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M: Jani Nikula <jani.nikula@linux.intel.com>
 M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
@@ -8416,7 +8431,6 @@ L: linux-xfs@vger.kernel.org
 L: linux-fsdevel@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
 S: Supported
-F: fs/iomap.c
 F: fs/iomap/
 F: include/linux/iomap.h
2	Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Bobtail Squid

 # *DOCUMENTATION*
@@ -2405,9 +2405,7 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev))
-		return __get_dma_pgprot(attrs, prot);
-	return prot;
+	return __get_dma_pgprot(attrs, prot);
 }

 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
@@ -184,9 +184,17 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
 };

 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+	/*
+	 * We already refuse to boot CPUs that don't support our configured
+	 * page size, so we can only detect mismatches for a page size other
+	 * than the one we're currently using. Unfortunately, SoCs like this
+	 * exist in the wild so, even though we don't like it, we'll have to go
+	 * along with it and treat them as non-strict.
+	 */
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
 	/* Linux shouldn't care about secure memory */
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
@@ -73,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)

 	if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-		struct plt_entry trampoline;
+		struct plt_entry trampoline, *dst;
 		struct module *mod;

 		/*
@@ -106,23 +106,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		 * to check if the actual opcodes are in fact identical,
 		 * regardless of the offset in memory so use memcmp() instead.
 		 */
-		trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
-		if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
-			   sizeof(trampoline))) {
-			if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
+		dst = mod->arch.ftrace_trampoline;
+		trampoline = get_plt_entry(addr, dst);
+		if (memcmp(dst, &trampoline, sizeof(trampoline))) {
+			if (plt_entry_is_initialized(dst)) {
 				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 				return -EINVAL;
 			}

 			/* point the trampoline to our ftrace entry point */
 			module_disable_ro(mod);
-			*mod->arch.ftrace_trampoline = trampoline;
+			*dst = trampoline;
 			module_enable_ro(mod, true);

-			/* update trampoline before patching in the branch */
-			smp_wmb();
+			/*
+			 * Ensure updated trampoline is visible to instruction
+			 * fetch before we patch in the branch.
+			 */
+			__flush_icache_range((unsigned long)&dst[0],
+					     (unsigned long)&dst[1]);
 		}
-		addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+		addr = (unsigned long)dst;
 #else /* CONFIG_ARM64_MODULE_PLTS */
 		return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */
@@ -14,9 +14,7 @@
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
-		return pgprot_writecombine(prot);
-	return prot;
+	return pgprot_writecombine(prot);
 }

 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
@@ -121,7 +121,6 @@ config PPC
 	select ARCH_32BIT_OFF_T if PPC32
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
-	select ARCH_HAS_DMA_MMAP_PGPROT
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
@@ -49,8 +49,7 @@ obj-y := cputable.o ptrace.o syscalls.o \
 	signal.o sysfs.o cacheinfo.o time.o \
 	prom.o traps.o setup-common.o \
 	udbg.o misc.o io.o misc_$(BITS).o \
-	of_platform.o prom_parse.o \
-	dma-common.o
+	of_platform.o prom_parse.o
 obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
 	signal_64.o ptrace32.o \
 	paca.o nvram_64.o firmware.o
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Contains common dma routines for all powerpc platforms.
- *
- * Copyright (C) 2019 Shawn Anastasio.
- */
-
-#include <linux/mm.h>
-#include <linux/dma-noncoherent.h>
-
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs)
-{
-	if (!dev_is_dma_coherent(dev))
-		return pgprot_noncached(prot);
-	return prot;
-}
@@ -54,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_HVC_RISCV_SBI=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
 CONFIG_SPI=y
 CONFIG_SPI_SIFIVE=y
 # CONFIG_PTP_1588_CLOCK is not set
@@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCIE_XILINX=y
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
@@ -53,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_HVC_RISCV_SBI=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_DRM=y
 CONFIG_DRM_RADEON=y
@@ -16,7 +16,13 @@ extern void __fstate_restore(struct task_struct *restore_from);

 static inline void __fstate_clean(struct pt_regs *regs)
 {
-	regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
+}
+
+static inline void fstate_off(struct task_struct *task,
+			      struct pt_regs *regs)
+{
+	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
 }

 static inline void fstate_save(struct task_struct *task,
@@ -53,10 +53,17 @@ static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
 }

 #define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
 #define flush_tlb_range(vma, start, end) \
 	remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-#define flush_tlb_mm(mm) \
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long addr)
+{
+	flush_tlb_range(vma, addr, addr + PAGE_SIZE);
+}
+
+#define flush_tlb_mm(mm) \
 	remote_sfence_vma(mm_cpumask(mm), 0, -1)

 #endif /* CONFIG_SMP */
@@ -64,8 +64,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
 	unsigned long sp)
 {
 	regs->sstatus = SR_SPIE;
-	if (has_fpu)
+	if (has_fpu) {
 		regs->sstatus |= SR_FS_INITIAL;
+		/*
+		 * Restore the initial value to the FP register
+		 * before starting the user program.
+		 */
+		fstate_restore(current, regs);
+	}
 	regs->sepc = pc;
 	regs->sp = sp;
 	set_fs(USER_DS);
@@ -75,10 +81,11 @@ void flush_thread(void)
 {
 #ifdef CONFIG_FPU
 	/*
-	 * Reset FPU context
+	 * Reset FPU state and context
 	 *	frm: round to nearest, ties to even (IEEE default)
 	 *	fflags: accrued exceptions cleared
 	 */
+	fstate_off(current, task_pt_regs(current));
 	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
 #endif
 }
@@ -475,8 +475,6 @@ static void print_sh_insn(u32 memaddr, u16 insn)
 			printk("dbr");
 			break;
 		case FD_REG_N:
-			if (0)
-				goto d_reg_n;
 		case F_REG_N:
 			printk("fr%d", rn);
 			break;
@@ -488,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
 				printk("xd%d", rn & ~1);
 				break;
 			}
-		d_reg_n:
+			/* else, fall through */
 		case D_REG_N:
 			printk("dr%d", rn);
 			break;
@@ -497,6 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
 				printk("xd%d", rm & ~1);
 				break;
 			}
+			/* else, fall through */
 		case D_REG_M:
 			printk("dr%d", rm);
 			break;
@@ -157,6 +157,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
 	switch (sh_type) {
 	case SH_BREAKPOINT_READ:
 		*gen_type = HW_BREAKPOINT_R;
+		break;
 	case SH_BREAKPOINT_WRITE:
 		*gen_type = HW_BREAKPOINT_W;
 		break;
@@ -18,6 +18,20 @@
  * Note: efi_info is commonly left uninitialized, but that field has a
  * private magic, so it is better to leave it unchanged.
  */
+
+#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
+
+#define BOOT_PARAM_PRESERVE(struct_member) \
+{ \
+	.start = offsetof(struct boot_params, struct_member), \
+	.len   = sizeof_mbr(struct boot_params, struct_member), \
+}
+
+struct boot_params_to_save {
+	unsigned int start;
+	unsigned int len;
+};
+
 static void sanitize_boot_params(struct boot_params *boot_params)
 {
 	/*
@@ -35,21 +49,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
 	 * problems again.
 	 */
 	if (boot_params->sentinel) {
-		/* fields in boot_params are left uninitialized, clear them */
-		boot_params->acpi_rsdp_addr = 0;
-		memset(&boot_params->ext_ramdisk_image, 0,
-		       (char *)&boot_params->efi_info -
-			(char *)&boot_params->ext_ramdisk_image);
-		memset(&boot_params->kbd_status, 0,
-		       (char *)&boot_params->hdr -
-		       (char *)&boot_params->kbd_status);
-		memset(&boot_params->_pad7[0], 0,
-		       (char *)&boot_params->edd_mbr_sig_buffer[0] -
-			(char *)&boot_params->_pad7[0]);
-		memset(&boot_params->_pad8[0], 0,
-		       (char *)&boot_params->eddbuf[0] -
-			(char *)&boot_params->_pad8[0]);
-		memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+		static struct boot_params scratch;
+		char *bp_base = (char *)boot_params;
+		char *save_base = (char *)&scratch;
+		int i;
+
+		const struct boot_params_to_save to_save[] = {
+			BOOT_PARAM_PRESERVE(screen_info),
+			BOOT_PARAM_PRESERVE(apm_bios_info),
+			BOOT_PARAM_PRESERVE(tboot_addr),
+			BOOT_PARAM_PRESERVE(ist_info),
+			BOOT_PARAM_PRESERVE(acpi_rsdp_addr),
+			BOOT_PARAM_PRESERVE(hd0_info),
+			BOOT_PARAM_PRESERVE(hd1_info),
+			BOOT_PARAM_PRESERVE(sys_desc_table),
+			BOOT_PARAM_PRESERVE(olpc_ofw_header),
+			BOOT_PARAM_PRESERVE(efi_info),
+			BOOT_PARAM_PRESERVE(alt_mem_k),
+			BOOT_PARAM_PRESERVE(scratch),
+			BOOT_PARAM_PRESERVE(e820_entries),
+			BOOT_PARAM_PRESERVE(eddbuf_entries),
+			BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+			BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+			BOOT_PARAM_PRESERVE(e820_table),
+			BOOT_PARAM_PRESERVE(eddbuf),
+		};
+
+		memset(&scratch, 0, sizeof(scratch));
+
+		for (i = 0; i < ARRAY_SIZE(to_save); i++) {
+			memcpy(save_base + to_save[i].start,
+			       bp_base + to_save[i].start, to_save[i].len);
+		}
+
+		memcpy(boot_params, save_base, sizeof(*boot_params));
 	}
 }
@@ -184,7 +184,8 @@ void __init default_setup_apic_routing(void)
 			def_to_bigsmp = 0;
 			break;
 		}
-		/* If P4 and above fall through */
+		/* P4 and above */
+		/* fall through */
 	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		def_to_bigsmp = 1;
@@ -17,6 +17,12 @@
  */
 static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);

+/*
+ * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
+ * hardware or BIOS before kernel boot.
+ */
+static u32 orig_umwait_control_cached __ro_after_init;
+
 /*
  * Serialize access to umwait_control_cached and IA32_UMWAIT_CONTROL MSR in
  * the sysfs write functions.
@@ -52,6 +58,23 @@ static int umwait_cpu_online(unsigned int cpu)
 	return 0;
 }

+/*
+ * The CPU hotplug callback sets the control MSR to the original control
+ * value.
+ */
+static int umwait_cpu_offline(unsigned int cpu)
+{
+	/*
+	 * This code is protected by the CPU hotplug already and
+	 * orig_umwait_control_cached is never changed after it caches
+	 * the original control MSR value in umwait_init(). So there
+	 * is no race condition here.
+	 */
+	wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0);
+
+	return 0;
+}
+
 /*
  * On resume, restore IA32_UMWAIT_CONTROL MSR on the boot processor which
  * is the only active CPU at this time. The MSR is set up on the APs via the
@@ -185,8 +208,22 @@ static int __init umwait_init(void)
 	if (!boot_cpu_has(X86_FEATURE_WAITPKG))
 		return -ENODEV;

+	/*
+	 * Cache the original control MSR value before the control MSR is
+	 * changed. This is the only place where orig_umwait_control_cached
+	 * is modified.
+	 */
+	rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);
+
 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online",
-				umwait_cpu_online, NULL);
+				umwait_cpu_online, umwait_cpu_offline);
+	if (ret < 0) {
+		/*
+		 * On failure, the control MSR on all CPUs has the
+		 * original control value.
+		 */
+		return ret;
+	}

 	register_syscore_ops(&umwait_syscore_ops);
@@ -178,13 +178,15 @@ void FPU_printall(void)
 	for (i = 0; i < 8; i++) {
 		FPU_REG *r = &st(i);
 		u_char tagi = FPU_gettagi(i);
+
 		switch (tagi) {
 		case TAG_Empty:
 			continue;
-			break;
 		case TAG_Zero:
 		case TAG_Special:
+			/* Update tagi for the printk below */
 			tagi = FPU_Special(r);
+			/* fall through */
 		case TAG_Valid:
 			printk("st(%d)  %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
 			       getsign(r) ? '-' : '+',
@@ -198,7 +200,6 @@ void FPU_printall(void)
 			printk("Whoops! Error in errors.c: tag%d is %d ", i,
 			       tagi);
 			continue;
-			break;
 		}
 		printk("%s\n", tag_desc[(int)(unsigned)tagi]);
 	}
@@ -1352,7 +1352,7 @@ static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag)
 		case TW_Denormal:
 			if (denormal_operand() < 0)
 				return;
+			/* fall through */
 		case TAG_Zero:
 		case TAG_Valid:
 			setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
@@ -511,6 +511,7 @@ void cpu_reset(void)
 		      "add	%2, %2, %7\n\t"
 		      "addi	%0, %0, -1\n\t"
 		      "bnez	%0, 1b\n\t"
+		      "isync\n\t"
 		      /* Jump to identity mapping */
 		      "jx	%3\n"
 		      "2:\n\t"
@@ -1958,13 +1958,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
-
-		cookie = BLK_QC_T_NONE;
-		if (bio->bi_opf & REQ_NOWAIT_INLINE)
-			cookie = BLK_QC_T_EAGAIN;
-		else if (bio->bi_opf & REQ_NOWAIT)
+		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
-		return cookie;
+		return BLK_QC_T_NONE;
 	}

 	trace_block_getrq(q, bio, bio->bi_opf);
@@ -2666,8 +2662,6 @@ void blk_mq_release(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx, *next;
 	int i;

-	cancel_delayed_work_sync(&q->requeue_work);
-
 	queue_for_each_hw_ctx(q, hctx, i)
 		WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
@@ -892,6 +892,9 @@ static void __blk_release_queue(struct work_struct *work)

 	blk_free_queue_stats(q->stats);

+	if (queue_is_mq(q))
+		cancel_delayed_work_sync(&q->requeue_work);
+
 	blk_exit_queue(q);

 	blk_queue_free_zone_bitmaps(q);
@@ -448,6 +448,11 @@ config PANEL_BOOT_MESSAGE
 choice
 	prompt "Backlight initial state"
 	default CHARLCD_BL_FLASH
+	---help---
+	  Select the initial backlight state on boot or module load.
+
+	  Previously, there was no option for this: the backlight flashed
+	  briefly on init. Now you can also turn it off/on.

 config CHARLCD_BL_OFF
 	bool "Off"
@@ -20,7 +20,7 @@

 #include <generated/utsrelease.h>

-#include <misc/charlcd.h>
+#include "charlcd.h"

 #define LCD_MINOR		156

@@ -6,6 +6,9 @@
  * Copyright (C) 2016-2017 Glider bvba
  */

+#ifndef _CHARLCD_H
+#define _CHARLCD_H
+
 struct charlcd {
 	const struct charlcd_ops *ops;
 	const unsigned char *char_conv;	/* Optional */
@@ -37,3 +40,5 @@ int charlcd_register(struct charlcd *lcd);
 int charlcd_unregister(struct charlcd *lcd);

 void charlcd_poke(struct charlcd *lcd);
+
+#endif /* CHARLCD_H */
@@ -14,8 +14,7 @@
 #include <linux/property.h>
 #include <linux/slab.h>

-#include <misc/charlcd.h>
+#include "charlcd.h"

 enum hd44780_pin {
 	/* Order does matter due to writing to GPIO array subsets! */
@@ -55,7 +55,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>

-#include <misc/charlcd.h>
+#include "charlcd.h"

 #define KEYPAD_MINOR		185

@@ -1617,6 +1617,8 @@ static void panel_attach(struct parport *port)
 	return;

 err_lcd_unreg:
+	if (scan_timer.function)
+		del_timer_sync(&scan_timer);
 	if (lcd.enabled)
 		charlcd_unregister(lcd.charlcd);
 err_unreg_device:
@@ -44,7 +44,7 @@ config REGMAP_IRQ

 config REGMAP_SOUNDWIRE
 	tristate
-	depends on SOUNDWIRE_BUS
+	depends on SOUNDWIRE

 config REGMAP_SCCB
 	tristate
@@ -965,6 +965,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 		}
 	}

+	err = -ENOMEM;
 	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
 		req = kzalloc(sizeof(*req), GFP_KERNEL);
 		if (!req)
@@ -987,7 +988,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
 	if (err) {
 		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
-		return err;
+		goto fail;
 	}

 	return 0;
@@ -1007,8 +1008,7 @@ fail:
 		}
 		kfree(req);
 	}
-	return -ENOMEM;
-
+	return err;
 }

 static int connect_ring(struct backend_info *be)
@@ -2528,7 +2528,7 @@ static int cpufreq_boost_set_sw(int state)
 		}

 		ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
-		if (ret)
+		if (ret < 0)
 			break;
 	}

@@ -50,7 +50,7 @@ struct dw_edma_burst {

 struct dw_edma_region {
 	phys_addr_t	paddr;
-	dma_addr_t	vaddr;
+	void __iomem	*vaddr;
 	size_t		sz;
 };

@@ -130,19 +130,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	chip->id = pdev->devfn;
 	chip->irq = pdev->irq;

-	dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
+	dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
 	dw->rg_region.vaddr += pdata->rg_off;
 	dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
 	dw->rg_region.paddr += pdata->rg_off;
 	dw->rg_region.sz = pdata->rg_sz;

-	dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
+	dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
 	dw->ll_region.vaddr += pdata->ll_off;
 	dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
 	dw->ll_region.paddr += pdata->ll_off;
 	dw->ll_region.sz = pdata->ll_sz;

-	dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
+	dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
 	dw->dt_region.vaddr += pdata->dt_off;
 	dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
 	dw->dt_region.paddr += pdata->dt_off;
@@ -158,17 +158,17 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	pci_dbg(pdev, "Mode:\t%s\n",
 		dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");

-	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
-		&dw->rg_region.vaddr, &dw->rg_region.paddr);
+		dw->rg_region.vaddr, &dw->rg_region.paddr);

-	pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
-		&dw->ll_region.vaddr, &dw->ll_region.paddr);
+		dw->ll_region.vaddr, &dw->ll_region.paddr);

-	pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
-		&dw->dt_region.vaddr, &dw->dt_region.paddr);
+		dw->dt_region.vaddr, &dw->dt_region.paddr);

 	pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);

@@ -25,7 +25,7 @@ enum dw_edma_control {

 static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
 {
-	return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
+	return dw->rg_region.vaddr;
 }

 #define SET(dw, name, value)				\
@@ -192,13 +192,12 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
 static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 {
 	struct dw_edma_burst *child;
-	struct dw_edma_v0_lli *lli;
-	struct dw_edma_v0_llp *llp;
+	struct dw_edma_v0_lli __iomem *lli;
+	struct dw_edma_v0_llp __iomem *llp;
 	u32 control = 0, i = 0;
-	u64 sar, dar, addr;
 	int j;

-	lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
+	lli = chunk->ll_region.vaddr;

 	if (chunk->cb)
 		control = DW_EDMA_V0_CB;
@@ -214,17 +213,15 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 		/* Transfer size */
 		SET_LL(&lli[i].transfer_size, child->sz);
 		/* SAR - low, high */
-		sar = cpu_to_le64(child->sar);
-		SET_LL(&lli[i].sar_low, lower_32_bits(sar));
-		SET_LL(&lli[i].sar_high, upper_32_bits(sar));
+		SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
+		SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
 		/* DAR - low, high */
-		dar = cpu_to_le64(child->dar);
-		SET_LL(&lli[i].dar_low, lower_32_bits(dar));
-		SET_LL(&lli[i].dar_high, upper_32_bits(dar));
+		SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
+		SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
 		i++;
 	}

-	llp = (struct dw_edma_v0_llp *)&lli[i];
+	llp = (void __iomem *)&lli[i];
 	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
 	if (!chunk->cb)
 		control |= DW_EDMA_V0_CB;
@@ -232,9 +229,8 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 	/* Channel control */
 	SET_LL(&llp->control, control);
 	/* Linked list - low, high */
-	addr = cpu_to_le64(chunk->ll_region.paddr);
-	SET_LL(&llp->llp_low, lower_32_bits(addr));
-	SET_LL(&llp->llp_high, upper_32_bits(addr));
+	SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
+	SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
 }

 void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -242,7 +238,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 	struct dw_edma_chan *chan = chunk->chan;
 	struct dw_edma *dw = chan->chip->dw;
 	u32 tmp;
-	u64 llp;

 	dw_edma_v0_core_write_chunk(chunk);

@@ -262,9 +257,10 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 		SET_CH(dw, chan->dir, chan->id, ch_control1,
 		       (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
 		/* Linked list - low, high */
-		llp = cpu_to_le64(chunk->ll_region.paddr);
-		SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
-		SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
+		SET_CH(dw, chan->dir, chan->id, llp_low,
+		       lower_32_bits(chunk->ll_region.paddr));
+		SET_CH(dw, chan->dir, chan->id, llp_high,
+		       upper_32_bits(chunk->ll_region.paddr));
 	}
 	/* Doorbell */
 	SET_RW(dw, chan->dir, doorbell,
@@ -14,7 +14,7 @@
 #include "dw-edma-core.h"

 #define REGS_ADDR(name) \
-	((dma_addr_t *)&regs->name)
+	((void __force *)&regs->name)
 #define REGISTER(name) \
 	{ #name, REGS_ADDR(name) }

@@ -40,36 +40,37 @@

 static struct dentry				*base_dir;
 static struct dw_edma				*dw;
-static struct dw_edma_v0_regs			*regs;
+static struct dw_edma_v0_regs			__iomem *regs;

 static struct {
-	void					*start;
-	void					*end;
+	void					__iomem *start;
+	void					__iomem *end;
 } lim[2][EDMA_V0_MAX_NR_CH];

 struct debugfs_entries {
-	char					name[24];
+	const char				*name;
 	dma_addr_t				*reg;
 };

 static int dw_edma_debugfs_u32_get(void *data, u64 *val)
 {
+	void __iomem *reg = (void __force __iomem *)data;
 	if (dw->mode == EDMA_MODE_LEGACY &&
-	    data >= (void *)&regs->type.legacy.ch) {
-		void *ptr = (void *)&regs->type.legacy.ch;
+	    reg >= (void __iomem *)&regs->type.legacy.ch) {
+		void __iomem *ptr = &regs->type.legacy.ch;
 		u32 viewport_sel = 0;
 		unsigned long flags;
 		u16 ch;

 		for (ch = 0; ch < dw->wr_ch_cnt; ch++)
-			if (lim[0][ch].start >= data && data < lim[0][ch].end) {
-				ptr += (data - lim[0][ch].start);
+			if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
+				ptr += (reg - lim[0][ch].start);
 				goto legacy_sel_wr;
 			}

 		for (ch = 0; ch < dw->rd_ch_cnt; ch++)
-			if (lim[1][ch].start >= data && data < lim[1][ch].end) {
-				ptr += (data - lim[1][ch].start);
+			if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
+				ptr += (reg - lim[1][ch].start);
 				goto legacy_sel_rd;
 			}

@@ -86,7 +87,7 @@ legacy_sel_wr:

 		raw_spin_unlock_irqrestore(&dw->lock, flags);
 	} else {
-		*val = readl(data);
+		*val = readl(reg);
 	}

 	return 0;
@@ -105,7 +106,7 @@ static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
 	}
 }

-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
 				    struct dentry *dir)
 {
 	int nr_entries;
@@ -288,7 +289,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
 	if (!dw)
 		return;

-	regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+	regs = dw->rg_region.vaddr;
 	if (!regs)
 		return;

@@ -142,7 +142,7 @@ enum d40_events {
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
  */
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
 	D40_DREG_LCPA,
 	D40_DREG_LCLA,
 	D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {

 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
 	D40_CHAN_REG_SSCFG,
 	D40_CHAN_REG_SSELT,
 	D40_CHAN_REG_SSPTR,
@@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)

 	chan = &dmadev->chan[id];
 	if (!chan) {
-		dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+		dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
 		goto exit;
 	}

@@ -712,7 +712,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
 	return chan;
 }

-static int tegra_adma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
 {
 	struct tegra_adma *tdma = dev_get_drvdata(dev);
 	struct tegra_adma_chan_regs *ch_reg;
@@ -744,7 +744,7 @@ clk_disable:
 	return 0;
 }

-static int tegra_adma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
 {
 	struct tegra_adma *tdma = dev_get_drvdata(dev);
 	struct tegra_adma_chan_regs *ch_reg;
@@ -1234,7 +1234,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
 	if (src_icg) {
 		d->ccr |= CCR_SRC_AMODE_DBLIDX;
 		d->ei = 1;
-		d->fi = src_icg;
+		d->fi = src_icg + 1;
 	} else if (xt->src_inc) {
 		d->ccr |= CCR_SRC_AMODE_POSTINC;
 		d->fi = 0;
@@ -1249,7 +1249,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
 	if (dst_icg) {
 		d->ccr |= CCR_DST_AMODE_DBLIDX;
 		sg->ei = 1;
-		sg->fi = dst_icg;
+		sg->fi = dst_icg + 1;
 	} else if (xt->dst_inc) {
 		d->ccr |= CCR_DST_AMODE_POSTINC;
 		sg->fi = 0;
@@ -927,17 +927,33 @@ fail:
 	return status;
 }

+#define GET_EFI_CONFIG_TABLE(bits)					\
+static void *get_efi_config_table##bits(efi_system_table_t *_sys_table,\
+					efi_guid_t guid)		\
+{									\
+	efi_system_table_##bits##_t *sys_table;				\
+	efi_config_table_##bits##_t *tables;				\
+	int i;								\
+									\
+	sys_table = (typeof(sys_table))_sys_table;			\
+	tables = (typeof(tables))(unsigned long)sys_table->tables;	\
+									\
+	for (i = 0; i < sys_table->nr_tables; i++) {			\
+		if (efi_guidcmp(tables[i].guid, guid) != 0)		\
+			continue;					\
+									\
+		return (void *)(unsigned long)tables[i].table;		\
+	}								\
+									\
+	return NULL;							\
+}
+GET_EFI_CONFIG_TABLE(32)
+GET_EFI_CONFIG_TABLE(64)
+
 void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
 {
-	efi_config_table_t *tables = (efi_config_table_t *)sys_table->tables;
-	int i;
-
-	for (i = 0; i < sys_table->nr_tables; i++) {
-		if (efi_guidcmp(tables[i].guid, guid) != 0)
-			continue;
-
-		return (void *)tables[i].table;
-	}
-
-	return NULL;
+	if (efi_is_64bit())
+		return get_efi_config_table64(sys_table, guid);
+	else
+		return get_efi_config_table32(sys_table, guid);
 }
@@ -4869,7 +4869,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
-	WREG32(mmSQ_CMD, value);
+	WREG32_SOC15(GC, 0, mmSQ_CMD, value);
 }

 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
@@ -23,6 +23,7 @@
  */

 #include <linux/slab.h>
+#include <linux/mm.h>

 #include "dm_services.h"

@@ -1171,8 +1172,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)

 struct dc_state *dc_create_state(struct dc *dc)
 {
-	struct dc_state *context = kzalloc(sizeof(struct dc_state),
+	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
 					   GFP_KERNEL);

 	if (!context)
 		return NULL;
@@ -1192,11 +1193,11 @@ struct dc_state *dc_create_state(struct dc *dc)
 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
 {
 	int i, j;
-	struct dc_state *new_ctx = kmemdup(src_ctx,
-					   sizeof(struct dc_state), GFP_KERNEL);
+	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

 	if (!new_ctx)
 		return NULL;
+	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -1230,7 +1231,7 @@ static void dc_state_free(struct kref *kref)
 {
 	struct dc_state *context = container_of(kref, struct dc_state, refcount);
 	dc_resource_state_destruct(context);
-	kfree(context);
+	kvfree(context);
 }

 void dc_release_state(struct dc_state *context)
@@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)

 	/* Enable extended register access */
-	ast_enable_mmio(dev);
 	ast_open_key(ast);
+	ast_enable_mmio(dev);

 	/* Find out whether P2A works or whether to use device-tree */
 	ast_detect_config_mode(dev, &scu_rev);
@@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;

+	/* enable standard VGA decode */
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
+
 	ast_release_firmware(dev);
 	kfree(ast->dp501_fw_addr);
 	ast_mode_fini(dev);
@@ -604,7 +604,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
 		return -EINVAL;
 	ast_open_key(ast);

-	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);

 	ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
 	ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
@@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;

-	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
 }

@@ -1528,9 +1528,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 if (!intel_gvt_ggtt_validate_range(vgpu,
 workload->wa_ctx.indirect_ctx.guest_gma,
 workload->wa_ctx.indirect_ctx.size)) {
-kmem_cache_free(s->workloads, workload);
 gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
 workload->wa_ctx.indirect_ctx.guest_gma);
+kmem_cache_free(s->workloads, workload);
 return ERR_PTR(-EINVAL);
 }
 }
@@ -1542,9 +1542,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 if (!intel_gvt_ggtt_validate_range(vgpu,
 workload->wa_ctx.per_ctx.guest_gma,
 CACHELINE_BYTES)) {
-kmem_cache_free(s->workloads, workload);
 gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
 workload->wa_ctx.per_ctx.guest_gma);
+kmem_cache_free(s->workloads, workload);
 return ERR_PTR(-EINVAL);
 }
 }

@@ -771,16 +771,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
 struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
 int slots;

-/* When restoring duplicated states, we need to make sure that the
-* bw remains the same and avoid recalculating it, as the connector's
-* bpc may have changed after the state was duplicated
-*/
-if (!state->duplicated)
-asyh->dp.pbn =
-drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
-connector->display_info.bpc * 3);
+if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+/*
+* When restoring duplicated states, we need to make sure that
+* the bw remains the same and avoid recalculating it, as the
+* connector's bpc may have changed after the state was
+* duplicated
+*/
+if (!state->duplicated) {
+const int bpp = connector->display_info.bpc * 3;
+const int clock = crtc_state->adjusted_mode.clock;
+
+asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
+}

-if (crtc_state->mode_changed) {
 slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
 mstc->port,
 asyh->dp.pbn);

@@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 rmb(); /* for list_empty to work without lock */

 if (list_empty(&entity->list) ||
-spsc_queue_peek(&entity->job_queue) == NULL)
+spsc_queue_count(&entity->job_queue) == 0)
 return true;

 return false;
@@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 /* Consumption of existing IBs wasn't completed. Forcefully
 * remove them here.
 */
-if (spsc_queue_peek(&entity->job_queue)) {
+if (spsc_queue_count(&entity->job_queue)) {
 if (sched) {
 /* Park the kernel for a moment to make sure it isn't processing
 * our enity.

@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */

 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM hyperv

@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 * Intel(R) Trace Hub Memory Storage Unit (MSU) data structures
 *

@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 * Intel(R) Trace Hub PTI output data structures
 *

@@ -69,6 +69,7 @@ struct em_i2c_device {
 struct completion msg_done;
 struct clk *sclk;
 struct i2c_client *slave;
+int irq;
 };

 static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
@@ -339,6 +340,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)

 writeb(0, priv->base + I2C_OFS_SVA0);

+/*
+* Wait for interrupt to finish. New slave irqs cannot happen because we
+* cleared the slave address and, thus, only extension codes will be
+* detected which do not use the slave ptr.
+*/
+synchronize_irq(priv->irq);
 priv->slave = NULL;

 return 0;
@@ -355,7 +362,7 @@ static int em_i2c_probe(struct platform_device *pdev)
 {
 struct em_i2c_device *priv;
 struct resource *r;
-int irq, ret;
+int ret;

 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 if (!priv)
@@ -390,8 +397,8 @@ static int em_i2c_probe(struct platform_device *pdev)

 em_i2c_reset(&priv->adap);

-irq = platform_get_irq(pdev, 0);
-ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
+priv->irq = platform_get_irq(pdev, 0);
+ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
 "em_i2c", priv);
 if (ret)
 goto err_clk;
@@ -401,7 +408,8 @@ static int em_i2c_probe(struct platform_device *pdev)
 if (ret)
 goto err_clk;

-dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
+dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
+priv->irq);

 return 0;

@@ -273,8 +273,8 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
 }

 /* Functions for DMA support */
-static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
 dma_addr_t phy_addr)
 {
 struct imx_i2c_dma *dma;
 struct dma_slave_config dma_sconfig;
@@ -283,7 +283,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,

 dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
 if (!dma)
-return -ENOMEM;
+return;

 dma->chan_tx = dma_request_chan(dev, "tx");
 if (IS_ERR(dma->chan_tx)) {
@@ -328,7 +328,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
 dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

-return 0;
+return;

 fail_rx:
 dma_release_channel(dma->chan_rx);
@@ -336,8 +336,6 @@ fail_tx:
 dma_release_channel(dma->chan_tx);
 fail_al:
 devm_kfree(dev, dma);
-/* return successfully if there is no dma support */
-return ret == -ENODEV ? 0 : ret;
 }

 static void i2c_imx_dma_callback(void *arg)
@@ -1165,17 +1163,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
 dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
 dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
 i2c_imx->adapter.name);
+dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");

 /* Init DMA config if supported */
-ret = i2c_imx_dma_request(i2c_imx, phy_addr);
-if (ret < 0)
-goto del_adapter;
+i2c_imx_dma_request(i2c_imx, phy_addr);

-dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
 return 0; /* Return OK */

-del_adapter:
-i2c_del_adapter(&i2c_imx->adapter);
 clk_notifier_unregister:
 clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
 rpm_disable:

@@ -139,6 +139,7 @@ struct rcar_i2c_priv {
 enum dma_data_direction dma_direction;

 struct reset_control *rstc;
+int irq;
 };

 #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -861,9 +862,11 @@ static int rcar_unreg_slave(struct i2c_client *slave)

 WARN_ON(!priv->slave);

+/* disable irqs and ensure none is running before clearing ptr */
 rcar_i2c_write(priv, ICSIER, 0);
 rcar_i2c_write(priv, ICSCR, 0);

+synchronize_irq(priv->irq);
 priv->slave = NULL;

 pm_runtime_put(rcar_i2c_priv_to_dev(priv));
@@ -918,7 +921,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 struct i2c_adapter *adap;
 struct device *dev = &pdev->dev;
 struct i2c_timings i2c_t;
-int irq, ret;
+int ret;

 /* Otherwise logic will break because some bytes must always use PIO */
 BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length");
@@ -984,10 +987,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 pm_runtime_put(dev);


-irq = platform_get_irq(pdev, 0);
-ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv);
+priv->irq = platform_get_irq(pdev, 0);
+ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
 if (ret < 0) {
-dev_err(dev, "cannot get irq %d\n", irq);
+dev_err(dev, "cannot get irq %d\n", priv->irq);
 goto out_pm_disable;
 }

@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 * i2c-stm32.h
 *

@@ -480,7 +480,7 @@ static int max9611_init(struct max9611_dev *max9611)
 if (ret)
 return ret;

-regval = ret & MAX9611_TEMP_MASK;
+regval &= MAX9611_TEMP_MASK;

 if ((regval > MAX9611_TEMP_MAX_POS &&
 regval < MAX9611_TEMP_MIN_NEG) ||

@@ -276,11 +276,11 @@ static int adf4371_set_freq(struct adf4371_state *st, unsigned long long freq,
 st->buf[0] = st->integer >> 8;
 st->buf[1] = 0x40; /* REG12 default */
 st->buf[2] = 0x00;
-st->buf[3] = st->fract2 & 0xFF;
-st->buf[4] = st->fract2 >> 7;
-st->buf[5] = st->fract2 >> 15;
+st->buf[3] = st->fract1 & 0xFF;
+st->buf[4] = st->fract1 >> 8;
+st->buf[5] = st->fract1 >> 16;
 st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) |
-ADF4371_FRAC1WORD(st->fract1 >> 23);
+ADF4371_FRAC1WORD(st->fract1 >> 24);
 st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7);
 st->buf[8] = st->mod2 & 0xFF;
 st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8);

@@ -38,6 +38,9 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
 int ret;

 port_counter = &dev->port_data[port].port_counter;
+if (!port_counter->hstats)
+return -EOPNOTSUPP;

 mutex_lock(&port_counter->lock);
 if (on) {
 ret = __counter_set_mode(&port_counter->mode,
@@ -509,6 +512,9 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
 if (!rdma_is_port_valid(dev, port))
 return -EINVAL;

+if (!dev->port_data[port].port_counter.hstats)
+return -EOPNOTSUPP;

 qp = rdma_counter_get_qp(dev, qp_num);
 if (!qp)
 return -ENOENT;

@@ -1952,12 +1952,16 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,

 if (fill_nldev_handle(msg, device) ||
 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
-nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode))
+nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
+ret = -EMSGSIZE;
 goto err_msg;
+}

 if ((mode == RDMA_COUNTER_MODE_AUTO) &&
-nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask))
+nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
+ret = -EMSGSIZE;
 goto err_msg;
+}

 nlmsg_end(msg, nlh);
 ib_device_put(device);

@@ -112,10 +112,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
 * prevent any further fault handling on this MR.
 */
 ib_umem_notifier_start_account(umem_odp);
-umem_odp->dying = 1;
-/* Make sure that the fact the umem is dying is out before we release
-* all pending page faults. */
-smp_wmb();
 complete_all(&umem_odp->notifier_completion);
 umem_odp->umem.context->invalidate_range(
 umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));

@@ -2026,7 +2026,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 event_sub->eventfd =
 eventfd_ctx_fdget(redirect_fd);

-if (IS_ERR(event_sub)) {
+if (IS_ERR(event_sub->eventfd)) {
 err = PTR_ERR(event_sub->eventfd);
 event_sub->eventfd = NULL;
 goto err;
@@ -2644,12 +2644,13 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
 struct devx_async_event_file *ev_file = filp->private_data;
 struct devx_event_subscription *event_sub, *event_sub_tmp;
 struct devx_async_event_data *entry, *tmp;
+struct mlx5_ib_dev *dev = ev_file->dev;

-mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock);
+mutex_lock(&dev->devx_event_table.event_xa_lock);
 /* delete the subscriptions which are related to this FD */
 list_for_each_entry_safe(event_sub, event_sub_tmp,
 &ev_file->subscribed_events_list, file_list) {
-devx_cleanup_subscription(ev_file->dev, event_sub);
+devx_cleanup_subscription(dev, event_sub);
 if (event_sub->eventfd)
 eventfd_ctx_put(event_sub->eventfd);

@@ -2658,7 +2659,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
 kfree_rcu(event_sub, rcu);
 }

-mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock);
+mutex_unlock(&dev->devx_event_table.event_xa_lock);

 /* free the pending events allocation */
 if (!ev_file->omit_data) {
@@ -2670,7 +2671,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
 }

 uverbs_close_fd(filp);
-put_device(&ev_file->dev->ib_dev.dev);
+put_device(&dev->ib_dev.dev);
 return 0;
 }

@@ -579,7 +579,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 u32 flags)
 {
 int npages = 0, current_seq, page_shift, ret, np;
-bool implicit = false;
 struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
 bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
@@ -594,7 +593,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 if (IS_ERR(odp))
 return PTR_ERR(odp);
 mr = odp->private;
-implicit = true;
 } else {
 odp = odp_mr;
 }
@@ -682,19 +680,15 @@ next_mr:

 out:
 if (ret == -EAGAIN) {
-if (implicit || !odp->dying) {
-unsigned long timeout =
-msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);

-if (!wait_for_completion_timeout(
-&odp->notifier_completion,
-timeout)) {
-mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
-current_seq, odp->notifiers_seq, odp->notifiers_count);
-}
-} else {
-/* The MR is being killed, kill the QP as well. */
-ret = -EFAULT;
+if (!wait_for_completion_timeout(&odp->notifier_completion,
+timeout)) {
+mlx5_ib_warn(
+dev,
+"timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
+current_seq, odp->notifiers_seq,
+odp->notifiers_count);
 }
 }

@@ -1,6 +1,6 @@
 config RDMA_SIW
 tristate "Software RDMA over TCP/IP (iWARP) driver"
-depends on INET && INFINIBAND && LIBCRC32C && 64BIT
+depends on INET && INFINIBAND && LIBCRC32C
 select DMA_VIRT_OPS
 help
 This driver implements the iWARP RDMA transport over

@@ -214,7 +214,7 @@ struct siw_wqe {
 struct siw_cq {
 struct ib_cq base_cq;
 spinlock_t lock;
-u64 *notify;
+struct siw_cq_ctrl *notify;
 struct siw_cqe *queue;
 u32 cq_put;
 u32 cq_get;

@@ -160,10 +160,8 @@ static int siw_init_cpulist(void)

 out_err:
 siw_cpu_info.num_nodes = 0;
-while (i) {
+while (--i >= 0)
 kfree(siw_cpu_info.tx_valid_cpus[i]);
-siw_cpu_info.tx_valid_cpus[i--] = NULL;
-}
 kfree(siw_cpu_info.tx_valid_cpus);
 siw_cpu_info.tx_valid_cpus = NULL;

@@ -1013,18 +1013,24 @@ out:
 */
 static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
 {
-u64 cq_notify;
+u32 cq_notify;

 if (!cq->base_cq.comp_handler)
 return false;

-cq_notify = READ_ONCE(*cq->notify);
+/* Read application shared notification state */
+cq_notify = READ_ONCE(cq->notify->flags);

 if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
 ((cq_notify & SIW_NOTIFY_SOLICITED) &&
 (flags & SIW_WQE_SOLICITED))) {
-/* dis-arm CQ */
-smp_store_mb(*cq->notify, SIW_NOTIFY_NOT);
+/*
+* CQ notification is one-shot: Since the
+* current CQE causes user notification,
+* the CQ gets dis-aremd and must be re-aremd
+* by the user for a new notification.
+*/
+WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);

 return true;
 }

@@ -1049,7 +1049,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,

 spin_lock_init(&cq->lock);

-cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify;
+cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];

 if (udata) {
 struct siw_uresp_create_cq uresp = {};
@@ -1141,11 +1141,17 @@ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
 siw_dbg_cq(cq, "flags: 0x%02x\n", flags);

 if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
-/* CQ event for next solicited completion */
-smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED);
+/*
+* Enable CQ event for next solicited completion.
+* and make it visible to all associated producers.
+*/
+smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
 else
-/* CQ event for any signalled completion */
-smp_store_mb(*cq->notify, SIW_NOTIFY_ALL);
+/*
+* Enable CQ event for any signalled completion.
+* and make it visible to all associated producers.
+*/
+smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);

 if (flags & IB_CQ_REPORT_MISSED_EVENTS)
 return cq->cq_put - cq->cq_get;

@@ -1186,8 +1186,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 ste_live = true;
 break;
 case STRTAB_STE_0_CFG_ABORT:
-if (disable_bypass)
+BUG_ON(!disable_bypass);
 break;
 default:
 BUG(); /* STE corruption */
 }

@@ -459,13 +459,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 {
 struct iommu_domain *domain = iommu_get_dma_domain(dev);
 struct iommu_dma_cookie *cookie = domain->iova_cookie;
-size_t iova_off = 0;
+struct iova_domain *iovad = &cookie->iovad;
+size_t iova_off = iova_offset(iovad, phys);
 dma_addr_t iova;

-if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
-iova_off = iova_offset(&cookie->iovad, phys);
-size = iova_align(&cookie->iovad, size + iova_off);
-}
+size = iova_align(iovad, size + iova_off);

 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 if (!iova)
@@ -574,7 +572,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 struct iova_domain *iovad = &cookie->iovad;
 bool coherent = dev_is_dma_coherent(dev);
 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 struct page **pages;
 struct sg_table sgt;
@@ -764,7 +762,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 * - and wouldn't make the resulting output segment too long
 */
 if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
-(cur_len + s_length <= max_len)) {
+(max_len - cur_len >= s_length)) {
 /* ...then concatenate it with the previous one */
 cur_len += s_length;
 } else {
@@ -975,7 +973,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 return NULL;

 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
-pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

 cpu_addr = dma_common_contiguous_remap(page, alloc_size,
 VM_USERMAP, prot, __builtin_return_address(0));
@@ -1035,7 +1033,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 unsigned long pfn, off = vma->vm_pgoff;
 int ret;

-vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 return ret;
@@ -1147,16 +1145,21 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 if (!msi_page)
 return NULL;

-iova = __iommu_dma_map(dev, msi_addr, size, prot);
-if (iova == DMA_MAPPING_ERROR)
+iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+if (!iova)
 goto out_free_page;

+if (iommu_map(domain, iova, msi_addr, size, prot))
+goto out_free_iova;

 INIT_LIST_HEAD(&msi_page->list);
 msi_page->phys = msi_addr;
 msi_page->iova = iova;
 list_add(&msi_page->list, &cookie->msi_page_list);
 return msi_page;

+out_free_iova:
+iommu_dma_free_iova(cookie, iova, size);
 out_free_page:
 kfree(msi_page);
 return NULL;

@@ -235,7 +235,7 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
 tbl_wlk.ctx_entry = context;
 m->private = &tbl_wlk;

-if (pasid_supported(iommu) && is_pasid_enabled(context)) {
+if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
 pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
 pasid_dir_size = get_pasid_dir_size(context);
 pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);

@@ -3449,6 +3449,7 @@ static bool iommu_need_mapping(struct device *dev)
 dmar_domain = to_dmar_domain(domain);
 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
 }
+dmar_remove_one_dev_info(dev);
 get_private_domain_for_dev(dev);
 }

@@ -4790,7 +4791,8 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)

 /* free the private domain */
 if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
-!(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
+!(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+list_empty(&domain->devices))
 domain_exit(info->domain);

 free_devinfo_mem(info);
@@ -4803,7 +4805,8 @@ static void dmar_remove_one_dev_info(struct device *dev)

 spin_lock_irqsave(&device_domain_lock, flags);
 info = dev->archdata.iommu;
-__dmar_remove_one_dev_info(info);
+if (info)
+__dmar_remove_one_dev_info(info);
 spin_unlock_irqrestore(&device_domain_lock, flags);
 }

@@ -5281,6 +5284,7 @@ static int intel_iommu_add_device(struct device *dev)
 if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
 ret = iommu_request_dm_for_dev(dev);
 if (ret) {
+dmar_remove_one_dev_info(dev);
 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
 domain_add_dev_info(si_domain, dev);
 dev_info(dev,
@@ -5291,6 +5295,7 @@ static int intel_iommu_add_device(struct device *dev)
 if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
 ret = iommu_request_dma_domain_for_dev(dev);
 if (ret) {
+dmar_remove_one_dev_info(dev);
 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
 if (!get_private_domain_for_dev(dev)) {
 dev_warn(dev,
@@ -5316,6 +5321,8 @@ static void intel_iommu_remove_device(struct device *dev)
 if (!iommu)
 return;

+dmar_remove_one_dev_info(dev);

 iommu_group_remove_device(dev);

 iommu_device_unlink(&iommu->iommu, dev);

@@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
 */

 pixsize = vout->bpp * vout->vrfb_bpp;
-dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
-(vout->pix.width * vout->bpp)) + 1;
+dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;

 xt->src_start = vout->buf_phy_addr[vb->i];
 xt->dst_start = vout->vrfb_context[vb->i].paddr[0];

@@ -456,6 +456,7 @@ config PCI_ENDPOINT_TEST

 config XILINX_SDFEC
 tristate "Xilinx SDFEC 16"
+depends on HAS_IOMEM
 help
 This option enables support for the Xilinx SDFEC (Soft Decision
 Forward Error Correction) driver. This enables a char driver

@@ -970,7 +970,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
 if (rc) {
 dev_err(hdev->dev, "failed to initialize kernel context\n");
-goto free_ctx;
+kfree(hdev->kernel_ctx);
+goto mmu_fini;
 }

 rc = hl_cb_pool_init(hdev);
@@ -1053,8 +1054,6 @@ release_ctx:
 if (hl_ctx_put(hdev->kernel_ctx) != 1)
 dev_err(hdev->dev,
 "kernel ctx is still alive on initialization failure\n");
-free_ctx:
-kfree(hdev->kernel_ctx);
 mmu_fini:
 hl_mmu_fini(hdev);
 eq_fini:

@@ -2729,9 +2729,10 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
 }

-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
+void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
 {
-/* Not needed in Goya */
+/* The QMANs are on the SRAM so need to copy to IO space */
+memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
 }

 static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
@@ -3313,9 +3314,11 @@ static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
 int rc;

 dev_dbg(hdev->dev, "DMA packet details:\n");
-dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
-dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
-dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+dev_dbg(hdev->dev, "source == 0x%llx\n",
+le64_to_cpu(user_dma_pkt->src_addr));
+dev_dbg(hdev->dev, "destination == 0x%llx\n",
+le64_to_cpu(user_dma_pkt->dst_addr));
+dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));

 ctl = le32_to_cpu(user_dma_pkt->ctl);
 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
@@ -3344,9 +3347,11 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
 struct packet_lin_dma *user_dma_pkt)
 {
 dev_dbg(hdev->dev, "DMA packet details:\n");
-dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
-dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
-dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+dev_dbg(hdev->dev, "source == 0x%llx\n",
+le64_to_cpu(user_dma_pkt->src_addr));
+dev_dbg(hdev->dev, "destination == 0x%llx\n",
+le64_to_cpu(user_dma_pkt->dst_addr));
+dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));

 /*
 * WA for HW-23.
@@ -3386,7 +3391,8 @@ static int goya_validate_wreg32(struct hl_device *hdev,

 dev_dbg(hdev->dev, "WREG32 packet details:\n");
 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
-dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
+dev_dbg(hdev->dev, "value == 0x%x\n",
+le32_to_cpu(wreg_pkt->value));

 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
@@ -3428,12 +3434,13 @@ static int goya_validate_cb(struct hl_device *hdev,
 while (cb_parsed_length < parser->user_cb_size) {
 enum packet_id pkt_id;
 u16 pkt_size;
-void *user_pkt;
+struct goya_packet *user_pkt;

-user_pkt = (void *) (uintptr_t)
+user_pkt = (struct goya_packet *) (uintptr_t)
 (parser->user_cb->kernel_address + cb_parsed_length);

-pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+pkt_id = (enum packet_id) (
+(le64_to_cpu(user_pkt->header) &
 PACKET_HEADER_PACKET_ID_MASK) >>
 PACKET_HEADER_PACKET_ID_SHIFT);

@@ -3453,7 +3460,8 @@ static int goya_validate_cb(struct hl_device *hdev,
 * need to validate here as well because patch_cb() is
 * not called in MMU path while this function is called
 */
-rc = goya_validate_wreg32(hdev, parser, user_pkt);
+rc = goya_validate_wreg32(hdev,
+parser, (struct packet_wreg32 *) user_pkt);
 break;

 case PACKET_WREG_BULK:
@@ -3481,10 +3489,10 @@ static int goya_validate_cb(struct hl_device *hdev,
 case PACKET_LIN_DMA:
 if (is_mmu)
 rc = goya_validate_dma_pkt_mmu(hdev, parser,
-user_pkt);
+(struct packet_lin_dma *) user_pkt);
 else
 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
-user_pkt);
+(struct packet_lin_dma *) user_pkt);
 break;

 case PACKET_MSG_LONG:
@@ -3657,15 +3665,16 @@ static int goya_patch_cb(struct hl_device *hdev,
 enum packet_id pkt_id;
 u16 pkt_size;
 u32 new_pkt_size = 0;
-void *user_pkt, *kernel_pkt;
+struct goya_packet *user_pkt, *kernel_pkt;

-user_pkt = (void *) (uintptr_t)
+user_pkt = (struct goya_packet *) (uintptr_t)
 (parser->user_cb->kernel_address + cb_parsed_length);
-kernel_pkt = (void *) (uintptr_t)
+kernel_pkt = (struct goya_packet *) (uintptr_t)
 (parser->patched_cb->kernel_address +
 cb_patched_cur_length);

-pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+pkt_id = (enum packet_id) (
+(le64_to_cpu(user_pkt->header) &
 PACKET_HEADER_PACKET_ID_MASK) >>
 PACKET_HEADER_PACKET_ID_SHIFT);

@@ -3680,15 +3689,18 @@ static int goya_patch_cb(struct hl_device *hdev,

 switch (pkt_id) {
 case PACKET_LIN_DMA:
-rc = goya_patch_dma_packet(hdev, parser, user_pkt,
-kernel_pkt, &new_pkt_size);
+rc = goya_patch_dma_packet(hdev, parser,
+(struct packet_lin_dma *) user_pkt,
+(struct packet_lin_dma *) kernel_pkt,
+&new_pkt_size);
 cb_patched_cur_length += new_pkt_size;
 break;

 case PACKET_WREG_32:
 memcpy(kernel_pkt, user_pkt, pkt_size);
 cb_patched_cur_length += pkt_size;
-rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
+rc = goya_validate_wreg32(hdev, parser,
+(struct packet_wreg32 *) kernel_pkt);
 break;

 case PACKET_WREG_BULK:
@@ -4352,6 +4364,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
 size_t total_pkt_size;
 long result;
 int rc;
+int irq_num_entries, irq_arr_index;
+__le32 *goya_irq_arr;

 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
 irq_arr_size;
@@ -4369,8 +4383,16 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
 if (!pkt)
 return -ENOMEM;

-pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
-memcpy(&pkt->irqs, irq_arr, irq_arr_size);
+irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
+pkt->length = cpu_to_le32(irq_num_entries);

+/* We must perform any necessary endianness conversation on the irq
+* array being passed to the goya hardware
+*/
+for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
+irq_arr_index < irq_num_entries ; irq_arr_index++)
+goya_irq_arr[irq_arr_index] =
+cpu_to_le32(irq_arr[irq_arr_index]);

 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
 ARMCP_PKT_CTL_OPCODE_SHIFT);
@@ -5042,7 +5064,7 @@ static const struct hl_asic_funcs goya_funcs = {
 .resume = goya_resume,
 .cb_mmap = goya_cb_mmap,
 .ring_doorbell = goya_ring_doorbell,
-.flush_pq_write = goya_flush_pq_write,
+.pqe_write = goya_pqe_write,
 .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
 .asic_dma_free_coherent = goya_dma_free_coherent,
 .get_int_queue_base = goya_get_int_queue_base,

@@ -177,7 +177,7 @@ int goya_late_init(struct hl_device *hdev);
 void goya_late_fini(struct hl_device *hdev);

 void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val);
+void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd);
 void goya_update_eq_ci(struct hl_device *hdev, u32 val);
 void goya_restore_phase_topology(struct hl_device *hdev);
 int goya_context_switch(struct hl_device *hdev, u32 asid);

@@ -441,7 +441,11 @@ enum hl_pll_frequency {
 * @resume: handles IP specific H/W or SW changes for resume.
 * @cb_mmap: maps a CB.
 * @ring_doorbell: increment PI on a given QMAN.
-* @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed.
+* @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
+* function because the PQs are located in different memory areas
+* per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
+* writing the PQE must match the destination memory area
+* properties.
 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
 * dma_alloc_coherent(). This is ASIC function because
 * its implementation is not trivial when the driver
@@ -510,7 +514,8 @@ struct hl_asic_funcs {
 int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
 u64 kaddress, phys_addr_t paddress, u32 size);
 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
-void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val);
+void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
+struct hl_bd *bd);
 void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
 dma_addr_t *dma_handle, gfp_t flag);
 void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,

@@ -290,23 +290,19 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job)
 struct hl_device *hdev = job->cs->ctx->hdev;
 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
 struct hl_bd bd;
-u64 *pi, *pbd = (u64 *) &bd;
+__le64 *pi;

 bd.ctl = 0;
-bd.len = __cpu_to_le32(job->job_cb_size);
-bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb);
+bd.len = cpu_to_le32(job->job_cb_size);
+bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);

-pi = (u64 *) (uintptr_t) (q->kernel_address +
+pi = (__le64 *) (uintptr_t) (q->kernel_address +
 ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));

-pi[0] = pbd[0];
-pi[1] = pbd[1];

 q->pi++;
 q->pi &= ((q->int_queue_len << 1) - 1);

-/* Flush PQ entry write. Relevant only for specific ASICs */
-hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
+hdev->asic_funcs->pqe_write(hdev, pi, &bd);

 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
 }

@@ -52,6 +52,19 @@ enum goya_dma_direction {
 #define GOYA_PKT_CTL_MB_SHIFT 31
 #define GOYA_PKT_CTL_MB_MASK 0x80000000

+/* All packets have, at least, an 8-byte header, which contains
+* the packet type. The kernel driver uses the packet header for packet
+* validation and to perform any necessary required preparation before
+* sending them off to the hardware.
+*/
+struct goya_packet {
+__le64 header;
+/* The rest of the packet data follows. Use the corresponding
+* packet_XXX struct to deference the data, based on packet type
+*/
+u8 contents[0];
+};

 struct packet_nop {
 __le32 reserved;
 __le32 ctl;

@@ -80,8 +80,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
 struct hl_cs_job *job;
 bool shadow_index_valid;
 u16 shadow_index;
-u32 *cq_entry;
-u32 *cq_base;
+struct hl_cq_entry *cq_entry, *cq_base;

 if (hdev->disabled) {
 dev_dbg(hdev->dev,
@@ -90,29 +89,29 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
 return IRQ_HANDLED;
 }

-cq_base = (u32 *) (uintptr_t) cq->kernel_address;
+cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address;

 while (1) {
-bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK)
+bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
+CQ_ENTRY_READY_MASK)
 >> CQ_ENTRY_READY_SHIFT);

 if (!entry_ready)
 break;

-cq_entry = (u32 *) &cq_base[cq->ci];
+cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];

-/*
-* Make sure we read CQ entry contents after we've
+/* Make sure we read CQ entry contents after we've
 * checked the ownership bit.
 */
 dma_rmb();

-shadow_index_valid =
-((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
+shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
+CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
 >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);

-shadow_index = (u16)
-((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK)
+shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
+CQ_ENTRY_SHADOW_INDEX_MASK)
 >> CQ_ENTRY_SHADOW_INDEX_SHIFT);

 queue = &hdev->kernel_queues[cq->hw_queue_id];
@@ -122,8 +121,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
 queue_work(hdev->cq_wq, &job->finish_work);
 }

-/*
-* Update ci of the context's queue. There is no
+/* Update ci of the context's queue. There is no
 * need to protect it with spinlock because this update is
 * done only inside IRQ and there is a different IRQ per
 * queue
@@ -131,7 +129,8 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
 queue->ci = hl_queue_inc_ptr(queue->ci);

 /* Clear CQ entry ready bit */
-cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK;
+cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
+~CQ_ENTRY_READY_MASK);

 cq->ci = hl_cq_inc_ptr(cq->ci);

@@ -1629,6 +1629,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
 dev_dbg(hdev->dev,
 "page list 0x%p of asid %d is still alive\n",
 phys_pg_list, ctx->asid);
+atomic64_sub(phys_pg_list->total_size,
+&hdev->dram_used_mem);
 free_phys_pg_pack(hdev, phys_pg_list);
 idr_remove(&vm->phys_pg_pack_handles, i);
 }

@@ -3780,8 +3780,6 @@ static int spi_nor_init_params(struct spi_nor *nor,
 default:
 /* Kept only for backward compatibility purpose. */
 params->quad_enable = spansion_quad_enable;
-if (nor->clear_sr_bp)
-nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
 break;
 }

@@ -4035,6 +4033,9 @@ static int spi_nor_init(struct spi_nor *nor)
 int err;

 if (nor->clear_sr_bp) {
+if (nor->quad_enable == spansion_quad_enable)
+nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;

 err = nor->clear_sr_bp(nor);
 if (err) {
 dev_err(nor->dev,

@@ -1286,6 +1286,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	 */
 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
 		mutex_lock(&ctrl->scan_lock);
+		mutex_lock(&ctrl->subsys->lock);
+		nvme_mpath_start_freeze(ctrl->subsys);
+		nvme_mpath_wait_freeze(ctrl->subsys);
 		nvme_start_freeze(ctrl);
 		nvme_wait_freeze(ctrl);
 	}
@@ -1316,6 +1319,8 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
 	nvme_update_formats(ctrl);
 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
 		nvme_unfreeze(ctrl);
+		nvme_mpath_unfreeze(ctrl->subsys);
+		mutex_unlock(&ctrl->subsys->lock);
 		mutex_unlock(&ctrl->scan_lock);
 	}
 	if (effects & NVME_CMD_EFFECTS_CCC)
@@ -1715,6 +1720,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 	if (ns->head->disk) {
 		nvme_update_disk_info(ns->head->disk, ns, id);
 		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+		revalidate_disk(ns->head->disk);
 	}
 #endif
 }
@@ -2487,6 +2493,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 		if (ret) {
 			dev_err(ctrl->device,
 				"failed to register subsystem device.\n");
+			put_device(&subsys->dev);
 			goto out_unlock;
 		}
 		ida_init(&subsys->ns_ida);
@@ -2509,7 +2516,6 @@ out_put_subsystem:
 	nvme_put_subsystem(subsys);
 out_unlock:
 	mutex_unlock(&nvme_subsystems_lock);
-	put_device(&subsys->dev);
 	return ret;
 }
 
@@ -3571,6 +3577,13 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns, *next;
 	LIST_HEAD(ns_list);
 
+	/*
+	 * make sure to requeue I/O to all namespaces as these
+	 * might result from the scan itself and must complete
+	 * for the scan_work to make progress
+	 */
+	nvme_mpath_clear_ctrl_paths(ctrl);
+
 	/* prevent racing with ns scanning */
 	flush_work(&ctrl->scan_work);

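The two passthru hunks above are a matched pair: commands whose effects can change namespaces (LBCC or CSE) are now bracketed by a freeze of the shared multipath queues in addition to the per-controller queues, all under scan_lock and the subsystem lock. A condensed sketch of the pairing, using the helpers named in the hunks (the wrapper functions below are hypothetical and only make the ordering explicit):

/* Start side: take locks outer-to-inner, freeze the shared (multipath)
 * queues before the per-controller namespace queues.
 */
static void passthru_freeze_sketch(struct nvme_ctrl *ctrl)
{
	mutex_lock(&ctrl->scan_lock);
	mutex_lock(&ctrl->subsys->lock);
	nvme_mpath_start_freeze(ctrl->subsys);
	nvme_mpath_wait_freeze(ctrl->subsys);
	nvme_start_freeze(ctrl);
	nvme_wait_freeze(ctrl);
}

/* End side: undo the freeze and drop the locks in reverse order. */
static void passthru_unfreeze_sketch(struct nvme_ctrl *ctrl)
{
	nvme_unfreeze(ctrl);
	nvme_mpath_unfreeze(ctrl->subsys);
	mutex_unlock(&ctrl->subsys->lock);
	mutex_unlock(&ctrl->scan_lock);
}
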
@@ -12,6 +12,36 @@ module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
 	"turn on native support for multiple controllers per subsystem");
 
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+	struct nvme_ns_head *h;
+
+	lockdep_assert_held(&subsys->lock);
+	list_for_each_entry(h, &subsys->nsheads, entry)
+		if (h->disk)
+			blk_mq_unfreeze_queue(h->disk->queue);
+}
+
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+	struct nvme_ns_head *h;
+
+	lockdep_assert_held(&subsys->lock);
+	list_for_each_entry(h, &subsys->nsheads, entry)
+		if (h->disk)
+			blk_mq_freeze_queue_wait(h->disk->queue);
+}
+
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+	struct nvme_ns_head *h;
+
+	lockdep_assert_held(&subsys->lock);
+	list_for_each_entry(h, &subsys->nsheads, entry)
+		if (h->disk)
+			blk_freeze_queue_start(h->disk->queue);
+}
+
 /*
  * If multipathing is enabled we need to always use the subsystem instance
  * number for numbering our devices to avoid conflicts between subsystems that
@@ -104,18 +134,34 @@ static const char *nvme_ana_state_names[] = {
 	[NVME_ANA_CHANGE]		= "change",
 };
 
-void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
 	struct nvme_ns_head *head = ns->head;
+	bool changed = false;
 	int node;
 
 	if (!head)
-		return;
+		goto out;
 
 	for_each_node(node) {
-		if (ns == rcu_access_pointer(head->current_path[node]))
+		if (ns == rcu_access_pointer(head->current_path[node])) {
 			rcu_assign_pointer(head->current_path[node], NULL);
+			changed = true;
+		}
 	}
+out:
+	return changed;
+}
+
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
+{
+	struct nvme_ns *ns;
+
+	mutex_lock(&ctrl->scan_lock);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		if (nvme_mpath_clear_current_path(ns))
+			kblockd_schedule_work(&ns->head->requeue_work);
+	mutex_unlock(&ctrl->scan_lock);
 }
 
 static bool nvme_path_is_disabled(struct nvme_ns *ns)
@@ -226,6 +272,24 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
 	return ns;
 }
 
+static bool nvme_available_path(struct nvme_ns_head *head)
+{
+	struct nvme_ns *ns;
+
+	list_for_each_entry_rcu(ns, &head->list, siblings) {
+		switch (ns->ctrl->state) {
+		case NVME_CTRL_LIVE:
+		case NVME_CTRL_RESETTING:
+		case NVME_CTRL_CONNECTING:
+			/* fallthru */
+			return true;
+		default:
+			break;
+		}
+	}
+	return false;
+}
+
 static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
 		struct bio *bio)
 {
@@ -252,14 +316,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
 				      disk_devt(ns->head->disk),
 				      bio->bi_iter.bi_sector);
 		ret = direct_make_request(bio);
-	} else if (!list_empty_careful(&head->list)) {
-		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
+	} else if (nvme_available_path(head)) {
+		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
 
 		spin_lock_irq(&head->requeue_lock);
 		bio_list_add(&head->requeue_list, bio);
 		spin_unlock_irq(&head->requeue_lock);
 	} else {
-		dev_warn_ratelimited(dev, "no path - failing I/O\n");
+		dev_warn_ratelimited(dev, "no available path - failing I/O\n");
 
 		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);

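The nvme_mpath_*_freeze() helpers added above walk the subsystem's ns_head disks and apply the generic blk-mq freeze primitives to each shared queue. For reference, the bracket those primitives form looks like this (a generic illustration, not code from this merge):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Generic blk-mq freeze bracket: start the freeze so new submissions are
 * held off, wait for requests already in flight to drain, then unfreeze
 * once the critical section is over.
 */
static void freeze_bracket_example(struct request_queue *q)
{
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);

	/* queue is quiesced here: nothing enters and nothing is in flight */

	blk_mq_unfreeze_queue(q);
}
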
@@ -490,6 +490,9 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
 	return ctrl->ana_log_buf != NULL;
 }
 
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
 void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
 			struct nvme_ctrl *ctrl, int *flags);
 void nvme_failover_req(struct request *req);
@@ -500,7 +503,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
 int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
-void nvme_mpath_clear_current_path(struct nvme_ns *ns);
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
 
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -548,7 +552,11 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
 static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 }
-static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+	return false;
+}
+static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 {
 }
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -568,6 +576,15 @@ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
 {
 }
+static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 #ifdef CONFIG_NVM

@@ -2695,7 +2695,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
 {
 	struct nvme_dev *dev = data;
 
-	nvme_reset_ctrl_sync(&dev->ctrl);
+	flush_work(&dev->ctrl.reset_work);
 	flush_work(&dev->ctrl.scan_work);
 	nvme_put_ctrl(&dev->ctrl);
 }
@@ -2761,6 +2761,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
+	nvme_reset_ctrl(&dev->ctrl);
 	nvme_get_ctrl(&dev->ctrl);
 	async_schedule(nvme_async_probe, dev);
 
@@ -2846,7 +2847,7 @@ static int nvme_resume(struct device *dev)
 	struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
 	struct nvme_ctrl *ctrl = &ndev->ctrl;
 
-	if (pm_resume_via_firmware() || !ctrl->npss ||
+	if (ndev->last_ps == U32_MAX ||
 	    nvme_set_power_state(ctrl, ndev->last_ps) != 0)
 		nvme_reset_ctrl(ctrl);
 	return 0;
@@ -2859,6 +2860,8 @@ static int nvme_suspend(struct device *dev)
 	struct nvme_ctrl *ctrl = &ndev->ctrl;
 	int ret = -EBUSY;
 
+	ndev->last_ps = U32_MAX;
+
 	/*
 	 * The platform does not remove power for a kernel managed suspend so
 	 * use host managed nvme power settings for lowest idle power if
@@ -2866,8 +2869,14 @@ static int nvme_suspend(struct device *dev)
 	 * shutdown. But if the firmware is involved after the suspend or the
 	 * device does not support any non-default power states, shut down the
 	 * device fully.
+	 *
+	 * If ASPM is not enabled for the device, shut down the device and allow
+	 * the PCI bus layer to put it into D3 in order to take the PCIe link
+	 * down, so as to allow the platform to achieve its minimum low-power
+	 * state (which may not be possible if the link is up).
 	 */
-	if (pm_suspend_via_firmware() || !ctrl->npss) {
+	if (pm_suspend_via_firmware() || !ctrl->npss ||
+	    !pcie_aspm_enabled(pdev)) {
 		nvme_dev_disable(ndev, true);
 		return 0;
 	}
@@ -2880,7 +2889,6 @@ static int nvme_suspend(struct device *dev)
 	    ctrl->state != NVME_CTRL_ADMIN_ONLY)
 		goto unfreeze;
 
-	ndev->last_ps = 0;
 	ret = nvme_get_power_state(ctrl, &ndev->last_ps);
 	if (ret < 0)
 		goto unfreeze;

@@ -562,13 +562,17 @@ out_destroy_cm_id:
 	return ret;
 }
 
+static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+{
+	rdma_disconnect(queue->cm_id);
+	ib_drain_qp(queue->qp);
+}
+
 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 {
 	if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
 		return;
-
-	rdma_disconnect(queue->cm_id);
-	ib_drain_qp(queue->qp);
+	__nvme_rdma_stop_queue(queue);
 }
 
 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -607,11 +611,13 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
 	else
 		ret = nvmf_connect_admin_queue(&ctrl->ctrl);
 
-	if (!ret)
+	if (!ret) {
 		set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
-	else
+	} else {
+		__nvme_rdma_stop_queue(queue);
 		dev_info(ctrl->ctrl.device,
 			"failed to connect queue: %d ret=%d\n", idx, ret);
+	}
 	return ret;
 }
 

@@ -675,6 +675,7 @@ static void nvmet_port_subsys_drop_link(struct config_item *parent,
 
 found:
 	list_del(&p->entry);
+	nvmet_port_del_ctrls(port, subsys);
 	nvmet_port_disc_changed(port, subsys);
 
 	if (list_empty(&port->subsystems))

@@ -46,6 +46,9 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
 	u16 status;
 
 	switch (errno) {
+	case 0:
+		status = NVME_SC_SUCCESS;
+		break;
 	case -ENOSPC:
 		req->error_loc = offsetof(struct nvme_rw_command, length);
 		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
@@ -280,6 +283,18 @@ void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
 }
 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
 
+void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
+{
+	struct nvmet_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		if (ctrl->port == port)
+			ctrl->ops->delete_ctrl(ctrl);
+	}
+	mutex_unlock(&subsys->lock);
+}
+
 int nvmet_enable_port(struct nvmet_port *port)
 {
 	const struct nvmet_fabrics_ops *ops;

@@ -654,6 +654,14 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
 	mutex_lock(&nvme_loop_ports_mutex);
 	list_del_init(&port->entry);
 	mutex_unlock(&nvme_loop_ports_mutex);
+
+	/*
+	 * Ensure any ctrls that are in the process of being
+	 * deleted are in fact deleted before we return
+	 * and free the port. This is to prevent active
+	 * ctrls from using a port after it's freed.
+	 */
+	flush_workqueue(nvme_delete_wq);
 }
 
 static const struct nvmet_fabrics_ops nvme_loop_ops = {

@@ -418,6 +418,9 @@ void nvmet_port_send_ana_event(struct nvmet_port *port);
 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
 
+void nvmet_port_del_ctrls(struct nvmet_port *port,
+			  struct nvmet_subsys *subsys);
+
 int nvmet_enable_port(struct nvmet_port *port);
 void nvmet_disable_port(struct nvmet_port *port);
 

@@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw);
  * of_irq_parse_one - Resolve an interrupt for a device
  * @device: the device whose interrupt is to be resolved
  * @index: index of the interrupt to resolve
- * @out_irq: structure of_irq filled by this function
+ * @out_irq: structure of_phandle_args filled by this function
  *
  * This function resolves an interrupt for a node by walking the interrupt tree,
  * finding which interrupt controller node it is attached to, and returning the

@@ -206,16 +206,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
 	for_each_child_of_node(local_fixups, child) {
 
 		for_each_child_of_node(overlay, overlay_child)
-			if (!node_name_cmp(child, overlay_child))
+			if (!node_name_cmp(child, overlay_child)) {
+				of_node_put(overlay_child);
 				break;
+			}
 
-		if (!overlay_child)
+		if (!overlay_child) {
+			of_node_put(child);
 			return -EINVAL;
+		}
 
 		err = adjust_local_phandle_references(child, overlay_child,
 				phandle_delta);
-		if (err)
+		if (err) {
+			of_node_put(child);
 			return err;
+		}
 	}
 
 	return 0;

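The resolver fix above follows from how for_each_child_of_node() manages references: the iterator holds a reference on the current child and drops it when advancing, so any early exit (break or return) leaves one reference that the exiting code must hand on or drop itself. A small sketch of that rule under the same assumption (hypothetical helper, not from the file being patched):

#include <linux/of.h>

/* Find a child by name; breaking out of the loop keeps the reference the
 * iterator took on 'child', so it is returned to the caller rather than
 * leaked.
 */
static struct device_node *find_child_named(struct device_node *parent,
					    const char *name)
{
	struct device_node *child, *found = NULL;

	for_each_child_of_node(parent, child) {
		if (of_node_name_eq(child, name)) {
			found = child;	/* keep the iterator's reference */
			break;
		}
	}
	return found;	/* caller must of_node_put() it; NULL is fine to put */
}
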
@@ -1170,6 +1170,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
 module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
 	NULL, 0644);
 
+/**
+ * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
+ * @pdev: Target device.
+ */
+bool pcie_aspm_enabled(struct pci_dev *pdev)
+{
+	struct pci_dev *bridge = pci_upstream_bridge(pdev);
+	bool ret;
+
+	if (!bridge)
+		return false;
+
+	mutex_lock(&aspm_lock);
+	ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
+	mutex_unlock(&aspm_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
+
 #ifdef CONFIG_PCIEASPM_DEBUG
 static ssize_t link_state_show(struct device *dev,
 			       struct device_attribute *attr,

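pcie_aspm_enabled() above is what the nvme_suspend() hunk earlier in this merge keys off: a driver that wants to leave its device in a low-power state across suspend can check whether the upstream link still has ASPM enabled, and fall back to a full shutdown (letting the PCI core put the function into D3 and take the link down) when it does not. A hedged sketch of that decision (the driver-side helper name is illustrative):

#include <linux/pci.h>

/* Keep the device runtime-idle across suspend only if the link can still
 * save power through ASPM; otherwise shut it down so the link itself can
 * be taken down.
 */
static bool keep_device_idle_over_suspend(struct pci_dev *pdev,
					  bool suspend_via_firmware)
{
	if (suspend_via_firmware || !pcie_aspm_enabled(pdev))
		return false;	/* power the function off fully */
	return true;		/* rely on device power states + ASPM */
}
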
@@ -10776,12 +10776,31 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 	/* This loop sets up all CPUs that are affinitized with a
 	 * irq vector assigned to the driver. All affinitized CPUs
 	 * will get a link to that vectors IRQ and EQ.
+	 *
+	 * NULL affinity mask handling:
+	 * If irq count is greater than one, log an error message.
+	 * If the null mask is received for the first irq, find the
+	 * first present cpu, and assign the eq index to ensure at
+	 * least one EQ is assigned.
 	 */
 	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
 		/* Get a CPU mask for all CPUs affinitized to this vector */
 		maskp = pci_irq_get_affinity(phba->pcidev, idx);
-		if (!maskp)
-			continue;
+		if (!maskp) {
+			if (phba->cfg_irq_chann > 1)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"3329 No affinity mask found "
+						"for vector %d (%d)\n",
+						idx, phba->cfg_irq_chann);
+			if (!idx) {
+				cpu = cpumask_first(cpu_present_mask);
+				cpup = &phba->sli4_hba.cpu_map[cpu];
+				cpup->eq = idx;
+				cpup->irq = pci_irq_vector(phba->pcidev, idx);
+				cpup->flag |= LPFC_CPU_FIRST_IRQ;
+			}
+			break;
+		}
 
 		i = 0;
 		/* Loop through all CPUs associated with vector idx */

@@ -4,7 +4,7 @@
 #
 
 menuconfig SOUNDWIRE
-	bool "SoundWire support"
+	tristate "SoundWire support"
 	help
 	  SoundWire is a 2-Pin interface with data and clock line ratified
 	  by the MIPI Alliance. SoundWire is used for transporting data
@@ -17,17 +17,12 @@ if SOUNDWIRE
 
 comment "SoundWire Devices"
 
-config SOUNDWIRE_BUS
-	tristate
-	select REGMAP_SOUNDWIRE
-
 config SOUNDWIRE_CADENCE
 	tristate
 
 config SOUNDWIRE_INTEL
 	tristate "Intel SoundWire Master driver"
 	select SOUNDWIRE_CADENCE
-	select SOUNDWIRE_BUS
 	depends on X86 && ACPI && SND_SOC
 	help
 	  SoundWire Intel Master driver.