Merge tag 'iommu-updates-v5.9' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:

 - Removal of the dev->archdata.iommu (or similar) pointers from most
   architectures. Only Sparc is left, but this is private to Sparc as
   their drivers don't use the IOMMU-API. (A short sketch of the
   replacement accessors follows this list.)

 - ARM-SMMU updates from Will Deacon:

     - Support for SMMU-500 implementation in Marvell Armada-AP806 SoC

     - Support for SMMU-500 implementation in NVIDIA Tegra194 SoC

     - DT compatible string updates

     - Remove unused IOMMU_SYS_CACHE_ONLY flag

     - Move ARM-SMMU drivers into their own subdirectory

 - Intel VT-d updates from Lu Baolu:

     - Misc tweaks and fixes for vSVA

     - Report/response page request events

     - Cleanups

 - Move the Kconfig and Makefile bits for the AMD and Intel drivers into
   their respective subdirectories.

 - MT6779 IOMMU Support

 - Support for new chipsets in the Renesas IOMMU driver

 - Other misc cleanups and fixes (e.g. to improve compile test coverage)
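
For illustration, the archdata.iommu removal in the first bullet boils down
to IOMMU drivers storing their per-device data through the generic accessors
instead of an arch-specific pointer. This is a minimal, hedged sketch: the
"my_iommu_owner" structure and helper names are hypothetical, while
dev_iommu_priv_get()/dev_iommu_priv_set() are the helpers the drivers in this
series are converted to (see e.g. the exynos-iommu and fsl_pamu hunks below):

	#include <linux/device.h>
	#include <linux/iommu.h>

	/*
	 * Hypothetical per-master data an IOMMU driver used to hang off
	 * dev->archdata.iommu.
	 */
	struct my_iommu_owner {
		int stream_id;
	};

	static void my_iommu_bind_owner(struct device *dev,
					struct my_iommu_owner *owner)
	{
		/* before: dev->archdata.iommu = owner; */
		dev_iommu_priv_set(dev, owner);
	}

	static struct my_iommu_owner *my_iommu_owner_of(struct device *dev)
	{
		/* before: return dev->archdata.iommu; */
		return dev_iommu_priv_get(dev);
	}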

* tag 'iommu-updates-v5.9' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (77 commits)
  iommu/amd: Move Kconfig and Makefile bits down into amd directory
  iommu/vt-d: Move Kconfig and Makefile bits down into intel directory
  iommu/arm-smmu: Move Arm SMMU drivers into their own subdirectory
  iommu/vt-d: Skip TE disabling on quirky gfx dedicated iommu
  iommu: Add gfp parameter to io_pgtable_ops->map()
  iommu: Mark __iommu_map_sg() as static
  iommu/vt-d: Rename intel-pasid.h to pasid.h
  iommu/vt-d: Add page response ops support
  iommu/vt-d: Report page request faults for guest SVA
  iommu/vt-d: Add a helper to get svm and sdev for pasid
  iommu/vt-d: Refactor device_to_iommu() helper
  iommu/vt-d: Disable multiple GPASID-dev bind
  iommu/vt-d: Warn on out-of-range invalidation address
  iommu/vt-d: Fix devTLB flush for vSVA
  iommu/vt-d: Handle non-page aligned address
  iommu/vt-d: Fix PASID devTLB invalidation
  iommu/vt-d: Remove global page support in devTLB flush
  iommu/vt-d: Enforce PASID devTLB field mask
  iommu: Make some functions static
  iommu/amd: Remove double zero check
  ...
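
The "iommu: Add gfp parameter to io_pgtable_ops->map()" change above threads
the caller's allocation context down into the io-pgtable code; the call sites
updated in the diff below now pass GFP_KERNEL, or GFP_ATOMIC where the mapping
is performed under a spinlock (as in qcom_iommu). A minimal caller-side
sketch, assuming a hypothetical "my_domain" wrapper around an io-pgtable
instance:

	#include <linux/gfp.h>
	#include <linux/io-pgtable.h>

	/* Hypothetical domain wrapper holding an io-pgtable instance. */
	struct my_domain {
		struct io_pgtable_ops *pgtbl_ops;
	};

	static int my_domain_map(struct my_domain *dom, unsigned long iova,
				 phys_addr_t paddr, size_t size, int prot,
				 gfp_t gfp)
	{
		/* ops->map() now takes the caller's gfp flags explicitly. */
		return dom->pgtbl_ops->map(dom->pgtbl_ops, iova, paddr,
					   size, prot, gfp);
	}
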
Linus Torvalds 2020-08-11 14:13:24 -07:00
Parents 96f970feeb e46b3c0d01
Commit 952ace797c
63 changed files: 1469 additions and 575 deletions

View file

@ -125,6 +125,9 @@ stable kernels.
| Cavium | ThunderX2 Core | #219 | CAVIUM_TX2_ERRATUM_219 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Marvell | ARM-MMU-500 | #582743 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+

View file

@ -37,7 +37,18 @@ properties:
- enum:
- qcom,sc7180-smmu-500
- qcom,sdm845-smmu-500
- qcom,sm8150-smmu-500
- qcom,sm8250-smmu-500
- const: arm,mmu-500
- description: Marvell SoCs implementing "arm,mmu-500"
items:
- const: marvell,ap806-smmu-500
- const: arm,mmu-500
- description: NVIDIA SoCs that program two ARM MMU-500s identically
items:
- enum:
- nvidia,tegra194-smmu
- const: nvidia,smmu-500
- items:
- const: arm,mmu-500
- const: arm,smmu-v2
@ -55,7 +66,8 @@ properties:
- cavium,smmu-v2
reg:
maxItems: 1
minItems: 1
maxItems: 2
'#global-interrupts':
description: The number of global interrupts exposed by the device.
@ -138,6 +150,23 @@ required:
additionalProperties: false
allOf:
- if:
properties:
compatible:
contains:
enum:
- nvidia,tegra194-smmu
then:
properties:
reg:
minItems: 2
maxItems: 2
else:
properties:
reg:
maxItems: 1
examples:
- |+
/* SMMU with stream matching or stream indexing */

View file

@ -58,6 +58,7 @@ Required properties:
- compatible : must be one of the following string:
"mediatek,mt2701-m4u" for mt2701 which uses generation one m4u HW.
"mediatek,mt2712-m4u" for mt2712 which uses generation two m4u HW.
"mediatek,mt6779-m4u" for mt6779 which uses generation two m4u HW.
"mediatek,mt7623-m4u", "mediatek,mt2701-m4u" for mt7623 which uses
generation one m4u HW.
"mediatek,mt8173-m4u" for mt8173 which uses generation two m4u HW.
@ -78,6 +79,7 @@ Required properties:
Specifies the mtk_m4u_id as defined in
dt-binding/memory/mt2701-larb-port.h for mt2701, mt7623
dt-binding/memory/mt2712-larb-port.h for mt2712,
dt-binding/memory/mt6779-larb-port.h for mt6779,
dt-binding/memory/mt8173-larb-port.h for mt8173, and
dt-binding/memory/mt8183-larb-port.h for mt8183.

View file

@ -36,6 +36,7 @@ properties:
- renesas,ipmmu-r8a774c0 # RZ/G2E
- renesas,ipmmu-r8a7795 # R-Car H3
- renesas,ipmmu-r8a7796 # R-Car M3-W
- renesas,ipmmu-r8a77961 # R-Car M3-W+
- renesas,ipmmu-r8a77965 # R-Car M3-N
- renesas,ipmmu-r8a77970 # R-Car V3M
- renesas,ipmmu-r8a77980 # R-Car V3H

View file

@ -5,7 +5,7 @@ The hardware block diagram please check bindings/iommu/mediatek,iommu.txt
Mediatek SMI have two generations of HW architecture, here is the list
which generation the SoCs use:
generation 1: mt2701 and mt7623.
generation 2: mt2712, mt8173 and mt8183.
generation 2: mt2712, mt6779, mt8173 and mt8183.
There's slight differences between the two SMI, for generation 2, the
register which control the iommu port is at each larb's register base. But
@ -18,6 +18,7 @@ Required properties:
- compatible : must be one of :
"mediatek,mt2701-smi-common"
"mediatek,mt2712-smi-common"
"mediatek,mt6779-smi-common"
"mediatek,mt7623-smi-common", "mediatek,mt2701-smi-common"
"mediatek,mt8173-smi-common"
"mediatek,mt8183-smi-common"
@ -35,7 +36,7 @@ Required properties:
and these 2 option clocks for generation 2 smi HW:
- "gals0": the path0 clock of GALS(Global Async Local Sync).
- "gals1": the path1 clock of GALS(Global Async Local Sync).
Here is the list which has this GALS: mt8183.
Here is the list which has this GALS: mt6779 and mt8183.
Example:
smi_common: smi@14022000 {

View file

@ -6,6 +6,7 @@ Required properties:
- compatible : must be one of :
"mediatek,mt2701-smi-larb"
"mediatek,mt2712-smi-larb"
"mediatek,mt6779-smi-larb"
"mediatek,mt7623-smi-larb", "mediatek,mt2701-smi-larb"
"mediatek,mt8173-smi-larb"
"mediatek,mt8183-smi-larb"
@ -21,7 +22,7 @@ Required properties:
- "gals": the clock for GALS(Global Async Local Sync).
Here is the list which has this GALS: mt8183.
Required property for mt2701, mt2712 and mt7623:
Required property for mt2701, mt2712, mt6779 and mt7623:
- mediatek,larb-id :the hardware id of this larb.
Example:

View file

@ -1505,7 +1505,7 @@ R: Robin Murphy <robin.murphy@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/iommu/arm,smmu*
F: drivers/iommu/arm-smmu*
F: drivers/iommu/arm/
F: drivers/iommu/io-pgtable-arm-v7s.c
F: drivers/iommu/io-pgtable-arm.c
@ -9102,6 +9102,7 @@ F: drivers/iommu/
F: include/linux/iommu.h
F: include/linux/iova.h
F: include/linux/of_iommu.h
F: include/uapi/linux/iommu.h
IO_URING
M: Jens Axboe <axboe@kernel.dk>
@ -16973,8 +16974,10 @@ F: drivers/i2c/busses/i2c-tegra.c
TEGRA IOMMU DRIVERS
M: Thierry Reding <thierry.reding@gmail.com>
R: Krishna Reddy <vdumpa@nvidia.com>
L: linux-tegra@vger.kernel.org
S: Supported
F: drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
F: drivers/iommu/tegra*
TEGRA KBC DRIVER

View file

@ -9,9 +9,6 @@ struct dev_archdata {
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif

View file

@ -6,9 +6,6 @@
#define __ASM_DEVICE_H
struct dev_archdata {
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
};
struct pdev_archdata {

View file

@ -6,9 +6,6 @@
#define _ASM_IA64_DEVICE_H
struct dev_archdata {
#ifdef CONFIG_IOMMU_API
void *iommu; /* hook for IOMMU specific extension */
#endif
};
struct pdev_archdata {

View file

@ -29,9 +29,6 @@ struct dev_archdata {
struct iommu_table *iommu_table_base;
#endif
#ifdef CONFIG_IOMMU_API
void *iommu_domain;
#endif
#ifdef CONFIG_PPC64
struct pci_dn *pci_data;
#endif

View file

@ -3,9 +3,6 @@
#define _ASM_X86_DEVICE_H
struct dev_archdata {
#ifdef CONFIG_IOMMU_API
void *iommu; /* hook for IOMMU specific extension */
#endif
};
struct pdev_archdata {

View file

@ -24,6 +24,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/iommu.h>
#include <drm/drm_managed.h>
@ -118,6 +119,9 @@ struct drm_i915_private *mock_gem_device(void)
{
struct drm_i915_private *i915;
struct pci_dev *pdev;
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
struct dev_iommu iommu;
#endif
int err;
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
@ -136,8 +140,10 @@ struct drm_i915_private *mock_gem_device(void)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
/* hack to disable iommu for the fake device; force identity mapping */
pdev->dev.archdata.iommu = (void *)-1;
/* HACK HACK HACK to disable iommu for the fake device; force identity mapping */
memset(&iommu, 0, sizeof(iommu));
iommu.priv = (void *)-1;
pdev->dev.iommu = &iommu;
#endif
pci_set_drvdata(pdev, i915);

View file

@ -262,7 +262,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
while (len) {
size_t pgsize = get_pgsize(iova | paddr, len);
ops->map(ops, iova, paddr, pgsize, prot);
ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
iova += pgsize;
paddr += pgsize;
len -= pgsize;

View file

@ -129,140 +129,8 @@ config MSM_IOMMU
If unsure, say N here.
config IOMMU_PGTABLES_L2
def_bool y
depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
# AMD IOMMU support
config AMD_IOMMU
bool "AMD IOMMU support"
select SWIOTLB
select PCI_MSI
select PCI_ATS
select PCI_PRI
select PCI_PASID
select IOMMU_API
select IOMMU_IOVA
select IOMMU_DMA
depends on X86_64 && PCI && ACPI
help
With this option you can enable support for AMD IOMMU hardware in
your system. An IOMMU is a hardware component which provides
remapping of DMA memory accesses from devices. With an AMD IOMMU you
can isolate the DMA memory of different devices and protect the
system from misbehaving device drivers or hardware.
You can find out if your system has an AMD IOMMU if you look into
your BIOS for an option to enable it or if you have an IVRS ACPI
table.
config AMD_IOMMU_V2
tristate "AMD IOMMU Version 2 driver"
depends on AMD_IOMMU
select MMU_NOTIFIER
help
This option enables support for the AMD IOMMUv2 features of the IOMMU
hardware. Select this option if you want to use devices that support
the PCI PRI and PASID interface.
config AMD_IOMMU_DEBUGFS
bool "Enable AMD IOMMU internals in DebugFS"
depends on AMD_IOMMU && IOMMU_DEBUGFS
help
!!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!!
DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!!
Exposes AMD IOMMU device internals in DebugFS.
This option is -NOT- intended for production environments, and should
not generally be enabled.
# Intel IOMMU support
config DMAR_TABLE
bool
config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64)
select DMA_OPS
select IOMMU_API
select IOMMU_IOVA
select NEED_DMA_MAP_STATE
select DMAR_TABLE
select SWIOTLB
select IOASID
help
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
These DMA remapping devices are reported via ACPI tables
and include PCI device scope covered by these DMA
remapping devices.
config INTEL_IOMMU_DEBUGFS
bool "Export Intel IOMMU internals in Debugfs"
depends on INTEL_IOMMU && IOMMU_DEBUGFS
help
!!!WARNING!!!
DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
Expose Intel IOMMU internals in Debugfs.
This option is -NOT- intended for production environments, and should
only be enabled for debugging Intel IOMMU.
config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on INTEL_IOMMU && X86_64
select PCI_PASID
select PCI_PRI
select MMU_NOTIFIER
select IOASID
help
Shared Virtual Memory (SVM) provides a facility for devices
to access DMA resources through process address space by
means of a Process Address Space ID (PASID).
config INTEL_IOMMU_DEFAULT_ON
def_bool y
prompt "Enable Intel DMA Remapping Devices by default"
depends on INTEL_IOMMU
help
Selecting this option will enable a DMAR device at boot time if
one is found. If this option is not selected, DMAR support can
be enabled by passing intel_iommu=on to the kernel.
config INTEL_IOMMU_BROKEN_GFX_WA
bool "Workaround broken graphics drivers (going away soon)"
depends on INTEL_IOMMU && BROKEN && X86
help
Current Graphics drivers tend to use physical address
for DMA and avoid using DMA APIs. Setting this config
option permits the IOMMU driver to set a unity map for
all the OS-visible memory. Hence the driver can continue
to use physical addresses for DMA, at least until this
option is removed in the 2.6.32 kernel.
config INTEL_IOMMU_FLOPPY_WA
def_bool y
depends on INTEL_IOMMU && X86
help
Floppy disk drivers are known to bypass DMA API calls
thereby failing to work when IOMMU is enabled. This
workaround will setup a 1:1 mapping for the first
16MiB to make floppy (an ISA device) work.
config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
bool "Enable Intel IOMMU scalable mode by default"
depends on INTEL_IOMMU
help
Selecting this option will enable by default the scalable mode if
hardware presents the capability. The scalable mode is defined in
VT-d 3.0. The scalable mode capability could be checked by reading
/sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
is not selected, scalable mode support could also be enabled by
passing intel_iommu=sm_on to the kernel. If not sure, please use
the default value.
source "drivers/iommu/amd/Kconfig"
source "drivers/iommu/intel/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
@ -276,7 +144,6 @@ config IRQ_REMAP
# OMAP IOMMU support
config OMAP_IOMMU
bool "OMAP IOMMU Support"
depends on ARM && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
depends on ARCH_OMAP2PLUS || COMPILE_TEST
select IOMMU_API
help
@ -294,7 +161,6 @@ config OMAP_IOMMU_DEBUG
config ROCKCHIP_IOMMU
bool "Rockchip IOMMU Support"
depends on ARM || ARM64 || (COMPILE_TEST && (ARM64 || IA64 || SPARC))
depends on ARCH_ROCKCHIP || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
@ -311,7 +177,6 @@ config SUN50I_IOMMU
depends on ARCH_SUNXI || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
select IOMMU_DMA
help
Support for the IOMMU introduced in the Allwinner H6 SoCs.
@ -338,7 +203,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
depends on ARCH_EXYNOS && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
depends on ARCH_EXYNOS || COMPILE_TEST
depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes
select IOMMU_API
select ARM_DMA_USE_IOMMU
@ -361,7 +226,6 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
bool "Renesas VMSA-compatible IPMMU"
depends on ARM || IOMMU_DMA
depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
@ -383,7 +247,7 @@ config SPAPR_TCE_IOMMU
# ARM IOMMU support
config ARM_SMMU
tristate "ARM Ltd. System MMU (SMMU) Support"
depends on (ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)) && MMU
depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
@ -469,11 +333,9 @@ config S390_AP_IOMMU
config MTK_IOMMU
bool "MTK IOMMU Support"
depends on HAS_DMA
depends on ARCH_MEDIATEK || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
select IOMMU_DMA
select IOMMU_IO_PGTABLE_ARMV7S
select MEMORY
select MTK_SMI

View file

@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += amd/ intel/ arm/
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
@ -11,19 +12,8 @@ obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd/iommu.o amd/init.o amd/quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd/debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd/iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += intel/dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel/iommu.o intel/pasid.o
obj-$(CONFIG_INTEL_IOMMU) += intel/trace.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel/debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += intel/svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel/irq_remapping.o irq_remapping.o
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
obj-$(CONFIG_MTK_IOMMU) += mtk_iommu.o
obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
@ -35,6 +25,5 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o

drivers/iommu/amd/Kconfig (new file, 44 lines added)
View file

@ -0,0 +1,44 @@
# SPDX-License-Identifier: GPL-2.0-only
# AMD IOMMU support
config AMD_IOMMU
bool "AMD IOMMU support"
select SWIOTLB
select PCI_MSI
select PCI_ATS
select PCI_PRI
select PCI_PASID
select IOMMU_API
select IOMMU_IOVA
select IOMMU_DMA
depends on X86_64 && PCI && ACPI
help
With this option you can enable support for AMD IOMMU hardware in
your system. An IOMMU is a hardware component which provides
remapping of DMA memory accesses from devices. With an AMD IOMMU you
can isolate the DMA memory of different devices and protect the
system from misbehaving device drivers or hardware.
You can find out if your system has an AMD IOMMU if you look into
your BIOS for an option to enable it or if you have an IVRS ACPI
table.
config AMD_IOMMU_V2
tristate "AMD IOMMU Version 2 driver"
depends on AMD_IOMMU
select MMU_NOTIFIER
help
This option enables support for the AMD IOMMUv2 features of the IOMMU
hardware. Select this option if you want to use devices that support
the PCI PRI and PASID interface.
config AMD_IOMMU_DEBUGFS
bool "Enable AMD IOMMU internals in DebugFS"
depends on AMD_IOMMU && IOMMU_DEBUGFS
help
!!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!!
DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!!
Exposes AMD IOMMU device internals in DebugFS.
This option is -NOT- intended for production environments, and should
not generally be enabled.

View file

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o

View file

@ -720,21 +720,14 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
static void __init free_ppr_log(struct amd_iommu *iommu)
{
if (iommu->ppr_log == NULL)
return;
free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
if (iommu->ga_log)
free_pages((unsigned long)iommu->ga_log,
get_order(GA_LOG_SIZE));
if (iommu->ga_log_tail)
free_pages((unsigned long)iommu->ga_log_tail,
get_order(8));
free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}
@ -1842,7 +1835,7 @@ static void print_iommu_info(void)
pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
pci_info(pdev, "Extended features (%#llx):\n",
pci_info(pdev, "Extended features (%#llx):",
iommu->features);
for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))

View file

@ -162,7 +162,18 @@ static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
}
static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
static void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
{
atomic64_set(&domain->pt_root, root);
}
static void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
{
amd_iommu_domain_set_pt_root(domain, 0);
}
static void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode)
{
u64 pt_root;
@ -170,7 +181,7 @@ static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
pt_root = mode & 7;
pt_root |= (u64)root;
return pt_root;
amd_iommu_domain_set_pt_root(domain, pt_root);
}
static struct iommu_dev_data *alloc_dev_data(u16 devid)
@ -1410,7 +1421,7 @@ static bool increase_address_space(struct protection_domain *domain,
struct domain_pgtable pgtable;
unsigned long flags;
bool ret = true;
u64 *pte, root;
u64 *pte;
spin_lock_irqsave(&domain->lock, flags);
@ -1438,8 +1449,7 @@ static bool increase_address_space(struct protection_domain *domain,
* Device Table needs to be updated and flushed before the new root can
* be published.
*/
root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode);
atomic64_set(&domain->pt_root, root);
amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);
ret = true;
@ -2319,7 +2329,7 @@ static void protection_domain_free(struct protection_domain *domain)
domain_id_free(domain->id);
amd_iommu_domain_get_pgtable(domain, &pgtable);
atomic64_set(&domain->pt_root, 0);
amd_iommu_domain_clr_pt_root(domain);
free_pagetable(&pgtable);
kfree(domain);
@ -2327,7 +2337,7 @@ static void protection_domain_free(struct protection_domain *domain)
static int protection_domain_init(struct protection_domain *domain, int mode)
{
u64 *pt_root = NULL, root;
u64 *pt_root = NULL;
BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
@ -2343,8 +2353,7 @@ static int protection_domain_init(struct protection_domain *domain, int mode)
return -ENOMEM;
}
root = amd_iommu_domain_encode_pgtable(pt_root, mode);
atomic64_set(&domain->pt_root, root);
amd_iommu_domain_set_pgtable(domain, pt_root, mode);
return 0;
}
@ -2713,8 +2722,8 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
/* First save pgtable configuration*/
amd_iommu_domain_get_pgtable(domain, &pgtable);
/* Update data structure */
atomic64_set(&domain->pt_root, 0);
/* Remove page-table from domain */
amd_iommu_domain_clr_pt_root(domain);
/* Make changes visible to IOMMUs */
update_domain(domain);

View file

@ -0,0 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += arm-smmu/ arm-smmu-v3/

View file

@ -0,0 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o

View file

@ -1479,7 +1479,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
}
/*
* Try to unlock the cmq lock. This will fail if we're the last
* Try to unlock the cmdq lock. This will fail if we're the last
* reader, in which case we can safely update cmdq->q.llq.cons
*/
if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
@ -2850,7 +2850,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
if (!ops)
return -ENODEV;
return ops->map(ops, iova, paddr, size, prot);
return ops->map(ops, iova, paddr, size, prot, gfp);
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,

View file

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o

View file

@ -147,16 +147,57 @@ static const struct arm_smmu_impl arm_mmu500_impl = {
.reset = arm_mmu500_reset,
};
static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
{
/*
* Marvell Armada-AP806 erratum #582743.
* Split all the readq to double readl
*/
return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
}
static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
u64 val)
{
/*
* Marvell Armada-AP806 erratum #582743.
* Split all the writeq to double writel
*/
hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
}
static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
{
/*
* Armada-AP806 erratum #582743.
* Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64
* formats altogether and allow using 32 bits access on the
* interconnect.
*/
smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
ARM_SMMU_FEAT_FMT_AARCH64_16K |
ARM_SMMU_FEAT_FMT_AARCH64_64K);
return 0;
}
static const struct arm_smmu_impl mrvl_mmu500_impl = {
.read_reg64 = mrvl_mmu500_readq,
.write_reg64 = mrvl_mmu500_writeq,
.cfg_probe = mrvl_mmu500_cfg_probe,
.reset = arm_mmu500_reset,
};
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
const struct device_node *np = smmu->dev->of_node;
/*
* We will inevitably have to combine model-specific implementation
* quirks with platform-specific integration quirks, but everything
* we currently support happens to work out as straightforward
* mutually-exclusive assignments.
* Set the impl for model-specific implementation quirks first,
* such that platform integration quirks can pick it up and
* inherit from it if necessary.
*/
switch (smmu->model) {
case ARM_MMU500:
@ -168,12 +209,21 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
break;
}
/* This is implicitly MMU-400 */
if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
return nvidia_smmu_impl_init(smmu);
if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
of_device_is_compatible(np, "qcom,sc7180-smmu-500"))
of_device_is_compatible(np, "qcom,sc7180-smmu-500") ||
of_device_is_compatible(np, "qcom,sm8150-smmu-500") ||
of_device_is_compatible(np, "qcom,sm8250-smmu-500"))
return qcom_smmu_impl_init(smmu);
if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
smmu->impl = &mrvl_mmu500_impl;
return smmu;
}

View file

@ -0,0 +1,278 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2019-2020 NVIDIA CORPORATION. All rights reserved.
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "arm-smmu.h"
/*
* Tegra194 has three ARM MMU-500 Instances.
* Two of them are used together and must be programmed identically for
* interleaved IOVA accesses across them and translates accesses from
* non-isochronous HW devices.
* Third one is used for translating accesses from isochronous HW devices.
* This implementation supports programming of the two instances that must
* be programmed identically.
* The third instance usage is through standard arm-smmu driver itself and
* is out of scope of this implementation.
*/
#define NUM_SMMU_INSTANCES 2
struct nvidia_smmu {
struct arm_smmu_device smmu;
void __iomem *bases[NUM_SMMU_INSTANCES];
};
static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
unsigned int inst, int page)
{
struct nvidia_smmu *nvidia_smmu;
nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu);
return nvidia_smmu->bases[inst] + (page << smmu->pgshift);
}
static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
int page, int offset)
{
void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;
return readl_relaxed(reg);
}
static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
int page, int offset, u32 val)
{
unsigned int i;
for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
writel_relaxed(val, reg);
}
}
static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
int page, int offset)
{
void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;
return readq_relaxed(reg);
}
static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
int page, int offset, u64 val)
{
unsigned int i;
for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
writeq_relaxed(val, reg);
}
}
static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
int sync, int status)
{
unsigned int delay;
arm_smmu_writel(smmu, page, sync, 0);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
unsigned int spin_cnt;
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
u32 val = 0;
unsigned int i;
for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
void __iomem *reg;
reg = nvidia_smmu_page(smmu, i, page) + status;
val |= readl_relaxed(reg);
}
if (!(val & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
return;
cpu_relax();
}
udelay(delay);
}
dev_err_ratelimited(smmu->dev,
"TLB sync timed out -- SMMU may be deadlocked\n");
}
static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
{
unsigned int i;
for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
u32 val;
void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
ARM_SMMU_GR0_sGFSR;
/* clear global FSR */
val = readl_relaxed(reg);
writel_relaxed(val, reg);
}
return 0;
}
static irqreturn_t nvidia_smmu_global_fault_inst(int irq,
struct arm_smmu_device *smmu,
int inst)
{
u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
void __iomem *gr0_base = nvidia_smmu_page(smmu, inst, 0);
gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
if (!gfsr)
return IRQ_NONE;
gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
dev_err_ratelimited(smmu->dev,
"Unexpected global fault, this could be serious\n");
dev_err_ratelimited(smmu->dev,
"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
gfsr, gfsynr0, gfsynr1, gfsynr2);
writel_relaxed(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
return IRQ_HANDLED;
}
static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
{
unsigned int inst;
irqreturn_t ret = IRQ_NONE;
struct arm_smmu_device *smmu = dev;
for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
irqreturn_t irq_ret;
irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
if (irq_ret == IRQ_HANDLED)
ret = IRQ_HANDLED;
}
return ret;
}
static irqreturn_t nvidia_smmu_context_fault_bank(int irq,
struct arm_smmu_device *smmu,
int idx, int inst)
{
u32 fsr, fsynr, cbfrsynra;
unsigned long iova;
void __iomem *gr1_base = nvidia_smmu_page(smmu, inst, 1);
void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);
fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
if (!(fsr & ARM_SMMU_FSR_FAULT))
return IRQ_NONE;
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(idx));
dev_err_ratelimited(smmu->dev,
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
fsr, iova, fsynr, cbfrsynra, idx);
writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
return IRQ_HANDLED;
}
static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
{
int idx;
unsigned int inst;
irqreturn_t ret = IRQ_NONE;
struct arm_smmu_device *smmu;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain;
smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
smmu = smmu_domain->smmu;
for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
irqreturn_t irq_ret;
/*
* Interrupt line is shared between all contexts.
* Check for faults across all contexts.
*/
for (idx = 0; idx < smmu->num_context_banks; idx++) {
irq_ret = nvidia_smmu_context_fault_bank(irq, smmu,
idx, inst);
if (irq_ret == IRQ_HANDLED)
ret = IRQ_HANDLED;
}
}
return ret;
}
static const struct arm_smmu_impl nvidia_smmu_impl = {
.read_reg = nvidia_smmu_read_reg,
.write_reg = nvidia_smmu_write_reg,
.read_reg64 = nvidia_smmu_read_reg64,
.write_reg64 = nvidia_smmu_write_reg64,
.reset = nvidia_smmu_reset,
.tlb_sync = nvidia_smmu_tlb_sync,
.global_fault = nvidia_smmu_global_fault,
.context_fault = nvidia_smmu_context_fault,
};
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
{
struct resource *res;
struct device *dev = smmu->dev;
struct nvidia_smmu *nvidia_smmu;
struct platform_device *pdev = to_platform_device(dev);
nvidia_smmu = devm_kzalloc(dev, sizeof(*nvidia_smmu), GFP_KERNEL);
if (!nvidia_smmu)
return ERR_PTR(-ENOMEM);
/*
* Copy the data from struct arm_smmu_device *smmu allocated in
* arm-smmu.c. The smmu from struct nvidia_smmu replaces the smmu
* pointer used in arm-smmu.c once this function returns.
* This is necessary to derive nvidia_smmu from smmu pointer passed
* through arm_smmu_impl function calls subsequently.
*/
nvidia_smmu->smmu = *smmu;
/* Instance 0 is ioremapped by arm-smmu.c. */
nvidia_smmu->bases[0] = smmu->base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return ERR_PTR(-ENODEV);
nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res);
if (IS_ERR(nvidia_smmu->bases[1]))
return ERR_CAST(nvidia_smmu->bases[1]);
nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
/*
* Free the struct arm_smmu_device *smmu allocated in arm-smmu.c.
* Once this function returns, arm-smmu.c would use arm_smmu_device
* allocated as part of struct nvidia_smmu.
*/
devm_kfree(dev, smmu);
return &nvidia_smmu->smmu;
}

View file

@ -52,9 +52,6 @@
*/
#define QCOM_DUMMY_VAL -1
#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
#define TLB_SPIN_COUNT 10
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
@ -673,6 +670,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
irqreturn_t (*context_fault)(int irq, void *dev);
mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
@ -835,7 +833,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
* handler seeing a half-initialised domain state.
*/
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
if (smmu->impl && smmu->impl->context_fault)
context_fault = smmu->impl->context_fault;
else
context_fault = arm_smmu_context_fault;
ret = devm_request_irq(smmu->dev, irq, context_fault,
IRQF_SHARED, "arm-smmu-context-fault", domain);
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
@ -1227,7 +1231,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
arm_smmu_rpm_get(smmu);
ret = ops->map(ops, iova, paddr, size, prot);
ret = ops->map(ops, iova, paddr, size, prot, gfp);
arm_smmu_rpm_put(smmu);
return ret;
@ -1728,7 +1732,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
unsigned int size;
u32 id;
bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
int i;
int i, ret;
dev_notice(smmu->dev, "probing hardware configuration...\n");
dev_notice(smmu->dev, "SMMUv%d with:\n",
@ -1891,6 +1895,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
}
if (smmu->impl && smmu->impl->cfg_probe) {
ret = smmu->impl->cfg_probe(smmu);
if (ret)
return ret;
}
/* Now we've corralled the various formats, what'll it do? */
if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
@ -1918,9 +1928,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
smmu->ipa_size, smmu->pa_size);
if (smmu->impl && smmu->impl->cfg_probe)
return smmu->impl->cfg_probe(smmu);
return 0;
}
@ -1946,6 +1953,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
{ .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
{ },
};
@ -2107,6 +2115,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
irqreturn_t (*global_fault)(int irq, void *dev);
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
@ -2123,10 +2132,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
smmu = arm_smmu_impl_init(smmu);
if (IS_ERR(smmu))
return PTR_ERR(smmu);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
@ -2138,6 +2143,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
*/
smmu->numpage = resource_size(res);
smmu = arm_smmu_impl_init(smmu);
if (IS_ERR(smmu))
return PTR_ERR(smmu);
num_irqs = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
num_irqs++;
@ -2193,9 +2202,14 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->num_context_irqs = smmu->num_context_banks;
}
if (smmu->impl && smmu->impl->global_fault)
global_fault = smmu->impl->global_fault;
else
global_fault = arm_smmu_global_fault;
for (i = 0; i < smmu->num_global_irqs; ++i) {
err = devm_request_irq(smmu->dev, smmu->irqs[i],
arm_smmu_global_fault,
global_fault,
IRQF_SHARED,
"arm-smmu global fault",
smmu);

View file

@ -18,6 +18,7 @@
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/irqreturn.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@ -236,6 +237,8 @@ enum arm_smmu_cbar_type {
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128
#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
#define TLB_SPIN_COUNT 10
/* Shared driver definitions */
enum arm_smmu_arch_version {
@ -387,6 +390,8 @@ struct arm_smmu_impl {
void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
int status);
int (*def_domain_type)(struct device *dev);
irqreturn_t (*global_fault)(int irq, void *dev);
irqreturn_t (*context_fault)(int irq, void *dev);
};
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
@ -450,6 +455,7 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
int arm_mmu500_reset(struct arm_smmu_device *smmu);

View file

@ -37,14 +37,20 @@
#define SMMU_INTR_SEL_NS 0x2000
enum qcom_iommu_clk {
CLK_IFACE,
CLK_BUS,
CLK_TBU,
CLK_NUM,
};
struct qcom_iommu_ctx;
struct qcom_iommu_dev {
/* IOMMU core code handle */
struct iommu_device iommu;
struct device *dev;
struct clk *iface_clk;
struct clk *bus_clk;
struct clk_bulk_data clks[CLK_NUM];
void __iomem *local_base;
u32 sec_id;
u8 num_ctxs;
@ -301,7 +307,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
ARM_SMMU_SCTLR_CFCFG;
if (IS_ENABLED(CONFIG_BIG_ENDIAN))
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
reg |= ARM_SMMU_SCTLR_E;
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
@ -438,7 +444,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
ret = ops->map(ops, iova, paddr, size, prot);
ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
return ret;
}
@ -613,32 +619,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
int ret;
ret = clk_prepare_enable(qcom_iommu->iface_clk);
if (ret) {
dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
return ret;
}
ret = clk_prepare_enable(qcom_iommu->bus_clk);
if (ret) {
dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
clk_disable_unprepare(qcom_iommu->iface_clk);
return ret;
}
return 0;
}
static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
clk_disable_unprepare(qcom_iommu->bus_clk);
clk_disable_unprepare(qcom_iommu->iface_clk);
}
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
size_t psize = 0;
@ -795,6 +775,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
struct qcom_iommu_dev *qcom_iommu;
struct device *dev = &pdev->dev;
struct resource *res;
struct clk *clk;
int ret, max_asid = 0;
/* find the max asid (which is 1:1 to ctx bank idx), so we know how
@ -817,17 +798,26 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
return PTR_ERR(qcom_iommu->local_base);
}
qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
if (IS_ERR(qcom_iommu->iface_clk)) {
clk = devm_clk_get(dev, "iface");
if (IS_ERR(clk)) {
dev_err(dev, "failed to get iface clock\n");
return PTR_ERR(qcom_iommu->iface_clk);
return PTR_ERR(clk);
}
qcom_iommu->clks[CLK_IFACE].clk = clk;
qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
if (IS_ERR(qcom_iommu->bus_clk)) {
clk = devm_clk_get(dev, "bus");
if (IS_ERR(clk)) {
dev_err(dev, "failed to get bus clock\n");
return PTR_ERR(qcom_iommu->bus_clk);
return PTR_ERR(clk);
}
qcom_iommu->clks[CLK_BUS].clk = clk;
clk = devm_clk_get_optional(dev, "tbu");
if (IS_ERR(clk)) {
dev_err(dev, "failed to get tbu clock\n");
return PTR_ERR(clk);
}
qcom_iommu->clks[CLK_TBU].clk = clk;
if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
&qcom_iommu->sec_id)) {
@ -899,14 +889,14 @@ static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
return qcom_iommu_enable_clocks(qcom_iommu);
return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
}
static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
qcom_iommu_disable_clocks(qcom_iommu);
clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks);
return 0;
}

View file

@ -173,7 +173,7 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080
#define has_sysmmu(dev) (dev->archdata.iommu != NULL)
#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)
static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
@ -226,7 +226,7 @@ static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
};
/*
* This structure is attached to dev.archdata.iommu of the master device
* This structure is attached to dev->iommu->priv of the master device
* on device add, contains a list of SYSMMU controllers defined by device tree,
* which are bound to given master device. It is usually referenced by 'owner'
* pointer.
@ -670,7 +670,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
struct device *master = data->master;
if (master) {
struct exynos_iommu_owner *owner = master->archdata.iommu;
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
if (data->domain) {
@ -688,7 +688,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
struct device *master = data->master;
if (master) {
struct exynos_iommu_owner *owner = master->archdata.iommu;
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
if (data->domain) {
@ -721,7 +721,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
}
};
static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
DMA_TO_DEVICE);
@ -837,8 +837,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
struct sysmmu_drvdata *data, *next;
unsigned long flags;
@ -875,8 +875,8 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
unsigned long flags;
@ -933,7 +933,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
if (!pent)
return ERR_PTR(-ENOMEM);
update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
kmemleak_ignore(pent);
*pgcounter = NUM_LV2ENTRIES;
handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
@ -994,7 +994,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
*pgcnt = 0;
}
update_pte(sent, mk_lv1ent_sect(paddr, prot));
exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));
spin_lock(&domain->lock);
if (lv1ent_page_zero(sent)) {
@ -1018,7 +1018,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
if (WARN_ON(!lv2ent_fault(pent)))
return -EADDRINUSE;
update_pte(pent, mk_lv2ent_spage(paddr, prot));
exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
*pgcnt -= 1;
} else { /* size == LPAGE_SIZE */
int i;
@ -1150,7 +1150,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
}
/* workaround for h/w bug in System MMU v3.3 */
update_pte(ent, ZERO_LV2LINK);
exynos_iommu_set_pte(ent, ZERO_LV2LINK);
size = SECT_SIZE;
goto done;
}
@ -1171,7 +1171,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
}
if (lv2ent_small(ent)) {
update_pte(ent, 0);
exynos_iommu_set_pte(ent, 0);
size = SPAGE_SIZE;
domain->lv2entcnt[lv1ent_offset(iova)] += 1;
goto done;
@ -1237,7 +1237,7 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
if (!has_sysmmu(dev))
@ -1263,7 +1263,7 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
static void exynos_iommu_release_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
if (!has_sysmmu(dev))
@ -1287,8 +1287,8 @@ static void exynos_iommu_release_device(struct device *dev)
static int exynos_iommu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct platform_device *sysmmu = of_find_device_by_node(spec->np);
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data, *entry;
if (!sysmmu)
@ -1305,7 +1305,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
INIT_LIST_HEAD(&owner->controllers);
mutex_init(&owner->rpm_lock);
dev->archdata.iommu = owner;
dev_iommu_priv_set(dev, owner);
}
list_for_each_entry(entry, &owner->controllers, owner_node)

View file

@ -1174,10 +1174,7 @@ error:
if (irq != NO_IRQ)
free_irq(irq, data);
if (data) {
memset(data, 0, sizeof(struct pamu_isr_data));
kfree(data);
}
kzfree(data);
if (pamu_regs)
iounmap(pamu_regs);

View file

@ -323,7 +323,7 @@ static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
pamu_disable_liodn(info->liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
spin_lock_irqsave(&device_domain_lock, flags);
info->dev->archdata.iommu_domain = NULL;
dev_iommu_priv_set(info->dev, NULL);
kmem_cache_free(iommu_devinfo_cache, info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@ -352,7 +352,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
* Check here if the device is already attached to domain or not.
* If the device is already attached to a domain detach it.
*/
old_domain_info = dev->archdata.iommu_domain;
old_domain_info = dev_iommu_priv_get(dev);
if (old_domain_info && old_domain_info->domain != dma_domain) {
spin_unlock_irqrestore(&device_domain_lock, flags);
detach_device(dev, old_domain_info->domain);
@ -371,8 +371,8 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
* the info for the first LIODN as all
* LIODNs share the same domain
*/
if (!dev->archdata.iommu_domain)
dev->archdata.iommu_domain = info;
if (!dev_iommu_priv_get(dev))
dev_iommu_priv_set(dev, info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}

View file

@ -0,0 +1,87 @@
# SPDX-License-Identifier: GPL-2.0-only
# Intel IOMMU support
config DMAR_TABLE
bool
config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64)
select DMA_OPS
select IOMMU_API
select IOMMU_IOVA
select NEED_DMA_MAP_STATE
select DMAR_TABLE
select SWIOTLB
select IOASID
help
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
These DMA remapping devices are reported via ACPI tables
and include PCI device scope covered by these DMA
remapping devices.
config INTEL_IOMMU_DEBUGFS
bool "Export Intel IOMMU internals in Debugfs"
depends on INTEL_IOMMU && IOMMU_DEBUGFS
help
!!!WARNING!!!
DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
Expose Intel IOMMU internals in Debugfs.
This option is -NOT- intended for production environments, and should
only be enabled for debugging Intel IOMMU.
config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on INTEL_IOMMU && X86_64
select PCI_PASID
select PCI_PRI
select MMU_NOTIFIER
select IOASID
help
Shared Virtual Memory (SVM) provides a facility for devices
to access DMA resources through process address space by
means of a Process Address Space ID (PASID).
config INTEL_IOMMU_DEFAULT_ON
def_bool y
prompt "Enable Intel DMA Remapping Devices by default"
depends on INTEL_IOMMU
help
Selecting this option will enable a DMAR device at boot time if
one is found. If this option is not selected, DMAR support can
be enabled by passing intel_iommu=on to the kernel.
config INTEL_IOMMU_BROKEN_GFX_WA
bool "Workaround broken graphics drivers (going away soon)"
depends on INTEL_IOMMU && BROKEN && X86
help
Current Graphics drivers tend to use physical address
for DMA and avoid using DMA APIs. Setting this config
option permits the IOMMU driver to set a unity map for
all the OS-visible memory. Hence the driver can continue
to use physical addresses for DMA, at least until this
option is removed in the 2.6.32 kernel.
config INTEL_IOMMU_FLOPPY_WA
def_bool y
depends on INTEL_IOMMU && X86
help
Floppy disk drivers are known to bypass DMA API calls
thereby failing to work when IOMMU is enabled. This
workaround will setup a 1:1 mapping for the first
16MiB to make floppy (an ISA device) work.
config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
bool "Enable Intel IOMMU scalable mode by default"
depends on INTEL_IOMMU
help
Selecting this option will enable by default the scalable mode if
hardware presents the capability. The scalable mode is defined in
VT-d 3.0. The scalable mode capability could be checked by reading
/sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
is not selected, scalable mode support could also be enabled by
passing intel_iommu=sm_on to the kernel. If not sure, please use
the default value.

View file

@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o
obj-$(CONFIG_INTEL_IOMMU) += trace.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o

View file

@ -15,7 +15,7 @@
#include <asm/irq_remapping.h>
#include "intel-pasid.h"
#include "pasid.h"
struct tbl_walk {
u16 bus;

View file

@ -1102,6 +1102,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
drhd->iommu = iommu;
iommu->drhd = drhd;
return 0;
@ -1438,8 +1439,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
/* PASID-based device IOTLB Invalidate */
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u32 pasid, u16 qdep, u64 addr,
unsigned int size_order, u64 granu)
u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
{
unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
@ -1447,7 +1447,6 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
QI_DEV_IOTLB_PFSID(pfsid);
desc.qw1 = QI_DEV_EIOTLB_GLOB(granu);
/*
* If S bit is 0, we only flush a single page. If S bit is set,
@ -1458,9 +1457,26 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
* Max Invs Pending (MIP) is set to 0 for now until we have DIT in
* ECAP.
*/
desc.qw1 |= addr & ~mask;
if (size_order)
if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
addr, size_order);
/* Take page address */
desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
if (size_order) {
/*
* Existing 0s in address below size_order may be the least
* significant bit, we must set them to 1s to avoid having
* smaller size than desired.
*/
desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
VTD_PAGE_SHIFT);
/* Clear size_order bit to indicate size */
desc.qw1 &= ~mask;
/* Set the S bit to indicate flushing more than 1 page */
desc.qw1 |= QI_DEV_EIOTLB_SIZE;
}
qi_submit_sync(iommu, &desc, 1, 0);
}

View file

@ -48,7 +48,7 @@
#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
#include "intel-pasid.h"
#include "pasid.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@ -356,6 +356,7 @@ static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
static int intel_no_bounce;
static int iommu_skip_te_disable;
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
@ -372,7 +373,7 @@ struct device_domain_info *get_domain_info(struct device *dev)
if (!dev)
return NULL;
info = dev->archdata.iommu;
info = dev_iommu_priv_get(dev);
if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
info == DEFER_DEVICE_DOMAIN_INFO))
return NULL;
@ -743,12 +744,12 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
static int iommu_dummy(struct device *dev)
{
return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
return dev_iommu_priv_get(dev) == DUMMY_DEVICE_DOMAIN_INFO;
}
static bool attach_deferred(struct device *dev)
{
return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
}
/**
@ -778,16 +779,16 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
return false;
}
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
struct pci_dev *pdev = NULL;
struct intel_iommu *iommu;
struct device *tmp;
struct pci_dev *pdev = NULL;
u16 segment = 0;
int i;
if (iommu_dummy(dev))
if (!dev || iommu_dummy(dev))
return NULL;
if (dev_is_pci(dev)) {
@ -818,8 +819,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
if (pdev && pdev->is_virtfn)
goto got_pdev;
*bus = drhd->devices[i].bus;
*devfn = drhd->devices[i].devfn;
if (bus && devfn) {
*bus = drhd->devices[i].bus;
*devfn = drhd->devices[i].devfn;
}
goto out;
}
@ -829,8 +832,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
if (pdev && drhd->include_all) {
got_pdev:
*bus = pdev->bus->number;
*devfn = pdev->devfn;
if (bus && devfn) {
*bus = pdev->bus->number;
*devfn = pdev->devfn;
}
goto out;
}
}
@ -1629,6 +1634,10 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
u32 sts;
unsigned long flag;
if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
(cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
return;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->gcmd &= ~DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
@ -2420,7 +2429,7 @@ static inline void unlink_domain_info(struct device_domain_info *info)
list_del(&info->link);
list_del(&info->global);
if (info->dev)
info->dev->archdata.iommu = NULL;
dev_iommu_priv_set(info->dev, NULL);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
@ -2453,7 +2462,7 @@ static void do_deferred_attach(struct device *dev)
{
struct iommu_domain *domain;
dev->archdata.iommu = NULL;
dev_iommu_priv_set(dev, NULL);
domain = iommu_get_domain_for_dev(dev);
if (domain)
intel_iommu_attach_device(domain, dev);
@ -2599,7 +2608,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
list_add(&info->link, &domain->devices);
list_add(&info->global, &device_domain_list);
if (dev)
dev->archdata.iommu = info;
dev_iommu_priv_set(dev, info);
spin_unlock_irqrestore(&device_domain_lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
@ -4004,7 +4013,7 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
dev_iommu_priv_set(&pdev->dev, DUMMY_DEVICE_DOMAIN_INFO);
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
@ -4039,11 +4048,12 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
drhd->gfx_dedicated = 1;
if (!dmar_map_gfx) {
drhd->ignored = 1;
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev)
dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
dev_iommu_priv_set(dev, DUMMY_DEVICE_DOMAIN_INFO);
}
}
}
@ -5146,11 +5156,10 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
struct device *dev)
{
int ret;
u8 bus, devfn;
unsigned long flags;
struct intel_iommu *iommu;
iommu = device_to_iommu(dev, &bus, &devfn);
iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return -ENODEV;
@ -5236,9 +5245,8 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu;
int addr_width;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return -ENODEV;
@ -5416,7 +5424,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
sid = PCI_DEVID(bus, devfn);
/* Size is only valid in address selective invalidation */
if (inv_info->granularity != IOMMU_INV_GRANU_PASID)
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
size = to_vtd_size(inv_info->addr_info.granule_size,
inv_info->addr_info.nb_granules);
@ -5425,6 +5433,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
IOMMU_CACHE_INV_TYPE_NR) {
int granu = 0;
u64 pasid = 0;
u64 addr = 0;
granu = to_vtd_granularity(cache_type, inv_info->granularity);
if (granu == -EINVAL) {
@ -5446,13 +5455,12 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
switch (BIT(cache_type)) {
case IOMMU_CACHE_INV_TYPE_IOTLB:
/* HW will ignore LSB bits based on address mask */
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
size &&
(inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
pr_err_ratelimited("Address out of range, 0x%llx, size order %llu\n",
pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
inv_info->addr_info.addr, size);
ret = -ERANGE;
goto out_unlock;
}
/*
@ -5464,25 +5472,35 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
(granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
if (!info->ats_enabled)
break;
/*
* Always flush device IOTLB if ATS is enabled. vIOMMU
* in the guest may assume IOTLB flush is inclusive,
* which is more efficient.
*/
if (info->ats_enabled)
qi_flush_dev_iotlb_pasid(iommu, sid,
info->pfsid, pasid,
info->ats_qdep,
inv_info->addr_info.addr,
size, granu);
break;
fallthrough;
case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
/*
* PASID based device TLB invalidation does not support
* IOMMU_INV_GRANU_PASID granularity but only supports
* IOMMU_INV_GRANU_ADDR.
* The equivalent here is to set the size to cover the entire
* 64-bit address range. The user only provides PASID info
* without address info, so we set addr to 0.
*/
if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
size = 64 - VTD_PAGE_SHIFT;
addr = 0;
} else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
addr = inv_info->addr_info.addr;
}
if (info->ats_enabled)
qi_flush_dev_iotlb_pasid(iommu, sid,
info->pfsid, pasid,
info->ats_qdep,
inv_info->addr_info.addr,
size, granu);
info->ats_qdep, addr,
size);
else
pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
break;
@ -5658,14 +5676,13 @@ static bool intel_iommu_capable(enum iommu_cap cap)
static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return ERR_PTR(-ENODEV);
if (translation_pre_enabled(iommu))
dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
return &iommu->iommu;
}
@ -5673,9 +5690,8 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
static void intel_iommu_release_device(struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return;
@ -5825,37 +5841,14 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev)
return generic_device_group(dev);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
if (iommu_dummy(dev)) {
dev_warn(dev,
"No IOMMU translation for device; cannot enable SVM\n");
return NULL;
}
iommu = device_to_iommu(dev, &bus, &devfn);
if ((!iommu)) {
dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
return NULL;
}
return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
static int intel_iommu_enable_auxd(struct device *dev)
{
struct device_domain_info *info;
struct intel_iommu *iommu;
unsigned long flags;
u8 bus, devfn;
int ret;
iommu = device_to_iommu(dev, &bus, &devfn);
iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu || dmar_disabled)
return -EINVAL;
@ -6080,6 +6073,7 @@ const struct iommu_ops intel_iommu_ops = {
.sva_bind = intel_svm_bind,
.sva_unbind = intel_svm_unbind,
.sva_get_pasid = intel_svm_get_pasid,
.page_response = intel_svm_page_response,
#endif
};
@ -6182,6 +6176,27 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_g
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
{
unsigned short ver;
if (!IS_GFX_DEVICE(dev))
return;
ver = (dev->device >> 8) & 0xff;
if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
ver != 0x4e && ver != 0x8a && ver != 0x98 &&
ver != 0x9a)
return;
if (risky_device(dev))
return;
pci_info(dev, "Skip IOMMU disabling for graphics\n");
iommu_skip_te_disable = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
/* On Tylersburg chipsets, some BIOSes have been known to enable the
ISOCH DMAR unit for the Azalia sound device, but not give it any
TLB entries, which causes it to deadlock. Check for that. We do


@ -19,7 +19,7 @@
#include <linux/pci-ats.h>
#include <linux/spinlock.h>
#include "intel-pasid.h"
#include "pasid.h"
/*
* Intel IOMMU system wide PASID name space:
@ -486,7 +486,16 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
qdep = info->ats_qdep;
pfsid = info->pfsid;
qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
/*
* When PASID 0 is used, it indicates RID2PASID (a DMA request w/o PASID),
* so a devTLB flush w/o PASID should be used. For a non-zero PASID under
* SVA usage, the device could do DMA with multiple PASIDs; it is more
* efficient to flush the devTLB entries specific to that PASID.
*/
if (pasid == PASID_RID2PASID)
qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
else
qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* intel-pasid.h - PASID idr, table and entry header
* pasid.h - PASID idr, table and entry header
*
* Copyright (C) 2018 Intel Corporation
*


@ -20,7 +20,7 @@
#include <linux/ioasid.h>
#include <asm/page.h>
#include "intel-pasid.h"
#include "pasid.h"
static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, int pasid);
@ -228,13 +228,57 @@ static LIST_HEAD(global_svm_list);
list_for_each_entry((sdev), &(svm)->devs, list) \
if ((d) != (sdev)->dev) {} else
static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
struct intel_svm **rsvm,
struct intel_svm_dev **rsdev)
{
struct intel_svm_dev *d, *sdev = NULL;
struct intel_svm *svm;
/* The caller should hold the pasid_mutex lock */
if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
return -EINVAL;
if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
return -EINVAL;
svm = ioasid_find(NULL, pasid, NULL);
if (IS_ERR(svm))
return PTR_ERR(svm);
if (!svm)
goto out;
/*
* If we found an svm for the PASID, there must be at least one device
* bound to it.
*/
if (WARN_ON(list_empty(&svm->devs)))
return -EINVAL;
rcu_read_lock();
list_for_each_entry_rcu(d, &svm->devs, list) {
if (d->dev == dev) {
sdev = d;
break;
}
}
rcu_read_unlock();
out:
*rsvm = svm;
*rsdev = sdev;
return 0;
}
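/*
 * Illustrative caller pattern for the helper above (a hedged sketch, not
 * part of the series): callers hold pasid_mutex, resolve the (svm, sdev)
 * pair, and bail out when the PASID is unknown or the device was never
 * bound, mirroring the bind/unbind paths below.
 */
static int example_svm_lookup(struct device *dev, unsigned int pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_svm *svm;
	int ret;

	mutex_lock(&pasid_mutex);
	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
	if (!ret && (!svm || !sdev))
		ret = -ENODEV;	/* nothing bound for this PASID */
	mutex_unlock(&pasid_mutex);

	return ret;
}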
int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct iommu_gpasid_bind_data *data)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev = NULL;
struct dmar_domain *dmar_domain;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
struct intel_svm *svm = NULL;
int ret = 0;
if (WARN_ON(!iommu) || !data)
@ -261,39 +305,23 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
dmar_domain = to_dmar_domain(domain);
mutex_lock(&pasid_mutex);
svm = ioasid_find(NULL, data->hpasid, NULL);
if (IS_ERR(svm)) {
ret = PTR_ERR(svm);
ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
if (ret)
goto out;
if (sdev) {
/*
* Do not allow multiple bindings of the same device-PASID since
* there is only one set of second-level (SL) page tables per PASID.
* We may revisit this once sharing a PGD across domains is supported.
*/
dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
svm->pasid);
ret = -EBUSY;
goto out;
}
if (svm) {
/*
* If we found svm for the PASID, there must be at
* least one device bond, otherwise svm should be freed.
*/
if (WARN_ON(list_empty(&svm->devs))) {
ret = -EINVAL;
goto out;
}
for_each_svm_dev(sdev, svm, dev) {
/*
* For devices with aux domains, we should allow
* multiple bind calls with the same PASID and pdev.
*/
if (iommu_dev_feature_enabled(dev,
IOMMU_DEV_FEAT_AUX)) {
sdev->users++;
} else {
dev_warn_ratelimited(dev,
"Already bound with PASID %u\n",
svm->pasid);
ret = -EBUSY;
}
goto out;
}
} else {
if (!svm) {
/* We come here when the PASID has never been bound to a device. */
svm = kzalloc(sizeof(*svm), GFP_KERNEL);
if (!svm) {
@ -373,28 +401,20 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
int intel_svm_unbind_gpasid(struct device *dev, int pasid)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev;
struct intel_svm *svm;
int ret = -EINVAL;
int ret;
if (WARN_ON(!iommu))
return -EINVAL;
mutex_lock(&pasid_mutex);
svm = ioasid_find(NULL, pasid, NULL);
if (!svm) {
ret = -EINVAL;
ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
if (ret)
goto out;
}
if (IS_ERR(svm)) {
ret = PTR_ERR(svm);
goto out;
}
for_each_svm_dev(sdev, svm, dev) {
ret = 0;
if (sdev) {
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
sdev->users--;
if (!sdev->users) {
@ -418,7 +438,6 @@ int intel_svm_unbind_gpasid(struct device *dev, int pasid)
kfree(svm);
}
}
break;
}
out:
mutex_unlock(&pasid_mutex);
@ -430,7 +449,7 @@ static int
intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops,
struct mm_struct *mm, struct intel_svm_dev **sd)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
@ -596,7 +615,7 @@ success:
if (sd)
*sd = sdev;
ret = 0;
out:
out:
return ret;
}
@ -608,21 +627,15 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid)
struct intel_svm *svm;
int ret = -EINVAL;
iommu = intel_svm_device_to_iommu(dev);
iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
goto out;
svm = ioasid_find(NULL, pasid, NULL);
if (!svm)
ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
if (ret)
goto out;
if (IS_ERR(svm)) {
ret = PTR_ERR(svm);
goto out;
}
for_each_svm_dev(sdev, svm, dev) {
ret = 0;
if (sdev) {
sdev->users--;
if (!sdev->users) {
list_del_rcu(&sdev->list);
@ -651,10 +664,8 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid)
kfree(svm);
}
}
break;
}
out:
out:
return ret;
}
@ -800,8 +811,63 @@ qi_retry:
}
}
static int prq_to_iommu_prot(struct page_req_dsc *req)
{
int prot = 0;
if (req->rd_req)
prot |= IOMMU_FAULT_PERM_READ;
if (req->wr_req)
prot |= IOMMU_FAULT_PERM_WRITE;
if (req->exe_req)
prot |= IOMMU_FAULT_PERM_EXEC;
if (req->pm_req)
prot |= IOMMU_FAULT_PERM_PRIV;
return prot;
}
static int
intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
{
struct iommu_fault_event event;
if (!dev || !dev_is_pci(dev))
return -ENODEV;
/* Fill in event data for device specific processing */
memset(&event, 0, sizeof(struct iommu_fault_event));
event.fault.type = IOMMU_FAULT_PAGE_REQ;
event.fault.prm.addr = desc->addr;
event.fault.prm.pasid = desc->pasid;
event.fault.prm.grpid = desc->prg_index;
event.fault.prm.perm = prq_to_iommu_prot(desc);
if (desc->lpig)
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
if (desc->pasid_present) {
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
}
if (desc->priv_data_present) {
/*
* Set the last-page-in-group bit if private data is present:
* a page response is then required, just as it is for LPIG.
* iommu_report_device_fault() doesn't understand this vendor-
* specific requirement, thus we set last_page as a workaround.
*/
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
memcpy(event.fault.prm.private_data, desc->priv_data,
sizeof(desc->priv_data));
}
return iommu_report_device_fault(dev, &event);
}
static irqreturn_t prq_event_thread(int irq, void *d)
{
struct intel_svm_dev *sdev = NULL;
struct intel_iommu *iommu = d;
struct intel_svm *svm = NULL;
int head, tail, handled = 0;
@ -813,7 +879,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
while (head != tail) {
struct intel_svm_dev *sdev;
struct vm_area_struct *vma;
struct page_req_dsc *req;
struct qi_desc resp;
@ -849,6 +914,20 @@ static irqreturn_t prq_event_thread(int irq, void *d)
}
}
if (!sdev || sdev->sid != req->rid) {
struct intel_svm_dev *t;
sdev = NULL;
rcu_read_lock();
list_for_each_entry_rcu(t, &svm->devs, list) {
if (t->sid == req->rid) {
sdev = t;
break;
}
}
rcu_read_unlock();
}
result = QI_RESP_INVALID;
/* Since we're using init_mm.pgd directly, we should never take
* any faults on kernel addresses. */
@ -859,6 +938,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!is_canonical_address(address))
goto bad_req;
/*
* If the PRQ is to be handled outside the IOMMU driver, via a
* receiver of the fault notifiers, we skip the page response here.
*/
if (svm->flags & SVM_FLAG_GUEST_MODE) {
if (sdev && !intel_svm_prq_report(sdev->dev, req))
goto prq_advance;
else
goto bad_req;
}
/* If the mm is already defunct, don't handle faults. */
if (!mmget_not_zero(svm->mm))
goto bad_req;
@ -877,24 +967,11 @@ static irqreturn_t prq_event_thread(int irq, void *d)
goto invalid;
result = QI_RESP_SUCCESS;
invalid:
invalid:
mmap_read_unlock(svm->mm);
mmput(svm->mm);
bad_req:
/* Accounting for major/minor faults? */
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
if (sdev->sid == req->rid)
break;
}
/* Other devices can go away, but the drivers are not permitted
* to unbind while any page faults might be in flight. So it's
* OK to drop the 'lock' here now we have it. */
rcu_read_unlock();
if (WARN_ON(&sdev->list == &svm->devs))
sdev = NULL;
bad_req:
WARN_ON(!sdev);
if (sdev && sdev->ops && sdev->ops->fault_cb) {
int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
(req->exe_req << 1) | (req->pm_req);
@ -905,7 +982,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
and these can be NULL. Do not use them below this point! */
sdev = NULL;
svm = NULL;
no_pasid:
no_pasid:
if (req->lpig || req->priv_data_present) {
/*
* Per VT-d spec. v3.0 ch7.7, system software must
@ -930,6 +1007,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
resp.qw3 = 0;
qi_submit_sync(iommu, &resp, 1, 0);
}
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
@ -1000,3 +1078,102 @@ int intel_svm_get_pasid(struct iommu_sva *sva)
return pasid;
}
int intel_svm_page_response(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg)
{
struct iommu_fault_page_request *prm;
struct intel_svm_dev *sdev = NULL;
struct intel_svm *svm = NULL;
struct intel_iommu *iommu;
bool private_present;
bool pasid_present;
bool last_page;
u8 bus, devfn;
int ret = 0;
u16 sid;
if (!dev || !dev_is_pci(dev))
return -ENODEV;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
if (!msg || !evt)
return -EINVAL;
mutex_lock(&pasid_mutex);
prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
if (!pasid_present) {
ret = -EINVAL;
goto out;
}
if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
ret = -EINVAL;
goto out;
}
ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
if (ret || !sdev) {
ret = -ENODEV;
goto out;
}
/*
* For responses from userspace, we need to make sure that the
* pasid has been bound to its mm.
*/
if (svm->flags & SVM_FLAG_GUEST_MODE) {
struct mm_struct *mm;
mm = get_task_mm(current);
if (!mm) {
ret = -EINVAL;
goto out;
}
if (mm != svm->mm) {
ret = -ENODEV;
mmput(mm);
goto out;
}
mmput(mm);
}
/*
* Per VT-d spec v3.0 ch7.7, system software must respond
* with a page group response if private data is present (PDP)
* or the last page in group (LPIG) bit is set. This is an
* additional VT-d requirement beyond the PCI ATS spec.
*/
if (last_page || private_present) {
struct qi_desc desc;
desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
QI_PGRP_PASID_P(pasid_present) |
QI_PGRP_PDP(private_present) |
QI_PGRP_RESP_CODE(msg->code) |
QI_PGRP_RESP_TYPE;
desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
desc.qw2 = 0;
desc.qw3 = 0;
if (private_present)
memcpy(&desc.qw2, prm->private_data,
sizeof(prm->private_data));
qi_submit_sync(iommu, &desc, 1, 0);
}
out:
mutex_unlock(&pasid_mutex);
return ret;
}
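
A hedged sketch of how this response path is consumed: a driver registers a device fault handler, receives the page requests reported by intel_svm_prq_report() above, and answers each one through iommu_page_response(), which reaches intel_svm_page_response() via intel_iommu_ops.page_response. The handler name and the page-resolution step are hypothetical, and the sketch assumes the request carried a PASID:

static int example_prq_handler(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;
	struct iommu_page_response resp = {
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.grpid	 = fault->prm.grpid,
		.pasid	 = fault->prm.pasid,
		.flags	 = IOMMU_PAGE_RESP_PASID_VALID,
		.code	 = IOMMU_PAGE_RESP_SUCCESS,
	};

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		return -EOPNOTSUPP;

	/* ... resolve the faulting address here (hypothetical step) ... */

	return iommu_page_response(dev, &resp);
}

/* Registered at probe time, e.g.: */
/* iommu_register_device_fault_handler(dev, example_prq_handler, dev); */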


@ -470,7 +470,7 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
phys_addr_t paddr, size_t size, int prot,
int lvl, arm_v7s_iopte *ptep)
int lvl, arm_v7s_iopte *ptep, gfp_t gfp)
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_v7s_iopte pte, *cptep;
@ -491,7 +491,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = READ_ONCE(*ptep);
if (!pte) {
cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data);
if (!cptep)
return -ENOMEM;
@ -512,11 +512,11 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
}
/* Rinse, repeat */
return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}
static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable *iop = &data->iop;
@ -530,7 +530,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
paddr >= (1ULL << data->iop.cfg.oas)))
return -ERANGE;
ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp);
/*
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
@ -922,12 +922,12 @@ static int __init arm_v7s_do_selftests(void)
if (ops->map(ops, iova, iova, size, IOMMU_READ |
IOMMU_WRITE |
IOMMU_NOEXEC |
IOMMU_CACHE))
IOMMU_CACHE, GFP_KERNEL))
return __FAIL(ops);
/* Overlapping mappings */
if (!ops->map(ops, iova, iova + size, size,
IOMMU_READ | IOMMU_NOEXEC))
IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
@ -946,7 +946,7 @@ static int __init arm_v7s_do_selftests(void)
return __FAIL(ops);
/* Remap of partial unmap */
if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova_start + size + 42)
@ -967,7 +967,7 @@ static int __init arm_v7s_do_selftests(void)
return __FAIL(ops);
/* Remap full block */
if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
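
The new gfp argument threaded through the io-pgtable map path above lets callers that can sleep avoid GFP_ATOMIC table allocations. A hedged sketch of how a driver's iommu_ops->map callback forwards it (struct my_domain and to_my_domain() are illustrative names; the ipmmu and mtk drivers later in this series do exactly this, while msm keeps GFP_ATOMIC because it maps under a spinlock):

static int my_iommu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct my_domain *dom = to_my_domain(domain);	/* hypothetical helpers */

	if (!dom->iop)
		return -ENODEV;

	/* Page-table memory for this mapping may now be allocated with 'gfp'. */
	return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}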


@ -355,7 +355,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
int lvl, arm_lpae_iopte *ptep)
int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
{
arm_lpae_iopte *cptep, pte;
size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
@ -376,7 +376,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = READ_ONCE(*ptep);
if (!pte) {
cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
if (!cptep)
return -ENOMEM;
@ -396,7 +396,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
}
/* Rinse, repeat */
return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
@ -438,9 +438,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
else if (prot & IOMMU_SYS_CACHE_ONLY)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
if (prot & IOMMU_CACHE)
@ -461,7 +458,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
}
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int iommu_prot)
phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
@ -483,7 +480,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
return -ERANGE;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
/*
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
@ -1178,12 +1175,12 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
if (ops->map(ops, iova, iova, size, IOMMU_READ |
IOMMU_WRITE |
IOMMU_NOEXEC |
IOMMU_CACHE))
IOMMU_CACHE, GFP_KERNEL))
return __FAIL(ops, i);
/* Overlapping mappings */
if (!ops->map(ops, iova, iova + size, size,
IOMMU_READ | IOMMU_NOEXEC))
IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
@ -1198,7 +1195,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
return __FAIL(ops, i);
/* Remap of partial unmap */
if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
@ -1216,7 +1213,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
return __FAIL(ops, i);
/* Remap full block */
if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))


@ -383,8 +383,8 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
* Elements are sorted by start address and overlapping segments
* of the same type are merged.
*/
int iommu_insert_resv_region(struct iommu_resv_region *new,
struct list_head *regions)
static int iommu_insert_resv_region(struct iommu_resv_region *new,
struct list_head *regions)
{
struct iommu_resv_region *iter, *tmp, *nr, *top;
LIST_HEAD(stack);
@ -1185,11 +1185,12 @@ EXPORT_SYMBOL_GPL(iommu_report_device_fault);
int iommu_page_response(struct device *dev,
struct iommu_page_response *msg)
{
bool pasid_valid;
bool needs_pasid;
int ret = -EINVAL;
struct iommu_fault_event *evt;
struct iommu_fault_page_request *prm;
struct dev_iommu *param = dev->iommu;
bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
if (!domain || !domain->ops->page_response)
@ -1214,14 +1215,24 @@ int iommu_page_response(struct device *dev,
*/
list_for_each_entry(evt, &param->fault_param->faults, list) {
prm = &evt->fault.prm;
pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
if ((pasid_valid && prm->pasid != msg->pasid) ||
prm->grpid != msg->grpid)
if (prm->grpid != msg->grpid)
continue;
/* Sanitize the reply */
msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
/*
* If the PASID is required, the corresponding request is
* matched using the group ID, the PASID valid bit and the PASID
* value. Otherwise, only the group ID is used to match the
* request with the response.
*/
needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
continue;
if (!needs_pasid && has_pasid) {
/* No big deal, just clear it. */
msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
msg->pasid = 0;
}
ret = domain->ops->page_response(dev, evt, msg);
list_del(&evt->list);
@ -2168,8 +2179,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
return pgsize;
}
int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova;
@ -2319,9 +2330,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot,
gfp_t gfp)
static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot,
gfp_t gfp)
{
size_t len = 0, mapped = 0;
phys_addr_t start;


@ -811,7 +811,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
for (i = 0 ; i < mag->size; ++i) {
struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
BUG_ON(!iova);
if (WARN_ON(!iova))
continue;
private_free_iova(iovad, iova);
}


@ -3,7 +3,7 @@
* IOMMU API for Renesas VMSA-compatible IPMMU
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* Copyright (C) 2014 Renesas Electronics Corporation
* Copyright (C) 2014-2020 Renesas Electronics Corporation
*/
#include <linux/bitmap.h>
@ -686,7 +686,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
if (!domain)
return -ENODEV;
return domain->iop->map(domain->iop, iova, paddr, size, prot);
return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
}
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
@ -739,7 +739,9 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
{ .soc_id = "r8a774a1", },
{ .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
{ .soc_id = "r8a774e1", },
{ .soc_id = "r8a7795", },
{ .soc_id = "r8a77961", },
{ .soc_id = "r8a7796", },
{ .soc_id = "r8a77965", },
{ .soc_id = "r8a77970", },
@ -751,7 +753,9 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
{ .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
{ .soc_id = "r8a774e1", },
{ .soc_id = "r8a7795", .revision = "ES3.*" },
{ .soc_id = "r8a77961", },
{ .soc_id = "r8a77965", },
{ .soc_id = "r8a77990", },
{ .soc_id = "r8a77995", },
@ -962,12 +966,18 @@ static const struct of_device_id ipmmu_of_ids[] = {
}, {
.compatible = "renesas,ipmmu-r8a774c0",
.data = &ipmmu_features_rcar_gen3,
}, {
.compatible = "renesas,ipmmu-r8a774e1",
.data = &ipmmu_features_rcar_gen3,
}, {
.compatible = "renesas,ipmmu-r8a7795",
.data = &ipmmu_features_rcar_gen3,
}, {
.compatible = "renesas,ipmmu-r8a7796",
.data = &ipmmu_features_rcar_gen3,
}, {
.compatible = "renesas,ipmmu-r8a77961",
.data = &ipmmu_features_rcar_gen3,
}, {
.compatible = "renesas,ipmmu-r8a77965",
.data = &ipmmu_features_rcar_gen3,


@ -491,7 +491,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
int ret;
spin_lock_irqsave(&priv->pgtlock, flags);
ret = priv->iop->map(priv->iop, iova, pa, len, prot);
ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
spin_unlock_irqrestore(&priv->pgtlock, flags);
return ret;
@ -593,14 +593,14 @@ static void insert_iommu_master(struct device *dev,
struct msm_iommu_dev **iommu,
struct of_phandle_args *spec)
{
struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
int sid;
if (list_empty(&(*iommu)->ctx_list)) {
master = kzalloc(sizeof(*master), GFP_ATOMIC);
master->of_node = dev->of_node;
list_add(&master->list, &(*iommu)->ctx_list);
dev->archdata.iommu = master;
dev_iommu_priv_set(dev, master);
}
for (sid = 0; sid < master->num_mids; sid++)


@ -37,12 +37,18 @@
#define REG_MMU_INVLD_START_A 0x024
#define REG_MMU_INVLD_END_A 0x028
#define REG_MMU_INV_SEL 0x038
#define REG_MMU_INV_SEL_GEN2 0x02c
#define REG_MMU_INV_SEL_GEN1 0x038
#define F_INVLD_EN0 BIT(0)
#define F_INVLD_EN1 BIT(1)
#define REG_MMU_STANDARD_AXI_MODE 0x048
#define REG_MMU_MISC_CTRL 0x048
#define F_MMU_IN_ORDER_WR_EN_MASK (BIT(1) | BIT(17))
#define F_MMU_STANDARD_AXI_MODE_MASK (BIT(3) | BIT(19))
#define REG_MMU_DCM_DIS 0x050
#define REG_MMU_WR_LEN_CTRL 0x054
#define F_MMU_WR_THROT_DIS_MASK (BIT(5) | BIT(21))
#define REG_MMU_CTRL_REG 0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4)
@ -88,10 +94,12 @@
#define REG_MMU1_INVLD_PA 0x148
#define REG_MMU0_INT_ID 0x150
#define REG_MMU1_INT_ID 0x154
#define F_MMU_INT_ID_COMM_ID(a) (((a) >> 9) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3)
#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
#define MTK_PROTECT_PA_ALIGN 128
#define MTK_PROTECT_PA_ALIGN 256
/*
* Get the local arbiter ID and the portid within the larb arbiter
@ -100,6 +108,18 @@
#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
#define HAS_4GB_MODE BIT(0)
/* HW will use the EMI clock if there is no "bclk". */
#define HAS_BCLK BIT(1)
#define HAS_VLD_PA_RNG BIT(2)
#define RESET_AXI BIT(3)
#define OUT_ORDER_WR_EN BIT(4)
#define HAS_SUB_COMM BIT(5)
#define WR_THROT_EN BIT(6)
#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
((((pdata)->flags) & (_x)) == (_x))
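/*
 * Note on the helper above (illustrative usage, not part of the series):
 * it checks that *all* bits in _x are present, so a combined query such as
 *
 *	MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK | HAS_4GB_MODE)
 *
 * is true only when both capabilities are set in the platform data.
 */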
struct mtk_iommu_domain {
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
@ -165,7 +185,7 @@ static void mtk_iommu_tlb_flush_all(void *cookie)
for_each_m4u(data) {
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + REG_MMU_INV_SEL);
data->base + data->plat_data->inv_sel_reg);
writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
wmb(); /* Make sure the tlb flush all done */
}
@ -182,7 +202,7 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
for_each_m4u(data) {
spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + REG_MMU_INV_SEL);
data->base + data->plat_data->inv_sel_reg);
writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
writel_relaxed(iova + size - 1,
@ -226,7 +246,7 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
struct mtk_iommu_data *data = dev_id;
struct mtk_iommu_domain *dom = data->m4u_dom;
u32 int_state, regval, fault_iova, fault_pa;
unsigned int fault_larb, fault_port;
unsigned int fault_larb, fault_port, sub_comm = 0;
bool layer, write;
/* Read error info from registers */
@ -242,10 +262,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
}
layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
fault_larb = F_MMU_INT_ID_LARB_ID(regval);
fault_port = F_MMU_INT_ID_PORT_ID(regval);
fault_larb = data->plat_data->larbid_remap[fault_larb];
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
fault_larb = F_MMU_INT_ID_COMM_ID(regval);
sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
} else {
fault_larb = F_MMU_INT_ID_LARB_ID(regval);
}
fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
@ -397,7 +421,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
paddr |= BIT_ULL(32);
/* Synchronize with the tlb_lock */
return dom->iop->map(dom->iop, iova, paddr, size, prot);
return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
@ -532,11 +556,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
return ret;
}
if (data->plat_data->m4u_plat == M4U_MT8173)
if (data->plat_data->m4u_plat == M4U_MT8173) {
regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
else
regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
} else {
regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
}
writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
regval = F_L2_MULIT_HIT_EN |
@ -563,7 +589,8 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
upper_32_bits(data->protect_base);
writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
if (data->enable_4GB &&
MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
/*
* If 4GB mode is enabled, the valid PA range is from
* 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bit[32:30].
@ -572,9 +599,23 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
}
writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
/* write command throttling mode */
regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
regval &= ~F_MMU_WR_THROT_DIS_MASK;
writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
}
if (data->plat_data->reset_axi)
writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
/* The register is called STANDARD_AXI_MODE in this case */
regval = 0;
} else {
regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
}
writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);
if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
dev_name(data->dev), (void *)data)) {
@ -616,7 +657,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
/* Whether the current dram is over 4GB */
data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
if (!data->plat_data->has_4gb_mode)
if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
data->enable_4GB = false;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@ -629,7 +670,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (data->irq < 0)
return data->irq;
if (data->plat_data->has_bclk) {
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
data->bclk = devm_clk_get(dev, "bclk");
if (IS_ERR(data->bclk))
return PTR_ERR(data->bclk);
@ -718,8 +759,8 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
struct mtk_iommu_suspend_reg *reg = &data->reg;
void __iomem *base = data->base;
reg->standard_axi_mode = readl_relaxed(base +
REG_MMU_STANDARD_AXI_MODE);
reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
@ -743,8 +784,8 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
return ret;
}
writel_relaxed(reg->standard_axi_mode,
base + REG_MMU_STANDARD_AXI_MODE);
writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
@ -763,28 +804,35 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = {
static const struct mtk_iommu_plat_data mt2712_data = {
.m4u_plat = M4U_MT2712,
.has_4gb_mode = true,
.has_bclk = true,
.has_vld_pa_rng = true,
.larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
.flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
};
static const struct mtk_iommu_plat_data mt6779_data = {
.m4u_plat = M4U_MT6779,
.flags = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
.inv_sel_reg = REG_MMU_INV_SEL_GEN2,
.larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};
static const struct mtk_iommu_plat_data mt8173_data = {
.m4u_plat = M4U_MT8173,
.has_4gb_mode = true,
.has_bclk = true,
.reset_axi = true,
.larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
.flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
};
static const struct mtk_iommu_plat_data mt8183_data = {
.m4u_plat = M4U_MT8183,
.reset_axi = true,
.larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
.flags = RESET_AXI,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
.larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
};
static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
{}


@ -15,34 +15,39 @@
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <soc/mediatek/smi.h>
#define MTK_LARB_COM_MAX 8
#define MTK_LARB_SUBCOM_MAX 4
struct mtk_iommu_suspend_reg {
u32 standard_axi_mode;
union {
u32 standard_axi_mode;/* v1 */
u32 misc_ctrl;/* v2 */
};
u32 dcm_dis;
u32 ctrl_reg;
u32 int_control0;
u32 int_main_control;
u32 ivrp_paddr;
u32 vld_pa_rng;
u32 wr_len_ctrl;
};
enum mtk_iommu_plat {
M4U_MT2701,
M4U_MT2712,
M4U_MT6779,
M4U_MT8173,
M4U_MT8183,
};
struct mtk_iommu_plat_data {
enum mtk_iommu_plat m4u_plat;
bool has_4gb_mode;
/* HW will use the EMI clock if there isn't the "bclk". */
bool has_bclk;
bool has_vld_pa_rng;
bool reset_axi;
unsigned char larbid_remap[MTK_LARB_NR_MAX];
u32 flags;
u32 inv_sel_reg;
unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
};
struct mtk_iommu_domain;
@ -62,6 +67,8 @@ struct mtk_iommu_data {
struct iommu_device iommu;
const struct mtk_iommu_plat_data *plat_data;
struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */
struct list_head list;
struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
};


@ -269,7 +269,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
int ret;
/* Only allow the domain created internally. */
mtk_mapping = data->dev->archdata.iommu;
mtk_mapping = data->mapping;
if (mtk_mapping->domain != domain)
return 0;
@ -369,7 +369,6 @@ static int mtk_iommu_create_mapping(struct device *dev,
struct mtk_iommu_data *data;
struct platform_device *m4updev;
struct dma_iommu_mapping *mtk_mapping;
struct device *m4udev;
int ret;
if (args->args_count != 1) {
@ -401,8 +400,7 @@ static int mtk_iommu_create_mapping(struct device *dev,
return ret;
data = dev_iommu_priv_get(dev);
m4udev = data->dev;
mtk_mapping = m4udev->archdata.iommu;
mtk_mapping = data->mapping;
if (!mtk_mapping) {
/* MTK iommu support 4GB iova address space. */
mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
@ -410,7 +408,7 @@ static int mtk_iommu_create_mapping(struct device *dev,
if (IS_ERR(mtk_mapping))
return PTR_ERR(mtk_mapping);
m4udev->archdata.iommu = mtk_mapping;
data->mapping = mtk_mapping;
}
return 0;
@ -459,7 +457,7 @@ static void mtk_iommu_probe_finalize(struct device *dev)
int err;
data = dev_iommu_priv_get(dev);
mtk_mapping = data->dev->archdata.iommu;
mtk_mapping = data->mapping;
err = arm_iommu_attach_device(dev, mtk_mapping);
if (err)


@ -98,8 +98,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
mutex_lock(&iommu_debug_lock);
bytes = omap_iommu_dump_ctx(obj, p, count);
if (bytes < 0)
goto err;
bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
err:
mutex_unlock(&iommu_debug_lock);
kfree(buf);


@ -3,7 +3,7 @@
* omap iommu: tlb and pagetable primitives
*
* Copyright (C) 2008-2010 Nokia Corporation
* Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/
* Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
* Paul Mundt and Toshihiro Kobayashi
@ -71,7 +71,7 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
**/
void omap_iommu_save_ctx(struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu *obj;
u32 *p;
int i;
@ -101,7 +101,7 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
**/
void omap_iommu_restore_ctx(struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu *obj;
u32 *p;
int i;
@ -1398,7 +1398,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
static int omap_iommu_count(struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
int count = 0;
while (arch_data->iommu_dev) {
@ -1459,8 +1459,8 @@ static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu_device *iommu;
struct omap_iommu *oiommu;
int ret = 0;
@ -1524,7 +1524,7 @@ out:
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu_device *iommu = omap_domain->iommus;
struct omap_iommu *oiommu;
int i;
@ -1650,7 +1650,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
int num_iommus, i;
/*
* Allocate the archdata iommu structure for DT-based devices.
* Allocate the per-device iommu structure for DT-based devices.
*
* TODO: Simplify this when removing non-DT support completely from the
* IOMMU users.
@ -1698,7 +1698,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
of_node_put(np);
}
dev->archdata.iommu = arch_data;
dev_iommu_priv_set(dev, arch_data);
/*
* use the first IOMMU alone for the sysfs device linking.
@ -1712,19 +1712,19 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
static void omap_iommu_release_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
if (!dev->of_node || !arch_data)
return;
dev->archdata.iommu = NULL;
dev_iommu_priv_set(dev, NULL);
kfree(arch_data);
}
static struct iommu_group *omap_iommu_device_group(struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct iommu_group *group = ERR_PTR(-EINVAL);
if (!arch_data)


@ -836,7 +836,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
struct rk_iommudata *data = dev->archdata.iommu;
struct rk_iommudata *data = dev_iommu_priv_get(dev);
return data ? data->iommu : NULL;
}
@ -1059,7 +1059,7 @@ static struct iommu_device *rk_iommu_probe_device(struct device *dev)
struct rk_iommudata *data;
struct rk_iommu *iommu;
data = dev->archdata.iommu;
data = dev_iommu_priv_get(dev);
if (!data)
return ERR_PTR(-ENODEV);
@ -1073,7 +1073,7 @@ static struct iommu_device *rk_iommu_probe_device(struct device *dev)
static void rk_iommu_release_device(struct device *dev)
{
struct rk_iommudata *data = dev->archdata.iommu;
struct rk_iommudata *data = dev_iommu_priv_get(dev);
device_link_del(data->link);
}
@ -1100,7 +1100,7 @@ static int rk_iommu_of_xlate(struct device *dev,
iommu_dev = of_find_device_by_node(args->np);
data->iommu = platform_get_drvdata(iommu_dev);
dev->archdata.iommu = data;
dev_iommu_priv_set(dev, data);
platform_device_put(iommu_dev);


@ -113,8 +113,8 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
if (gart->active_domain && gart->active_domain != domain) {
ret = -EBUSY;
} else if (dev->archdata.iommu != domain) {
dev->archdata.iommu = domain;
} else if (dev_iommu_priv_get(dev) != domain) {
dev_iommu_priv_set(dev, domain);
gart->active_domain = domain;
gart->active_devices++;
}
@ -131,8 +131,8 @@ static void gart_iommu_detach_dev(struct iommu_domain *domain,
spin_lock(&gart->dom_lock);
if (dev->archdata.iommu == domain) {
dev->archdata.iommu = NULL;
if (dev_iommu_priv_get(dev) == domain) {
dev_iommu_priv_set(dev, NULL);
if (--gart->active_devices == 0)
gart->active_domain = NULL;


@ -465,7 +465,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct tegra_smmu *smmu = dev->archdata.iommu;
struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
struct tegra_smmu_as *as = to_smmu_as(domain);
struct device_node *np = dev->of_node;
struct of_phandle_args args;
@ -780,7 +780,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
* supported by the Linux kernel, so abort after the
* first match.
*/
dev->archdata.iommu = smmu;
dev_iommu_priv_set(dev, smmu);
break;
}
@ -797,7 +797,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
static void tegra_smmu_release_device(struct device *dev)
{
dev->archdata.iommu = NULL;
dev_iommu_priv_set(dev, NULL);
}
static const struct tegra_smmu_group_soc *
@ -856,7 +856,7 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct tegra_smmu *smmu = dev->archdata.iommu;
struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
struct iommu_group *group;
group = tegra_smmu_group_get(smmu, fwspec->ids[0]);


@ -9,9 +9,11 @@
#if defined(CONFIG_EXYNOS_IOMMU)
#include <linux/iommu.h>
static inline bool exynos_is_iommu_available(struct device *dev)
{
return dev->archdata.iommu != NULL;
return dev_iommu_priv_get(dev) != NULL;
}
#else


@ -239,6 +239,13 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = {
.larb_direct_to_common_mask = BIT(8) | BIT(9), /* bdpsys */
};
static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
.larb_direct_to_common_mask =
BIT(4) | BIT(6) | BIT(11) | BIT(12) | BIT(13),
/* DUMMY | IPU0 | IPU1 | CCU | MDLA */
};
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
.has_gals = true,
.config_port = mtk_smi_larb_config_port_gen2_general,
@ -259,6 +266,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
.compatible = "mediatek,mt2712-smi-larb",
.data = &mtk_smi_larb_mt2712
},
{
.compatible = "mediatek,mt6779-smi-larb",
.data = &mtk_smi_larb_mt6779
},
{
.compatible = "mediatek,mt8183-smi-larb",
.data = &mtk_smi_larb_mt8183
@ -388,6 +399,13 @@ static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
.gen = MTK_SMI_GEN2,
};
static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = {
.gen = MTK_SMI_GEN2,
.has_gals = true,
.bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
};
static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
.gen = MTK_SMI_GEN2,
.has_gals = true,
@ -408,6 +426,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
.compatible = "mediatek,mt2712-smi-common",
.data = &mtk_smi_common_gen2,
},
{
.compatible = "mediatek,mt6779-smi-common",
.data = &mtk_smi_common_mt6779,
},
{
.compatible = "mediatek,mt8183-smi-common",
.data = &mtk_smi_common_mt8183,


@ -0,0 +1,206 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2019 MediaTek Inc.
* Author: Chao Hao <chao.hao@mediatek.com>
*/
#ifndef _DTS_IOMMU_PORT_MT6779_H_
#define _DTS_IOMMU_PORT_MT6779_H_
#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
#define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1
#define M4U_LARB2_ID 2
#define M4U_LARB3_ID 3
#define M4U_LARB4_ID 4
#define M4U_LARB5_ID 5
#define M4U_LARB6_ID 6
#define M4U_LARB7_ID 7
#define M4U_LARB8_ID 8
#define M4U_LARB9_ID 9
#define M4U_LARB10_ID 10
#define M4U_LARB11_ID 11
/* larb0 */
#define M4U_PORT_DISP_POSTMASK0 MTK_M4U_ID(M4U_LARB0_ID, 0)
#define M4U_PORT_DISP_OVL0_HDR MTK_M4U_ID(M4U_LARB0_ID, 1)
#define M4U_PORT_DISP_OVL1_HDR MTK_M4U_ID(M4U_LARB0_ID, 2)
#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 3)
#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB0_ID, 4)
#define M4U_PORT_DISP_PVRIC0 MTK_M4U_ID(M4U_LARB0_ID, 5)
#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 6)
#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 7)
#define M4U_PORT_DISP_FAKE0 MTK_M4U_ID(M4U_LARB0_ID, 8)
/* larb1 */
#define M4U_PORT_DISP_OVL0_2L_HDR MTK_M4U_ID(M4U_LARB1_ID, 0)
#define M4U_PORT_DISP_OVL1_2L_HDR MTK_M4U_ID(M4U_LARB1_ID, 1)
#define M4U_PORT_DISP_OVL0_2L MTK_M4U_ID(M4U_LARB1_ID, 2)
#define M4U_PORT_DISP_OVL1_2L MTK_M4U_ID(M4U_LARB1_ID, 3)
#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB1_ID, 4)
#define M4U_PORT_MDP_PVRIC0 MTK_M4U_ID(M4U_LARB1_ID, 5)
#define M4U_PORT_MDP_PVRIC1 MTK_M4U_ID(M4U_LARB1_ID, 6)
#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB1_ID, 7)
#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB1_ID, 8)
#define M4U_PORT_MDP_WROT0_R MTK_M4U_ID(M4U_LARB1_ID, 9)
#define M4U_PORT_MDP_WROT0_W MTK_M4U_ID(M4U_LARB1_ID, 10)
#define M4U_PORT_MDP_WROT1_R MTK_M4U_ID(M4U_LARB1_ID, 11)
#define M4U_PORT_MDP_WROT1_W MTK_M4U_ID(M4U_LARB1_ID, 12)
#define M4U_PORT_DISP_FAKE1 MTK_M4U_ID(M4U_LARB1_ID, 13)
/* larb2-VDEC */
#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB2_ID, 0)
#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB2_ID, 1)
#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB2_ID, 2)
#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB2_ID, 3)
#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB2_ID, 4)
#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB2_ID, 5)
#define M4U_PORT_HW_VDEC_TILE_EXT MTK_M4U_ID(M4U_LARB2_ID, 6)
#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB2_ID, 7)
#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB2_ID, 8)
#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB2_ID, 9)
#define M4U_PORT_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(M4U_LARB2_ID, 10)
#define M4U_PORT_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(M4U_LARB2_ID, 11)
/* larb3-VENC */
#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 5)
#define M4U_PORT_VENC_NBM_RDMA_LITE MTK_M4U_ID(M4U_LARB3_ID, 6)
#define M4U_PORT_JPGENC_Y_RDMA MTK_M4U_ID(M4U_LARB3_ID, 7)
#define M4U_PORT_JPGENC_C_RDMA MTK_M4U_ID(M4U_LARB3_ID, 8)
#define M4U_PORT_JPGENC_Q_TABLE MTK_M4U_ID(M4U_LARB3_ID, 9)
#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 10)
#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 11)
#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 12)
#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 13)
#define M4U_PORT_VENC_NBM_WDMA_LITE MTK_M4U_ID(M4U_LARB3_ID, 14)
#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 15)
#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 16)
#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 17)
#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 18)
/* larb4-dummy */
/* larb5-IMG */
#define M4U_PORT_IMGI_D1 MTK_M4U_ID(M4U_LARB5_ID, 0)
#define M4U_PORT_IMGBI_D1 MTK_M4U_ID(M4U_LARB5_ID, 1)
#define M4U_PORT_DMGI_D1 MTK_M4U_ID(M4U_LARB5_ID, 2)
#define M4U_PORT_DEPI_D1 MTK_M4U_ID(M4U_LARB5_ID, 3)
#define M4U_PORT_LCEI_D1 MTK_M4U_ID(M4U_LARB5_ID, 4)
#define M4U_PORT_SMTI_D1 MTK_M4U_ID(M4U_LARB5_ID, 5)
#define M4U_PORT_SMTO_D2 MTK_M4U_ID(M4U_LARB5_ID, 6)
#define M4U_PORT_SMTO_D1 MTK_M4U_ID(M4U_LARB5_ID, 7)
#define M4U_PORT_CRZO_D1 MTK_M4U_ID(M4U_LARB5_ID, 8)
#define M4U_PORT_IMG3O_D1 MTK_M4U_ID(M4U_LARB5_ID, 9)
#define M4U_PORT_VIPI_D1 MTK_M4U_ID(M4U_LARB5_ID, 10)
#define M4U_PORT_WPE_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 11)
#define M4U_PORT_WPE_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 12)
#define M4U_PORT_WPE_WDMA MTK_M4U_ID(M4U_LARB5_ID, 13)
#define M4U_PORT_TIMGO_D1 MTK_M4U_ID(M4U_LARB5_ID, 14)
#define M4U_PORT_MFB_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 15)
#define M4U_PORT_MFB_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 16)
#define M4U_PORT_MFB_RDMA2 MTK_M4U_ID(M4U_LARB5_ID, 17)
#define M4U_PORT_MFB_RDMA3 MTK_M4U_ID(M4U_LARB5_ID, 18)
#define M4U_PORT_MFB_WDMA MTK_M4U_ID(M4U_LARB5_ID, 19)
#define M4U_PORT_RESERVE1 MTK_M4U_ID(M4U_LARB5_ID, 20)
#define M4U_PORT_RESERVE2 MTK_M4U_ID(M4U_LARB5_ID, 21)
#define M4U_PORT_RESERVE3 MTK_M4U_ID(M4U_LARB5_ID, 22)
#define M4U_PORT_RESERVE4 MTK_M4U_ID(M4U_LARB5_ID, 23)
#define M4U_PORT_RESERVE5 MTK_M4U_ID(M4U_LARB5_ID, 24)
#define M4U_PORT_RESERVE6 MTK_M4U_ID(M4U_LARB5_ID, 25)
/* larb6-IMG-VPU */
#define M4U_PORT_IMG_IPUO MTK_M4U_ID(M4U_LARB6_ID, 0)
#define M4U_PORT_IMG_IPU3O MTK_M4U_ID(M4U_LARB6_ID, 1)
#define M4U_PORT_IMG_IPUI MTK_M4U_ID(M4U_LARB6_ID, 2)
/* larb7-DVS */
#define M4U_PORT_DVS_RDMA MTK_M4U_ID(M4U_LARB7_ID, 0)
#define M4U_PORT_DVS_WDMA MTK_M4U_ID(M4U_LARB7_ID, 1)
#define M4U_PORT_DVP_RDMA MTK_M4U_ID(M4U_LARB7_ID, 2)
#define M4U_PORT_DVP_WDMA MTK_M4U_ID(M4U_LARB7_ID, 3)
/* larb8-IPESYS */
#define M4U_PORT_FDVT_RDA MTK_M4U_ID(M4U_LARB8_ID, 0)
#define M4U_PORT_FDVT_RDB MTK_M4U_ID(M4U_LARB8_ID, 1)
#define M4U_PORT_FDVT_WRA MTK_M4U_ID(M4U_LARB8_ID, 2)
#define M4U_PORT_FDVT_WRB MTK_M4U_ID(M4U_LARB8_ID, 3)
#define M4U_PORT_FE_RD0 MTK_M4U_ID(M4U_LARB8_ID, 4)
#define M4U_PORT_FE_RD1 MTK_M4U_ID(M4U_LARB8_ID, 5)
#define M4U_PORT_FE_WR0 MTK_M4U_ID(M4U_LARB8_ID, 6)
#define M4U_PORT_FE_WR1 MTK_M4U_ID(M4U_LARB8_ID, 7)
#define M4U_PORT_RSC_RDMA0 MTK_M4U_ID(M4U_LARB8_ID, 8)
#define M4U_PORT_RSC_WDMA MTK_M4U_ID(M4U_LARB8_ID, 9)
/* larb9-CAM */
#define M4U_PORT_CAM_IMGO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 0)
#define M4U_PORT_CAM_RRZO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 1)
#define M4U_PORT_CAM_LSCI_R1_C MTK_M4U_ID(M4U_LARB9_ID, 2)
#define M4U_PORT_CAM_BPCI_R1_C MTK_M4U_ID(M4U_LARB9_ID, 3)
#define M4U_PORT_CAM_YUVO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 4)
#define M4U_PORT_CAM_UFDI_R2_C MTK_M4U_ID(M4U_LARB9_ID, 5)
#define M4U_PORT_CAM_RAWI_R2_C MTK_M4U_ID(M4U_LARB9_ID, 6)
#define M4U_PORT_CAM_RAWI_R5_C MTK_M4U_ID(M4U_LARB9_ID, 7)
#define M4U_PORT_CAM_CAMSV_1 MTK_M4U_ID(M4U_LARB9_ID, 8)
#define M4U_PORT_CAM_CAMSV_2 MTK_M4U_ID(M4U_LARB9_ID, 9)
#define M4U_PORT_CAM_CAMSV_3 MTK_M4U_ID(M4U_LARB9_ID, 10)
#define M4U_PORT_CAM_CAMSV_4 MTK_M4U_ID(M4U_LARB9_ID, 11)
#define M4U_PORT_CAM_CAMSV_5 MTK_M4U_ID(M4U_LARB9_ID, 12)
#define M4U_PORT_CAM_CAMSV_6 MTK_M4U_ID(M4U_LARB9_ID, 13)
#define M4U_PORT_CAM_AAO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 14)
#define M4U_PORT_CAM_AFO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 15)
#define M4U_PORT_CAM_FLKO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 16)
#define M4U_PORT_CAM_LCESO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 17)
#define M4U_PORT_CAM_CRZO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 18)
#define M4U_PORT_CAM_LTMSO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 19)
#define M4U_PORT_CAM_RSSO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 20)
#define M4U_PORT_CAM_CCUI MTK_M4U_ID(M4U_LARB9_ID, 21)
#define M4U_PORT_CAM_CCUO MTK_M4U_ID(M4U_LARB9_ID, 22)
#define M4U_PORT_CAM_FAKE MTK_M4U_ID(M4U_LARB9_ID, 23)
/* larb10-CAM_A */
#define M4U_PORT_CAM_IMGO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 0)
#define M4U_PORT_CAM_RRZO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 1)
#define M4U_PORT_CAM_LSCI_R1_A MTK_M4U_ID(M4U_LARB10_ID, 2)
#define M4U_PORT_CAM_BPCI_R1_A MTK_M4U_ID(M4U_LARB10_ID, 3)
#define M4U_PORT_CAM_YUVO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 4)
#define M4U_PORT_CAM_UFDI_R2_A MTK_M4U_ID(M4U_LARB10_ID, 5)
#define M4U_PORT_CAM_RAWI_R2_A MTK_M4U_ID(M4U_LARB10_ID, 6)
#define M4U_PORT_CAM_RAWI_R5_A MTK_M4U_ID(M4U_LARB10_ID, 7)
#define M4U_PORT_CAM_IMGO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 8)
#define M4U_PORT_CAM_RRZO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 9)
#define M4U_PORT_CAM_LSCI_R1_B MTK_M4U_ID(M4U_LARB10_ID, 10)
#define M4U_PORT_CAM_BPCI_R1_B MTK_M4U_ID(M4U_LARB10_ID, 11)
#define M4U_PORT_CAM_YUVO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 12)
#define M4U_PORT_CAM_UFDI_R2_B MTK_M4U_ID(M4U_LARB10_ID, 13)
#define M4U_PORT_CAM_RAWI_R2_B MTK_M4U_ID(M4U_LARB10_ID, 14)
#define M4U_PORT_CAM_RAWI_R5_B MTK_M4U_ID(M4U_LARB10_ID, 15)
#define M4U_PORT_CAM_CAMSV_0 MTK_M4U_ID(M4U_LARB10_ID, 16)
#define M4U_PORT_CAM_AAO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 17)
#define M4U_PORT_CAM_AFO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 18)
#define M4U_PORT_CAM_FLKO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 19)
#define M4U_PORT_CAM_LCESO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 20)
#define M4U_PORT_CAM_CRZO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 21)
#define M4U_PORT_CAM_AAO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 22)
#define M4U_PORT_CAM_AFO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 23)
#define M4U_PORT_CAM_FLKO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 24)
#define M4U_PORT_CAM_LCESO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 25)
#define M4U_PORT_CAM_CRZO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 26)
#define M4U_PORT_CAM_LTMSO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 27)
#define M4U_PORT_CAM_RSSO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 28)
#define M4U_PORT_CAM_LTMSO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 29)
#define M4U_PORT_CAM_RSSO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 30)
/* larb11-CAM-VPU */
#define M4U_PORT_CAM_IPUO MTK_M4U_ID(M4U_LARB11_ID, 0)
#define M4U_PORT_CAM_IPU2O MTK_M4U_ID(M4U_LARB11_ID, 1)
#define M4U_PORT_CAM_IPU3O MTK_M4U_ID(M4U_LARB11_ID, 2)
#define M4U_PORT_CAM_IPUI MTK_M4U_ID(M4U_LARB11_ID, 3)
#define M4U_PORT_CAM_IPU2I MTK_M4U_ID(M4U_LARB11_ID, 4)
#endif
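
The port IDs above simply pack a local arbiter (larb) index into the upper bits and a per-larb port index into the low five bits, per the MTK_M4U_ID() macro. As a minimal illustration, the two decode macros below are hypothetical and not part of this merge; they only make the encoding explicit:

#define EXAMPLE_M4U_TO_LARB(id)	((id) >> 5)	/* larb index, e.g. M4U_LARB9_ID */
#define EXAMPLE_M4U_TO_PORT(id)	((id) & 0x1f)	/* port index within that larb   */

/* Worked example: M4U_PORT_CAM_CCUI == MTK_M4U_ID(9, 21) == (9 << 5) | 21 == 309 */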


@@ -48,6 +48,7 @@ struct dmar_drhd_unit {
u16 segment; /* PCI domain */
u8 ignored:1; /* ignore drhd */
u8 include_all:1;
u8 gfx_dedicated:1; /* graphic dedicated */
struct intel_iommu *iommu;
};
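
The new gfx_dedicated bit marks a DRHD unit whose scope covers only graphics devices. Together with the drhd back-pointer added to struct intel_iommu further down, quirk code can test it directly; the helper below is a hypothetical sketch of that use, not code from this merge:

/* Illustrative only: true when this IOMMU serves nothing but graphics. */
static bool iommu_is_gfx_dedicated(struct intel_iommu *iommu)
{
	return iommu->drhd && iommu->drhd->gfx_dedicated;
}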


@@ -381,8 +381,7 @@ enum {
#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
@@ -600,6 +599,8 @@ struct intel_iommu {
struct iommu_device iommu; /* IOMMU core code handle */
int node;
u32 flags; /* Software defined flags */
struct dmar_drhd_unit *drhd;
};
/* PCI domain-device relationship */
@@ -705,7 +706,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u32 pasid, u16 qdep, u64 addr,
unsigned int size_order, u64 granu);
unsigned int size_order);
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
int pasid);
@@ -728,6 +729,7 @@ void iommu_flush_write_buffer(struct intel_iommu *iommu);
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
struct dmar_domain *find_domain(struct device *dev);
struct device_domain_info *get_domain_info(struct device *dev);
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
#ifdef CONFIG_INTEL_IOMMU_SVM
extern void intel_svm_check(struct intel_iommu *iommu);
@@ -740,6 +742,9 @@ struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
void *drvdata);
void intel_svm_unbind(struct iommu_sva *handle);
int intel_svm_get_pasid(struct iommu_sva *handle);
int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
struct iommu_page_response *msg);
struct svm_dev_ops;
struct intel_svm_dev {
@@ -766,8 +771,6 @@ struct intel_svm {
struct list_head devs;
struct list_head list;
};
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
#endif
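
One easy-to-miss change above: QI_DEV_EIOTLB_PASID() now masks its argument down to the architectural 20-bit PASID width before shifting it into the invalidation descriptor. A worked example, with an arbitrary input value chosen only to show the truncation:

/* Only the low 20 bits of the PASID survive the new masking. */
QI_DEV_EIOTLB_PASID(0x123456)
	/* old macro: 0x123456ULL << 32				    */
	/* new macro: (0x123456 & 0xfffff) << 32 == 0x23456ULL << 32 */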


@@ -155,7 +155,7 @@ struct io_pgtable_cfg {
*/
struct io_pgtable_ops {
int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
size_t size, struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
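
With the extra gfp_t argument, the caller's allocation context is passed down to the page-table code instead of being hard-coded there. A minimal caller sketch follows; the variable names are invented for illustration, and in this series the IOMMU core simply forwards the gfp it was given:

/* Sleepable path: page-table memory may be allocated with GFP_KERNEL. */
ret = ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);

/* Atomic path, e.g. under a spinlock: fall back to GFP_ATOMIC. */
ret = ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE, GFP_ATOMIC);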


@@ -31,12 +31,6 @@
* if the IOMMU page table format is equivalent.
*/
#define IOMMU_PRIV (1 << 5)
/*
* Non-coherent masters can use this page protection flag to set cacheable
* memory attributes for only a transparent outer level of cache, also known as
* the last-level or system cache.
*/
#define IOMMU_SYS_CACHE_ONLY (1 << 6)
struct iommu_ops;
struct iommu_group;
@@ -457,22 +451,6 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
/**
* iommu_map_sgtable - Map the given buffer to the IOMMU domain
* @domain: The IOMMU domain to perform the mapping
* @iova: The start address to map the buffer
* @sgt: The sg_table object describing the buffer
* @prot: IOMMU protection bits
*
* Creates a mapping at @iova for the buffer described by a scatterlist
* stored in the given sg_table object in the provided IOMMU domain.
*/
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
unsigned long iova, struct sg_table *sgt, int prot)
{
return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
@@ -1079,6 +1057,22 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
}
#endif /* CONFIG_IOMMU_API */
/**
* iommu_map_sgtable - Map the given buffer to the IOMMU domain
* @domain: The IOMMU domain to perform the mapping
* @iova: The start address to map the buffer
* @sgt: The sg_table object describing the buffer
* @prot: IOMMU protection bits
*
* Creates a mapping at @iova for the buffer described by a scatterlist
* stored in the given sg_table object in the provided IOMMU domain.
*/
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
unsigned long iova, struct sg_table *sgt, int prot)
{
return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}
#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
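
iommu_map_sgtable() itself is unchanged; it only moves out of the CONFIG_IOMMU_API section so that callers still build when the IOMMU API is compiled out (iommu_map_sg() then resolves to its inline stub). A hedged usage sketch, with buf_size standing in for the caller's total buffer length:

/* Hypothetical caller: map a scatter-gather buffer, check for a short map. */
size_t mapped = iommu_map_sgtable(domain, iova, sgt,
				  IOMMU_READ | IOMMU_WRITE);
if (mapped < buf_size)
	goto err_unmap;	/* partial mapping: undo what was mapped */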


@@ -81,7 +81,10 @@ struct iommu_fault_unrecoverable {
/**
* struct iommu_fault_page_request - Page Request data
* @flags: encodes whether the corresponding fields are valid and whether this
* is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values)
* is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
* When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
* must have the same PASID value as the page request. When it is clear,
* the page response should not have a PASID.
* @pasid: Process Address Space ID
* @grpid: Page Request Group Index
* @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
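
The wording above pins down when a fault handler has to echo the PASID back in its response. A hedged sketch of the consuming side; prm points at the iommu_fault_page_request, and the handler around this snippet is hypothetical:

struct iommu_page_response resp = {
	.version = IOMMU_PAGE_RESP_VERSION_1,
	.grpid	 = prm->grpid,
	.code	 = IOMMU_PAGE_RESP_SUCCESS,
};

/* Echo the PASID only when the request asks for it in the response. */
if (prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID) {
	resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
	resp.pasid  = prm->pasid;
}
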
@@ -92,6 +95,7 @@ struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3)
__u32 flags;
__u32 pasid;
__u32 grpid;