IOMMU Updates for Linux v6.6
Merge tag 'iommu-updates-v6.6' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:

 "Core changes:
    - Consolidate probe_device path
    - Make the PCI-SAC IOVA allocation trick PCI-only

  AMD IOMMU:
    - Consolidate PPR log handling
    - Interrupt handling improvements
    - Refcount fixes for amd_iommu_v2 driver

  Intel VT-d driver:
    - Enable idxd device DMA with pasid through iommu dma ops
    - Lift RESV_DIRECT check from VT-d driver to core
    - Miscellaneous cleanups and fixes

  ARM-SMMU drivers:
    - Device-tree binding updates:
       - Add additional compatible strings for Qualcomm SoCs
       - Allow ASIDs to be configured in the DT to work around Qualcomm's
         broken hypervisor
       - Fix clocks for Qualcomm's MSM8998 SoC
    - SMMUv2:
       - Support for Qualcomm's legacy firmware implementation featured
         on at least MSM8956 and MSM8976
       - Match compatible strings for Qualcomm SM6350 and SM6375 SoC variants
    - SMMUv3:
       - Use 'ida' instead of a bitmap for VMID allocation

  Rockchip IOMMU:
    - Lift page-table allocation restrictions on newer hardware

  Mediatek IOMMU:
    - Add MT8188 IOMMU Support

  Renesas IOMMU:
    - Allow PCIe devices

  ... and the usual set of cleanups and smaller fixes"

* tag 'iommu-updates-v6.6' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (64 commits)
  iommu: Explicitly include correct DT includes
  iommu/amd: Remove unused declarations
  iommu/arm-smmu-qcom: Add SM6375 SMMUv2
  iommu/arm-smmu-qcom: Add SM6350 DPU compatible
  iommu/arm-smmu-qcom: Add SM6375 DPU compatible
  iommu/arm-smmu-qcom: Sort the compatible list alphabetically
  dt-bindings: arm-smmu: Fix MSM8998 clocks description
  iommu/vt-d: Remove unused extern declaration dmar_parse_dev_scope()
  iommu/vt-d: Fix to convert mm pfn to dma pfn
  iommu/vt-d: Fix to flush cache of PASID directory table
  iommu/vt-d: Remove rmrr check in domain attaching device path
  iommu: Prevent RESV_DIRECT devices from blocking domains
  dmaengine/idxd: Re-enable kernel workqueue under DMA API
  iommu/vt-d: Add set_dev_pasid callback for dma domain
  iommu/vt-d: Prepare for set_dev_pasid callback
  iommu/vt-d: Make prq draining code generic
  iommu/vt-d: Remove pasid_mutex
  iommu/vt-d: Add domain_flush_pasid_iotlb()
  iommu: Move global PASID allocation from SVA to core
  iommu: Generalize PASID 0 for normal DMA w/o PASID
  ...
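As context for the idxd entries above, this pull enables in-kernel DMA with a PASID through the core IOMMU API; the drivers/dma/idxd hunks further down carry the actual changes. The snippet below is a minimal sketch of that usage pattern, not the idxd implementation itself: a hypothetical driver (the example_* function names are made up for illustration) attaches a global PASID to its device's default DMA domain and tears it down again. The iommu_* calls shown are the ones introduced or used in this merge.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

/*
 * Illustrative only: attach a global PASID to the device's default DMA
 * domain so the kernel can submit work (e.g. via ENQCMDS) on buffers
 * mapped with the regular DMA API. Function names are hypothetical.
 */
static int example_enable_kernel_pasid(struct device *dev, ioasid_t *out_pasid)
{
	struct iommu_domain *domain;
	ioasid_t pasid;
	int ret;

	/* Default domain set up by the IOMMU core for DMA API use */
	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return -EPERM;

	/* PASIDs for this use case come from the global allocator */
	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;

	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret) {
		iommu_free_global_pasid(pasid);
		return ret;
	}

	*out_pasid = pasid;
	return 0;
}

static void example_disable_kernel_pasid(struct device *dev, ioasid_t pasid)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	/* Detach before freeing so the PASID is not reused while live */
	if (domain)
		iommu_detach_device_pasid(domain, dev, pasid);
	iommu_free_global_pasid(pasid);
}

On teardown the PASID is detached before it is freed, which matches the order used by idxd_disable_system_pasid() in the diff below.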
commit 0468be89b3
@ -270,6 +270,47 @@ allOf:
          contains:
            enum:
              - qcom,msm8998-smmu-v2
    then:
      anyOf:
        - properties:
            clock-names:
              items:
                - const: bus
            clocks:
              items:
                - description: bus clock required for downstream bus access and for
                    the smmu ptw
        - properties:
            clock-names:
              items:
                - const: iface
                - const: mem
                - const: mem_iface
            clocks:
              items:
                - description: interface clock required to access smmu's registers
                    through the TCU's programming interface.
                - description: bus clock required for memory access
                - description: bus clock required for GPU memory access
        - properties:
            clock-names:
              items:
                - const: iface-mm
                - const: iface-smmu
                - const: bus-smmu
            clocks:
              items:
                - description: interface clock required to access mnoc's registers
                    through the TCU's programming interface.
                - description: interface clock required to access smmu's registers
                    through the TCU's programming interface.
                - description: bus clock required for the smmu ptw

  - if:
      properties:
        compatible:
          contains:
            enum:
              - qcom,sdm630-smmu-v2
              - qcom,sm6375-smmu-v2
    then:
@ -78,6 +78,9 @@ properties:
|
|||
- mediatek,mt8173-m4u # generation two
|
||||
- mediatek,mt8183-m4u # generation two
|
||||
- mediatek,mt8186-iommu-mm # generation two
|
||||
- mediatek,mt8188-iommu-vdo # generation two
|
||||
- mediatek,mt8188-iommu-vpp # generation two
|
||||
- mediatek,mt8188-iommu-infra # generation two
|
||||
- mediatek,mt8192-m4u # generation two
|
||||
- mediatek,mt8195-iommu-vdo # generation two
|
||||
- mediatek,mt8195-iommu-vpp # generation two
|
||||
|
@ -123,6 +126,7 @@ properties:
|
|||
description: |
|
||||
This is the mtk_m4u_id according to the HW. Specifies the mtk_m4u_id as
|
||||
defined in
|
||||
dt-binding/memory/mediatek,mt8188-memory-port.h for mt8188,
|
||||
dt-binding/memory/mt2701-larb-port.h for mt2701 and mt7623,
|
||||
dt-binding/memory/mt2712-larb-port.h for mt2712,
|
||||
dt-binding/memory/mt6779-larb-port.h for mt6779,
|
||||
|
@ -155,6 +159,8 @@ allOf:
|
|||
- mediatek,mt6795-m4u
|
||||
- mediatek,mt8173-m4u
|
||||
- mediatek,mt8186-iommu-mm
|
||||
- mediatek,mt8188-iommu-vdo
|
||||
- mediatek,mt8188-iommu-vpp
|
||||
- mediatek,mt8192-m4u
|
||||
- mediatek,mt8195-iommu-vdo
|
||||
- mediatek,mt8195-iommu-vpp
|
||||
|
@ -168,6 +174,8 @@ allOf:
|
|||
compatible:
|
||||
enum:
|
||||
- mediatek,mt8186-iommu-mm
|
||||
- mediatek,mt8188-iommu-vdo
|
||||
- mediatek,mt8188-iommu-vpp
|
||||
- mediatek,mt8192-m4u
|
||||
- mediatek,mt8195-iommu-vdo
|
||||
- mediatek,mt8195-iommu-vpp
|
||||
|
@ -194,7 +202,9 @@ allOf:
|
|||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
const: mediatek,mt8195-iommu-infra
|
||||
enum:
|
||||
- mediatek,mt8188-iommu-infra
|
||||
- mediatek,mt8195-iommu-infra
|
||||
|
||||
then:
|
||||
required:
|
||||
|
|
|
@ -17,11 +17,16 @@ description: |
|
|||
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- enum:
|
||||
- qcom,msm8916-iommu
|
||||
- qcom,msm8953-iommu
|
||||
- const: qcom,msm-iommu-v1
|
||||
oneOf:
|
||||
- items:
|
||||
- enum:
|
||||
- qcom,msm8916-iommu
|
||||
- qcom,msm8953-iommu
|
||||
- const: qcom,msm-iommu-v1
|
||||
- items:
|
||||
- enum:
|
||||
- qcom,msm8976-iommu
|
||||
- const: qcom,msm-iommu-v2
|
||||
|
||||
clocks:
|
||||
items:
|
||||
|
@ -64,6 +69,8 @@ patternProperties:
|
|||
enum:
|
||||
- qcom,msm-iommu-v1-ns
|
||||
- qcom,msm-iommu-v1-sec
|
||||
- qcom,msm-iommu-v2-ns
|
||||
- qcom,msm-iommu-v2-sec
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
@ -71,6 +78,11 @@ patternProperties:
|
|||
reg:
|
||||
maxItems: 1
|
||||
|
||||
qcom,ctx-asid:
|
||||
$ref: /schemas/types.yaml#/definitions/uint32
|
||||
description:
|
||||
The ASID number associated to the context bank.
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- interrupts
|
||||
|
|
|
@ -13356,6 +13356,7 @@ L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
|
|||
S: Supported
|
||||
F: Documentation/devicetree/bindings/iommu/mediatek*
|
||||
F: drivers/iommu/mtk_iommu*
|
||||
F: include/dt-bindings/memory/mediatek,mt*-port.h
|
||||
F: include/dt-bindings/memory/mt*-port.h
|
||||
|
||||
MEDIATEK JPEG DRIVER
|
||||
|
|
|
@ -1584,7 +1584,7 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
|
|||
* If we have reason to believe the IOMMU driver missed the initial
|
||||
* iommu_probe_device() call for dev, replay it to get things in order.
|
||||
*/
|
||||
if (!err && dev->bus && !device_iommu_mapped(dev))
|
||||
if (!err && dev->bus)
|
||||
err = iommu_probe_device(dev);
|
||||
|
||||
/* Ignore all other errors apart from EPROBE_DEFER */
|
||||
|
|
|
@ -299,21 +299,6 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
|
|||
}
|
||||
}
|
||||
|
||||
static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
|
||||
{
|
||||
struct idxd_device *idxd = wq->idxd;
|
||||
union wqcfg wqcfg;
|
||||
unsigned int offset;
|
||||
|
||||
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
|
||||
spin_lock(&idxd->dev_lock);
|
||||
wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
|
||||
wqcfg.priv = priv;
|
||||
wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
|
||||
iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
|
||||
spin_unlock(&idxd->dev_lock);
|
||||
}
|
||||
|
||||
static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
|
||||
{
|
||||
struct idxd_device *idxd = wq->idxd;
|
||||
|
@ -1421,15 +1406,14 @@ int drv_enable_wq(struct idxd_wq *wq)
|
|||
}
|
||||
|
||||
/*
|
||||
* In the event that the WQ is configurable for pasid and priv bits.
|
||||
* For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
|
||||
* However, for non-kernel wq, the driver should only set the pasid_en bit for
|
||||
* shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
|
||||
* In the event that the WQ is configurable for pasid, the driver
|
||||
* should setup the pasid, pasid_en bit. This is true for both kernel
|
||||
* and user shared workqueues. There is no need to setup priv bit in
|
||||
* that in-kernel DMA will also do user privileged requests.
|
||||
* A dedicated wq that is not 'kernel' type will configure pasid and
|
||||
* pasid_en later on so there is no need to setup.
|
||||
*/
|
||||
if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
|
||||
int priv = 0;
|
||||
|
||||
if (wq_pasid_enabled(wq)) {
|
||||
if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
|
||||
u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
|
||||
|
@ -1437,10 +1421,6 @@ int drv_enable_wq(struct idxd_wq *wq)
|
|||
__idxd_wq_set_pasid_locked(wq, pasid);
|
||||
}
|
||||
}
|
||||
|
||||
if (is_idxd_wq_kernel(wq))
|
||||
priv = 1;
|
||||
__idxd_wq_set_priv_locked(wq, priv);
|
||||
}
|
||||
|
||||
rc = 0;
|
||||
|
@ -1548,6 +1528,15 @@ int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
|
|||
if (rc < 0)
|
||||
return -ENXIO;
|
||||
|
||||
/*
|
||||
* System PASID is preserved across device disable/enable cycle, but
|
||||
* genconfig register content gets cleared during device reset. We
|
||||
* need to re-enable user interrupts for kernel work queue completion
|
||||
* IRQ to function.
|
||||
*/
|
||||
if (idxd->pasid != IOMMU_PASID_INVALID)
|
||||
idxd_set_user_intr(idxd, 1);
|
||||
|
||||
rc = idxd_device_evl_setup(idxd);
|
||||
if (rc < 0) {
|
||||
idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
|
||||
|
|
|
@ -75,9 +75,10 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
|
|||
hw->xfer_size = len;
|
||||
/*
|
||||
* For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
|
||||
* field instead. This field should be set to 1 for kernel descriptors.
|
||||
* field instead. This field should be set to 0 for kernel descriptors
|
||||
* since kernel DMA on VT-d supports "user" privilege only.
|
||||
*/
|
||||
hw->priv = 1;
|
||||
hw->priv = 0;
|
||||
hw->completion_addr = compl;
|
||||
}
|
||||
|
||||
|
|
|
@ -473,6 +473,15 @@ static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
|
|||
return container_of(ie, struct idxd_device, ie);
|
||||
}
|
||||
|
||||
static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
|
||||
{
|
||||
union gencfg_reg reg;
|
||||
|
||||
reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
|
||||
reg.user_int_en = enable;
|
||||
iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
|
||||
}
|
||||
|
||||
extern struct bus_type dsa_bus_type;
|
||||
|
||||
extern bool support_enqcmd;
|
||||
|
|
|
@ -550,14 +550,59 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
|
|||
|
||||
static int idxd_enable_system_pasid(struct idxd_device *idxd)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
struct pci_dev *pdev = idxd->pdev;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct iommu_domain *domain;
|
||||
ioasid_t pasid;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Attach a global PASID to the DMA domain so that we can use ENQCMDS
|
||||
* to submit work on buffers mapped by DMA API.
|
||||
*/
|
||||
domain = iommu_get_domain_for_dev(dev);
|
||||
if (!domain)
|
||||
return -EPERM;
|
||||
|
||||
pasid = iommu_alloc_global_pasid(dev);
|
||||
if (pasid == IOMMU_PASID_INVALID)
|
||||
return -ENOSPC;
|
||||
|
||||
/*
|
||||
* DMA domain is owned by the driver, it should support all valid
|
||||
* types such as DMA-FQ, identity, etc.
|
||||
*/
|
||||
ret = iommu_attach_device_pasid(domain, dev, pasid);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to attach device pasid %d, domain type %d",
|
||||
pasid, domain->type);
|
||||
iommu_free_global_pasid(pasid);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Since we set user privilege for kernel DMA, enable completion IRQ */
|
||||
idxd_set_user_intr(idxd, 1);
|
||||
idxd->pasid = pasid;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void idxd_disable_system_pasid(struct idxd_device *idxd)
|
||||
{
|
||||
struct pci_dev *pdev = idxd->pdev;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct iommu_domain *domain;
|
||||
|
||||
iommu_sva_unbind_device(idxd->sva);
|
||||
domain = iommu_get_domain_for_dev(dev);
|
||||
if (!domain)
|
||||
return;
|
||||
|
||||
iommu_detach_device_pasid(domain, dev, idxd->pasid);
|
||||
iommu_free_global_pasid(idxd->pasid);
|
||||
|
||||
idxd_set_user_intr(idxd, 0);
|
||||
idxd->sva = NULL;
|
||||
idxd->pasid = IOMMU_PASID_INVALID;
|
||||
}
|
||||
|
||||
static int idxd_enable_sva(struct pci_dev *pdev)
|
||||
|
@ -600,8 +645,9 @@ static int idxd_probe(struct idxd_device *idxd)
|
|||
} else {
|
||||
set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
|
||||
|
||||
if (idxd_enable_system_pasid(idxd))
|
||||
dev_warn(dev, "No in-kernel DMA with PASID.\n");
|
||||
rc = idxd_enable_system_pasid(idxd);
|
||||
if (rc)
|
||||
dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
|
||||
else
|
||||
set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
|
||||
}
|
||||
|
|
|
@ -948,13 +948,6 @@ static ssize_t wq_name_store(struct device *dev,
|
|||
if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* This is temporarily placed here until we have SVM support for
|
||||
* dmaengine.
|
||||
*/
|
||||
if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
input = kstrndup(buf, count, GFP_KERNEL);
|
||||
if (!input)
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -12,13 +12,14 @@
|
|||
#include "amd_iommu_types.h"
|
||||
|
||||
irqreturn_t amd_iommu_int_thread(int irq, void *data);
|
||||
irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
|
||||
irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
|
||||
irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
|
||||
irqreturn_t amd_iommu_int_handler(int irq, void *data);
|
||||
void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
|
||||
void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
|
||||
void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
|
||||
int amd_iommu_init_devices(void);
|
||||
void amd_iommu_uninit_devices(void);
|
||||
void amd_iommu_init_notifier(void);
|
||||
void amd_iommu_restart_ppr_log(struct amd_iommu *iommu);
|
||||
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
|
||||
|
||||
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
|
||||
|
|
|
@ -120,10 +120,13 @@
|
|||
#define PASID_MASK 0x0000ffff
|
||||
|
||||
/* MMIO status bits */
|
||||
#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK BIT(0)
|
||||
#define MMIO_STATUS_EVT_OVERFLOW_MASK BIT(0)
|
||||
#define MMIO_STATUS_EVT_INT_MASK BIT(1)
|
||||
#define MMIO_STATUS_COM_WAIT_INT_MASK BIT(2)
|
||||
#define MMIO_STATUS_EVT_RUN_MASK BIT(3)
|
||||
#define MMIO_STATUS_PPR_OVERFLOW_MASK BIT(5)
|
||||
#define MMIO_STATUS_PPR_INT_MASK BIT(6)
|
||||
#define MMIO_STATUS_PPR_RUN_MASK BIT(7)
|
||||
#define MMIO_STATUS_GALOG_RUN_MASK BIT(8)
|
||||
#define MMIO_STATUS_GALOG_OVERFLOW_MASK BIT(9)
|
||||
#define MMIO_STATUS_GALOG_INT_MASK BIT(10)
|
||||
|
@ -381,15 +384,15 @@
|
|||
*/
|
||||
#define DTE_FLAG_V BIT_ULL(0)
|
||||
#define DTE_FLAG_TV BIT_ULL(1)
|
||||
#define DTE_FLAG_GIOV BIT_ULL(54)
|
||||
#define DTE_FLAG_GV BIT_ULL(55)
|
||||
#define DTE_GLX_SHIFT (56)
|
||||
#define DTE_GLX_MASK (3)
|
||||
#define DTE_FLAG_IR BIT_ULL(61)
|
||||
#define DTE_FLAG_IW BIT_ULL(62)
|
||||
|
||||
#define DTE_FLAG_IOTLB BIT_ULL(32)
|
||||
#define DTE_FLAG_GIOV BIT_ULL(54)
|
||||
#define DTE_FLAG_GV BIT_ULL(55)
|
||||
#define DTE_FLAG_MASK (0x3ffULL << 32)
|
||||
#define DTE_GLX_SHIFT (56)
|
||||
#define DTE_GLX_MASK (3)
|
||||
#define DEV_DOMID_MASK 0xffffULL
|
||||
|
||||
#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
|
||||
|
@ -702,12 +705,21 @@ struct amd_iommu {
|
|||
/* event buffer virtual address */
|
||||
u8 *evt_buf;
|
||||
|
||||
/* Name for event log interrupt */
|
||||
unsigned char evt_irq_name[16];
|
||||
|
||||
/* Base of the PPR log, if present */
|
||||
u8 *ppr_log;
|
||||
|
||||
/* Name for PPR log interrupt */
|
||||
unsigned char ppr_irq_name[16];
|
||||
|
||||
/* Base of the GA log, if present */
|
||||
u8 *ga_log;
|
||||
|
||||
/* Name for GA log interrupt */
|
||||
unsigned char ga_irq_name[16];
|
||||
|
||||
/* Tail of the GA log, if present */
|
||||
u8 *ga_log_tail;
|
||||
|
||||
|
|
|
@ -483,6 +483,10 @@ static void iommu_disable(struct amd_iommu *iommu)
|
|||
iommu_feature_disable(iommu, CONTROL_GALOG_EN);
|
||||
iommu_feature_disable(iommu, CONTROL_GAINT_EN);
|
||||
|
||||
/* Disable IOMMU PPR logging */
|
||||
iommu_feature_disable(iommu, CONTROL_PPRLOG_EN);
|
||||
iommu_feature_disable(iommu, CONTROL_PPRINT_EN);
|
||||
|
||||
/* Disable IOMMU hardware itself */
|
||||
iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
|
||||
|
||||
|
@ -752,38 +756,62 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
|
|||
return iommu->cmd_buf ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
* Interrupt handler has processed all pending events and adjusted head
|
||||
* and tail pointer. Reset overflow mask and restart logging again.
|
||||
*/
|
||||
static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
|
||||
u8 cntrl_intr, u8 cntrl_log,
|
||||
u32 status_run_mask, u32 status_overflow_mask)
|
||||
{
|
||||
u32 status;
|
||||
|
||||
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
|
||||
if (status & status_run_mask)
|
||||
return;
|
||||
|
||||
pr_info_ratelimited("IOMMU %s log restarting\n", evt_type);
|
||||
|
||||
iommu_feature_disable(iommu, cntrl_log);
|
||||
iommu_feature_disable(iommu, cntrl_intr);
|
||||
|
||||
writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
|
||||
|
||||
iommu_feature_enable(iommu, cntrl_intr);
|
||||
iommu_feature_enable(iommu, cntrl_log);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function restarts event logging in case the IOMMU experienced
|
||||
* an event log buffer overflow.
|
||||
*/
|
||||
void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
|
||||
{
|
||||
iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
|
||||
iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
|
||||
amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN,
|
||||
CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK,
|
||||
MMIO_STATUS_EVT_OVERFLOW_MASK);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function restarts event logging in case the IOMMU experienced
|
||||
* an GA log overflow.
|
||||
* GA log overflow.
|
||||
*/
|
||||
void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
|
||||
{
|
||||
u32 status;
|
||||
amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN,
|
||||
CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK,
|
||||
MMIO_STATUS_GALOG_OVERFLOW_MASK);
|
||||
}
|
||||
|
||||
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
|
||||
if (status & MMIO_STATUS_GALOG_RUN_MASK)
|
||||
return;
|
||||
|
||||
pr_info_ratelimited("IOMMU GA Log restarting\n");
|
||||
|
||||
iommu_feature_disable(iommu, CONTROL_GALOG_EN);
|
||||
iommu_feature_disable(iommu, CONTROL_GAINT_EN);
|
||||
|
||||
writel(MMIO_STATUS_GALOG_OVERFLOW_MASK,
|
||||
iommu->mmio_base + MMIO_STATUS_OFFSET);
|
||||
|
||||
iommu_feature_enable(iommu, CONTROL_GAINT_EN);
|
||||
iommu_feature_enable(iommu, CONTROL_GALOG_EN);
|
||||
/*
|
||||
* This function restarts ppr logging in case the IOMMU experienced
|
||||
* PPR log overflow.
|
||||
*/
|
||||
void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
|
||||
{
|
||||
amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
|
||||
CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
|
||||
MMIO_STATUS_PPR_OVERFLOW_MASK);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -906,6 +934,8 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
|
|||
if (iommu->ppr_log == NULL)
|
||||
return;
|
||||
|
||||
iommu_feature_enable(iommu, CONTROL_PPR_EN);
|
||||
|
||||
entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
|
||||
|
||||
memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
|
||||
|
@ -916,7 +946,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
|
|||
writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
|
||||
|
||||
iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
|
||||
iommu_feature_enable(iommu, CONTROL_PPR_EN);
|
||||
iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
|
||||
}
|
||||
|
||||
static void __init free_ppr_log(struct amd_iommu *iommu)
|
||||
|
@ -2311,6 +2341,7 @@ static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq
|
|||
struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
|
||||
|
||||
irqd->chip = &intcapxt_controller;
|
||||
irqd->hwirq = info->hwirq;
|
||||
irqd->chip_data = info->data;
|
||||
__irq_set_handler(i, handle_edge_irq, 0, "edge");
|
||||
}
|
||||
|
@ -2337,22 +2368,14 @@ static void intcapxt_unmask_irq(struct irq_data *irqd)
|
|||
xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
|
||||
xt.destid_24_31 = cfg->dest_apicid >> 24;
|
||||
|
||||
/**
|
||||
* Current IOMMU implementation uses the same IRQ for all
|
||||
* 3 IOMMU interrupts.
|
||||
*/
|
||||
writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
|
||||
writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
|
||||
writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
|
||||
writeq(xt.capxt, iommu->mmio_base + irqd->hwirq);
|
||||
}
|
||||
|
||||
static void intcapxt_mask_irq(struct irq_data *irqd)
|
||||
{
|
||||
struct amd_iommu *iommu = irqd->chip_data;
|
||||
|
||||
writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
|
||||
writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
|
||||
writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
|
||||
writeq(0, iommu->mmio_base + irqd->hwirq);
|
||||
}
|
||||
|
||||
|
||||
|
@ -2415,7 +2438,8 @@ static struct irq_domain *iommu_get_irqdomain(void)
|
|||
return iommu_irqdomain;
|
||||
}
|
||||
|
||||
static int iommu_setup_intcapxt(struct amd_iommu *iommu)
|
||||
static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
|
||||
int hwirq, irq_handler_t thread_fn)
|
||||
{
|
||||
struct irq_domain *domain;
|
||||
struct irq_alloc_info info;
|
||||
|
@ -2429,6 +2453,7 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
|
|||
init_irq_alloc_info(&info, NULL);
|
||||
info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
|
||||
info.data = iommu;
|
||||
info.hwirq = hwirq;
|
||||
|
||||
irq = irq_domain_alloc_irqs(domain, 1, node, &info);
|
||||
if (irq < 0) {
|
||||
|
@ -2437,7 +2462,7 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
|
|||
}
|
||||
|
||||
ret = request_threaded_irq(irq, amd_iommu_int_handler,
|
||||
amd_iommu_int_thread, 0, "AMD-Vi", iommu);
|
||||
thread_fn, 0, devname, iommu);
|
||||
if (ret) {
|
||||
irq_domain_free_irqs(irq, 1);
|
||||
irq_domain_remove(domain);
|
||||
|
@ -2447,6 +2472,37 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int iommu_setup_intcapxt(struct amd_iommu *iommu)
|
||||
{
|
||||
int ret;
|
||||
|
||||
snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name),
|
||||
"AMD-Vi%d-Evt", iommu->index);
|
||||
ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name,
|
||||
MMIO_INTCAPXT_EVT_OFFSET,
|
||||
amd_iommu_int_thread_evtlog);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name),
|
||||
"AMD-Vi%d-PPR", iommu->index);
|
||||
ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name,
|
||||
MMIO_INTCAPXT_PPR_OFFSET,
|
||||
amd_iommu_int_thread_pprlog);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
#ifdef CONFIG_IRQ_REMAP
|
||||
snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name),
|
||||
"AMD-Vi%d-GA", iommu->index);
|
||||
ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name,
|
||||
MMIO_INTCAPXT_GALOG_OFFSET,
|
||||
amd_iommu_int_thread_galog);
|
||||
#endif
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int iommu_init_irq(struct amd_iommu *iommu)
|
||||
{
|
||||
int ret;
|
||||
|
@ -2472,8 +2528,6 @@ enable_faults:
|
|||
|
||||
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
|
||||
|
||||
if (iommu->ppr_log != NULL)
|
||||
iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2889,8 +2943,6 @@ static void enable_iommus_vapic(void)
|
|||
static void enable_iommus(void)
|
||||
{
|
||||
early_enable_iommus();
|
||||
enable_iommus_vapic();
|
||||
enable_iommus_v2();
|
||||
}
|
||||
|
||||
static void disable_iommus(void)
|
||||
|
@ -3154,6 +3206,13 @@ static int amd_iommu_enable_interrupts(void)
|
|||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Interrupt handler is ready to process interrupts. Enable
|
||||
* PPR and GA log interrupt for all IOMMUs.
|
||||
*/
|
||||
enable_iommus_vapic();
|
||||
enable_iommus_v2();
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
@ -3233,8 +3292,6 @@ static int __init state_next(void)
|
|||
register_syscore_ops(&amd_iommu_syscore_ops);
|
||||
ret = amd_iommu_init_pci();
|
||||
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
|
||||
enable_iommus_vapic();
|
||||
enable_iommus_v2();
|
||||
break;
|
||||
case IOMMU_PCI_INIT:
|
||||
ret = amd_iommu_enable_interrupts();
|
||||
|
|
|
@ -841,50 +841,27 @@ static inline void
|
|||
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
|
||||
#endif /* !CONFIG_IRQ_REMAP */
|
||||
|
||||
#define AMD_IOMMU_INT_MASK \
|
||||
(MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
|
||||
MMIO_STATUS_EVT_INT_MASK | \
|
||||
MMIO_STATUS_PPR_INT_MASK | \
|
||||
MMIO_STATUS_GALOG_OVERFLOW_MASK | \
|
||||
MMIO_STATUS_GALOG_INT_MASK)
|
||||
|
||||
irqreturn_t amd_iommu_int_thread(int irq, void *data)
|
||||
static void amd_iommu_handle_irq(void *data, const char *evt_type,
|
||||
u32 int_mask, u32 overflow_mask,
|
||||
void (*int_handler)(struct amd_iommu *),
|
||||
void (*overflow_handler)(struct amd_iommu *))
|
||||
{
|
||||
struct amd_iommu *iommu = (struct amd_iommu *) data;
|
||||
u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
|
||||
u32 mask = int_mask | overflow_mask;
|
||||
|
||||
while (status & AMD_IOMMU_INT_MASK) {
|
||||
while (status & mask) {
|
||||
/* Enable interrupt sources again */
|
||||
writel(AMD_IOMMU_INT_MASK,
|
||||
iommu->mmio_base + MMIO_STATUS_OFFSET);
|
||||
writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
|
||||
|
||||
if (status & MMIO_STATUS_EVT_INT_MASK) {
|
||||
pr_devel("Processing IOMMU Event Log\n");
|
||||
iommu_poll_events(iommu);
|
||||
if (int_handler) {
|
||||
pr_devel("Processing IOMMU (ivhd%d) %s Log\n",
|
||||
iommu->index, evt_type);
|
||||
int_handler(iommu);
|
||||
}
|
||||
|
||||
if (status & MMIO_STATUS_PPR_INT_MASK) {
|
||||
pr_devel("Processing IOMMU PPR Log\n");
|
||||
iommu_poll_ppr_log(iommu);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IRQ_REMAP
|
||||
if (status & (MMIO_STATUS_GALOG_INT_MASK |
|
||||
MMIO_STATUS_GALOG_OVERFLOW_MASK)) {
|
||||
pr_devel("Processing IOMMU GA Log\n");
|
||||
iommu_poll_ga_log(iommu);
|
||||
}
|
||||
|
||||
if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) {
|
||||
pr_info_ratelimited("IOMMU GA Log overflow\n");
|
||||
amd_iommu_restart_ga_log(iommu);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
|
||||
pr_info_ratelimited("IOMMU event log overflow\n");
|
||||
amd_iommu_restart_event_logging(iommu);
|
||||
}
|
||||
if ((status & overflow_mask) && overflow_handler)
|
||||
overflow_handler(iommu);
|
||||
|
||||
/*
|
||||
* Hardware bug: ERBT1312
|
||||
|
@ -901,6 +878,43 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
|
|||
*/
|
||||
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
|
||||
}
|
||||
}
|
||||
|
||||
irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
|
||||
{
|
||||
amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK,
|
||||
MMIO_STATUS_EVT_OVERFLOW_MASK,
|
||||
iommu_poll_events, amd_iommu_restart_event_logging);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
|
||||
{
|
||||
amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
|
||||
MMIO_STATUS_PPR_OVERFLOW_MASK,
|
||||
iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
|
||||
{
|
||||
#ifdef CONFIG_IRQ_REMAP
|
||||
amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK,
|
||||
MMIO_STATUS_GALOG_OVERFLOW_MASK,
|
||||
iommu_poll_ga_log, amd_iommu_restart_ga_log);
|
||||
#endif
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
irqreturn_t amd_iommu_int_thread(int irq, void *data)
|
||||
{
|
||||
amd_iommu_int_thread_evtlog(irq, data);
|
||||
amd_iommu_int_thread_pprlog(irq, data);
|
||||
amd_iommu_int_thread_galog(irq, data);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
|
|
|
@ -262,8 +262,8 @@ static void put_pasid_state(struct pasid_state *pasid_state)
|
|||
|
||||
static void put_pasid_state_wait(struct pasid_state *pasid_state)
|
||||
{
|
||||
refcount_dec(&pasid_state->count);
|
||||
wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
|
||||
if (!refcount_dec_and_test(&pasid_state->count))
|
||||
wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
|
||||
free_pasid_state(pasid_state);
|
||||
}
|
||||
|
||||
|
@ -327,6 +327,9 @@ static void free_pasid_states(struct device_state *dev_state)
|
|||
|
||||
put_pasid_state(pasid_state);
|
||||
|
||||
/* Clear the pasid state so that the pasid can be re-used */
|
||||
clear_pasid_state(dev_state, pasid_state->pasid);
|
||||
|
||||
/*
|
||||
* This will call the mn_release function and
|
||||
* unbind the PASID
|
||||
|
|
|
@ -1276,7 +1276,7 @@ static __maybe_unused int apple_dart_resume(struct device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume);
|
||||
static DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume);
|
||||
|
||||
static const struct of_device_id apple_dart_of_match[] = {
|
||||
{ .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
|
||||
|
|
|
@ -80,7 +80,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
|
|||
* be some overlap between use of both ASIDs, until we invalidate the
|
||||
* TLB.
|
||||
*/
|
||||
arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
|
||||
arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
|
||||
|
||||
/* Invalidate TLB entries previously associated with that context */
|
||||
arm_smmu_tlb_inv_asid(smmu, asid);
|
||||
|
|
|
@ -1059,7 +1059,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
|
|||
/*
|
||||
* This function handles the following cases:
|
||||
*
|
||||
* (1) Install primary CD, for normal DMA traffic (SSID = 0).
|
||||
* (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0).
|
||||
* (2) Install a secondary CD, for SID+SSID traffic.
|
||||
* (3) Update ASID of a CD. Atomically write the first 64 bits of the
|
||||
* CD, then invalidate the old entry and mappings.
|
||||
|
@ -1607,7 +1607,7 @@ static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
|
|||
|
||||
sid = FIELD_GET(PRIQ_0_SID, evt[0]);
|
||||
ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]);
|
||||
ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0;
|
||||
ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : IOMMU_NO_PASID;
|
||||
last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]);
|
||||
grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]);
|
||||
|
||||
|
@ -1748,7 +1748,7 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
|
|||
*/
|
||||
*cmd = (struct arm_smmu_cmdq_ent) {
|
||||
.opcode = CMDQ_OP_ATC_INV,
|
||||
.substream_valid = !!ssid,
|
||||
.substream_valid = (ssid != IOMMU_NO_PASID),
|
||||
.atc.ssid = ssid,
|
||||
};
|
||||
|
||||
|
@ -1795,7 +1795,7 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
|
|||
struct arm_smmu_cmdq_ent cmd;
|
||||
struct arm_smmu_cmdq_batch cmds;
|
||||
|
||||
arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
|
||||
arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd);
|
||||
|
||||
cmds.num = 0;
|
||||
for (i = 0; i < master->num_streams; i++) {
|
||||
|
@ -1875,7 +1875,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
|
|||
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
|
||||
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
|
||||
}
|
||||
arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
|
||||
arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
|
||||
}
|
||||
|
||||
static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
|
||||
|
@ -1968,7 +1968,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
|
|||
* Unfortunately, this can't be leaf-only since we may have
|
||||
* zapped an entire table.
|
||||
*/
|
||||
arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size);
|
||||
arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size);
|
||||
}
|
||||
|
||||
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
|
||||
|
@ -2055,24 +2055,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
|
|||
return &smmu_domain->domain;
|
||||
}
|
||||
|
||||
static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
|
||||
{
|
||||
int idx, size = 1 << span;
|
||||
|
||||
do {
|
||||
idx = find_first_zero_bit(map, size);
|
||||
if (idx == size)
|
||||
return -ENOSPC;
|
||||
} while (test_and_set_bit(idx, map));
|
||||
|
||||
return idx;
|
||||
}
|
||||
|
||||
static void arm_smmu_bitmap_free(unsigned long *map, int idx)
|
||||
{
|
||||
clear_bit(idx, map);
|
||||
}
|
||||
|
||||
static void arm_smmu_domain_free(struct iommu_domain *domain)
|
||||
{
|
||||
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
|
||||
|
@ -2093,7 +2075,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
|
|||
} else {
|
||||
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
|
||||
if (cfg->vmid)
|
||||
arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
|
||||
ida_free(&smmu->vmid_map, cfg->vmid);
|
||||
}
|
||||
|
||||
kfree(smmu_domain);
|
||||
|
@ -2142,7 +2124,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
|
|||
* the master has been added to the devices list for this domain.
|
||||
* This isn't an issue because the STE hasn't been installed yet.
|
||||
*/
|
||||
ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
|
||||
ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, &cfg->cd);
|
||||
if (ret)
|
||||
goto out_free_cd_tables;
|
||||
|
||||
|
@ -2167,7 +2149,9 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
|
|||
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
|
||||
typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
|
||||
|
||||
vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
|
||||
/* Reserve VMID 0 for stage-2 bypass STEs */
|
||||
vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
|
||||
GFP_KERNEL);
|
||||
if (vmid < 0)
|
||||
return vmid;
|
||||
|
||||
|
@ -2328,7 +2312,7 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
|
|||
pdev = to_pci_dev(master->dev);
|
||||
|
||||
atomic_inc(&smmu_domain->nr_ats_masters);
|
||||
arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
|
||||
arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
|
||||
if (pci_enable_ats(pdev, stu))
|
||||
dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
|
||||
}
|
||||
|
@ -3098,8 +3082,8 @@ static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
|
|||
reg |= STRTAB_BASE_RA;
|
||||
smmu->strtab_cfg.strtab_base = reg;
|
||||
|
||||
/* Allocate the first VMID for stage-2 bypass STEs */
|
||||
set_bit(0, smmu->vmid_map);
|
||||
ida_init(&smmu->vmid_map);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -3923,6 +3907,7 @@ static void arm_smmu_device_remove(struct platform_device *pdev)
|
|||
iommu_device_sysfs_remove(&smmu->iommu);
|
||||
arm_smmu_device_disable(smmu);
|
||||
iopf_queue_free(smmu->evtq.iopf);
|
||||
ida_destroy(&smmu->vmid_map);
|
||||
}
|
||||
|
||||
static void arm_smmu_device_shutdown(struct platform_device *pdev)
|
||||
|
|
|
@ -670,7 +670,7 @@ struct arm_smmu_device {
|
|||
|
||||
#define ARM_SMMU_MAX_VMIDS (1 << 16)
|
||||
unsigned int vmid_bits;
|
||||
DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);
|
||||
struct ida vmid_map;
|
||||
|
||||
unsigned int ssid_bits;
|
||||
unsigned int sid_bits;
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/firmware/qcom/qcom_scm.h>
|
||||
#include <linux/ratelimit.h>
|
||||
|
||||
|
|
|
@ -251,10 +251,12 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
|
|||
{ .compatible = "qcom,sc7280-mss-pil" },
|
||||
{ .compatible = "qcom,sc8180x-mdss" },
|
||||
{ .compatible = "qcom,sc8280xp-mdss" },
|
||||
{ .compatible = "qcom,sm8150-mdss" },
|
||||
{ .compatible = "qcom,sm8250-mdss" },
|
||||
{ .compatible = "qcom,sdm845-mdss" },
|
||||
{ .compatible = "qcom,sdm845-mss-pil" },
|
||||
{ .compatible = "qcom,sm6350-mdss" },
|
||||
{ .compatible = "qcom,sm6375-mdss" },
|
||||
{ .compatible = "qcom,sm8150-mdss" },
|
||||
{ .compatible = "qcom,sm8250-mdss" },
|
||||
{ }
|
||||
};
|
||||
|
||||
|
@ -528,6 +530,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
|
|||
{ .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data },
|
||||
{ .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data },
|
||||
{ .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
|
||||
{ .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
|
||||
{ .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
|
||||
{ .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
|
||||
{ .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
|
||||
|
|
|
@ -29,7 +29,6 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
|
|
@ -22,8 +22,7 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
@ -51,14 +50,15 @@ struct qcom_iommu_dev {
|
|||
struct clk_bulk_data clks[CLK_NUM];
|
||||
void __iomem *local_base;
|
||||
u32 sec_id;
|
||||
u8 num_ctxs;
|
||||
struct qcom_iommu_ctx *ctxs[]; /* indexed by asid-1 */
|
||||
u8 max_asid;
|
||||
struct qcom_iommu_ctx *ctxs[]; /* indexed by asid */
|
||||
};
|
||||
|
||||
struct qcom_iommu_ctx {
|
||||
struct device *dev;
|
||||
void __iomem *base;
|
||||
bool secure_init;
|
||||
bool secured_ctx;
|
||||
u8 asid; /* asid and ctx bank # are 1:1 */
|
||||
struct iommu_domain *domain;
|
||||
};
|
||||
|
@ -94,7 +94,7 @@ static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid
|
|||
struct qcom_iommu_dev *qcom_iommu = d->iommu;
|
||||
if (!qcom_iommu)
|
||||
return NULL;
|
||||
return qcom_iommu->ctxs[asid - 1];
|
||||
return qcom_iommu->ctxs[asid];
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
@ -273,6 +273,19 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
|
|||
ctx->secure_init = true;
|
||||
}
|
||||
|
||||
/* Secured QSMMU-500/QSMMU-v2 contexts cannot be programmed */
|
||||
if (ctx->secured_ctx) {
|
||||
ctx->domain = domain;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Disable context bank before programming */
|
||||
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
|
||||
|
||||
/* Clear context bank fault address fault status registers */
|
||||
iommu_writel(ctx, ARM_SMMU_CB_FAR, 0);
|
||||
iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
|
||||
|
||||
/* TTBRs */
|
||||
iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
|
||||
pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
|
||||
|
@ -527,11 +540,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
|
|||
qcom_iommu = platform_get_drvdata(iommu_pdev);
|
||||
|
||||
/* make sure the asid specified in dt is valid, so we don't have
|
||||
* to sanity check this elsewhere, since 'asid - 1' is used to
|
||||
* index into qcom_iommu->ctxs:
|
||||
* to sanity check this elsewhere:
|
||||
*/
|
||||
if (WARN_ON(asid < 1) ||
|
||||
WARN_ON(asid > qcom_iommu->num_ctxs)) {
|
||||
if (WARN_ON(asid > qcom_iommu->max_asid) ||
|
||||
WARN_ON(qcom_iommu->ctxs[asid] == NULL)) {
|
||||
put_device(&iommu_pdev->dev);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -617,7 +629,8 @@ free_mem:
|
|||
|
||||
static int get_asid(const struct device_node *np)
|
||||
{
|
||||
u32 reg;
|
||||
u32 reg, val;
|
||||
int asid;
|
||||
|
||||
/* read the "reg" property directly to get the relative address
|
||||
* of the context bank, and calculate the asid from that:
|
||||
|
@ -625,7 +638,17 @@ static int get_asid(const struct device_node *np)
|
|||
if (of_property_read_u32_index(np, "reg", 0, ®))
|
||||
return -ENODEV;
|
||||
|
||||
return reg / 0x1000; /* context banks are 0x1000 apart */
|
||||
/*
|
||||
* Context banks are 0x1000 apart but, in some cases, the ASID
|
||||
* number doesn't match to this logic and needs to be passed
|
||||
* from the DT configuration explicitly.
|
||||
*/
|
||||
if (!of_property_read_u32(np, "qcom,ctx-asid", &val))
|
||||
asid = val;
|
||||
else
|
||||
asid = reg / 0x1000;
|
||||
|
||||
return asid;
|
||||
}
|
||||
|
||||
static int qcom_iommu_ctx_probe(struct platform_device *pdev)
|
||||
|
@ -633,7 +656,6 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
|
|||
struct qcom_iommu_ctx *ctx;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
|
||||
struct resource *res;
|
||||
int ret, irq;
|
||||
|
||||
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
|
||||
|
@ -643,19 +665,22 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
|
|||
ctx->dev = dev;
|
||||
platform_set_drvdata(pdev, ctx);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
ctx->base = devm_ioremap_resource(dev, res);
|
||||
ctx->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(ctx->base))
|
||||
return PTR_ERR(ctx->base);
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0)
|
||||
return -ENODEV;
|
||||
return irq;
|
||||
|
||||
if (of_device_is_compatible(dev->of_node, "qcom,msm-iommu-v2-sec"))
|
||||
ctx->secured_ctx = true;
|
||||
|
||||
/* clear IRQs before registering fault handler, just in case the
|
||||
* boot-loader left us a surprise:
|
||||
*/
|
||||
iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));
|
||||
if (!ctx->secured_ctx)
|
||||
iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));
|
||||
|
||||
ret = devm_request_irq(dev, irq,
|
||||
qcom_iommu_fault,
|
||||
|
@ -677,7 +702,7 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
|
|||
|
||||
dev_dbg(dev, "found asid %u\n", ctx->asid);
|
||||
|
||||
qcom_iommu->ctxs[ctx->asid - 1] = ctx;
|
||||
qcom_iommu->ctxs[ctx->asid] = ctx;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -689,12 +714,14 @@ static void qcom_iommu_ctx_remove(struct platform_device *pdev)
|
|||
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
|
||||
qcom_iommu->ctxs[ctx->asid - 1] = NULL;
|
||||
qcom_iommu->ctxs[ctx->asid] = NULL;
|
||||
}
|
||||
|
||||
static const struct of_device_id ctx_of_match[] = {
|
||||
{ .compatible = "qcom,msm-iommu-v1-ns" },
|
||||
{ .compatible = "qcom,msm-iommu-v1-sec" },
|
||||
{ .compatible = "qcom,msm-iommu-v2-ns" },
|
||||
{ .compatible = "qcom,msm-iommu-v2-sec" },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
|
||||
|
@ -712,7 +739,8 @@ static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
|
|||
struct device_node *child;
|
||||
|
||||
for_each_child_of_node(qcom_iommu->dev->of_node, child) {
|
||||
if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) {
|
||||
if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec") ||
|
||||
of_device_is_compatible(child, "qcom,msm-iommu-v2-sec")) {
|
||||
of_node_put(child);
|
||||
return true;
|
||||
}
|
||||
|
@ -736,11 +764,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
|
|||
for_each_child_of_node(dev->of_node, child)
|
||||
max_asid = max(max_asid, get_asid(child));
|
||||
|
||||
qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
|
||||
qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid + 1),
|
||||
GFP_KERNEL);
|
||||
if (!qcom_iommu)
|
||||
return -ENOMEM;
|
||||
qcom_iommu->num_ctxs = max_asid;
|
||||
qcom_iommu->max_asid = max_asid;
|
||||
qcom_iommu->dev = dev;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
|
@ -856,6 +884,7 @@ static const struct dev_pm_ops qcom_iommu_pm_ops = {
|
|||
|
||||
static const struct of_device_id qcom_iommu_of_match[] = {
|
||||
{ .compatible = "qcom,msm-iommu-v1" },
|
||||
{ .compatible = "qcom,msm-iommu-v2" },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
|
||||
|
|
|
@ -660,7 +660,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
|
|||
{
|
||||
struct iommu_dma_cookie *cookie = domain->iova_cookie;
|
||||
struct iova_domain *iovad = &cookie->iovad;
|
||||
unsigned long shift, iova_len, iova = 0;
|
||||
unsigned long shift, iova_len, iova;
|
||||
|
||||
if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
|
||||
cookie->msi_iova += size;
|
||||
|
@ -675,15 +675,29 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
|
|||
if (domain->geometry.force_aperture)
|
||||
dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
|
||||
|
||||
/* Try to get PCI devices a SAC address */
|
||||
if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
|
||||
/*
|
||||
* Try to use all the 32-bit PCI addresses first. The original SAC vs.
|
||||
* DAC reasoning loses relevance with PCIe, but enough hardware and
|
||||
* firmware bugs are still lurking out there that it's safest not to
|
||||
* venture into the 64-bit space until necessary.
|
||||
*
|
||||
* If your device goes wrong after seeing the notice then likely either
|
||||
* its driver is not setting DMA masks accurately, the hardware has
|
||||
* some inherent bug in handling >32-bit addresses, or not all the
|
||||
* expected address bits are wired up between the device and the IOMMU.
|
||||
*/
|
||||
if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
|
||||
iova = alloc_iova_fast(iovad, iova_len,
|
||||
DMA_BIT_MASK(32) >> shift, false);
|
||||
if (iova)
|
||||
goto done;
|
||||
|
||||
if (!iova)
|
||||
iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
|
||||
true);
|
||||
dev->iommu->pci_32bit_workaround = false;
|
||||
dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit));
|
||||
}
|
||||
|
||||
iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
|
||||
done:
|
||||
return (dma_addr_t)iova << shift;
|
||||
}
|
||||
|
||||
|
|
|
@ -17,6 +17,10 @@ int iommu_dma_init_fq(struct iommu_domain *domain);
|
|||
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
|
||||
|
||||
extern bool iommu_dma_forcedac;
|
||||
static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
|
||||
{
|
||||
dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
|
||||
}
|
||||
|
||||
#else /* CONFIG_IOMMU_DMA */
|
||||
|
||||
|
@ -38,5 +42,9 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
|
|||
{
|
||||
}
|
||||
|
||||
static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* CONFIG_IOMMU_DMA */
|
||||
#endif /* __DMA_IOMMU_H */
|
||||
|
|
|
@ -114,13 +114,17 @@ static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
|
|||
|
||||
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
|
||||
are never going to work. */
|
||||
static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
|
||||
static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
|
||||
{
|
||||
return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
|
||||
}
|
||||
static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
|
||||
{
|
||||
return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
|
||||
}
|
||||
static inline unsigned long page_to_dma_pfn(struct page *pg)
|
||||
{
|
||||
return mm_to_dma_pfn(page_to_pfn(pg));
|
||||
return mm_to_dma_pfn_start(page_to_pfn(pg));
|
||||
}
|
||||
static inline unsigned long virt_to_dma_pfn(void *p)
|
||||
{
|
||||
|
@ -878,7 +882,7 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
|
|||
}
|
||||
/* For request-without-pasid, get the pasid from context entry */
|
||||
if (intel_iommu_sm && pasid == IOMMU_PASID_INVALID)
|
||||
pasid = PASID_RID2PASID;
|
||||
pasid = IOMMU_NO_PASID;
|
||||
|
||||
dir_index = pasid >> PASID_PDE_SHIFT;
|
||||
pde = &dir[dir_index];
|
||||
|
@ -1360,6 +1364,7 @@ domain_lookup_dev_info(struct dmar_domain *domain,
|
|||
|
||||
static void domain_update_iotlb(struct dmar_domain *domain)
|
||||
{
|
||||
struct dev_pasid_info *dev_pasid;
|
||||
struct device_domain_info *info;
|
||||
bool has_iotlb_device = false;
|
||||
unsigned long flags;
|
||||
|
@ -1371,6 +1376,14 @@ static void domain_update_iotlb(struct dmar_domain *domain)
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
|
||||
info = dev_iommu_priv_get(dev_pasid->dev);
|
||||
if (info->ats_enabled) {
|
||||
has_iotlb_device = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
domain->has_iotlb_device = has_iotlb_device;
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
}
|
||||
|
@ -1450,12 +1463,13 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
|
|||
qdep = info->ats_qdep;
|
||||
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
|
||||
qdep, addr, mask);
|
||||
quirk_extra_dev_tlb_flush(info, addr, mask, PASID_RID2PASID, qdep);
|
||||
quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
|
||||
}
|
||||
|
||||
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
|
||||
u64 addr, unsigned mask)
|
||||
{
|
||||
struct dev_pasid_info *dev_pasid;
|
||||
struct device_domain_info *info;
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -1465,6 +1479,36 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
|
|||
spin_lock_irqsave(&domain->lock, flags);
|
||||
list_for_each_entry(info, &domain->devices, link)
|
||||
__iommu_flush_dev_iotlb(info, addr, mask);
|
||||
|
||||
list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
|
||||
info = dev_iommu_priv_get(dev_pasid->dev);
|
||||
|
||||
if (!info->ats_enabled)
|
||||
continue;
|
||||
|
||||
qi_flush_dev_iotlb_pasid(info->iommu,
|
||||
PCI_DEVID(info->bus, info->devfn),
|
||||
info->pfsid, dev_pasid->pasid,
|
||||
info->ats_qdep, addr,
|
||||
mask);
|
||||
}
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
}
|
||||
|
||||
static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
|
||||
struct dmar_domain *domain, u64 addr,
|
||||
unsigned long npages, bool ih)
|
||||
{
|
||||
u16 did = domain_id_iommu(domain, iommu);
|
||||
struct dev_pasid_info *dev_pasid;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain)
|
||||
qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih);
|
||||
|
||||
if (!list_empty(&domain->devices))
|
||||
qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
}
|
||||
|
||||
|
@ -1485,7 +1529,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
|
|||
ih = 1 << 6;
|
||||
|
||||
if (domain->use_first_level) {
|
||||
qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);
|
||||
domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
|
||||
} else {
|
||||
unsigned long bitmask = aligned_pages - 1;
|
||||
|
||||
|
@ -1555,7 +1599,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
|
|||
u16 did = domain_id_iommu(dmar_domain, iommu);
|
||||
|
||||
if (dmar_domain->use_first_level)
|
||||
qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
|
||||
domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0);
|
||||
else
|
||||
iommu->flush.flush_iotlb(iommu, did, 0, 0,
|
||||
DMA_TLB_DSI_FLUSH);
|
||||
|
@ -1727,6 +1771,7 @@ static struct dmar_domain *alloc_domain(unsigned int type)
|
|||
domain->use_first_level = true;
|
||||
domain->has_iotlb_device = false;
|
||||
INIT_LIST_HEAD(&domain->devices);
|
||||
INIT_LIST_HEAD(&domain->dev_pasids);
|
||||
spin_lock_init(&domain->lock);
|
||||
xa_init(&domain->iommu_array);
|
||||
|
||||
|
@@ -1941,7 +1986,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
context_pdts(pds);

/* Setup the RID_PASID field: */
context_set_sm_rid2pasid(context, PASID_RID2PASID);
context_set_sm_rid2pasid(context, IOMMU_NO_PASID);

/*
* Setup the Device-TLB enable bit and Page request

@@ -2363,8 +2408,8 @@ static int __init si_domain_init(int hw)

for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
ret = iommu_domain_identity_map(si_domain,
mm_to_dma_pfn(start_pfn),
mm_to_dma_pfn(end_pfn));
mm_to_dma_pfn_start(start_pfn),
mm_to_dma_pfn_end(end_pfn));
if (ret)
return ret;
}

@@ -2385,8 +2430,8 @@ static int __init si_domain_init(int hw)
continue;

ret = iommu_domain_identity_map(si_domain,
mm_to_dma_pfn(start >> PAGE_SHIFT),
mm_to_dma_pfn(end >> PAGE_SHIFT));
mm_to_dma_pfn_start(start >> PAGE_SHIFT),
mm_to_dma_pfn_end(end >> PAGE_SHIFT));
if (ret)
return ret;
}

@@ -2421,13 +2466,13 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
/* Setup the PASID entry for requests without PASID: */
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
dev, PASID_RID2PASID);
dev, IOMMU_NO_PASID);
else if (domain->use_first_level)
ret = domain_setup_first_level(iommu, domain, dev,
PASID_RID2PASID);
IOMMU_NO_PASID);
else
ret = intel_pasid_setup_second_level(iommu, domain,
dev, PASID_RID2PASID);
dev, IOMMU_NO_PASID);
if (ret) {
dev_err(dev, "Setup RID2PASID failed\n");
device_block_translation(dev);

@@ -2447,30 +2492,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
return 0;
}

static bool device_has_rmrr(struct device *dev)
{
struct dmar_rmrr_unit *rmrr;
struct device *tmp;
int i;

rcu_read_lock();
for_each_rmrr_units(rmrr) {
/*
* Return TRUE if this RMRR contains the device that
* is passed in.
*/
for_each_active_dev_scope(rmrr->devices,
rmrr->devices_cnt, i, tmp)
if (tmp == dev ||
is_downstream_to_pci_bridge(dev, tmp)) {
rcu_read_unlock();
return true;
}
}
rcu_read_unlock();
return false;
}

/**
* device_rmrr_is_relaxable - Test whether the RMRR of this device
* is relaxable (ie. is allowed to be not enforced under some conditions)

@@ -2500,34 +2521,6 @@ static bool device_rmrr_is_relaxable(struct device *dev)
return false;
}

/*
* There are a couple cases where we need to restrict the functionality of
* devices associated with RMRRs. The first is when evaluating a device for
* identity mapping because problems exist when devices are moved in and out
* of domains and their respective RMRR information is lost. This means that
* a device with associated RMRRs will never be in a "passthrough" domain.
* The second is use of the device through the IOMMU API. This interface
* expects to have full control of the IOVA space for the device. We cannot
* satisfy both the requirement that RMRR access is maintained and have an
* unencumbered IOVA space. We also have no ability to quiesce the device's
* use of the RMRR space or even inform the IOMMU API user of the restriction.
* We therefore prevent devices associated with an RMRR from participating in
* the IOMMU API, which eliminates them from device assignment.
*
* In both cases, devices which have relaxable RMRRs are not concerned by this
* restriction. See device_rmrr_is_relaxable comment.
*/
static bool device_is_rmrr_locked(struct device *dev)
{
if (!device_has_rmrr(dev))
return false;

if (device_rmrr_is_relaxable(dev))
return false;

return true;
}

/*
* Return the required default domain type for a specific device.
*

@@ -3561,8 +3554,8 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
unsigned long val, void *v)
{
struct memory_notify *mhp = v;
unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
unsigned long start_vpfn = mm_to_dma_pfn_start(mhp->start_pfn);
unsigned long last_vpfn = mm_to_dma_pfn_end(mhp->start_pfn +
mhp->nr_pages - 1);

switch (val) {

@@ -3757,7 +3750,6 @@ static int __init probe_acpi_namespace_devices(void)
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev) {
struct acpi_device_physical_node *pn;
struct iommu_group *group;
struct acpi_device *adev;

if (dev->bus != &acpi_bus_type)

@@ -3767,12 +3759,6 @@ static int __init probe_acpi_namespace_devices(void)
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(pn,
&adev->physical_node_list, node) {
group = iommu_group_get(pn->dev);
if (group) {
iommu_group_put(group);
continue;
}

ret = iommu_probe_device(pn->dev);
if (ret)
break;

@@ -3969,7 +3955,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, info->dev,
PASID_RID2PASID, false);
IOMMU_NO_PASID, false);

iommu_disable_pci_caps(info);
domain_context_clear(info);

@@ -3998,7 +3984,7 @@ static void device_block_translation(struct device *dev)
if (!dev_is_real_dma_subdevice(dev)) {
if (sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, dev,
PASID_RID2PASID, false);
IOMMU_NO_PASID, false);
else
domain_context_clear(info);
}

@@ -4140,12 +4126,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device_domain_info *info = dev_iommu_priv_get(dev);
int ret;

if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
device_is_rmrr_locked(dev)) {
dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
return -EPERM;
}

if (info->domain)
device_block_translation(dev);

@@ -4272,7 +4252,7 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
unsigned long i;

nrpages = aligned_nrpages(gather->start, size);
start_pfn = mm_to_dma_pfn(iova_pfn);
start_pfn = mm_to_dma_pfn_start(iova_pfn);

xa_for_each(&dmar_domain->iommu_array, i, info)
iommu_flush_iotlb_psi(info->iommu, dmar_domain,

@@ -4332,7 +4312,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)

list_for_each_entry(info, &domain->devices, link)
intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
PASID_RID2PASID);
IOMMU_NO_PASID);
}

static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)

@@ -4714,23 +4694,96 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct dev_pasid_info *curr, *dev_pasid = NULL;
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
unsigned long flags;

/* Domain type specific cleanup: */
domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
if (domain) {
switch (domain->type) {
case IOMMU_DOMAIN_SVA:
intel_svm_remove_dev_pasid(dev, pasid);
break;
default:
/* should never reach here */
WARN_ON(1);
if (WARN_ON_ONCE(!domain))
goto out_tear_down;

/*
* The SVA implementation needs to handle its own stuffs like the mm
* notification. Before consolidating that code into iommu core, let
* the intel sva code handle it.
*/
if (domain->type == IOMMU_DOMAIN_SVA) {
intel_svm_remove_dev_pasid(dev, pasid);
goto out_tear_down;
}

dmar_domain = to_dmar_domain(domain);
spin_lock_irqsave(&dmar_domain->lock, flags);
list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
if (curr->dev == dev && curr->pasid == pasid) {
list_del(&curr->link_domain);
dev_pasid = curr;
break;
}
}
WARN_ON_ONCE(!dev_pasid);
spin_unlock_irqrestore(&dmar_domain->lock, flags);

domain_detach_iommu(dmar_domain, iommu);
kfree(dev_pasid);
out_tear_down:
intel_pasid_tear_down_entry(iommu, dev, pasid, false);
intel_drain_pasid_prq(dev, pasid);
}

static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu = info->iommu;
struct dev_pasid_info *dev_pasid;
unsigned long flags;
int ret;

if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
return -EOPNOTSUPP;

if (context_copied(iommu, info->bus, info->devfn))
return -EBUSY;

ret = prepare_domain_attach_device(domain, dev);
if (ret)
return ret;

dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL);
if (!dev_pasid)
return -ENOMEM;

ret = domain_attach_iommu(dmar_domain, iommu);
if (ret)
goto out_free;

if (domain_type_is_si(dmar_domain))
ret = intel_pasid_setup_pass_through(iommu, dmar_domain,
dev, pasid);
else if (dmar_domain->use_first_level)
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid);
else
ret = intel_pasid_setup_second_level(iommu, dmar_domain,
dev, pasid);
if (ret)
goto out_detach_iommu;

dev_pasid->dev = dev;
dev_pasid->pasid = pasid;
spin_lock_irqsave(&dmar_domain->lock, flags);
list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
spin_unlock_irqrestore(&dmar_domain->lock, flags);

return 0;
out_detach_iommu:
domain_detach_iommu(dmar_domain, iommu);
out_free:
kfree(dev_pasid);
return ret;
}

static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)

@@ -4770,6 +4823,7 @@ const struct iommu_ops intel_iommu_ops = {
#endif
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = intel_iommu_attach_device,
.set_dev_pasid = intel_iommu_set_dev_pasid,
.map_pages = intel_iommu_map_pages,
.unmap_pages = intel_iommu_unmap_pages,
.iotlb_sync_map = intel_iommu_iotlb_sync_map,

@@ -5006,7 +5060,7 @@ void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
return;

sid = PCI_DEVID(info->bus, info->devfn);
if (pasid == PASID_RID2PASID) {
if (pasid == IOMMU_NO_PASID) {
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
qdep, address, mask);
} else {

@@ -595,6 +595,7 @@ struct dmar_domain {

spinlock_t lock; /* Protect device tracking lists */
struct list_head devices; /* all devices' list */
struct list_head dev_pasids; /* all attached pasids */

struct dma_pte *pgd; /* virtual address */
int gaw; /* max guest address width */

@@ -717,6 +718,12 @@ struct device_domain_info {
struct pasid_table *pasid_table; /* pasid table */
};

struct dev_pasid_info {
struct list_head link_domain; /* link to domain siblings */
struct device *dev;
ioasid_t pasid;
};

static inline void __iommu_flush_cache(
struct intel_iommu *iommu, void *addr, int size)
{

@@ -844,6 +851,7 @@ int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(void);
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
void intel_drain_pasid_prq(struct device *dev, u32 pasid);

struct intel_svm_dev {
struct list_head list;

@@ -862,6 +870,7 @@ struct intel_svm {
};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {}
static inline struct iommu_domain *intel_svm_domain_alloc(void)
{
return NULL;

@@ -129,7 +129,7 @@ int intel_pasid_alloc_table(struct device *dev)
info->pasid_table = pasid_table;

if (!ecap_coherent(info->iommu->ecap))
clflush_cache_range(pasid_table->table, size);
clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE);

return 0;
}

@@ -438,7 +438,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
* SVA usage, device could do DMA with multiple PASIDs. It is more
* efficient to flush devTLB specific to the PASID.
*/
if (pasid == PASID_RID2PASID)
if (pasid == IOMMU_NO_PASID)
qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
else
qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);

@@ -10,8 +10,6 @@
#ifndef __INTEL_PASID_H
#define __INTEL_PASID_H

#define PASID_RID2PASID 0x0
#define PASID_MIN 0x1
#define PASID_MAX 0x100000
#define PASID_PTE_MASK 0x3F
#define PASID_PTE_PRESENT 1

@@ -26,8 +26,6 @@
#include "trace.h"

static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, u32 pasid);
#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)

static DEFINE_XARRAY_ALLOC(pasid_private_array);
static int pasid_private_add(ioasid_t pasid, void *priv)

@@ -259,8 +257,6 @@ static const struct mmu_notifier_ops intel_mmuops = {
.arch_invalidate_secondary_tlbs = intel_arch_invalidate_secondary_tlbs,
};

static DEFINE_MUTEX(pasid_mutex);

static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
struct intel_svm **rsvm,
struct intel_svm_dev **rsdev)

@@ -268,10 +264,6 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
struct intel_svm_dev *sdev = NULL;
struct intel_svm *svm;

/* The caller should hold the pasid_mutex lock */
if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
return -EINVAL;

if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
return -EINVAL;

@@ -371,37 +363,23 @@ free_svm:
return ret;
}

/* Caller must hold pasid_mutex */
static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
{
struct intel_svm_dev *sdev;
struct intel_iommu *iommu;
struct intel_svm *svm;
struct mm_struct *mm;
int ret = -EINVAL;

iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
goto out;
return;

ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
if (ret)
goto out;
if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
return;
mm = svm->mm;

if (sdev) {
list_del_rcu(&sdev->list);
/*
* Flush the PASID cache and IOTLB for this device.
* Note that we do depend on the hardware *not* using
* the PASID any more. Just as we depend on other
* devices never using PASIDs that they have no right
* to use. We have a *shared* PASID table, because it's
* large and has to be physically contiguous. So it's
* hard to be as defensive as we might like.
*/
intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false);
intel_svm_drain_prq(dev, svm->pasid);
kfree_rcu(sdev, rcu);

if (list_empty(&svm->devs)) {

@@ -418,8 +396,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree(svm);
}
}
out:
return ret;
}

/* Page request queue descriptor */

@@ -460,7 +436,7 @@ static bool is_canonical_address(u64 addr)
}

/**
* intel_svm_drain_prq - Drain page requests and responses for a pasid
* intel_drain_pasid_prq - Drain page requests and responses for a pasid
* @dev: target device
* @pasid: pasid for draining
*

@@ -474,7 +450,7 @@ static bool is_canonical_address(u64 addr)
* described in VT-d spec CH7.10 to drain all page requests and page
* responses pending in the hardware.
*/
static void intel_svm_drain_prq(struct device *dev, u32 pasid)
void intel_drain_pasid_prq(struct device *dev, u32 pasid)
{
struct device_domain_info *info;
struct dmar_domain *domain;

@@ -520,19 +496,7 @@ prq_retry:
goto prq_retry;
}

/*
* A work in IO page fault workqueue may try to lock pasid_mutex now.
* Holding pasid_mutex while waiting in iopf_queue_flush_dev() for
* all works in the workqueue to finish may cause deadlock.
*
* It's unnecessary to hold pasid_mutex in iopf_queue_flush_dev().
* Unlock it to allow the works to be handled while waiting for
* them to finish.
*/
lockdep_assert_held(&pasid_mutex);
mutex_unlock(&pasid_mutex);
iopf_queue_flush_dev(dev);
mutex_lock(&pasid_mutex);

/*
* Perform steps described in VT-d spec CH7.10 to drain page

@@ -827,26 +791,14 @@ out:
return ret;
}

void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
mutex_lock(&pasid_mutex);
intel_svm_unbind_mm(dev, pasid);
mutex_unlock(&pasid_mutex);
}

static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct mm_struct *mm = domain->mm;
int ret;

mutex_lock(&pasid_mutex);
ret = intel_svm_bind_mm(iommu, dev, mm);
mutex_unlock(&pasid_mutex);

return ret;
return intel_svm_bind_mm(iommu, dev, mm);
}

static void intel_svm_domain_free(struct iommu_domain *domain)

@@ -10,34 +10,30 @@
#include "iommu-sva.h"

static DEFINE_MUTEX(iommu_sva_lock);
static DEFINE_IDA(iommu_global_pasid_ida);

/* Allocate a PASID for the mm within range (inclusive) */
static int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
{
ioasid_t pasid;
int ret = 0;

if (min == IOMMU_PASID_INVALID ||
max == IOMMU_PASID_INVALID ||
min == 0 || max < min)
return -EINVAL;

if (!arch_pgtable_dma_compat(mm))
return -EBUSY;

mutex_lock(&iommu_sva_lock);
/* Is a PASID already associated with this mm? */
if (mm_valid_pasid(mm)) {
if (mm->pasid < min || mm->pasid > max)
if (mm->pasid >= dev->iommu->max_pasids)
ret = -EOVERFLOW;
goto out;
}

ret = ida_alloc_range(&iommu_global_pasid_ida, min, max, GFP_KERNEL);
if (ret < 0)
pasid = iommu_alloc_global_pasid(dev);
if (pasid == IOMMU_PASID_INVALID) {
ret = -ENOSPC;
goto out;

mm->pasid = ret;
}
mm->pasid = pasid;
ret = 0;
out:
mutex_unlock(&iommu_sva_lock);

@@ -64,15 +60,10 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
{
struct iommu_domain *domain;
struct iommu_sva *handle;
ioasid_t max_pasids;
int ret;

max_pasids = dev->iommu->max_pasids;
if (!max_pasids)
return ERR_PTR(-EOPNOTSUPP);

/* Allocate mm->pasid if necessary. */
ret = iommu_sva_alloc_pasid(mm, 1, max_pasids - 1);
ret = iommu_sva_alloc_pasid(mm, dev);
if (ret)
return ERR_PTR(ret);

@@ -217,5 +208,5 @@ void mm_pasid_drop(struct mm_struct *mm)
if (likely(!mm_valid_pasid(mm)))
return;

ida_free(&iommu_global_pasid_ida, mm->pasid);
iommu_free_global_pasid(mm->pasid);
}

@@ -107,9 +107,6 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
{
int ret;

if (!iommu || IS_ERR(iommu))
return -ENODEV;

ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
&link->kobj, dev_name(link));
if (ret)

@@ -122,14 +119,9 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)

return ret;
}
EXPORT_SYMBOL_GPL(iommu_device_link);

void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
{
if (!iommu || IS_ERR(iommu))
return;

sysfs_remove_link(&link->kobj, "iommu");
sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
}
EXPORT_SYMBOL_GPL(iommu_device_unlink);

@@ -41,6 +41,7 @@

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static DEFINE_IDA(iommu_global_pasid_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);

@@ -129,9 +130,12 @@ static int iommu_setup_default_domain(struct iommu_group *group,
int target_type);
static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
const char *buf, size_t count);
static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
struct device *dev);
static void __iommu_group_free_device(struct iommu_group *group,
struct group_device *grp_dev);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \

@@ -377,12 +381,110 @@ static u32 dev_iommu_get_max_pasids(struct device *dev)
return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}

/*
* Init the dev->iommu and dev->iommu_group in the struct device and get the
* driver probed
*/
static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
{
struct iommu_device *iommu_dev;
struct iommu_group *group;
int ret;

if (!dev_iommu_get(dev))
return -ENOMEM;

if (!try_module_get(ops->owner)) {
ret = -EINVAL;
goto err_free;
}

iommu_dev = ops->probe_device(dev);
if (IS_ERR(iommu_dev)) {
ret = PTR_ERR(iommu_dev);
goto err_module_put;
}

ret = iommu_device_link(iommu_dev, dev);
if (ret)
goto err_release;

group = ops->device_group(dev);
if (WARN_ON_ONCE(group == NULL))
group = ERR_PTR(-EINVAL);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto err_unlink;
}
dev->iommu_group = group;

dev->iommu->iommu_dev = iommu_dev;
dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
if (ops->is_attach_deferred)
dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
return 0;

err_unlink:
iommu_device_unlink(iommu_dev, dev);
err_release:
if (ops->release_device)
ops->release_device(dev);
err_module_put:
module_put(ops->owner);
err_free:
dev_iommu_free(dev);
return ret;
}

static void iommu_deinit_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
const struct iommu_ops *ops = dev_iommu_ops(dev);

lockdep_assert_held(&group->mutex);

iommu_device_unlink(dev->iommu->iommu_dev, dev);

/*
* release_device() must stop using any attached domain on the device.
* If there are still other devices in the group they are not effected
* by this callback.
*
* The IOMMU driver must set the device to either an identity or
* blocking translation and stop using any domain pointer, as it is
* going to be freed.
*/
if (ops->release_device)
ops->release_device(dev);

/*
* If this is the last driver to use the group then we must free the
* domains before we do the module_put().
*/
if (list_empty(&group->devices)) {
if (group->default_domain) {
iommu_domain_free(group->default_domain);
group->default_domain = NULL;
}
if (group->blocking_domain) {
iommu_domain_free(group->blocking_domain);
group->blocking_domain = NULL;
}
group->domain = NULL;
}

/* Caller must put iommu_group */
dev->iommu_group = NULL;
module_put(ops->owner);
dev_iommu_free(dev);
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_device *iommu_dev;
struct iommu_group *group;
static DEFINE_MUTEX(iommu_probe_device_lock);
struct group_device *gdev;
int ret;

if (!ops)

@@ -395,55 +497,66 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
* but for now enforcing a simple global ordering is fine.
*/
mutex_lock(&iommu_probe_device_lock);
if (!dev_iommu_get(dev)) {
ret = -ENOMEM;
goto err_unlock;

/* Device is probed already if in a group */
if (dev->iommu_group) {
ret = 0;
goto out_unlock;
}

if (!try_module_get(ops->owner)) {
ret = -EINVAL;
goto err_free;
}

iommu_dev = ops->probe_device(dev);
if (IS_ERR(iommu_dev)) {
ret = PTR_ERR(iommu_dev);
goto out_module_put;
}

dev->iommu->iommu_dev = iommu_dev;
dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
if (ops->is_attach_deferred)
dev->iommu->attach_deferred = ops->is_attach_deferred(dev);

group = iommu_group_get_for_dev(dev);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_release;
}
ret = iommu_init_device(dev, ops);
if (ret)
goto out_unlock;

group = dev->iommu_group;
gdev = iommu_group_alloc_device(group, dev);
mutex_lock(&group->mutex);
if (group_list && !group->default_domain && list_empty(&group->entry))
list_add_tail(&group->entry, group_list);
mutex_unlock(&group->mutex);
iommu_group_put(group);
if (IS_ERR(gdev)) {
ret = PTR_ERR(gdev);
goto err_put_group;
}

/*
* The gdev must be in the list before calling
* iommu_setup_default_domain()
*/
list_add_tail(&gdev->list, &group->devices);
WARN_ON(group->default_domain && !group->domain);
if (group->default_domain)
iommu_create_device_direct_mappings(group->default_domain, dev);
if (group->domain) {
ret = __iommu_device_set_domain(group, dev, group->domain, 0);
if (ret)
goto err_remove_gdev;
} else if (!group->default_domain && !group_list) {
ret = iommu_setup_default_domain(group, 0);
if (ret)
goto err_remove_gdev;
} else if (!group->default_domain) {
/*
* With a group_list argument we defer the default_domain setup
* to the caller by providing a de-duplicated list of groups
* that need further setup.
*/
if (list_empty(&group->entry))
list_add_tail(&group->entry, group_list);
}
mutex_unlock(&group->mutex);
mutex_unlock(&iommu_probe_device_lock);
iommu_device_link(iommu_dev, dev);

if (dev_is_pci(dev))
iommu_dma_set_pci_32bit_workaround(dev);

return 0;

out_release:
if (ops->release_device)
ops->release_device(dev);

out_module_put:
module_put(ops->owner);

err_free:
dev_iommu_free(dev);

err_unlock:
err_remove_gdev:
list_del(&gdev->list);
__iommu_group_free_device(group, gdev);
err_put_group:
iommu_deinit_device(dev);
mutex_unlock(&group->mutex);
iommu_group_put(group);
out_unlock:
mutex_unlock(&iommu_probe_device_lock);

return ret;

@@ -452,80 +565,21 @@ err_unlock:
int iommu_probe_device(struct device *dev)
{
const struct iommu_ops *ops;
struct iommu_group *group;
int ret;

ret = __iommu_probe_device(dev, NULL);
if (ret)
goto err_out;

group = iommu_group_get(dev);
if (!group) {
ret = -ENODEV;
goto err_release;
}

mutex_lock(&group->mutex);

if (group->default_domain)
iommu_create_device_direct_mappings(group->default_domain, dev);

if (group->domain) {
ret = __iommu_device_set_domain(group, dev, group->domain, 0);
if (ret)
goto err_unlock;
} else if (!group->default_domain) {
ret = iommu_setup_default_domain(group, 0);
if (ret)
goto err_unlock;
}

mutex_unlock(&group->mutex);
iommu_group_put(group);
return ret;

ops = dev_iommu_ops(dev);
if (ops->probe_finalize)
ops->probe_finalize(dev);

return 0;

err_unlock:
mutex_unlock(&group->mutex);
iommu_group_put(group);
err_release:
iommu_release_device(dev);

err_out:
return ret;

}

/*
* Remove a device from a group's device list and return the group device
* if successful.
*/
static struct group_device *
__iommu_group_remove_device(struct iommu_group *group, struct device *dev)
{
struct group_device *device;

lockdep_assert_held(&group->mutex);
for_each_group_device(group, device) {
if (device->dev == dev) {
list_del(&device->list);
return device;
}
}

return NULL;
}

/*
* Release a device from its group and decrements the iommu group reference
* count.
*/
static void __iommu_group_release_device(struct iommu_group *group,
struct group_device *grp_dev)
static void __iommu_group_free_device(struct iommu_group *group,
struct group_device *grp_dev)
{
struct device *dev = grp_dev->dev;

@@ -534,54 +588,57 @@ static void __iommu_group_release_device(struct iommu_group *group,

trace_remove_device_from_group(group->id, dev);

kfree(grp_dev->name);
kfree(grp_dev);
dev->iommu_group = NULL;
kobject_put(group->devices_kobj);
}

static void iommu_release_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
struct group_device *device;
const struct iommu_ops *ops;

if (!dev->iommu || !group)
return;

iommu_device_unlink(dev->iommu->iommu_dev, dev);

mutex_lock(&group->mutex);
device = __iommu_group_remove_device(group, dev);

/*
* If the group has become empty then ownership must have been released,
* and the current domain must be set back to NULL or the default
* domain.
* If the group has become empty then ownership must have been
* released, and the current domain must be set back to NULL or
* the default domain.
*/
if (list_empty(&group->devices))
WARN_ON(group->owner_cnt ||
group->domain != group->default_domain);

/*
* release_device() must stop using any attached domain on the device.
* If there are still other devices in the group they are not effected
* by this callback.
*
* The IOMMU driver must set the device to either an identity or
* blocking translation and stop using any domain pointer, as it is
* going to be freed.
*/
ops = dev_iommu_ops(dev);
if (ops->release_device)
ops->release_device(dev);
kfree(grp_dev->name);
kfree(grp_dev);
}

/* Remove the iommu_group from the struct device. */
static void __iommu_group_remove_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
struct group_device *device;

mutex_lock(&group->mutex);
for_each_group_device(group, device) {
if (device->dev != dev)
continue;

list_del(&device->list);
__iommu_group_free_device(group, device);
if (dev->iommu && dev->iommu->iommu_dev)
iommu_deinit_device(dev);
else
dev->iommu_group = NULL;
break;
}
mutex_unlock(&group->mutex);

if (device)
__iommu_group_release_device(group, device);
/*
* Pairs with the get in iommu_init_device() or
* iommu_group_add_device()
*/
iommu_group_put(group);
}

module_put(ops->owner);
dev_iommu_free(dev);
static void iommu_release_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;

if (group)
__iommu_group_remove_device(dev);

/* Free any fwspec if no iommu_driver was ever attached */
if (dev->iommu)
dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)

@@ -842,10 +899,9 @@ static void iommu_group_release(struct kobject *kobj)

ida_free(&iommu_group_ida, group->id);

if (group->default_domain)
iommu_domain_free(group->default_domain);
if (group->blocking_domain)
iommu_domain_free(group->blocking_domain);
/* Domains are free'd by iommu_deinit_device() */
WARN_ON(group->default_domain);
WARN_ON(group->blocking_domain);

kfree(group->name);
kfree(group);

@@ -1003,14 +1059,12 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
unsigned long pg_size;
int ret = 0;

if (!iommu_is_dma_domain(domain))
return 0;

BUG_ON(!domain->pgsize_bitmap);

pg_size = 1UL << __ffs(domain->pgsize_bitmap);
pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0;
INIT_LIST_HEAD(&mappings);

if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size))
return -EINVAL;

iommu_get_resv_regions(dev, &mappings);

/* We need to consider overlapping regions for different devices */

@@ -1018,13 +1072,17 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
dma_addr_t start, end, addr;
size_t map_size = 0;

if (entry->type == IOMMU_RESV_DIRECT)
dev->iommu->require_direct = 1;

if ((entry->type != IOMMU_RESV_DIRECT &&
entry->type != IOMMU_RESV_DIRECT_RELAXABLE) ||
!iommu_is_dma_domain(domain))
continue;

start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);

if (entry->type != IOMMU_RESV_DIRECT &&
entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
continue;

for (addr = start; addr <= end; addr += pg_size) {
phys_addr_t phys_addr;

@@ -1058,22 +1116,16 @@ out:
return ret;
}

/**
* iommu_group_add_device - add a device to an iommu group
* @group: the group into which to add the device (reference should be held)
* @dev: the device
*
* This function is called by an iommu driver to add a device into a
* group. Adding a device increments the group reference count.
*/
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
/* This is undone by __iommu_group_free_device() */
static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
struct device *dev)
{
int ret, i = 0;
struct group_device *device;

device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device)
return -ENOMEM;
return ERR_PTR(-ENOMEM);

device->dev = dev;

@@ -1104,18 +1156,11 @@ rename:
goto err_free_name;
}

kobject_get(group->devices_kobj);

dev->iommu_group = group;

mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
mutex_unlock(&group->mutex);
trace_add_device_to_group(group->id, dev);

dev_info(dev, "Adding to iommu group %d\n", group->id);

return 0;
return device;

err_free_name:
kfree(device->name);

@@ -1124,7 +1169,32 @@ err_remove_link:
err_free_device:
kfree(device);
dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
return ret;
return ERR_PTR(ret);
}

/**
* iommu_group_add_device - add a device to an iommu group
* @group: the group into which to add the device (reference should be held)
* @dev: the device
*
* This function is called by an iommu driver to add a device into a
* group. Adding a device increments the group reference count.
*/
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
struct group_device *gdev;

gdev = iommu_group_alloc_device(group, dev);
if (IS_ERR(gdev))
return PTR_ERR(gdev);

iommu_group_ref_get(group);
dev->iommu_group = group;

mutex_lock(&group->mutex);
list_add_tail(&gdev->list, &group->devices);
mutex_unlock(&group->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

@@ -1138,19 +1208,13 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device);
void iommu_group_remove_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
struct group_device *device;

if (!group)
return;

dev_info(dev, "Removing from iommu group %d\n", group->id);

mutex_lock(&group->mutex);
device = __iommu_group_remove_device(group, dev);
mutex_unlock(&group->mutex);

if (device)
__iommu_group_release_device(group, device);
__iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

@@ -1708,45 +1772,6 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
return dom;
}

/**
* iommu_group_get_for_dev - Find or create the IOMMU group for a device
* @dev: target device
*
* This function is intended to be called by IOMMU drivers and extended to
* support common, bus-defined algorithms when determining or creating the
* IOMMU group for a device. On success, the caller will hold a reference
* to the returned IOMMU group, which will already include the provided
* device. The reference should be released with iommu_group_put().
*/
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_group *group;
int ret;

group = iommu_group_get(dev);
if (group)
return group;

group = ops->device_group(dev);
if (WARN_ON_ONCE(group == NULL))
return ERR_PTR(-EINVAL);

if (IS_ERR(group))
return group;

ret = iommu_group_add_device(group, dev);
if (ret)
goto out_put_group;

return group;

out_put_group:
iommu_group_put(group);

return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
return group->default_domain;

@@ -1755,16 +1780,8 @@ struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
static int probe_iommu_group(struct device *dev, void *data)
{
struct list_head *group_list = data;
struct iommu_group *group;
int ret;

/* Device is probed already if in a group */
group = iommu_group_get(dev);
if (group) {
iommu_group_put(group);
return 0;
}

ret = __iommu_probe_device(dev, group_list);
if (ret == -ENODEV)
ret = 0;

@@ -1840,11 +1857,6 @@ int bus_iommu_probe(const struct bus_type *bus)
LIST_HEAD(group_list);
int ret;

/*
* This code-path does not allocate the default domain when
* creating the iommu group, so do it after the groups are
* created.
*/
ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
if (ret)
return ret;

@@ -1857,6 +1869,11 @@ int bus_iommu_probe(const struct bus_type *bus)
/* Remove item from the list */
list_del_init(&group->entry);

/*
* We go to the trouble of deferred default domain creation so
* that the cross-group default domain type and the setup of the
* IOMMU_RESV_DIRECT will work correctly in non-hotpug scenarios.
*/
ret = iommu_setup_default_domain(group, 0);
if (ret) {
mutex_unlock(&group->mutex);

@@ -2191,6 +2208,21 @@ static int __iommu_device_set_domain(struct iommu_group *group,
{
int ret;

/*
* If the device requires IOMMU_RESV_DIRECT then we cannot allow
* the blocking domain to be attached as it does not contain the
* required 1:1 mapping. This test effectively excludes the device
* being used with iommu_group_claim_dma_owner() which will block
* vfio and iommufd as well.
*/
if (dev->iommu->require_direct &&
(new_domain->type == IOMMU_DOMAIN_BLOCKED ||
new_domain == group->blocking_domain)) {
dev_warn(dev,
"Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n");
return -EINVAL;
}

if (dev->iommu->attach_deferred) {
if (new_domain == group->default_domain)
return 0;

@@ -3282,7 +3314,7 @@ static void __iommu_release_dma_ownership(struct iommu_group *group)

/**
* iommu_group_release_dma_owner() - Release DMA ownership of a group
* @dev: The device
* @group: The group
*
* Release the DMA ownership claimed by iommu_group_claim_dma_owner().
*/

@@ -3296,7 +3328,7 @@ EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);

/**
* iommu_device_release_dma_owner() - Release DMA ownership of a device
* @group: The device.
* @dev: The device.
*
* Release the DMA ownership claimed by iommu_device_claim_dma_owner().
*/

@@ -3479,3 +3511,30 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,

return domain;
}

ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
int ret;

/* max_pasids == 0 means that the device does not support PASID */
if (!dev->iommu->max_pasids)
return IOMMU_PASID_INVALID;

/*
* max_pasids is set up by vendor driver based on number of PASID bits
* supported but the IDA allocation is inclusive.
*/
ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID,
dev->iommu->max_pasids - 1, GFP_KERNEL);
return ret < 0 ? IOMMU_PASID_INVALID : ret;
}
EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid);

void iommu_free_global_pasid(ioasid_t pasid)
{
if (WARN_ON(pasid == IOMMU_PASID_INVALID))
return;

ida_free(&iommu_global_pasid_ida, pasid);
}
EXPORT_SYMBOL_GPL(iommu_free_global_pasid);

@@ -14,11 +14,12 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

@@ -253,17 +254,13 @@ static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
unsigned int count = 0;
u32 val;

while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
cpu_relax();
if (++count == TLB_LOOP_TIMEOUT) {
dev_err_ratelimited(domain->mmu->dev,
if (read_poll_timeout_atomic(ipmmu_ctx_read_root, val,
!(val & IMCTR_FLUSH), 1, TLB_LOOP_TIMEOUT,
false, domain, IMCTR))
dev_err_ratelimited(domain->mmu->dev,
"TLB sync timed out -- MMU may be deadlocked\n");
return;
}
udelay(1);
}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)

@@ -723,6 +720,10 @@ static bool ipmmu_device_is_allowed(struct device *dev)
if (soc_device_match(soc_denylist))
return false;

/* Check whether this device is a PCI device */
if (dev_is_pci(dev))
return true;

/* Check whether this device can work with the IPMMU */
for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
if (!strcmp(dev_name(dev), devices_allowlist[i]))

@@ -3,6 +3,7 @@
* Copyright (c) 2015-2016 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>

@@ -27,6 +28,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/soc/mediatek/infracfg.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

@@ -143,6 +145,7 @@
#define PGTABLE_PA_35_EN BIT(17)
#define TF_PORT_TO_ADDR_MT8173 BIT(18)
#define INT_ID_PORT_WIDTH_6 BIT(19)
#define CFG_IFA_MASTER_IN_ATF BIT(20)

#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))

@@ -167,6 +170,7 @@ enum mtk_iommu_plat {
M4U_MT8173,
M4U_MT8183,
M4U_MT8186,
M4U_MT8188,
M4U_MT8192,
M4U_MT8195,
M4U_MT8365,

@@ -258,6 +262,8 @@ struct mtk_iommu_data {
struct device *smicomm_dev;

struct mtk_iommu_bank_data *bank;
struct mtk_iommu_domain *share_dom; /* For 2 HWs share pgtable */

struct regmap *pericfg;
struct mutex mutex; /* Protect m4u_group/m4u_dom above */

@@ -577,41 +583,55 @@ static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
unsigned int larbid, portid;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
const struct mtk_iommu_iova_region *region;
u32 peri_mmuen, peri_mmuen_msk;
unsigned long portid_msk = 0;
struct arm_smccc_res res;
int i, ret = 0;

for (i = 0; i < fwspec->num_ids; ++i) {
larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
portid_msk |= BIT(portid);
}

if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
larb_mmu = &data->larb_imu[larbid];
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
/* All ports should be in the same larb. just use 0 here */
larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
larb_mmu = &data->larb_imu[larbid];
region = data->plat_data->iova_region + regionid;

region = data->plat_data->iova_region + regionid;
for_each_set_bit(portid, &portid_msk, 32)
larb_mmu->bank[portid] = upper_32_bits(region->iova_base);

dev_dbg(dev, "%s iommu for larb(%s) port %d region %d rgn-bank %d.\n",
enable ? "enable" : "disable", dev_name(larb_mmu->dev),
portid, regionid, larb_mmu->bank[portid]);
dev_dbg(dev, "%s iommu for larb(%s) port 0x%lx region %d rgn-bank %d.\n",
enable ? "enable" : "disable", dev_name(larb_mmu->dev),
portid_msk, regionid, upper_32_bits(region->iova_base));

if (enable)
larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
else
larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
} else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
peri_mmuen_msk = BIT(portid);
if (enable)
larb_mmu->mmu |= portid_msk;
else
larb_mmu->mmu &= ~portid_msk;
} else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
if (MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) {
arm_smccc_smc(MTK_SIP_KERNEL_IOMMU_CONTROL,
IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU,
portid_msk, enable, 0, 0, 0, 0, &res);
ret = res.a0;
} else {
/* PCI dev has only one output id, enable the next writing bit for PCIe */
if (dev_is_pci(dev))
peri_mmuen_msk |= BIT(portid + 1);
if (dev_is_pci(dev)) {
if (fwspec->num_ids != 1) {
dev_err(dev, "PCI dev can only have one port.\n");
return -ENODEV;
}
portid_msk |= BIT(portid + 1);
}

peri_mmuen = enable ? peri_mmuen_msk : 0;
ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1,
peri_mmuen_msk, peri_mmuen);
if (ret)
dev_err(dev, "%s iommu(%s) inframaster 0x%x fail(%d).\n",
enable ? "enable" : "disable",
dev_name(data->dev), peri_mmuen_msk, ret);
(u32)portid_msk, enable ? (u32)portid_msk : 0);
}
if (ret)
dev_err(dev, "%s iommu(%s) inframaster 0x%lx fail(%d).\n",
enable ? "enable" : "disable",
dev_name(data->dev), portid_msk, ret);
}
return ret;
}

@@ -620,15 +640,14 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
struct mtk_iommu_data *data,
unsigned int region_id)
{
struct mtk_iommu_domain *share_dom = data->share_dom;
const struct mtk_iommu_iova_region *region;
struct mtk_iommu_domain *m4u_dom;

/* Always use bank0 in sharing pgtable case */
m4u_dom = data->bank[0].m4u_dom;
if (m4u_dom) {
dom->iop = m4u_dom->iop;
dom->cfg = m4u_dom->cfg;
dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap;
/* Always use share domain in sharing pgtable case */
if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) {
dom->iop = share_dom->iop;
dom->cfg = share_dom->cfg;
dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;
goto update_iova_region;
}

@@ -658,6 +677,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
/* Update our support page sizes bitmap */
dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;

if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE))
data->share_dom = dom;

update_iova_region:
/* Update the iova region for this domain */
region = data->plat_data->iova_region + region_id;

@@ -708,7 +730,9 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
/* Data is in the frstdata in sharing pgtable case. */
frstdata = mtk_iommu_get_frst_data(hw_list);

mutex_lock(&frstdata->mutex);
ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
mutex_unlock(&frstdata->mutex);
if (ret) {
mutex_unlock(&dom->mutex);
return ret;

@@ -1318,7 +1342,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
dev_err_probe(dev, ret, "mm dts parse fail\n");
goto out_runtime_disable;
}
} else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
} else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
!MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) {
p = data->plat_data->pericfg_comp_str;
data->pericfg = syscon_regmap_lookup_by_compatible(p);
if (IS_ERR(data->pericfg)) {

@@ -1570,6 +1595,67 @@ static const struct mtk_iommu_plat_data mt8186_data_mm = {
.iova_region_larb_msk = mt8186_larb_region_msk,
};

static const struct mtk_iommu_plat_data mt8188_data_infra = {
.m4u_plat = M4U_MT8188,
.flags = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO |
MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT |
PGTABLE_PA_35_EN | CFG_IFA_MASTER_IN_ATF,
.inv_sel_reg = REG_MMU_INV_SEL_GEN2,
.banks_num = 1,
.banks_enable = {true},
.iova_region = single_domain,
.iova_region_nr = ARRAY_SIZE(single_domain),
};

static const u32 mt8188_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
[0] = {~0, ~0, ~0, ~0}, /* Region0: all ports for larb0/1/2/3 */
[1] = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, ~0, ~0, ~0}, /* Region1: larb19(21)/21(22)/23 */
[2] = {0, 0, 0, 0, ~0, ~0, ~0, ~0, /* Region2: the other larbs. */
~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0,
~0, ~0, ~0, ~0, ~0, 0, 0, 0,
0, ~0},
[3] = {0},
[4] = {[24] = BIT(0) | BIT(1)}, /* Only larb27(24) port0/1 */
[5] = {[24] = BIT(2) | BIT(3)}, /* Only larb27(24) port2/3 */
};

static const struct mtk_iommu_plat_data mt8188_data_vdo = {
.m4u_plat = M4U_MT8188,
.flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE |
PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM,
.hw_list = &m4ulist,
.inv_sel_reg = REG_MMU_INV_SEL_GEN2,
.banks_num = 1,
.banks_enable = {true},
.iova_region = mt8192_multi_dom,
.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
.iova_region_larb_msk = mt8188_larb_region_msk,
.larbid_remap = {{2}, {0}, {21}, {0}, {19}, {9, 10,
11 /* 11a */, 25 /* 11c */},
{13, 0, 29 /* 16b */, 30 /* 17b */, 0}, {5}},
};

static const struct mtk_iommu_plat_data mt8188_data_vpp = {
.m4u_plat = M4U_MT8188,
.flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE |
PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM,
.hw_list = &m4ulist,
.inv_sel_reg = REG_MMU_INV_SEL_GEN2,
.banks_num = 1,
.banks_enable = {true},
.iova_region = mt8192_multi_dom,
.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
.iova_region_larb_msk = mt8188_larb_region_msk,
.larbid_remap = {{1}, {3}, {23}, {7}, {MTK_INVALID_LARBID},
{12, 15, 24 /* 11b */}, {14, MTK_INVALID_LARBID,
16 /* 16a */, 17 /* 17a */, MTK_INVALID_LARBID,
27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}},
};

static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
[0] = {~0, ~0}, /* Region0: larb0/1 */
[1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */

@ -1678,6 +1764,9 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
|
|||
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
|
||||
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
|
||||
{ .compatible = "mediatek,mt8186-iommu-mm", .data = &mt8186_data_mm}, /* mm: m4u */
|
||||
{ .compatible = "mediatek,mt8188-iommu-infra", .data = &mt8188_data_infra},
|
||||
{ .compatible = "mediatek,mt8188-iommu-vdo", .data = &mt8188_data_vdo},
|
||||
{ .compatible = "mediatek,mt8188-iommu-vpp", .data = &mt8188_data_vpp},
|
||||
{ .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data},
|
||||
{ .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra},
|
||||
{ .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo},
|
||||
|
|
|
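Editorial aside, not part of the patch set: each row of mt8188_larb_region_msk above describes one IOVA region, and each entry is a per-larb bitmask of the ports that belong to that region (see the inline comments). The standalone sketch below only illustrates how such a table is meant to be read; the trimmed table excerpt and the find_region() helper are made-up names for this note, not driver code.

/* Illustration only: reading a larb/port region-mask table like the one above. */
#include <stdint.h>
#include <stdio.h>

#define REGION_NR 6
#define LARB_NR   32
#define BIT(n)    (1U << (n))

static const uint32_t larb_region_msk[REGION_NR][LARB_NR] = {
	[0] = { ~0u, ~0u, ~0u, ~0u },		/* Region0: all ports of larb0..3 */
	[4] = { [24] = BIT(0) | BIT(1) },	/* Region4: larb27 (sw index 24), ports 0/1 */
	[5] = { [24] = BIT(2) | BIT(3) },	/* Region5: larb27 (sw index 24), ports 2/3 */
};

/* Return the first region whose mask covers this (larb, port), or -1. */
static int find_region(unsigned int larb, unsigned int port)
{
	for (int r = 0; r < REGION_NR; r++)
		if (larb_region_msk[r][larb] & BIT(port))
			return r;
	return -1;
}

int main(void)
{
	printf("larb24 port1 -> region %d\n", find_region(24, 1));	/* 4 */
	printf("larb0  port5 -> region %d\n", find_region(0, 5));	/* 0 */
	return 0;
}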
@@ -159,7 +159,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
	 * If we have reason to believe the IOMMU driver missed the initial
	 * probe for dev, replay it to get things in order.
	 */
	if (!err && dev->bus && !device_iommu_mapped(dev))
	if (!err && dev->bus)
		err = iommu_probe_device(dev);

	/* Ignore all other errors apart from EPROBE_DEFER */
@@ -98,9 +98,8 @@ struct rk_iommu_ops {
	phys_addr_t (*pt_address)(u32 dte);
	u32 (*mk_dtentries)(dma_addr_t pt_dma);
	u32 (*mk_ptentries)(phys_addr_t page, int prot);
	phys_addr_t (*dte_addr_phys)(u32 addr);
	u32 (*dma_addr_dte)(dma_addr_t dt_dma);
	u64 dma_bit_mask;
	gfp_t gfp_flags;
};

struct rk_iommu {

@@ -278,8 +277,8 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
/*
 * In v2:
 * 31:12 - Page address bit 31:0
 * 11:9 - Page address bit 34:32
 * 8:4 - Page address bit 39:35
 * 11: 8 - Page address bit 35:32
 * 7: 4 - Page address bit 39:36
 * 3 - Security
 * 2 - Writable
 * 1 - Readable
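Editorial aside, not part of the patch set: the corrected comment above describes how the v2 descriptor packs a 40-bit page address into a 32-bit entry (low page-address bits in 31:12, physical-address bits 35:32 in 11:8, bits 39:36 in 7:4, flags in the low nibble). The standalone fragment below only illustrates that bit layout; rk_mk_pte_v2() in the driver remains the authoritative implementation, and mk_pte_v2() here is an invented name.

/* Illustration only: packing a 40-bit page address per the v2 layout above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mk_pte_v2(uint64_t page, uint32_t prot_bits)
{
	uint32_t pte;

	pte  = (uint32_t)(page & 0xfffff000ULL);	/* PA 31:12 -> bits 31:12 */
	pte |= (uint32_t)((page >> 32) & 0xf) << 8;	/* PA 35:32 -> bits 11:8  */
	pte |= (uint32_t)((page >> 36) & 0xf) << 4;	/* PA 39:36 -> bits 7:4   */
	return pte | (prot_bits & 0xf);			/* flags in bits 3:0      */
}

int main(void)
{
	uint64_t pa = 0x2345678000ULL;	/* 40-bit, 4 KiB aligned example address */

	printf("pte=0x%08x\n", mk_pte_v2(pa, 0x7));	/* prints pte=0x45678327 */
	return 0;
}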
@@ -506,7 +505,7 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that upper 5 nybbles are read back.
	 * and verifying that upper 5 (v1) or 7 (v2) nybbles are read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);

@@ -531,33 +530,6 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
	return 0;
}

static inline phys_addr_t rk_dte_addr_phys(u32 addr)
{
	return (phys_addr_t)addr;
}

static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
{
	return dt_dma;
}

#define DT_HI_MASK GENMASK_ULL(39, 32)
#define DTE_BASE_HI_MASK GENMASK(11, 4)
#define DT_SHIFT 28

static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
{
	u64 addr64 = addr;
	return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
	       ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
}

static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
{
	return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
	       ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];

@@ -577,7 +549,7 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);
	mmu_dte_addr_phys = rk_ops->pt_address(mmu_dte_addr);

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);

@@ -756,7 +728,7 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

@@ -967,7 +939,7 @@ static int rk_iommu_enable(struct rk_iommu *iommu)

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_ops->dma_addr_dte(rk_domain->dt_dma));
			       rk_ops->mk_dtentries(rk_domain->dt_dma));
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

@@ -1105,7 +1077,7 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
	if (!rk_domain->dt)
		goto err_free_domain;

@@ -1405,18 +1377,16 @@ static struct rk_iommu_ops iommu_data_ops_v1 = {
	.pt_address = &rk_dte_pt_address,
	.mk_dtentries = &rk_mk_dte,
	.mk_ptentries = &rk_mk_pte,
	.dte_addr_phys = &rk_dte_addr_phys,
	.dma_addr_dte = &rk_dma_addr_dte,
	.dma_bit_mask = DMA_BIT_MASK(32),
	.gfp_flags = GFP_DMA32,
};

static struct rk_iommu_ops iommu_data_ops_v2 = {
	.pt_address = &rk_dte_pt_address_v2,
	.mk_dtentries = &rk_mk_dte_v2,
	.mk_ptentries = &rk_mk_pte_v2,
	.dte_addr_phys = &rk_dte_addr_phys_v2,
	.dma_addr_dte = &rk_dma_addr_dte_v2,
	.dma_bit_mask = DMA_BIT_MASK(40),
	.gfp_flags = 0,
};

static const struct of_device_id rk_iommu_dt_ids[] = {
@@ -14,6 +14,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

@@ -148,6 +149,7 @@ static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = SZ_256M - 1;
	dom->domain.geometry.force_aperture = true;

	return &dom->domain;
}
@@ -9,7 +9,7 @@
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -13,7 +13,7 @@
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
@@ -0,0 +1,489 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/*
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Chengci Xu <chengci.xu@mediatek.com>
 */
#ifndef _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
#define _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_

#include <dt-bindings/memory/mtk-memory-port.h>

/*
 * MM IOMMU larbs:
 * From below, for example larb11 has larb11a/larb11b/larb11c,
 * the index of larb is not in order. So we reindexed these larbs from a
 * software view.
 */
#define SMI_L0_ID 0
#define SMI_L1_ID 1
#define SMI_L2_ID 2
#define SMI_L3_ID 3
#define SMI_L4_ID 4
#define SMI_L5_ID 5
#define SMI_L6_ID 6
#define SMI_L7_ID 7
#define SMI_L9_ID 8
#define SMI_L10_ID 9
#define SMI_L11A_ID 10
#define SMI_L11B_ID 11
#define SMI_L11C_ID 12
#define SMI_L12_ID 13
#define SMI_L13_ID 14
#define SMI_L14_ID 15
#define SMI_L15_ID 16
#define SMI_L16A_ID 17
#define SMI_L16B_ID 18
#define SMI_L17A_ID 19
#define SMI_L17B_ID 20
#define SMI_L19_ID 21
#define SMI_L21_ID 22
#define SMI_L23_ID 23
#define SMI_L27_ID 24
#define SMI_L28_ID 25

/*
 * MM IOMMU supports 16GB dma address. We separate it to four ranges:
 * 0 ~ 4G; 4G ~ 8G; 8G ~ 12G; 12G ~ 16G, we could adjust these masters
 * locate in anyone region. BUT:
 * a) Make sure all the ports inside a larb are in one range.
 * b) The iova of any master can NOT cross the 4G/8G/12G boundary.
 *
 * This is the suggested mapping in this SoC:
 *
 * modules     dma-address-region             larbs-ports
 * disp        0 ~ 4G                         larb0/1/2/3
 * vcodec      4G ~ 8G                        larb19(21)[1]/21(22)/23
 * cam/mdp     8G ~ 12G                       the other larbs.
 * N/A         12G ~ 16G
 * CCU0        0x24000_0000 ~ 0x243ff_ffff    larb27(24): port 0/1
 * CCU1        0x24400_0000 ~ 0x247ff_ffff    larb27(24): port 2/3
 *
 * This SoC have two MM IOMMU HWs, this is the connected information:
 * iommu-vdo: larb0/2/5/9/10/11A/11C/13/16B/17B/19/21
 * iommu-vpp: larb1/3/4/6/7/11B/12/14/15/16A/17A/23/27
 *
 * [1]: This is larb19, but the index is 21 from the SW view.
 */

/* MM IOMMU ports */
/* LARB 0 -- VDO-0 */
#define M4U_PORT_L0_DISP_RDMA1 MTK_M4U_ID(SMI_L0_ID, 0)
#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(SMI_L0_ID, 1)
#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(SMI_L0_ID, 2)
#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(SMI_L0_ID, 3)
#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(SMI_L0_ID, 4)
#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(SMI_L0_ID, 5)
#define M4U_PORT_L0_DISP_FAKE_ENG0 MTK_M4U_ID(SMI_L0_ID, 6)

/* LARB 1 -- VD0-0 */
#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(SMI_L1_ID, 0)
#define M4U_PORT_L1_DISP_WDMA1 MTK_M4U_ID(SMI_L1_ID, 1)
#define M4U_PORT_L1_DISP_OVL1_RDMA0 MTK_M4U_ID(SMI_L1_ID, 2)
#define M4U_PORT_L1_DISP_OVL1_RDMA1 MTK_M4U_ID(SMI_L1_ID, 3)
#define M4U_PORT_L1_DISP_OVL1_HDR MTK_M4U_ID(SMI_L1_ID, 4)
#define M4U_PORT_L1_DISP_WROT0 MTK_M4U_ID(SMI_L1_ID, 5)
#define M4U_PORT_L1_DISP_FAKE_ENG1 MTK_M4U_ID(SMI_L1_ID, 6)

/* LARB 2 -- VDO-1 */
#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(SMI_L2_ID, 0)
#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(SMI_L2_ID, 1)
#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(SMI_L2_ID, 2)
#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(SMI_L2_ID, 3)
#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(SMI_L2_ID, 4)

/* LARB 3 -- VDO-1 */
#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(SMI_L3_ID, 0)
#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(SMI_L3_ID, 1)
#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(SMI_L3_ID, 2)
#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(SMI_L3_ID, 3)
#define M4U_PORT_L3_HDR_DS_SMI MTK_M4U_ID(SMI_L3_ID, 4)
#define M4U_PORT_L3_HDR_ADL_SMI MTK_M4U_ID(SMI_L3_ID, 5)
#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(SMI_L3_ID, 6)

/* LARB 4 -- VPP-0 */
#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(SMI_L4_ID, 0)
#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(SMI_L4_ID, 1)
#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(SMI_L4_ID, 2)
#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(SMI_L4_ID, 3)
#define M4U_PORT_L4_FAKE_ENG MTK_M4U_ID(SMI_L4_ID, 4)
#define M4U_PORT_L4_DISP_RDMA MTK_M4U_ID(SMI_L4_ID, 5)
#define M4U_PORT_L4_DISP_WDMA MTK_M4U_ID(SMI_L4_ID, 6)

/* LARB 5 -- VPP-1 */
#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 0)
#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(SMI_L5_ID, 1)
#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(SMI_L5_ID, 2)
#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 3)
#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 4)
#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(SMI_L5_ID, 5)
#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 6)
#define M4U_PORT_L5_LARB5_FAKE_ENG MTK_M4U_ID(SMI_L5_ID, 7)

/* LARB 6 -- VPP-1 */
#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(SMI_L6_ID, 0)
#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(SMI_L6_ID, 1)
#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(SMI_L6_ID, 2)
#define M4U_PORT_L6_LARB6_FAKE_ENG MTK_M4U_ID(SMI_L6_ID, 3)

/* LARB 7 -- WPE */
#define M4U_PORT_L7_WPE_RDMA_0 MTK_M4U_ID(SMI_L7_ID, 0)
#define M4U_PORT_L7_WPE_RDMA_1 MTK_M4U_ID(SMI_L7_ID, 1)
#define M4U_PORT_L7_WPE_WDMA_0 MTK_M4U_ID(SMI_L7_ID, 2)

/* LARB 9 -- IMG-M */
#define M4U_PORT_L9_IMGI_T1_A MTK_M4U_ID(SMI_L9_ID, 0)
#define M4U_PORT_L9_UFDI_T1_A MTK_M4U_ID(SMI_L9_ID, 1)
#define M4U_PORT_L9_IMGBI_T1_A MTK_M4U_ID(SMI_L9_ID, 2)
#define M4U_PORT_L9_IMGCI_T1_A MTK_M4U_ID(SMI_L9_ID, 3)
#define M4U_PORT_L9_SMTI_T1_A MTK_M4U_ID(SMI_L9_ID, 4)
#define M4U_PORT_L9_SMTI_T4_A MTK_M4U_ID(SMI_L9_ID, 5)
#define M4U_PORT_L9_TNCSTI_T1_A MTK_M4U_ID(SMI_L9_ID, 6)
#define M4U_PORT_L9_TNCSTI_T4_A MTK_M4U_ID(SMI_L9_ID, 7)
#define M4U_PORT_L9_YUVO_T1_A MTK_M4U_ID(SMI_L9_ID, 8)
#define M4U_PORT_L9_YUVBO_T1_A MTK_M4U_ID(SMI_L9_ID, 9)
#define M4U_PORT_L9_YUVCO_T1_A MTK_M4U_ID(SMI_L9_ID, 10)
#define M4U_PORT_L9_TIMGO_T1_A MTK_M4U_ID(SMI_L9_ID, 11)
#define M4U_PORT_L9_YUVO_T2_A MTK_M4U_ID(SMI_L9_ID, 12)
#define M4U_PORT_L9_YUVO_T5_A MTK_M4U_ID(SMI_L9_ID, 13)
#define M4U_PORT_L9_IMGI_T1_B MTK_M4U_ID(SMI_L9_ID, 14)
#define M4U_PORT_L9_IMGBI_T1_B MTK_M4U_ID(SMI_L9_ID, 15)
#define M4U_PORT_L9_IMGCI_T1_B MTK_M4U_ID(SMI_L9_ID, 16)
#define M4U_PORT_L9_SMTI_T4_B MTK_M4U_ID(SMI_L9_ID, 17)
#define M4U_PORT_L9_TNCSO_T1_A MTK_M4U_ID(SMI_L9_ID, 18)
#define M4U_PORT_L9_SMTO_T1_A MTK_M4U_ID(SMI_L9_ID, 19)
#define M4U_PORT_L9_SMTO_T4_A MTK_M4U_ID(SMI_L9_ID, 20)
#define M4U_PORT_L9_TNCSTO_T1_A MTK_M4U_ID(SMI_L9_ID, 21)
#define M4U_PORT_L9_YUVO_T2_B MTK_M4U_ID(SMI_L9_ID, 22)
#define M4U_PORT_L9_YUVO_T5_B MTK_M4U_ID(SMI_L9_ID, 23)
#define M4U_PORT_L9_SMTO_T4_B MTK_M4U_ID(SMI_L9_ID, 24)

/* LARB 10 -- IMG-D */
#define M4U_PORT_L10_IMGI_D1 MTK_M4U_ID(SMI_L10_ID, 0)
#define M4U_PORT_L10_IMGBI_D1 MTK_M4U_ID(SMI_L10_ID, 1)
#define M4U_PORT_L10_IMGCI_D1 MTK_M4U_ID(SMI_L10_ID, 2)
#define M4U_PORT_L10_IMGDI_D1 MTK_M4U_ID(SMI_L10_ID, 3)
#define M4U_PORT_L10_DEPI_D1 MTK_M4U_ID(SMI_L10_ID, 4)
#define M4U_PORT_L10_DMGI_D1 MTK_M4U_ID(SMI_L10_ID, 5)
#define M4U_PORT_L10_SMTI_D1 MTK_M4U_ID(SMI_L10_ID, 6)
#define M4U_PORT_L10_RECI_D1 MTK_M4U_ID(SMI_L10_ID, 7)
#define M4U_PORT_L10_RECI_D1_N MTK_M4U_ID(SMI_L10_ID, 8)
#define M4U_PORT_L10_TNRWI_D1 MTK_M4U_ID(SMI_L10_ID, 9)
#define M4U_PORT_L10_TNRCI_D1 MTK_M4U_ID(SMI_L10_ID, 10)
#define M4U_PORT_L10_TNRCI_D1_N MTK_M4U_ID(SMI_L10_ID, 11)
#define M4U_PORT_L10_IMG4O_D1 MTK_M4U_ID(SMI_L10_ID, 12)
#define M4U_PORT_L10_IMG4BO_D1 MTK_M4U_ID(SMI_L10_ID, 13)
#define M4U_PORT_L10_SMTI_D8 MTK_M4U_ID(SMI_L10_ID, 14)
#define M4U_PORT_L10_SMTO_D1 MTK_M4U_ID(SMI_L10_ID, 15)
#define M4U_PORT_L10_TNRMO_D1 MTK_M4U_ID(SMI_L10_ID, 16)
#define M4U_PORT_L10_TNRMO_D1_N MTK_M4U_ID(SMI_L10_ID, 17)
#define M4U_PORT_L10_SMTO_D8 MTK_M4U_ID(SMI_L10_ID, 18)
#define M4U_PORT_L10_DBGO_D1 MTK_M4U_ID(SMI_L10_ID, 19)

/* LARB 11A -- IMG-D */
#define M4U_PORT_L11A_WPE_RDMA_0 MTK_M4U_ID(SMI_L11A_ID, 0)
#define M4U_PORT_L11A_WPE_RDMA_1 MTK_M4U_ID(SMI_L11A_ID, 1)
#define M4U_PORT_L11A_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 2)
#define M4U_PORT_L11A_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11A_ID, 3)
#define M4U_PORT_L11A_WPE_CQ0 MTK_M4U_ID(SMI_L11A_ID, 4)
#define M4U_PORT_L11A_WPE_CQ1 MTK_M4U_ID(SMI_L11A_ID, 5)
#define M4U_PORT_L11A_PIMGI_P1 MTK_M4U_ID(SMI_L11A_ID, 6)
#define M4U_PORT_L11A_PIMGBI_P1 MTK_M4U_ID(SMI_L11A_ID, 7)
#define M4U_PORT_L11A_PIMGCI_P1 MTK_M4U_ID(SMI_L11A_ID, 8)
#define M4U_PORT_L11A_IMGI_T1_C MTK_M4U_ID(SMI_L11A_ID, 9)
#define M4U_PORT_L11A_IMGBI_T1_C MTK_M4U_ID(SMI_L11A_ID, 10)
#define M4U_PORT_L11A_IMGCI_T1_C MTK_M4U_ID(SMI_L11A_ID, 11)
#define M4U_PORT_L11A_SMTI_T1_C MTK_M4U_ID(SMI_L11A_ID, 12)
#define M4U_PORT_L11A_SMTI_T4_C MTK_M4U_ID(SMI_L11A_ID, 13)
#define M4U_PORT_L11A_SMTI_T6_C MTK_M4U_ID(SMI_L11A_ID, 14)
#define M4U_PORT_L11A_YUVO_T1_C MTK_M4U_ID(SMI_L11A_ID, 15)
#define M4U_PORT_L11A_YUVBO_T1_C MTK_M4U_ID(SMI_L11A_ID, 16)
#define M4U_PORT_L11A_YUVCO_T1_C MTK_M4U_ID(SMI_L11A_ID, 17)
#define M4U_PORT_L11A_WPE_WDMA_0 MTK_M4U_ID(SMI_L11A_ID, 18)
#define M4U_PORT_L11A_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 19)
#define M4U_PORT_L11A_WROT_P1 MTK_M4U_ID(SMI_L11A_ID, 20)
#define M4U_PORT_L11A_TCCSO_P1 MTK_M4U_ID(SMI_L11A_ID, 21)
#define M4U_PORT_L11A_TCCSI_P1 MTK_M4U_ID(SMI_L11A_ID, 22)
#define M4U_PORT_L11A_TIMGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 23)
#define M4U_PORT_L11A_YUVO_T2_C MTK_M4U_ID(SMI_L11A_ID, 24)
#define M4U_PORT_L11A_YUVO_T5_C MTK_M4U_ID(SMI_L11A_ID, 25)
#define M4U_PORT_L11A_SMTO_T1_C MTK_M4U_ID(SMI_L11A_ID, 26)
#define M4U_PORT_L11A_SMTO_T4_C MTK_M4U_ID(SMI_L11A_ID, 27)
#define M4U_PORT_L11A_SMTO_T6_C MTK_M4U_ID(SMI_L11A_ID, 28)
#define M4U_PORT_L11A_DBGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 29)

/* LARB 11B -- IMG-D */
#define M4U_PORT_L11B_WPE_RDMA_0 MTK_M4U_ID(SMI_L11B_ID, 0)
#define M4U_PORT_L11B_WPE_RDMA_1 MTK_M4U_ID(SMI_L11B_ID, 1)
#define M4U_PORT_L11B_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 2)
#define M4U_PORT_L11B_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11B_ID, 3)
#define M4U_PORT_L11B_WPE_CQ0 MTK_M4U_ID(SMI_L11B_ID, 4)
#define M4U_PORT_L11B_WPE_CQ1 MTK_M4U_ID(SMI_L11B_ID, 5)
#define M4U_PORT_L11B_PIMGI_P1 MTK_M4U_ID(SMI_L11B_ID, 6)
#define M4U_PORT_L11B_PIMGBI_P1 MTK_M4U_ID(SMI_L11B_ID, 7)
#define M4U_PORT_L11B_PIMGCI_P1 MTK_M4U_ID(SMI_L11B_ID, 8)
#define M4U_PORT_L11B_IMGI_T1_C MTK_M4U_ID(SMI_L11B_ID, 9)
#define M4U_PORT_L11B_IMGBI_T1_C MTK_M4U_ID(SMI_L11B_ID, 10)
#define M4U_PORT_L11B_IMGCI_T1_C MTK_M4U_ID(SMI_L11B_ID, 11)
#define M4U_PORT_L11B_SMTI_T1_C MTK_M4U_ID(SMI_L11B_ID, 12)
#define M4U_PORT_L11B_SMTI_T4_C MTK_M4U_ID(SMI_L11B_ID, 13)
#define M4U_PORT_L11B_SMTI_T6_C MTK_M4U_ID(SMI_L11B_ID, 14)
#define M4U_PORT_L11B_YUVO_T1_C MTK_M4U_ID(SMI_L11B_ID, 15)
#define M4U_PORT_L11B_YUVBO_T1_C MTK_M4U_ID(SMI_L11B_ID, 16)
#define M4U_PORT_L11B_YUVCO_T1_C MTK_M4U_ID(SMI_L11B_ID, 17)
#define M4U_PORT_L11B_WPE_WDMA_0 MTK_M4U_ID(SMI_L11B_ID, 18)
#define M4U_PORT_L11B_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 19)
#define M4U_PORT_L11B_WROT_P1 MTK_M4U_ID(SMI_L11B_ID, 20)
#define M4U_PORT_L11B_TCCSO_P1 MTK_M4U_ID(SMI_L11B_ID, 21)
#define M4U_PORT_L11B_TCCSI_P1 MTK_M4U_ID(SMI_L11B_ID, 22)
#define M4U_PORT_L11B_TIMGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 23)
#define M4U_PORT_L11B_YUVO_T2_C MTK_M4U_ID(SMI_L11B_ID, 24)
#define M4U_PORT_L11B_YUVO_T5_C MTK_M4U_ID(SMI_L11B_ID, 25)
#define M4U_PORT_L11B_SMTO_T1_C MTK_M4U_ID(SMI_L11B_ID, 26)
#define M4U_PORT_L11B_SMTO_T4_C MTK_M4U_ID(SMI_L11B_ID, 27)
#define M4U_PORT_L11B_SMTO_T6_C MTK_M4U_ID(SMI_L11B_ID, 28)
#define M4U_PORT_L11B_DBGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 29)

/* LARB 11C -- IMG-D */
#define M4U_PORT_L11C_WPE_RDMA_0 MTK_M4U_ID(SMI_L11C_ID, 0)
#define M4U_PORT_L11C_WPE_RDMA_1 MTK_M4U_ID(SMI_L11C_ID, 1)
#define M4U_PORT_L11C_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 2)
#define M4U_PORT_L11C_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11C_ID, 3)
#define M4U_PORT_L11C_WPE_CQ0 MTK_M4U_ID(SMI_L11C_ID, 4)
#define M4U_PORT_L11C_WPE_CQ1 MTK_M4U_ID(SMI_L11C_ID, 5)
#define M4U_PORT_L11C_PIMGI_P1 MTK_M4U_ID(SMI_L11C_ID, 6)
#define M4U_PORT_L11C_PIMGBI_P1 MTK_M4U_ID(SMI_L11C_ID, 7)
#define M4U_PORT_L11C_PIMGCI_P1 MTK_M4U_ID(SMI_L11C_ID, 8)
#define M4U_PORT_L11C_IMGI_T1_C MTK_M4U_ID(SMI_L11C_ID, 9)
#define M4U_PORT_L11C_IMGBI_T1_C MTK_M4U_ID(SMI_L11C_ID, 10)
#define M4U_PORT_L11C_IMGCI_T1_C MTK_M4U_ID(SMI_L11C_ID, 11)
#define M4U_PORT_L11C_SMTI_T1_C MTK_M4U_ID(SMI_L11C_ID, 12)
#define M4U_PORT_L11C_SMTI_T4_C MTK_M4U_ID(SMI_L11C_ID, 13)
#define M4U_PORT_L11C_SMTI_T6_C MTK_M4U_ID(SMI_L11C_ID, 14)
#define M4U_PORT_L11C_YUVO_T1_C MTK_M4U_ID(SMI_L11C_ID, 15)
#define M4U_PORT_L11C_YUVBO_T1_C MTK_M4U_ID(SMI_L11C_ID, 16)
#define M4U_PORT_L11C_YUVCO_T1_C MTK_M4U_ID(SMI_L11C_ID, 17)
#define M4U_PORT_L11C_WPE_WDMA_0 MTK_M4U_ID(SMI_L11C_ID, 18)
#define M4U_PORT_L11C_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 19)
#define M4U_PORT_L11C_WROT_P1 MTK_M4U_ID(SMI_L11C_ID, 20)
#define M4U_PORT_L11C_TCCSO_P1 MTK_M4U_ID(SMI_L11C_ID, 21)
#define M4U_PORT_L11C_TCCSI_P1 MTK_M4U_ID(SMI_L11C_ID, 22)
#define M4U_PORT_L11C_TIMGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 23)
#define M4U_PORT_L11C_YUVO_T2_C MTK_M4U_ID(SMI_L11C_ID, 24)
#define M4U_PORT_L11C_YUVO_T5_C MTK_M4U_ID(SMI_L11C_ID, 25)
#define M4U_PORT_L11C_SMTO_T1_C MTK_M4U_ID(SMI_L11C_ID, 26)
#define M4U_PORT_L11C_SMTO_T4_C MTK_M4U_ID(SMI_L11C_ID, 27)
#define M4U_PORT_L11C_SMTO_T6_C MTK_M4U_ID(SMI_L11C_ID, 28)
#define M4U_PORT_L11C_DBGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 29)

/* LARB 12 -- IPE */
#define M4U_PORT_L12_FDVT_RDA_0 MTK_M4U_ID(SMI_L12_ID, 0)
#define M4U_PORT_L12_FDVT_RDB_0 MTK_M4U_ID(SMI_L12_ID, 1)
#define M4U_PORT_L12_FDVT_WRA_0 MTK_M4U_ID(SMI_L12_ID, 2)
#define M4U_PORT_L12_FDVT_WRB_0 MTK_M4U_ID(SMI_L12_ID, 3)
#define M4U_PORT_L12_ME_RDMA MTK_M4U_ID(SMI_L12_ID, 4)
#define M4U_PORT_L12_ME_WDMA MTK_M4U_ID(SMI_L12_ID, 5)
#define M4U_PORT_L12_DVS_RDMA MTK_M4U_ID(SMI_L12_ID, 6)
#define M4U_PORT_L12_DVS_WDMA MTK_M4U_ID(SMI_L12_ID, 7)
#define M4U_PORT_L12_DVP_RDMA MTK_M4U_ID(SMI_L12_ID, 8)
#define M4U_PORT_L12_DVP_WDMA MTK_M4U_ID(SMI_L12_ID, 9)
#define M4U_PORT_L12_FDVT_2ND_RDA_0 MTK_M4U_ID(SMI_L12_ID, 10)
#define M4U_PORT_L12_FDVT_2ND_RDB_0 MTK_M4U_ID(SMI_L12_ID, 11)
#define M4U_PORT_L12_FDVT_2ND_WRA_0 MTK_M4U_ID(SMI_L12_ID, 12)
#define M4U_PORT_L12_FDVT_2ND_WRB_0 MTK_M4U_ID(SMI_L12_ID, 13)
#define M4U_PORT_L12_DHZEI_E1 MTK_M4U_ID(SMI_L12_ID, 14)
#define M4U_PORT_L12_DHZEO_E1 MTK_M4U_ID(SMI_L12_ID, 15)

/* LARB 13 -- CAM-1 */
#define M4U_PORT_L13_CAMSV_CQI_E1 MTK_M4U_ID(SMI_L13_ID, 0)
#define M4U_PORT_L13_CAMSV_CQI_E2 MTK_M4U_ID(SMI_L13_ID, 1)
#define M4U_PORT_L13_GCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 2)
#define M4U_PORT_L13_GCAMSV_C_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 3)
#define M4U_PORT_L13_GCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 4)
#define M4U_PORT_L13_GCAMSV_C_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 5)
#define M4U_PORT_L13_PDAI_A_0 MTK_M4U_ID(SMI_L13_ID, 6)
#define M4U_PORT_L13_PDAI_A_1 MTK_M4U_ID(SMI_L13_ID, 7)
#define M4U_PORT_L13_CAMSV_CQI_B_E1 MTK_M4U_ID(SMI_L13_ID, 8)
#define M4U_PORT_L13_CAMSV_CQI_B_E2 MTK_M4U_ID(SMI_L13_ID, 9)
#define M4U_PORT_L13_CAMSV_CQI_C_E1 MTK_M4U_ID(SMI_L13_ID, 10)
#define M4U_PORT_L13_CAMSV_CQI_C_E2 MTK_M4U_ID(SMI_L13_ID, 11)
#define M4U_PORT_L13_GCAMSV_E_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 12)
#define M4U_PORT_L13_GCAMSV_E_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 13)
#define M4U_PORT_L13_GCAMSV_A_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 14)
#define M4U_PORT_L13_GCAMSV_C_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 15)
#define M4U_PORT_L13_GCAMSV_A_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 16)
#define M4U_PORT_L13_GCAMSV_C_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 17)
#define M4U_PORT_L13_GCAMSV_E_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 18)
#define M4U_PORT_L13_GCAMSV_E_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 19)
#define M4U_PORT_L13_GCAMSV_G_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 20)
#define M4U_PORT_L13_GCAMSV_G_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 21)
#define M4U_PORT_L13_PDAO_A MTK_M4U_ID(SMI_L13_ID, 22)
#define M4U_PORT_L13_PDAO_C MTK_M4U_ID(SMI_L13_ID, 23)

/* LARB 14 -- CAM-1 */
#define M4U_PORT_L14_GCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 0)
#define M4U_PORT_L14_GCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 1)
#define M4U_PORT_L14_SCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 2)
#define M4U_PORT_L14_SCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 3)
#define M4U_PORT_L14_SCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 4)
#define M4U_PORT_L14_SCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 5)
#define M4U_PORT_L14_PDAI_B_0 MTK_M4U_ID(SMI_L14_ID, 6)
#define M4U_PORT_L14_PDAI_B_1 MTK_M4U_ID(SMI_L14_ID, 7)
#define M4U_PORT_L14_GCAMSV_D_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 8)
#define M4U_PORT_L14_GCAMSV_D_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 9)
#define M4U_PORT_L14_GCAMSV_F_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 10)
#define M4U_PORT_L14_GCAMSV_F_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 11)
#define M4U_PORT_L14_GCAMSV_H_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 12)
#define M4U_PORT_L14_GCAMSV_H_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 13)
#define M4U_PORT_L14_GCAMSV_B_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 14)
#define M4U_PORT_L14_GCAMSV_B_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 15)
#define M4U_PORT_L14_GCAMSV_D_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 16)
#define M4U_PORT_L14_GCAMSV_D_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 17)
#define M4U_PORT_L14_PDAO_B MTK_M4U_ID(SMI_L14_ID, 18)
#define M4U_PORT_L14_IPUI MTK_M4U_ID(SMI_L14_ID, 19)
#define M4U_PORT_L14_IPUO MTK_M4U_ID(SMI_L14_ID, 20)
#define M4U_PORT_L14_IPU3O MTK_M4U_ID(SMI_L14_ID, 21)
#define M4U_PORT_L14_FAKE MTK_M4U_ID(SMI_L14_ID, 22)

/* LARB 15 -- IMG-D */
#define M4U_PORT_L15_VIPI_D1 MTK_M4U_ID(SMI_L15_ID, 0)
#define M4U_PORT_L15_VIPBI_D1 MTK_M4U_ID(SMI_L15_ID, 1)
#define M4U_PORT_L15_SMTI_D6 MTK_M4U_ID(SMI_L15_ID, 2)
#define M4U_PORT_L15_TNCSTI_D1 MTK_M4U_ID(SMI_L15_ID, 3)
#define M4U_PORT_L15_TNCSTI_D4 MTK_M4U_ID(SMI_L15_ID, 4)
#define M4U_PORT_L15_SMTI_D4 MTK_M4U_ID(SMI_L15_ID, 5)
#define M4U_PORT_L15_IMG3O_D1 MTK_M4U_ID(SMI_L15_ID, 6)
#define M4U_PORT_L15_IMG3BO_D1 MTK_M4U_ID(SMI_L15_ID, 7)
#define M4U_PORT_L15_IMG3CO_D1 MTK_M4U_ID(SMI_L15_ID, 8)
#define M4U_PORT_L15_IMG2O_D1 MTK_M4U_ID(SMI_L15_ID, 9)
#define M4U_PORT_L15_SMTI_D9 MTK_M4U_ID(SMI_L15_ID, 10)
#define M4U_PORT_L15_SMTO_D4 MTK_M4U_ID(SMI_L15_ID, 11)
#define M4U_PORT_L15_FEO_D1 MTK_M4U_ID(SMI_L15_ID, 12)
#define M4U_PORT_L15_TNCSO_D1 MTK_M4U_ID(SMI_L15_ID, 13)
#define M4U_PORT_L15_TNCSTO_D1 MTK_M4U_ID(SMI_L15_ID, 14)
#define M4U_PORT_L15_SMTO_D6 MTK_M4U_ID(SMI_L15_ID, 15)
#define M4U_PORT_L15_SMTO_D9 MTK_M4U_ID(SMI_L15_ID, 16)
#define M4U_PORT_L15_TNCO_D1 MTK_M4U_ID(SMI_L15_ID, 17)
#define M4U_PORT_L15_TNCO_D1_N MTK_M4U_ID(SMI_L15_ID, 18)

/* LARB 16A -- CAM */
#define M4U_PORT_L16A_IMGO_R1 MTK_M4U_ID(SMI_L16A_ID, 0)
#define M4U_PORT_L16A_CQI_R1 MTK_M4U_ID(SMI_L16A_ID, 1)
#define M4U_PORT_L16A_CQI_R2 MTK_M4U_ID(SMI_L16A_ID, 2)
#define M4U_PORT_L16A_BPCI_R1 MTK_M4U_ID(SMI_L16A_ID, 3)
#define M4U_PORT_L16A_LSCI_R1 MTK_M4U_ID(SMI_L16A_ID, 4)
#define M4U_PORT_L16A_RAWI_R2 MTK_M4U_ID(SMI_L16A_ID, 5)
#define M4U_PORT_L16A_RAWI_R3 MTK_M4U_ID(SMI_L16A_ID, 6)
#define M4U_PORT_L16A_UFDI_R2 MTK_M4U_ID(SMI_L16A_ID, 7)
#define M4U_PORT_L16A_UFDI_R3 MTK_M4U_ID(SMI_L16A_ID, 8)
#define M4U_PORT_L16A_RAWI_R4 MTK_M4U_ID(SMI_L16A_ID, 9)
#define M4U_PORT_L16A_RAWI_R5 MTK_M4U_ID(SMI_L16A_ID, 10)
#define M4U_PORT_L16A_AAI_R1 MTK_M4U_ID(SMI_L16A_ID, 11)
#define M4U_PORT_L16A_UFDI_R5 MTK_M4U_ID(SMI_L16A_ID, 12)
#define M4U_PORT_L16A_FHO_R1 MTK_M4U_ID(SMI_L16A_ID, 13)
#define M4U_PORT_L16A_AAO_R1 MTK_M4U_ID(SMI_L16A_ID, 14)
#define M4U_PORT_L16A_TSFSO_R1 MTK_M4U_ID(SMI_L16A_ID, 15)
#define M4U_PORT_L16A_FLKO_R1 MTK_M4U_ID(SMI_L16A_ID, 16)

/* LARB 16B -- CAM */
#define M4U_PORT_L16B_IMGO_R1 MTK_M4U_ID(SMI_L16B_ID, 0)
#define M4U_PORT_L16B_CQI_R1 MTK_M4U_ID(SMI_L16B_ID, 1)
#define M4U_PORT_L16B_CQI_R2 MTK_M4U_ID(SMI_L16B_ID, 2)
#define M4U_PORT_L16B_BPCI_R1 MTK_M4U_ID(SMI_L16B_ID, 3)
#define M4U_PORT_L16B_LSCI_R1 MTK_M4U_ID(SMI_L16B_ID, 4)
#define M4U_PORT_L16B_RAWI_R2 MTK_M4U_ID(SMI_L16B_ID, 5)
#define M4U_PORT_L16B_RAWI_R3 MTK_M4U_ID(SMI_L16B_ID, 6)
#define M4U_PORT_L16B_UFDI_R2 MTK_M4U_ID(SMI_L16B_ID, 7)
#define M4U_PORT_L16B_UFDI_R3 MTK_M4U_ID(SMI_L16B_ID, 8)
#define M4U_PORT_L16B_RAWI_R4 MTK_M4U_ID(SMI_L16B_ID, 9)
#define M4U_PORT_L16B_RAWI_R5 MTK_M4U_ID(SMI_L16B_ID, 10)
#define M4U_PORT_L16B_AAI_R1 MTK_M4U_ID(SMI_L16B_ID, 11)
#define M4U_PORT_L16B_UFDI_R5 MTK_M4U_ID(SMI_L16B_ID, 12)
#define M4U_PORT_L16B_FHO_R1 MTK_M4U_ID(SMI_L16B_ID, 13)
#define M4U_PORT_L16B_AAO_R1 MTK_M4U_ID(SMI_L16B_ID, 14)
#define M4U_PORT_L16B_TSFSO_R1 MTK_M4U_ID(SMI_L16B_ID, 15)
#define M4U_PORT_L16B_FLKO_R1 MTK_M4U_ID(SMI_L16B_ID, 16)

/* LARB 17A -- CAM */
#define M4U_PORT_L17A_YUVO_R1 MTK_M4U_ID(SMI_L17A_ID, 0)
#define M4U_PORT_L17A_YUVO_R3 MTK_M4U_ID(SMI_L17A_ID, 1)
#define M4U_PORT_L17A_YUVCO_R1 MTK_M4U_ID(SMI_L17A_ID, 2)
#define M4U_PORT_L17A_YUVO_R2 MTK_M4U_ID(SMI_L17A_ID, 3)
#define M4U_PORT_L17A_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17A_ID, 4)
#define M4U_PORT_L17A_DRZS4NO_R1 MTK_M4U_ID(SMI_L17A_ID, 5)
#define M4U_PORT_L17A_TNCSO_R1 MTK_M4U_ID(SMI_L17A_ID, 6)

/* LARB 17B -- CAM */
#define M4U_PORT_L17B_YUVO_R1 MTK_M4U_ID(SMI_L17B_ID, 0)
#define M4U_PORT_L17B_YUVO_R3 MTK_M4U_ID(SMI_L17B_ID, 1)
#define M4U_PORT_L17B_YUVCO_R1 MTK_M4U_ID(SMI_L17B_ID, 2)
#define M4U_PORT_L17B_YUVO_R2 MTK_M4U_ID(SMI_L17B_ID, 3)
#define M4U_PORT_L17B_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17B_ID, 4)
#define M4U_PORT_L17B_DRZS4NO_R1 MTK_M4U_ID(SMI_L17B_ID, 5)
#define M4U_PORT_L17B_TNCSO_R1 MTK_M4U_ID(SMI_L17B_ID, 6)

/* LARB 19 -- VENC */
#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(SMI_L19_ID, 0)
#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(SMI_L19_ID, 1)
#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 2)
#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(SMI_L19_ID, 3)
#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(SMI_L19_ID, 4)
#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 5)
#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(SMI_L19_ID, 6)
#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(SMI_L19_ID, 7)
#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(SMI_L19_ID, 8)
#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(SMI_L19_ID, 9)
#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(SMI_L19_ID, 10)
#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 11)
#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 12)
#define M4U_PORT_L19_JPGDEC_WDMA_0 MTK_M4U_ID(SMI_L19_ID, 13)
#define M4U_PORT_L19_JPGDEC_BSDMA_0 MTK_M4U_ID(SMI_L19_ID, 14)
#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 15)
#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(SMI_L19_ID, 16)
#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 17)
#define M4U_PORT_L19_JPGDEC_WDMA_1 MTK_M4U_ID(SMI_L19_ID, 18)
#define M4U_PORT_L19_JPGDEC_BSDMA_1 MTK_M4U_ID(SMI_L19_ID, 19)
#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_1 MTK_M4U_ID(SMI_L19_ID, 20)
#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_0 MTK_M4U_ID(SMI_L19_ID, 21)
#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(SMI_L19_ID, 22)
#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(SMI_L19_ID, 23)
#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(SMI_L19_ID, 24)
#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(SMI_L19_ID, 25)
#define M4U_PORT_L19_VENC_SUB_R_LUMA MTK_M4U_ID(SMI_L19_ID, 26)

/* LARB 21 -- VDEC-CORE0 */
#define M4U_PORT_L21_HW_VDEC_MC_EXT MTK_M4U_ID(SMI_L21_ID, 0)
#define M4U_PORT_L21_HW_VDEC_UFO_EXT MTK_M4U_ID(SMI_L21_ID, 1)
#define M4U_PORT_L21_HW_VDEC_PP_EXT MTK_M4U_ID(SMI_L21_ID, 2)
#define M4U_PORT_L21_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(SMI_L21_ID, 3)
#define M4U_PORT_L21_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(SMI_L21_ID, 4)
#define M4U_PORT_L21_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(SMI_L21_ID, 5)
#define M4U_PORT_L21_HW_VDEC_TILE_EXT MTK_M4U_ID(SMI_L21_ID, 6)
#define M4U_PORT_L21_HW_VDEC_VLD_EXT MTK_M4U_ID(SMI_L21_ID, 7)
#define M4U_PORT_L21_HW_VDEC_VLD2_EXT MTK_M4U_ID(SMI_L21_ID, 8)
#define M4U_PORT_L21_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(SMI_L21_ID, 9)
#define M4U_PORT_L21_HW_VDEC_UFO_EXT_C MTK_M4U_ID(SMI_L21_ID, 10)

/* LARB 23 -- VDEC-SOC */
#define M4U_PORT_L23_HW_VDEC_LAT0_VLD_EXT MTK_M4U_ID(SMI_L23_ID, 0)
#define M4U_PORT_L23_HW_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(SMI_L23_ID, 1)
#define M4U_PORT_L23_HW_VDEC_LAT0_AVC_MV_EXT MTK_M4U_ID(SMI_L23_ID, 2)
#define M4U_PORT_L23_HW_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(SMI_L23_ID, 3)
#define M4U_PORT_L23_HW_VDEC_LAT0_TILE_EXT MTK_M4U_ID(SMI_L23_ID, 4)
#define M4U_PORT_L23_HW_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(SMI_L23_ID, 5)
#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(SMI_L23_ID, 6)
#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT_C MTK_M4U_ID(SMI_L23_ID, 7)
#define M4U_PORT_L23_HW_VDEC_MC_EXT_C MTK_M4U_ID(SMI_L23_ID, 8)

/* LARB 27 -- CCU */
#define M4U_PORT_L27_CCUI MTK_M4U_ID(SMI_L27_ID, 0)
#define M4U_PORT_L27_CCUO MTK_M4U_ID(SMI_L27_ID, 1)
#define M4U_PORT_L27_CCUI2 MTK_M4U_ID(SMI_L27_ID, 2)
#define M4U_PORT_L27_CCUO2 MTK_M4U_ID(SMI_L27_ID, 3)

/* LARB 28 -- AXI-CCU */
#define M4U_PORT_L28_CCU_AXI_0 MTK_M4U_ID(SMI_L28_ID, 0)

/* infra/peri */
#define IFR_IOMMU_PORT_PCIE_0 MTK_IFAIOMMU_PERI_ID(0)

#endif
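Editorial aside, not part of the patch set: the port macros in the new header above compose a larb index and a port index via MTK_M4U_ID() from mtk-memory-port.h. The standalone sketch below assumes the usual encoding from that header (larb shifted left by five, port in the low five bits); verify the exact shift against mtk-memory-port.h before relying on it. The decode macros and the example are illustrative only.

/* Illustration only: decoding an mt8188 port ID, assuming the MTK_M4U_ID() encoding. */
#include <stdio.h>

#define MTK_M4U_ID(larb, port)	(((larb) << 5) | (port))	/* assumed encoding */
#define MTK_M4U_TO_LARB(id)	(((id) >> 5) & 0x1f)
#define MTK_M4U_TO_PORT(id)	((id) & 0x1f)

#define SMI_L27_ID		24				/* larb27 reindexed to 24 */
#define M4U_PORT_L27_CCUI2	MTK_M4U_ID(SMI_L27_ID, 2)

int main(void)
{
	int id = M4U_PORT_L27_CCUI2;

	printf("id=%d larb=%d port=%d\n",
	       id, MTK_M4U_TO_LARB(id), MTK_M4U_TO_PORT(id));	/* id=770 larb=24 port=2 */
	return 0;
}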
@@ -32,7 +32,6 @@ struct task_struct;
struct pci_dev;

extern int amd_iommu_detect(void);
extern int amd_iommu_init_hardware(void);

/**
 * amd_iommu_init_device() - Init device for use with IOMMUv2 driver
@@ -106,8 +106,6 @@ static inline bool dmar_rcu_check(void)
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
@@ -196,6 +196,8 @@ enum iommu_dev_features {
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID (1U) /*starting range for allocation */
#define IOMMU_PASID_INVALID (-1U)
typedef unsigned int ioasid_t;

@@ -414,6 +416,8 @@ struct iommu_fault_param {
 * @priv: IOMMU Driver private data
 * @max_pasids: number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	 struct iommu_group *iommu_group;

@@ -427,6 +431,8 @@ struct dev_iommu {
	void *priv;
	u32 max_pasids;
	u32 attach_deferred:1;
	u32 pci_32bit_workaround:1;
	u32 require_direct:1;
};

int iommu_device_register(struct iommu_device *iommu,

@@ -721,6 +727,8 @@ void iommu_detach_device_pasid(struct iommu_domain *domain,
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};

@@ -1082,6 +1090,13 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
{
	return NULL;
}

static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	return IOMMU_PASID_INVALID;
}

static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */

/**
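Editorial aside, not part of the patch set: the iommu.h hunks above add iommu_alloc_global_pasid() and iommu_free_global_pasid() for PASIDs shared across the whole system. The hedged kernel-style sketch below only shows the intended call pattern; my_dev_enable_pasid_dma() and my_dev_disable_pasid_dma() are invented names, and real drivers also attach a domain for the PASID (for example with iommu_attach_device_pasid()) and handle errors appropriately.

/* Hedged sketch of using the new global PASID helpers; builds only in kernel context. */
#include <linux/iommu.h>

static ioasid_t my_dev_enable_pasid_dma(struct device *dev)
{
	ioasid_t pasid = iommu_alloc_global_pasid(dev);

	if (pasid == IOMMU_PASID_INVALID)
		return pasid;	/* allocation failed or PASID space exhausted */

	/* ... attach a domain for this PASID and program the device to tag
	 * its DMA with 'pasid' ... */
	return pasid;
}

static void my_dev_disable_pasid_dma(ioasid_t pasid)
{
	iommu_free_global_pasid(pasid);
}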
@@ -13,6 +13,7 @@

enum iommu_atf_cmd {
	IOMMU_ATF_CMD_CONFIG_SMI_LARB, /* For mm master to en/disable iommu */
	IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU, /* For infra master to enable iommu */
	IOMMU_ATF_CMD_MAX,
};