IOMMU Updates for Linux v5.2
Including:

 - ATS support for ARM-SMMU-v3.

 - AUX domain support in the IOMMU-API and the Intel VT-d driver. This
   adds support for multiple DMA address spaces per (PCI-)device. The
   use-case is to multiplex devices between host and KVM guests in a
   more flexible way than supported by SR-IOV.

 - The rest are smaller cleanups and fixes, two of which needed to be
   reverted after testing in linux-next.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAlzZWPkACgkQK/BELZcB
GuPdRRAAj/RcgVn7fqmNDM02xe6C5PuwBGYkXnC+atDrTQWbFsM0JE3YTWEHJ+66
7RMoYaksRaSBsn3QuX3b6+g6E+exhGoQ0BfkmuF8StUXAsaxvzGxvuk+cP0o4/mK
pZkj3BddS4ycRqQPsVEbgJGRzL39dxWHe7p3/FfwgV+HzVonURFozU0HixLAoBhr
uS0LpBiG8uGCMvO6yhTmPmfrbsSAcMivb7LlmsaykXPhjBk7kSqNgHNNx5O+HC8m
XJdFatkxolkrN6A2FoHdP05sAXCv+uHbAGGGitYziRaXG7GBzm7Vc2LspJIml+y2
898+MiTH1M3P0WPyDa3cfcnRc2BBuJg56emad4CcfduM9sVXI0Ol6slNAYljnSYD
5A0CUxbrLxGUZaf6DAUJ9w5L+LhgEkXzKWEE9Nif46K4I1CFSt/d8nwB6Q5Oc/ie
GZwTICRkMwTeqOM/CTyvwJCCwZm47AVv3qwaI0z5oDplH/bbRmNEi5WFJsgcgOnd
GS5kmzjFBsljjDVWswgugdm7sdMSl7y88uQK9zUiG8fXgRiVUW/rENfZ1SMmVl1p
zBQDndZmtrHm5ybe/NAZ8vaJhk4i1F3rWT0hwRZZKGDIrd/C3egnNyYkc4XeTPGe
3il+dJleIIwOX5Fpa44XTV1rDuVOXpF5LS5NRLjhhd+XqbaXZFI=
=HLtu
-----END PGP SIGNATURE-----

Merge tag 'iommu-updates-v5.2' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:

 - ATS support for ARM-SMMU-v3.

 - AUX domain support in the IOMMU-API and the Intel VT-d driver. This
   adds support for multiple DMA address spaces per (PCI-)device. The
   use-case is to multiplex devices between host and KVM guests in a
   more flexible way than supported by SR-IOV.

 - The rest are smaller cleanups and fixes, two of which needed to be
   reverted after testing in linux-next.

* tag 'iommu-updates-v5.2' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (45 commits)
  Revert "iommu/amd: Flush not present cache in iommu_map_page"
  Revert "iommu/amd: Remove the leftover of bypass support"
  iommu/vt-d: Fix leak in intel_pasid_alloc_table on error path
  iommu/vt-d: Make kernel parameter igfx_off work with vIOMMU
  iommu/vt-d: Set intel_iommu_gfx_mapped correctly
  iommu/amd: Flush not present cache in iommu_map_page
  iommu/vt-d: Cleanup: no spaces at the start of a line
  iommu/vt-d: Don't request page request irq under dmar_global_lock
  iommu/vt-d: Use struct_size() helper
  iommu/mediatek: Fix leaked of_node references
  iommu/amd: Remove amd_iommu_pd_list
  iommu/arm-smmu: Log CBFRSYNRA register on context fault
  iommu/arm-smmu-v3: Don't disable SMMU in kdump kernel
  iommu/arm-smmu-v3: Disable tagged pointers
  iommu/arm-smmu-v3: Add support for PCI ATS
  iommu/arm-smmu-v3: Link domains and devices
  iommu/arm-smmu-v3: Add a master->domain pointer
  iommu/arm-smmu-v3: Store SteamIDs in master
  iommu/arm-smmu-v3: Rename arm_smmu_master_data to arm_smmu_master
  ACPI/IORT: Check ATS capability in root complex nodes
  ...
This commit is contained in: a13f065550
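As a rough, hypothetical sketch of what the new AUX-domain support in the IOMMU-API is for (this caller is illustrative only and is not part of this pull; only iommu_dev_enable_feature(), iommu_aux_attach_device() and iommu_aux_get_pasid() are interfaces added by the series), a mediated-device style backend could give a physical device an extra, PASID-tagged DMA address space roughly like this:

/*
 * Illustrative sketch only: attach an auxiliary domain to a parent PCI
 * device so a mediated device gets its own DMA address space.  The
 * surrounding driver structure and the function name are invented.
 */
static int example_attach_aux_domain(struct device *parent_dev)
{
	struct iommu_domain *domain;
	int pasid, ret;

	/* Ask the IOMMU driver to enable auxiliary-domain support. */
	ret = iommu_dev_enable_feature(parent_dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ret;

	domain = iommu_domain_alloc(parent_dev->bus);
	if (!domain)
		return -ENOMEM;

	/* Attach as an auxiliary domain rather than replacing the default one. */
	ret = iommu_aux_attach_device(domain, parent_dev);
	if (ret) {
		iommu_domain_free(domain);
		return ret;
	}

	/* DMA tagged with this PASID is translated by the new domain. */
	pasid = iommu_aux_get_pasid(domain, parent_dev);
	return pasid < 0 ? pasid : 0;
}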
@@ -1031,6 +1031,14 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
 	dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
 }
 
+static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
+{
+	struct acpi_iort_root_complex *pci_rc;
+
+	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
+	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
+}
+
 /**
  * iort_iommu_configure - Set-up IOMMU configuration for a device.
  *
@@ -1066,6 +1074,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 		info.node = node;
 		err = pci_for_each_dma_alias(to_pci_dev(dev),
 					     iort_pci_iommu_init, &info);
+
+		if (!err && iort_pci_rc_supports_ats(node))
+			dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
 	} else {
 		int i = 0;
 
@@ -359,6 +359,31 @@ config ARM_SMMU
 	  Say Y here if your SoC includes an IOMMU device implementing
 	  the ARM SMMU architecture.
 
+config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
+	bool "Default to disabling bypass on ARM SMMU v1 and v2"
+	depends on ARM_SMMU
+	default y
+	help
+	  Say Y here to (by default) disable bypass streams such that
+	  incoming transactions from devices that are not attached to
+	  an iommu domain will report an abort back to the device and
+	  will not be allowed to pass through the SMMU.
+
+	  Any old kernels that existed before this KConfig was
+	  introduced would default to _allowing_ bypass (AKA the
+	  equivalent of NO for this config).  However the default for
+	  this option is YES because the old behavior is insecure.
+
+	  There are few reasons to allow unmatched stream bypass, and
+	  even fewer good ones.  If saying YES here breaks your board
+	  you should work on fixing your board.  This KConfig option
+	  is expected to be removed in the future and we'll simply
+	  hardcode the bypass disable in the code.
+
+	  NOTE: the kernel command line parameter
+	  'arm-smmu.disable_bypass' will continue to override this
+	  config.
+
 config ARM_SMMU_V3
 	bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
 	depends on ARM64
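For reference, a sketch of how the override mentioned in the help text above is used: disable_bypass is a bool module parameter of the arm-smmu driver (see the arm-smmu.c hunk further down), so a board that breaks with the new default can restore the old, insecure bypass behaviour from the kernel command line, e.g.:

	arm-smmu.disable_bypass=0

while =1 (or =y) keeps bypass disabled, matching the Kconfig default above.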
@@ -1723,31 +1723,6 @@ static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
  *
  ****************************************************************************/
 
-/*
- * This function adds a protection domain to the global protection domain list
- */
-static void add_domain_to_list(struct protection_domain *domain)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
-	list_add(&domain->list, &amd_iommu_pd_list);
-	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
-/*
- * This function removes a protection domain to the global
- * protection domain list
- */
-static void del_domain_from_list(struct protection_domain *domain)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
-	list_del(&domain->list);
-	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
 static u16 domain_id_alloc(void)
 {
 	int id;
@@ -1838,8 +1813,6 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
 	if (!dom)
 		return;
 
-	del_domain_from_list(&dom->domain);
-
 	put_iova_domain(&dom->iovad);
 
 	free_pagetable(&dom->domain);
@@ -1880,8 +1853,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
 	/* Initialize reserved ranges */
 	copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
 
-	add_domain_to_list(&dma_dom->domain);
-
 	return dma_dom;
 
 free_dma_dom:
@@ -2122,23 +2093,6 @@ out_err:
 	return ret;
 }
 
-/* FIXME: Move this to PCI code */
-#define PCI_PRI_TLP_OFF		(1 << 15)
-
-static bool pci_pri_tlp_required(struct pci_dev *pdev)
-{
-	u16 status;
-	int pos;
-
-	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-	if (!pos)
-		return false;
-
-	pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
-
-	return (status & PCI_PRI_TLP_OFF) ? true : false;
-}
-
 /*
  * If a device is not yet associated with a domain, this function makes the
  * device visible in the domain
@@ -2167,7 +2121,7 @@ static int attach_device(struct device *dev,
 
 			dev_data->ats.enabled = true;
 			dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
-			dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+			dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
 		}
 	} else if (amd_iommu_iotlb_sup &&
 		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
@@ -2897,8 +2851,6 @@ static void protection_domain_free(struct protection_domain *domain)
 	if (!domain)
 		return;
 
-	del_domain_from_list(domain);
-
 	if (domain->id)
 		domain_id_free(domain->id);
 
@@ -2928,8 +2880,6 @@ static struct protection_domain *protection_domain_alloc(void)
 	if (protection_domain_init(domain))
 		goto out_err;
 
-	add_domain_to_list(domain);
-
 	return domain;
 
 out_err:
@@ -188,12 +188,6 @@ static bool amd_iommu_pc_present __read_mostly;
 
 bool amd_iommu_force_isolation __read_mostly;
 
-/*
- * List of protection domains - used during resume
- */
-LIST_HEAD(amd_iommu_pd_list);
-spinlock_t amd_iommu_pd_lock;
-
 /*
  * Pointer to the device table which is shared by all AMD IOMMUs
  * it is indexed by the PCI device id or the HT unit id and contains
@@ -2526,8 +2520,6 @@ static int __init early_amd_iommu_init(void)
 	 */
 	__set_bit(0, amd_iommu_pd_alloc_bitmap);
 
-	spin_lock_init(&amd_iommu_pd_lock);
-
 	/*
 	 * now the data structures are allocated and basically initialized
 	 * start the real acpi table scan
@@ -674,12 +674,6 @@ extern struct list_head amd_iommu_list;
  */
 extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
 
-/*
- * Declarations for the global list of all protection domains
- */
-extern spinlock_t amd_iommu_pd_lock;
-extern struct list_head amd_iommu_pd_list;
-
 /*
  * Structure defining one entry in the device table
  */
@@ -147,6 +147,8 @@ enum arm_smmu_s2cr_privcfg {
 #define CBAR_IRPTNDX_SHIFT		24
 #define CBAR_IRPTNDX_MASK		0xff
 
+#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
+
 #define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
 #define CBA2R_RW64_32BIT		(0 << 0)
 #define CBA2R_RW64_64BIT		(1 << 0)
@@ -29,6 +29,7 @@
 #include <linux/of_iommu.h>
 #include <linux/of_platform.h>
 #include <linux/pci.h>
+#include <linux/pci-ats.h>
 #include <linux/platform_device.h>
 
 #include <linux/amba/bus.h>
@@ -86,6 +87,7 @@
 #define IDR5_VAX_52_BIT			1
 
 #define ARM_SMMU_CR0			0x20
+#define CR0_ATSCHK			(1 << 4)
 #define CR0_CMDQEN			(1 << 3)
 #define CR0_EVTQEN			(1 << 2)
 #define CR0_PRIQEN			(1 << 1)
@@ -294,6 +296,7 @@
 #define CMDQ_ERR_CERROR_NONE_IDX	0
 #define CMDQ_ERR_CERROR_ILL_IDX		1
 #define CMDQ_ERR_CERROR_ABT_IDX		2
+#define CMDQ_ERR_CERROR_ATC_INV_IDX	3
 
 #define CMDQ_0_OP			GENMASK_ULL(7, 0)
 #define CMDQ_0_SSV			(1UL << 11)
@@ -312,6 +315,12 @@
 #define CMDQ_TLBI_1_VA_MASK		GENMASK_ULL(63, 12)
 #define CMDQ_TLBI_1_IPA_MASK		GENMASK_ULL(51, 12)
 
+#define CMDQ_ATC_0_SSID			GENMASK_ULL(31, 12)
+#define CMDQ_ATC_0_SID			GENMASK_ULL(63, 32)
+#define CMDQ_ATC_0_GLOBAL		(1UL << 9)
+#define CMDQ_ATC_1_SIZE			GENMASK_ULL(5, 0)
+#define CMDQ_ATC_1_ADDR_MASK		GENMASK_ULL(63, 12)
+
 #define CMDQ_PRI_0_SSID			GENMASK_ULL(31, 12)
 #define CMDQ_PRI_0_SID			GENMASK_ULL(63, 32)
 #define CMDQ_PRI_1_GRPID		GENMASK_ULL(8, 0)
@@ -433,6 +442,16 @@ struct arm_smmu_cmdq_ent {
 			u64			addr;
 		} tlbi;
 
+		#define CMDQ_OP_ATC_INV		0x40
+		#define ATC_INV_SIZE_ALL	52
+		struct {
+			u32			sid;
+			u32			ssid;
+			u64			addr;
+			u8			size;
+			bool			global;
+		} atc;
+
 		#define CMDQ_OP_PRI_RESP	0x41
 		struct {
 			u32			sid;
@@ -505,19 +524,6 @@ struct arm_smmu_s2_cfg {
 	u64				vtcr;
 };
 
-struct arm_smmu_strtab_ent {
-	/*
-	 * An STE is "assigned" if the master emitting the corresponding SID
-	 * is attached to a domain. The behaviour of an unassigned STE is
-	 * determined by the disable_bypass parameter, whereas an assigned
-	 * STE behaves according to s1_cfg/s2_cfg, which themselves are
-	 * configured according to the domain type.
-	 */
-	bool				assigned;
-	struct arm_smmu_s1_cfg		*s1_cfg;
-	struct arm_smmu_s2_cfg		*s2_cfg;
-};
-
 struct arm_smmu_strtab_cfg {
 	__le64				*strtab;
 	dma_addr_t			strtab_dma;
@@ -591,9 +597,14 @@ struct arm_smmu_device {
 };
 
 /* SMMU private data for each master */
-struct arm_smmu_master_data {
+struct arm_smmu_master {
 	struct arm_smmu_device		*smmu;
-	struct arm_smmu_strtab_ent	ste;
+	struct device			*dev;
+	struct arm_smmu_domain		*domain;
+	struct list_head		domain_head;
+	u32				*sids;
+	unsigned int			num_sids;
+	bool				ats_enabled		:1;
 };
 
 /* SMMU private data for an IOMMU domain */
@@ -618,6 +629,9 @@ struct arm_smmu_domain {
 	};
 
 	struct iommu_domain		domain;
+
+	struct list_head		devices;
+	spinlock_t			devices_lock;
 };
 
 struct arm_smmu_option_prop {
@@ -820,6 +834,14 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
 	case CMDQ_OP_TLBI_S12_VMALL:
 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
 		break;
+	case CMDQ_OP_ATC_INV:
+		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
+		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
+		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid);
+		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid);
+		cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size);
+		cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK;
+		break;
 	case CMDQ_OP_PRI_RESP:
 		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
 		cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
@@ -864,6 +886,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
 		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
 		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
 		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
+		[CMDQ_ERR_CERROR_ATC_INV_IDX]	= "ATC invalidate timeout",
 	};
 
 	int i;
@@ -883,6 +906,14 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
 		dev_err(smmu->dev, "retrying command fetch\n");
 	case CMDQ_ERR_CERROR_NONE_IDX:
 		return;
+	case CMDQ_ERR_CERROR_ATC_INV_IDX:
+		/*
+		 * ATC Invalidation Completion timeout. CONS is still pointing
+		 * at the CMD_SYNC. Attempt to complete other pending commands
+		 * by repeating the CMD_SYNC, though we might well end up back
+		 * here since the ATC invalidation may still be pending.
+		 */
+		return;
 	case CMDQ_ERR_CERROR_ILL_IDX:
 		/* Fallthrough */
 	default:
@@ -999,7 +1030,7 @@ static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
 	return ret;
 }
 
-static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
 {
 	int ret;
 	bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
@@ -1009,6 +1040,7 @@ static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
 		  : __arm_smmu_cmdq_issue_sync(smmu);
 	if (ret)
 		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
+	return ret;
 }
 
 /* Context descriptor manipulation functions */
@@ -1025,7 +1057,6 @@ static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
 	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
 	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
 	val |= ARM_SMMU_TCR2CD(tcr, IPS);
-	val |= ARM_SMMU_TCR2CD(tcr, TBI0);
 
 	return val;
 }
@@ -1085,8 +1116,8 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
 	arm_smmu_cmdq_issue_sync(smmu);
 }
 
-static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
-				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
+static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
+				      __le64 *dst)
 {
 	/*
 	 * This is hideously complicated, but we only really care about
@@ -1106,6 +1137,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 	 */
 	u64 val = le64_to_cpu(dst[0]);
 	bool ste_live = false;
+	struct arm_smmu_device *smmu = NULL;
+	struct arm_smmu_s1_cfg *s1_cfg = NULL;
+	struct arm_smmu_s2_cfg *s2_cfg = NULL;
+	struct arm_smmu_domain *smmu_domain = NULL;
 	struct arm_smmu_cmdq_ent prefetch_cmd = {
 		.opcode		= CMDQ_OP_PREFETCH_CFG,
 		.prefetch	= {
@@ -1113,6 +1148,25 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 		},
 	};
 
+	if (master) {
+		smmu_domain = master->domain;
+		smmu = master->smmu;
+	}
+
+	if (smmu_domain) {
+		switch (smmu_domain->stage) {
+		case ARM_SMMU_DOMAIN_S1:
+			s1_cfg = &smmu_domain->s1_cfg;
+			break;
+		case ARM_SMMU_DOMAIN_S2:
+		case ARM_SMMU_DOMAIN_NESTED:
+			s2_cfg = &smmu_domain->s2_cfg;
+			break;
+		default:
+			break;
+		}
+	}
+
 	if (val & STRTAB_STE_0_V) {
 		switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
 		case STRTAB_STE_0_CFG_BYPASS:
@@ -1133,8 +1187,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 	val = STRTAB_STE_0_V;
 
 	/* Bypass/fault */
-	if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
-		if (!ste->assigned && disable_bypass)
+	if (!smmu_domain || !(s1_cfg || s2_cfg)) {
+		if (!smmu_domain && disable_bypass)
 			val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
 		else
 			val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
@@ -1152,41 +1206,42 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 		return;
 	}
 
-	if (ste->s1_cfg) {
+	if (s1_cfg) {
 		BUG_ON(ste_live);
 		dst[1] = cpu_to_le64(
 			 FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
 			 FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
 			 FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
-#ifdef CONFIG_PCI_ATS
-			 FIELD_PREP(STRTAB_STE_1_EATS, STRTAB_STE_1_EATS_TRANS) |
-#endif
 			 FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
 
 		if (smmu->features & ARM_SMMU_FEAT_STALLS &&
 		    !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
 			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
 
-		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+		val |= (s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
 			FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS);
 	}
 
-	if (ste->s2_cfg) {
+	if (s2_cfg) {
 		BUG_ON(ste_live);
 		dst[2] = cpu_to_le64(
-			 FIELD_PREP(STRTAB_STE_2_S2VMID, ste->s2_cfg->vmid) |
-			 FIELD_PREP(STRTAB_STE_2_VTCR, ste->s2_cfg->vtcr) |
+			 FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
+			 FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
 #ifdef __BIG_ENDIAN
 			 STRTAB_STE_2_S2ENDI |
 #endif
 			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
 			 STRTAB_STE_2_S2R);
 
-		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
+		dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
 
 		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
 	}
 
+	if (master->ats_enabled)
+		dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
+						 STRTAB_STE_1_EATS_TRANS));
+
 	arm_smmu_sync_ste_for_sid(smmu, sid);
 	dst[0] = cpu_to_le64(val);
 	arm_smmu_sync_ste_for_sid(smmu, sid);
@@ -1199,10 +1254,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
 {
 	unsigned int i;
-	struct arm_smmu_strtab_ent ste = { .assigned = false };
 
 	for (i = 0; i < nent; ++i) {
-		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
+		arm_smmu_write_strtab_ent(NULL, -1, strtab);
 		strtab += STRTAB_STE_DWORDS;
 	}
 }
@@ -1390,6 +1444,96 @@ static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
 	return IRQ_WAKE_THREAD;
 }
 
+static void
+arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
+			struct arm_smmu_cmdq_ent *cmd)
+{
+	size_t log2_span;
+	size_t span_mask;
+	/* ATC invalidates are always on 4096-bytes pages */
+	size_t inval_grain_shift = 12;
+	unsigned long page_start, page_end;
+
+	*cmd = (struct arm_smmu_cmdq_ent) {
+		.opcode			= CMDQ_OP_ATC_INV,
+		.substream_valid	= !!ssid,
+		.atc.ssid		= ssid,
+	};
+
+	if (!size) {
+		cmd->atc.size = ATC_INV_SIZE_ALL;
+		return;
+	}
+
+	page_start	= iova >> inval_grain_shift;
+	page_end	= (iova + size - 1) >> inval_grain_shift;
+
+	/*
+	 * In an ATS Invalidate Request, the address must be aligned on the
+	 * range size, which must be a power of two number of page sizes. We
+	 * thus have to choose between grossly over-invalidating the region, or
+	 * splitting the invalidation into multiple commands. For simplicity
+	 * we'll go with the first solution, but should refine it in the future
+	 * if multiple commands are shown to be more efficient.
+	 *
+	 * Find the smallest power of two that covers the range. The most
+	 * significant differing bit between the start and end addresses,
+	 * fls(start ^ end), indicates the required span. For example:
+	 *
+	 * We want to invalidate pages [8; 11]. This is already the ideal range:
+	 *		x = 0b1000 ^ 0b1011 = 0b11
+	 *		span = 1 << fls(x) = 4
+	 *
+	 * To invalidate pages [7; 10], we need to invalidate [0; 15]:
+	 *		x = 0b0111 ^ 0b1010 = 0b1101
+	 *		span = 1 << fls(x) = 16
+	 */
+	log2_span	= fls_long(page_start ^ page_end);
+	span_mask	= (1ULL << log2_span) - 1;
+
+	page_start	&= ~span_mask;
+
+	cmd->atc.addr	= page_start << inval_grain_shift;
+	cmd->atc.size	= log2_span;
+}
+
+static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
+				   struct arm_smmu_cmdq_ent *cmd)
+{
+	int i;
+
+	if (!master->ats_enabled)
+		return 0;
+
+	for (i = 0; i < master->num_sids; i++) {
+		cmd->atc.sid = master->sids[i];
+		arm_smmu_cmdq_issue_cmd(master->smmu, cmd);
+	}
+
+	return arm_smmu_cmdq_issue_sync(master->smmu);
+}
+
+static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
+				   int ssid, unsigned long iova, size_t size)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct arm_smmu_cmdq_ent cmd;
+	struct arm_smmu_master *master;
+
+	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
+		return 0;
+
+	arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
+
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head)
+		ret |= arm_smmu_atc_inv_master(master, &cmd);
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+	return ret ? -ETIMEDOUT : 0;
+}
+
 /* IO_PGTABLE API */
 static void arm_smmu_tlb_sync(void *cookie)
 {
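A standalone illustration of the span calculation performed by arm_smmu_atc_inv_to_cmd() above, reproducing the two worked examples from its comment. This is a hypothetical userspace sketch: fls_long() is a kernel helper, so it is approximated here with a portable loop, and the page numbers are hard-coded.

#include <stdio.h>

/* Portable stand-in for the kernel's fls_long(): 1-based index of the
 * most significant set bit, 0 for an input of 0. */
static unsigned int fls_approx(unsigned long x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* [8; 11] is already aligned; [7; 10] must be rounded out to [0; 15]. */
	unsigned long ranges[][2] = { { 8, 11 }, { 7, 10 } };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned long start = ranges[i][0], end = ranges[i][1];
		unsigned int log2_span = fls_approx(start ^ end);
		unsigned long span = 1UL << log2_span;
		unsigned long aligned_start = start & ~(span - 1);

		/* Prints spans of 4 and 16 pages, as in the comment above. */
		printf("pages [%lu; %lu] -> invalidate %lu pages from page %lu\n",
		       start, end, span, aligned_start);
	}
	return 0;
}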
@@ -1493,6 +1637,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 	}
 
 	mutex_init(&smmu_domain->init_mutex);
+	INIT_LIST_HEAD(&smmu_domain->devices);
+	spin_lock_init(&smmu_domain->devices_lock);
+
 	return &smmu_domain->domain;
 }
 
@@ -1688,55 +1835,97 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
 	return step;
 }
 
-static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
 {
 	int i, j;
-	struct arm_smmu_master_data *master = fwspec->iommu_priv;
 	struct arm_smmu_device *smmu = master->smmu;
 
-	for (i = 0; i < fwspec->num_ids; ++i) {
-		u32 sid = fwspec->ids[i];
+	for (i = 0; i < master->num_sids; ++i) {
+		u32 sid = master->sids[i];
 		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
 
 		/* Bridged PCI devices may end up with duplicated IDs */
 		for (j = 0; j < i; j++)
-			if (fwspec->ids[j] == sid)
+			if (master->sids[j] == sid)
 				break;
 		if (j < i)
 			continue;
 
-		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
+		arm_smmu_write_strtab_ent(master, sid, step);
 	}
 }
 
-static void arm_smmu_detach_dev(struct device *dev)
+static int arm_smmu_enable_ats(struct arm_smmu_master *master)
 {
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct arm_smmu_master_data *master = fwspec->iommu_priv;
+	int ret;
+	size_t stu;
+	struct pci_dev *pdev;
+	struct arm_smmu_device *smmu = master->smmu;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
 
-	master->ste.assigned = false;
-	arm_smmu_install_ste_for_dev(fwspec);
+	if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) ||
+	    !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled())
+		return -ENXIO;
+
+	pdev = to_pci_dev(master->dev);
+	if (pdev->untrusted)
+		return -EPERM;
+
+	/* Smallest Translation Unit: log2 of the smallest supported granule */
+	stu = __ffs(smmu->pgsize_bitmap);
+
+	ret = pci_enable_ats(pdev, stu);
+	if (ret)
+		return ret;
+
+	master->ats_enabled = true;
+	return 0;
+}
+
+static void arm_smmu_disable_ats(struct arm_smmu_master *master)
+{
+	if (!master->ats_enabled || !dev_is_pci(master->dev))
+		return;
+
+	pci_disable_ats(to_pci_dev(master->dev));
+	master->ats_enabled = false;
+}
+
+static void arm_smmu_detach_dev(struct arm_smmu_master *master)
+{
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = master->domain;
+
+	if (!smmu_domain)
+		return;
+
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_del(&master->domain_head);
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+	master->domain = NULL;
+	arm_smmu_install_ste_for_dev(master);
+
+	/* Disabling ATS invalidates all ATC entries */
+	arm_smmu_disable_ats(master);
 }
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret = 0;
+	unsigned long flags;
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_master_data *master;
-	struct arm_smmu_strtab_ent *ste;
+	struct arm_smmu_master *master;
 
 	if (!fwspec)
 		return -ENOENT;
 
 	master = fwspec->iommu_priv;
 	smmu = master->smmu;
-	ste = &master->ste;
 
-	/* Already attached to a different domain? */
-	if (ste->assigned)
-		arm_smmu_detach_dev(dev);
+	arm_smmu_detach_dev(master);
 
 	mutex_lock(&smmu_domain->init_mutex);
 
@@ -1756,21 +1945,19 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		goto out_unlock;
 	}
 
-	ste->assigned = true;
+	master->domain = smmu_domain;
 
-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
-		ste->s1_cfg = NULL;
-		ste->s2_cfg = NULL;
-	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-		ste->s1_cfg = &smmu_domain->s1_cfg;
-		ste->s2_cfg = NULL;
-		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
-	} else {
-		ste->s1_cfg = NULL;
-		ste->s2_cfg = &smmu_domain->s2_cfg;
-	}
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_add(&master->domain_head, &smmu_domain->devices);
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
-	arm_smmu_install_ste_for_dev(fwspec);
+	if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
+		arm_smmu_enable_ats(master);
+
+	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+		arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg);
+
+	arm_smmu_install_ste_for_dev(master);
 out_unlock:
 	mutex_unlock(&smmu_domain->init_mutex);
 	return ret;
@@ -1790,12 +1977,18 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 static size_t
 arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 {
-	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+	int ret;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
 
 	if (!ops)
 		return 0;
 
-	return ops->unmap(ops, iova, size);
+	ret = ops->unmap(ops, iova, size);
+	if (ret && arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size))
+		return 0;
+
+	return ret;
 }
 
 static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
@@ -1860,7 +2053,7 @@ static int arm_smmu_add_device(struct device *dev)
 {
 	int i, ret;
 	struct arm_smmu_device *smmu;
-	struct arm_smmu_master_data *master;
+	struct arm_smmu_master *master;
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct iommu_group *group;
 
@@ -1882,13 +2075,16 @@ static int arm_smmu_add_device(struct device *dev)
 		if (!master)
 			return -ENOMEM;
 
+		master->dev = dev;
 		master->smmu = smmu;
+		master->sids = fwspec->ids;
+		master->num_sids = fwspec->num_ids;
 		fwspec->iommu_priv = master;
 	}
 
 	/* Check the SIDs are in range of the SMMU and our stream table */
-	for (i = 0; i < fwspec->num_ids; i++) {
-		u32 sid = fwspec->ids[i];
+	for (i = 0; i < master->num_sids; i++) {
+		u32 sid = master->sids[i];
 
 		if (!arm_smmu_sid_in_range(smmu, sid))
 			return -ERANGE;
@@ -1913,7 +2109,7 @@ static void arm_smmu_remove_device(struct device *dev)
 static void arm_smmu_remove_device(struct device *dev)
 {
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct arm_smmu_master_data *master;
+	struct arm_smmu_master *master;
 	struct arm_smmu_device *smmu;
 
 	if (!fwspec || fwspec->ops != &arm_smmu_ops)
@@ -1921,8 +2117,7 @@ static void arm_smmu_remove_device(struct device *dev)
 
 	master = fwspec->iommu_priv;
 	smmu = master->smmu;
-	if (master && master->ste.assigned)
-		arm_smmu_detach_dev(dev);
+	arm_smmu_detach_dev(master);
 	iommu_group_remove_device(dev);
 	iommu_device_unlink(&smmu->iommu, dev);
 	kfree(master);
@@ -2454,13 +2649,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 	/* Clear CR0 and sync (disables SMMU and queue processing) */
 	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
 	if (reg & CR0_SMMUEN) {
-		if (is_kdump_kernel()) {
-			arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
-			arm_smmu_device_disable(smmu);
-			return -EBUSY;
-		}
-
 		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
+		WARN_ON(is_kdump_kernel() && !disable_bypass);
+		arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
 	}
 
 	ret = arm_smmu_device_disable(smmu);
@@ -2547,12 +2738,24 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 		}
 	}
 
+	if (smmu->features & ARM_SMMU_FEAT_ATS) {
+		enables |= CR0_ATSCHK;
+		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
+					      ARM_SMMU_CR0ACK);
+		if (ret) {
+			dev_err(smmu->dev, "failed to enable ATS check\n");
+			return ret;
+		}
+	}
+
 	ret = arm_smmu_setup_irqs(smmu);
 	if (ret) {
 		dev_err(smmu->dev, "failed to setup irqs\n");
 		return ret;
 	}
 
+	if (is_kdump_kernel())
+		enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
+
 	/* Enable the SMMU interface, or ensure bypass */
 	if (!bypass || disable_bypass) {
@@ -110,7 +110,8 @@ static int force_stage;
 module_param(force_stage, int, S_IRUGO);
 MODULE_PARM_DESC(force_stage,
 	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
-static bool disable_bypass;
+static bool disable_bypass =
+	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
 module_param(disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
 	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
@@ -569,12 +570,13 @@ static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
-	u32 fsr, fsynr;
+	u32 fsr, fsynr, cbfrsynra;
 	unsigned long iova;
 	struct iommu_domain *domain = dev;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
 	void __iomem *cb_base;
 
 	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -585,10 +587,11 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 
 	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
 	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
+	cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
 
 	dev_err_ratelimited(smmu->dev,
-	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
-			    fsr, iova, fsynr, cfg->cbndx);
+	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+			    fsr, iova, fsynr, cbfrsynra, cfg->cbndx);
 
 	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
 	return IRQ_HANDLED;
@@ -145,7 +145,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
 	for (tmp = dev; tmp; tmp = tmp->bus->self)
 		level++;
 
-	size = sizeof(*info) + level * sizeof(info->path[0]);
+	size = struct_size(info, path, level);
 	if (size <= sizeof(dmar_pci_notify_info_buf)) {
 		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
 	} else {
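The struct_size() helper adopted above comes from <linux/overflow.h>: for a struct that ends in a flexible array member it evaluates to sizeof(*ptr) + count * sizeof(ptr->member[0]), but with overflow checking (it saturates rather than silently wrapping, so an oversized request simply fails to allocate). A minimal sketch with a made-up struct, assuming the usual kernel headers:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only: a struct with a trailing flexible array, shaped
 * like dmar_pci_notify_info's path[]. */
struct example_path_info {
	int level;
	struct {
		u8 bus;
		u8 device;
		u8 function;
	} path[];
};

static struct example_path_info *example_alloc(int level)
{
	/*
	 * struct_size(p, path, level) is the checked equivalent of
	 * sizeof(*p) + level * sizeof(p->path[0]); p is only used under
	 * sizeof here, so referencing it in its own initializer is fine.
	 */
	struct example_path_info *p = kzalloc(struct_size(p, path, level),
					      GFP_KERNEL);

	if (p)
		p->level = level;
	return p;
}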
@@ -2341,32 +2341,33 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 }
 
 static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			  struct scatterlist *sg, unsigned long phys_pfn,
 			  unsigned long nr_pages, int prot)
 {
 	int ret;
 	struct intel_iommu *iommu;
 
 	/* Do the real mapping first */
 	ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
 	if (ret)
 		return ret;
 
 	/* Notify about the new mapping */
 	if (domain_type_is_vm(domain)) {
 		/* VM typed domains can have more than one IOMMUs */
 		int iommu_id;
-               for_each_domain_iommu(iommu_id, domain) {
-                       iommu = g_iommus[iommu_id];
-                       __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
-               }
-       } else {
-               /* General domains only have one IOMMU */
-               iommu = domain_get_iommu(domain);
-               __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
-       }
-
-       return 0;
+
+		for_each_domain_iommu(iommu_id, domain) {
+			iommu = g_iommus[iommu_id];
+			__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+		}
+	} else {
+		/* General domains only have one IOMMU */
+		iommu = domain_get_iommu(domain);
+		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+	}
+
+	return 0;
 }
 
 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
@@ -2485,6 +2486,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 	info->domain = domain;
 	info->iommu = iommu;
 	info->pasid_table = NULL;
+	info->auxd_enabled = 0;
+	INIT_LIST_HEAD(&info->auxiliary_domains);
 
 	if (dev && dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -3412,9 +3415,12 @@ static int __init init_dmars(void)
 		iommu_identity_mapping |= IDENTMAP_ALL;
 
 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
-	iommu_identity_mapping |= IDENTMAP_GFX;
+	dmar_map_gfx = 0;
 #endif
 
+	if (!dmar_map_gfx)
+		iommu_identity_mapping |= IDENTMAP_GFX;
+
 	check_tylersburg_isoch();
 
 	if (iommu_identity_mapping) {
@@ -3496,7 +3502,13 @@ domains_done:
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
 		if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
+			/*
+			 * Call dmar_alloc_hwirq() with dmar_global_lock held,
+			 * could cause possible lock race condition.
+			 */
+			up_write(&dmar_global_lock);
 			ret = intel_svm_enable_prq(iommu);
+			down_write(&dmar_global_lock);
 			if (ret)
 				goto free_iommu;
 		}
@@ -3606,45 +3618,40 @@ out:
 }
 
 /* Check if the dev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct device *dev)
+static bool iommu_need_mapping(struct device *dev)
 {
 	int found;
 
 	if (iommu_dummy(dev))
-		return 1;
+		return false;
 
 	if (!iommu_identity_mapping)
-		return 0;
+		return true;
 
 	found = identity_mapping(dev);
 	if (found) {
 		if (iommu_should_identity_map(dev, 0))
-			return 1;
-		else {
-			/*
-			 * 32 bit DMA is removed from si_domain and fall back
-			 * to non-identity mapping.
-			 */
-			dmar_remove_one_dev_info(dev);
-			dev_info(dev, "32bit DMA uses non-identity mapping\n");
-			return 0;
-		}
+			return false;
+
+		/*
+		 * 32 bit DMA is removed from si_domain and fall back to
+		 * non-identity mapping.
+		 */
+		dmar_remove_one_dev_info(dev);
+		dev_info(dev, "32bit DMA uses non-identity mapping\n");
 	} else {
 		/*
 		 * In case of a detached 64 bit DMA device from vm, the device
 		 * is put into si_domain for identity mapping.
 		 */
-		if (iommu_should_identity_map(dev, 0)) {
-			int ret;
-			ret = domain_add_dev_info(si_domain, dev);
-			if (!ret) {
-				dev_info(dev, "64bit DMA uses identity mapping\n");
-				return 1;
-			}
+		if (iommu_should_identity_map(dev, 0) &&
+		    !domain_add_dev_info(si_domain, dev)) {
+			dev_info(dev, "64bit DMA uses identity mapping\n");
+			return false;
 		}
 	}
 
-	return 0;
+	return true;
 }
 
 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
@@ -3660,9 +3667,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (iommu_no_mapping(dev))
-		return paddr;
-
 	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
 		return DMA_MAPPING_ERROR;
@@ -3711,15 +3715,20 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
 			 enum dma_data_direction dir,
 			 unsigned long attrs)
 {
-	return __intel_map_single(dev, page_to_phys(page) + offset, size,
-				  dir, *dev->dma_mask);
+	if (iommu_need_mapping(dev))
+		return __intel_map_single(dev, page_to_phys(page) + offset,
+				size, dir, *dev->dma_mask);
+	return dma_direct_map_page(dev, page, offset, size, dir, attrs);
 }
 
 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
 				     size_t size, enum dma_data_direction dir,
 				     unsigned long attrs)
 {
-	return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
+	if (iommu_need_mapping(dev))
+		return __intel_map_single(dev, phys_addr, size, dir,
+				*dev->dma_mask);
+	return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
|
@ -3730,9 +3739,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
|
||||||
unsigned long iova_pfn;
|
unsigned long iova_pfn;
|
||||||
struct intel_iommu *iommu;
|
struct intel_iommu *iommu;
|
||||||
struct page *freelist;
|
struct page *freelist;
|
||||||
|
struct pci_dev *pdev = NULL;
|
||||||
if (iommu_no_mapping(dev))
|
|
||||||
return;
|
|
||||||
|
|
||||||
domain = find_domain(dev);
|
domain = find_domain(dev);
|
||||||
BUG_ON(!domain);
|
BUG_ON(!domain);
|
||||||
|
@@ -3745,11 +3752,14 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 	start_pfn = mm_to_dma_pfn(iova_pfn);
 	last_pfn = start_pfn + nrpages - 1;
 
+	if (dev_is_pci(dev))
+		pdev = to_pci_dev(dev);
+
 	dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
 
 	freelist = domain_unmap(domain, start_pfn, last_pfn);
 
-	if (intel_iommu_strict) {
+	if (intel_iommu_strict || (pdev && pdev->untrusted)) {
 		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
 				      nrpages, !freelist, 0);
 		/* free iova */
@@ -3769,7 +3779,17 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 			     size_t size, enum dma_data_direction dir,
 			     unsigned long attrs)
 {
-	intel_unmap(dev, dev_addr, size);
+	if (iommu_need_mapping(dev))
+		intel_unmap(dev, dev_addr, size);
+	else
+		dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (iommu_need_mapping(dev))
+		intel_unmap(dev, dev_addr, size);
 }
 
 static void *intel_alloc_coherent(struct device *dev, size_t size,
@ -3779,28 +3799,17 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
|
||||||
struct page *page = NULL;
|
struct page *page = NULL;
|
||||||
int order;
|
int order;
|
||||||
|
|
||||||
|
if (!iommu_need_mapping(dev))
|
||||||
|
return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
|
||||||
|
|
||||||
size = PAGE_ALIGN(size);
|
size = PAGE_ALIGN(size);
|
||||||
order = get_order(size);
|
order = get_order(size);
|
||||||
|
|
||||||
if (!iommu_no_mapping(dev))
|
|
||||||
flags &= ~(GFP_DMA | GFP_DMA32);
|
|
||||||
else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
|
|
||||||
if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
|
|
||||||
flags |= GFP_DMA;
|
|
||||||
else
|
|
||||||
flags |= GFP_DMA32;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (gfpflags_allow_blocking(flags)) {
|
if (gfpflags_allow_blocking(flags)) {
|
||||||
unsigned int count = size >> PAGE_SHIFT;
|
unsigned int count = size >> PAGE_SHIFT;
|
||||||
|
|
||||||
page = dma_alloc_from_contiguous(dev, count, order,
|
page = dma_alloc_from_contiguous(dev, count, order,
|
||||||
flags & __GFP_NOWARN);
|
flags & __GFP_NOWARN);
|
||||||
if (page && iommu_no_mapping(dev) &&
|
|
||||||
page_to_phys(page) + size > dev->coherent_dma_mask) {
|
|
||||||
dma_release_from_contiguous(dev, page, count);
|
|
||||||
page = NULL;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!page)
|
if (!page)
|
||||||
|
@ -3826,6 +3835,9 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
|
||||||
int order;
|
int order;
|
||||||
struct page *page = virt_to_page(vaddr);
|
struct page *page = virt_to_page(vaddr);
|
||||||
|
|
||||||
|
if (!iommu_need_mapping(dev))
|
||||||
|
return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
|
||||||
|
|
||||||
size = PAGE_ALIGN(size);
|
size = PAGE_ALIGN(size);
|
||||||
order = get_order(size);
|
order = get_order(size);
|
||||||
|
|
||||||
|
@ -3843,6 +3855,9 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
||||||
struct scatterlist *sg;
|
struct scatterlist *sg;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
|
if (!iommu_need_mapping(dev))
|
||||||
|
return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
|
||||||
|
|
||||||
for_each_sg(sglist, sg, nelems, i) {
|
for_each_sg(sglist, sg, nelems, i) {
|
||||||
nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
|
nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
|
||||||
}
|
}
|
||||||
|
@ -3850,20 +3865,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
||||||
intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
|
intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int intel_nontranslate_map_sg(struct device *hddev,
|
|
||||||
struct scatterlist *sglist, int nelems, int dir)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
struct scatterlist *sg;
|
|
||||||
|
|
||||||
for_each_sg(sglist, sg, nelems, i) {
|
|
||||||
BUG_ON(!sg_page(sg));
|
|
||||||
sg->dma_address = sg_phys(sg);
|
|
||||||
sg->dma_length = sg->length;
|
|
||||||
}
|
|
||||||
return nelems;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
|
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
|
||||||
enum dma_data_direction dir, unsigned long attrs)
|
enum dma_data_direction dir, unsigned long attrs)
|
||||||
{
|
{
|
||||||
|
@ -3878,8 +3879,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
|
||||||
struct intel_iommu *iommu;
|
struct intel_iommu *iommu;
|
||||||
|
|
||||||
BUG_ON(dir == DMA_NONE);
|
BUG_ON(dir == DMA_NONE);
|
||||||
if (iommu_no_mapping(dev))
|
if (!iommu_need_mapping(dev))
|
||||||
return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
|
return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
|
||||||
|
|
||||||
domain = get_valid_domain_for_dev(dev);
|
domain = get_valid_domain_for_dev(dev);
|
||||||
if (!domain)
|
if (!domain)
|
||||||
|
@ -3929,7 +3930,7 @@ static const struct dma_map_ops intel_dma_ops = {
|
||||||
.map_page = intel_map_page,
|
.map_page = intel_map_page,
|
||||||
.unmap_page = intel_unmap_page,
|
.unmap_page = intel_unmap_page,
|
||||||
.map_resource = intel_map_resource,
|
.map_resource = intel_map_resource,
|
||||||
.unmap_resource = intel_unmap_page,
|
.unmap_resource = intel_unmap_resource,
|
||||||
.dma_supported = dma_direct_supported,
|
.dma_supported = dma_direct_supported,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -4055,9 +4056,7 @@ static void __init init_no_remapping_devices(void)
|
||||||
|
|
||||||
/* This IOMMU has *only* gfx devices. Either bypass it or
|
/* This IOMMU has *only* gfx devices. Either bypass it or
|
||||||
set the gfx_mapped flag, as appropriate */
|
set the gfx_mapped flag, as appropriate */
|
||||||
if (dmar_map_gfx) {
|
if (!dmar_map_gfx) {
|
||||||
intel_iommu_gfx_mapped = 1;
|
|
||||||
} else {
|
|
||||||
drhd->ignored = 1;
|
drhd->ignored = 1;
|
||||||
for_each_active_dev_scope(drhd->devices,
|
for_each_active_dev_scope(drhd->devices,
|
||||||
drhd->devices_cnt, i, dev)
|
drhd->devices_cnt, i, dev)
|
||||||
|
@@ -4086,7 +4085,7 @@ static int init_iommu_hw(void)
 				iommu_disable_protect_mem_regions(iommu);
 			continue;
 		}
 
-        iommu_flush_write_buffer(iommu);
+		iommu_flush_write_buffer(iommu);
 
 		iommu_set_root_entry(iommu);
@@ -4896,6 +4895,9 @@ int __init intel_iommu_init(void)
 		goto out_free_reserved_range;
 	}
 
+	if (dmar_map_gfx)
+		intel_iommu_gfx_mapped = 1;
+
 	init_no_remapping_devices();
 
 	ret = init_dmars();
@@ -5065,35 +5067,139 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
 	domain_exit(to_dmar_domain(domain));
 }
 
-static int intel_iommu_attach_device(struct iommu_domain *domain,
-				     struct device *dev)
+/*
+ * Check whether a @domain could be attached to the @dev through the
+ * aux-domain attach/detach APIs.
+ */
+static inline bool
+is_aux_domain(struct device *dev, struct iommu_domain *domain)
+{
+	struct device_domain_info *info = dev->archdata.iommu;
+
+	return info && info->auxd_enabled &&
+			domain->type == IOMMU_DOMAIN_UNMANAGED;
+}
+
+static void auxiliary_link_device(struct dmar_domain *domain,
+				  struct device *dev)
+{
+	struct device_domain_info *info = dev->archdata.iommu;
+
+	assert_spin_locked(&device_domain_lock);
+	if (WARN_ON(!info))
+		return;
+
+	domain->auxd_refcnt++;
+	list_add(&domain->auxd, &info->auxiliary_domains);
+}
+
+static void auxiliary_unlink_device(struct dmar_domain *domain,
+				    struct device *dev)
+{
+	struct device_domain_info *info = dev->archdata.iommu;
+
+	assert_spin_locked(&device_domain_lock);
+	if (WARN_ON(!info))
+		return;
+
+	list_del(&domain->auxd);
+	domain->auxd_refcnt--;
+
+	if (!domain->auxd_refcnt && domain->default_pasid > 0)
+		intel_pasid_free_id(domain->default_pasid);
+}
+
+static int aux_domain_add_dev(struct dmar_domain *domain,
+			      struct device *dev)
+{
+	int ret;
+	u8 bus, devfn;
+	unsigned long flags;
+	struct intel_iommu *iommu;
+
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu)
+		return -ENODEV;
+
+	if (domain->default_pasid <= 0) {
+		int pasid;
+
+		pasid = intel_pasid_alloc_id(domain, PASID_MIN,
+					     pci_max_pasids(to_pci_dev(dev)),
+					     GFP_KERNEL);
+		if (pasid <= 0) {
+			pr_err("Can't allocate default pasid\n");
+			return -ENODEV;
+		}
+		domain->default_pasid = pasid;
+	}
+
+	spin_lock_irqsave(&device_domain_lock, flags);
+	/*
+	 * iommu->lock must be held to attach domain to iommu and setup the
+	 * pasid entry for second level translation.
+	 */
+	spin_lock(&iommu->lock);
+	ret = domain_attach_iommu(domain, iommu);
+	if (ret)
+		goto attach_failed;
+
+	/* Setup the PASID entry for mediated devices: */
+	ret = intel_pasid_setup_second_level(iommu, domain, dev,
+					     domain->default_pasid);
+	if (ret)
+		goto table_failed;
+	spin_unlock(&iommu->lock);
+
+	auxiliary_link_device(domain, dev);
+
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+
+	return 0;
+
+table_failed:
+	domain_detach_iommu(domain, iommu);
+attach_failed:
+	spin_unlock(&iommu->lock);
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+	if (!domain->auxd_refcnt && domain->default_pasid > 0)
+		intel_pasid_free_id(domain->default_pasid);
+
+	return ret;
+}
+
+static void aux_domain_remove_dev(struct dmar_domain *domain,
+				  struct device *dev)
+{
+	struct device_domain_info *info;
+	struct intel_iommu *iommu;
+	unsigned long flags;
+
+	if (!is_aux_domain(dev, &domain->domain))
+		return;
+
+	spin_lock_irqsave(&device_domain_lock, flags);
+	info = dev->archdata.iommu;
+	iommu = info->iommu;
+
+	auxiliary_unlink_device(domain, dev);
+
+	spin_lock(&iommu->lock);
+	intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
+	domain_detach_iommu(domain, iommu);
+	spin_unlock(&iommu->lock);
+
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static int prepare_domain_attach_device(struct iommu_domain *domain,
+					struct device *dev)
 {
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct intel_iommu *iommu;
 	int addr_width;
 	u8 bus, devfn;
 
-	if (device_is_rmrr_locked(dev)) {
-		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
-		return -EPERM;
-	}
-
-	/* normally dev is not mapped */
-	if (unlikely(domain_context_mapped(dev))) {
-		struct dmar_domain *old_domain;
-
-		old_domain = find_domain(dev);
-		if (old_domain) {
-			rcu_read_lock();
-			dmar_remove_one_dev_info(dev);
-			rcu_read_unlock();
-
-			if (!domain_type_is_vm_or_si(old_domain) &&
-			    list_empty(&old_domain->devices))
-				domain_exit(old_domain);
-		}
-	}
-
 	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return -ENODEV;
@@ -5126,7 +5232,58 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		dmar_domain->agaw--;
 	}
 
-	return domain_add_dev_info(dmar_domain, dev);
+	return 0;
+}
+
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+				     struct device *dev)
+{
+	int ret;
+
+	if (device_is_rmrr_locked(dev)) {
+		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
+		return -EPERM;
+	}
+
+	if (is_aux_domain(dev, domain))
+		return -EPERM;
+
+	/* normally dev is not mapped */
+	if (unlikely(domain_context_mapped(dev))) {
+		struct dmar_domain *old_domain;
+
+		old_domain = find_domain(dev);
+		if (old_domain) {
+			rcu_read_lock();
+			dmar_remove_one_dev_info(dev);
+			rcu_read_unlock();
+
+			if (!domain_type_is_vm_or_si(old_domain) &&
+			    list_empty(&old_domain->devices))
+				domain_exit(old_domain);
+		}
+	}
+
+	ret = prepare_domain_attach_device(domain, dev);
+	if (ret)
+		return ret;
+
+	return domain_add_dev_info(to_dmar_domain(domain), dev);
+}
+
+static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
+					 struct device *dev)
+{
+	int ret;
+
+	if (!is_aux_domain(dev, domain))
+		return -EPERM;
+
+	ret = prepare_domain_attach_device(domain, dev);
+	if (ret)
+		return ret;
+
+	return aux_domain_add_dev(to_dmar_domain(domain), dev);
 }
 
 static void intel_iommu_detach_device(struct iommu_domain *domain,
@@ -5135,6 +5292,12 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 	dmar_remove_one_dev_info(dev);
 }
 
+static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
+					  struct device *dev)
+{
+	aux_domain_remove_dev(to_dmar_domain(domain), dev);
+}
+
 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
 			   size_t size, int iommu_prot)
@@ -5223,6 +5386,42 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 	return phys;
 }
 
+static inline bool scalable_mode_support(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	bool ret = true;
+
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		if (!sm_supported(iommu)) {
+			ret = false;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+static inline bool iommu_pasid_support(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	bool ret = true;
+
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		if (!pasid_supported(iommu)) {
+			ret = false;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 static bool intel_iommu_capable(enum iommu_cap cap)
 {
 	if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5307,8 +5506,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
 	}
 }
 
-#ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
+int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 {
 	struct device_domain_info *info;
 	struct context_entry *context;
@@ -5317,7 +5515,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
 	u64 ctx_lo;
 	int ret;
 
-	domain = get_valid_domain_for_dev(sdev->dev);
+	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
 		return -EINVAL;
 
@@ -5325,7 +5523,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
 	spin_lock(&iommu->lock);
 
 	ret = -EINVAL;
-	info = sdev->dev->archdata.iommu;
+	info = dev->archdata.iommu;
 	if (!info || !info->pasid_supported)
 		goto out;
 
@@ -5335,14 +5533,13 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
 
 	ctx_lo = context[0].lo;
 
-	sdev->did = FLPT_DEFAULT_DID;
-	sdev->sid = PCI_DEVID(info->bus, info->devfn);
-
 	if (!(ctx_lo & CONTEXT_PASIDE)) {
 		ctx_lo |= CONTEXT_PASIDE;
 		context[0].lo = ctx_lo;
 		wmb();
-		iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
+		iommu->flush.flush_context(iommu,
+					   domain->iommu_did[iommu->seq_id],
+					   PCI_DEVID(info->bus, info->devfn),
 					   DMA_CCMD_MASK_NOBIT,
 					   DMA_CCMD_DEVICE_INVL);
 	}
@@ -5351,12 +5548,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
 	if (!info->pasid_enabled)
 		iommu_enable_dev_iotlb(info);
 
-	if (info->ats_enabled) {
-		sdev->dev_iotlb = 1;
-		sdev->qdep = info->ats_qdep;
-		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
-			sdev->qdep = 0;
-	}
 	ret = 0;
 
 out:
@@ -5366,6 +5557,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
 	return ret;
 }
 
+#ifdef CONFIG_INTEL_IOMMU_SVM
 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 {
 	struct intel_iommu *iommu;
@@ -5387,12 +5579,142 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 }
 #endif /* CONFIG_INTEL_IOMMU_SVM */
 
+static int intel_iommu_enable_auxd(struct device *dev)
+{
+	struct device_domain_info *info;
+	struct intel_iommu *iommu;
+	unsigned long flags;
+	u8 bus, devfn;
+	int ret;
+
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu || dmar_disabled)
+		return -EINVAL;
+
+	if (!sm_supported(iommu) || !pasid_supported(iommu))
+		return -EINVAL;
+
+	ret = intel_iommu_enable_pasid(iommu, dev);
+	if (ret)
+		return -ENODEV;
+
+	spin_lock_irqsave(&device_domain_lock, flags);
+	info = dev->archdata.iommu;
+	info->auxd_enabled = 1;
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+
+	return 0;
+}
+
+static int intel_iommu_disable_auxd(struct device *dev)
+{
+	struct device_domain_info *info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&device_domain_lock, flags);
+	info = dev->archdata.iommu;
+	if (!WARN_ON(!info))
+		info->auxd_enabled = 0;
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+
+	return 0;
+}
+
+/*
+ * A PCI express designated vendor specific extended capability is defined
+ * in the section 3.7 of Intel scalable I/O virtualization technical spec
+ * for system software and tools to detect endpoint devices supporting the
+ * Intel scalable IO virtualization without host driver dependency.
+ *
+ * Returns the address of the matching extended capability structure within
+ * the device's PCI configuration space or 0 if the device does not support
+ * it.
+ */
+static int siov_find_pci_dvsec(struct pci_dev *pdev)
+{
+	int pos;
+	u16 vendor, id;
+
+	pos = pci_find_next_ext_capability(pdev, 0, 0x23);
+	while (pos) {
+		pci_read_config_word(pdev, pos + 4, &vendor);
+		pci_read_config_word(pdev, pos + 8, &id);
+		if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
+			return pos;
+
+		pos = pci_find_next_ext_capability(pdev, pos, 0x23);
+	}
+
+	return 0;
+}
+
+static bool
+intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
+{
+	if (feat == IOMMU_DEV_FEAT_AUX) {
+		int ret;
+
+		if (!dev_is_pci(dev) || dmar_disabled ||
+		    !scalable_mode_support() || !iommu_pasid_support())
+			return false;
+
+		ret = pci_pasid_features(to_pci_dev(dev));
+		if (ret < 0)
+			return false;
+
+		return !!siov_find_pci_dvsec(to_pci_dev(dev));
+	}
+
+	return false;
+}
+
+static int
+intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+	if (feat == IOMMU_DEV_FEAT_AUX)
+		return intel_iommu_enable_auxd(dev);
+
+	return -ENODEV;
+}
+
+static int
+intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+	if (feat == IOMMU_DEV_FEAT_AUX)
+		return intel_iommu_disable_auxd(dev);
+
+	return -ENODEV;
+}
+
+static bool
+intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+	struct device_domain_info *info = dev->archdata.iommu;
+
+	if (feat == IOMMU_DEV_FEAT_AUX)
+		return scalable_mode_support() && info && info->auxd_enabled;
+
+	return false;
+}
+
+static int
+intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+	return dmar_domain->default_pasid > 0 ?
+			dmar_domain->default_pasid : -EINVAL;
+}
+
 const struct iommu_ops intel_iommu_ops = {
 	.capable = intel_iommu_capable,
 	.domain_alloc = intel_iommu_domain_alloc,
 	.domain_free = intel_iommu_domain_free,
 	.attach_dev = intel_iommu_attach_device,
 	.detach_dev = intel_iommu_detach_device,
+	.aux_attach_dev = intel_iommu_aux_attach_device,
+	.aux_detach_dev = intel_iommu_aux_detach_device,
+	.aux_get_pasid = intel_iommu_aux_get_pasid,
 	.map = intel_iommu_map,
 	.unmap = intel_iommu_unmap,
 	.iova_to_phys = intel_iommu_iova_to_phys,
@@ -5401,6 +5723,10 @@ const struct iommu_ops intel_iommu_ops = {
 	.get_resv_regions = intel_iommu_get_resv_regions,
 	.put_resv_regions = intel_iommu_put_resv_regions,
 	.device_group = pci_device_group,
+	.dev_has_feat = intel_iommu_dev_has_feat,
+	.dev_feat_enabled = intel_iommu_dev_feat_enabled,
+	.dev_enable_feat = intel_iommu_dev_enable_feat,
+	.dev_disable_feat = intel_iommu_dev_disable_feat,
 	.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
 };
 
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -154,8 +154,10 @@ int intel_pasid_alloc_table(struct device *dev)
 	order = size ? get_order(size) : 0;
 	pages = alloc_pages_node(info->iommu->node,
 				 GFP_KERNEL | __GFP_ZERO, order);
-	if (!pages)
+	if (!pages) {
+		kfree(pasid_table);
 		return -ENOMEM;
+	}
 
 	pasid_table->table = page_address(pages);
 	pasid_table->order = order;
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -228,6 +228,7 @@ static LIST_HEAD(global_svm_list);
 int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
 {
 	struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+	struct device_domain_info *info;
 	struct intel_svm_dev *sdev;
 	struct intel_svm *svm = NULL;
 	struct mm_struct *mm = NULL;
@@ -291,13 +292,29 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
 		}
 		sdev->dev = dev;
 
-		ret = intel_iommu_enable_pasid(iommu, sdev);
+		ret = intel_iommu_enable_pasid(iommu, dev);
 		if (ret || !pasid) {
 			/* If they don't actually want to assign a PASID, this is
 			 * just an enabling check/preparation. */
 			kfree(sdev);
 			goto out;
 		}
+
+		info = dev->archdata.iommu;
+		if (!info || !info->pasid_supported) {
+			kfree(sdev);
+			goto out;
+		}
+
+		sdev->did = FLPT_DEFAULT_DID;
+		sdev->sid = PCI_DEVID(info->bus, info->devfn);
+		if (info->ats_enabled) {
+			sdev->dev_iotlb = 1;
+			sdev->qdep = info->ats_qdep;
+			if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
+				sdev->qdep = 0;
+		}
+
 		/* Finish the setup now we know we're keeping it */
 		sdev->users = 1;
 		sdev->ops = ops;
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -548,8 +548,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 		goto out_free_table;
 	}
 
-	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
-			 sizeof(long), GFP_ATOMIC);
+	bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
 	if (bitmap == NULL) {
 		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
 		goto out_free_pages;
@@ -616,7 +615,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 	return 0;
 
 out_free_bitmap:
-	kfree(bitmap);
+	bitmap_free(bitmap);
 out_free_pages:
 	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
 out_free_table:
@@ -640,7 +639,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
 	}
 	free_pages((unsigned long)iommu->ir_table->base,
 		   INTR_REMAP_PAGE_ORDER);
-	kfree(iommu->ir_table->bitmap);
+	bitmap_free(iommu->ir_table->bitmap);
 	kfree(iommu->ir_table);
 	iommu->ir_table = NULL;
 }
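For context on the conversion just above: assuming the generic bitmap helpers behave as documented, bitmap_zalloc()/bitmap_free() are one-for-one replacements for the open-coded kcalloc()/kfree() pair, so the change is behaviour-neutral. A minimal sketch of the equivalence (illustrative only, using the same constant as the code above):

	unsigned long *bitmap;

	/* before: zeroed array of longs sized to hold the bits */
	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	kfree(bitmap);

	/* after: the same allocation expressed through the bitmap API */
	bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
	bitmap_free(bitmap);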
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -45,10 +45,6 @@ static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
 #endif
 static bool iommu_dma_strict __read_mostly = true;
 
-struct iommu_callback_data {
-	const struct iommu_ops *ops;
-};
-
 struct iommu_group {
 	struct kobject kobj;
 	struct kobject *devices_kobj;
@@ -1217,9 +1213,6 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
 {
 	int err;
 	struct notifier_block *nb;
-	struct iommu_callback_data cb = {
-		.ops = ops,
-	};
 
 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
 	if (!nb)
@@ -1231,7 +1224,7 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
 	if (err)
 		goto out_free;
 
-	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+	err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
 	if (err)
 		goto out_err;
 
@@ -1240,7 +1233,7 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
 
 out_err:
 	/* Clean up */
-	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
+	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
 	bus_unregister_notifier(bus, nb);
 
 out_free:
@@ -2039,3 +2032,203 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
+
+/*
+ * Per device IOMMU features.
+ */
+bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (ops && ops->dev_has_feat)
+		return ops->dev_has_feat(dev, feat);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
+
+int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (ops && ops->dev_enable_feat)
+		return ops->dev_enable_feat(dev, feat);
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
+
+/*
+ * The device drivers should do the necessary cleanups before calling this.
+ * For example, before disabling the aux-domain feature, the device driver
+ * should detach all aux-domains. Otherwise, this will return -EBUSY.
+ */
+int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (ops && ops->dev_disable_feat)
+		return ops->dev_disable_feat(dev, feat);
+
+	return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
+
+bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (ops && ops->dev_feat_enabled)
+		return ops->dev_feat_enabled(dev, feat);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
+
+/*
+ * Aux-domain specific attach/detach.
+ *
+ * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
+ * true. Also, as long as domains are attached to a device through this
+ * interface, any tries to call iommu_attach_device() should fail
+ * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
+ * This should make us safe against a device being attached to a guest as a
+ * whole while there are still pasid users on it (aux and sva).
+ */
+int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
+{
+	int ret = -ENODEV;
+
+	if (domain->ops->aux_attach_dev)
+		ret = domain->ops->aux_attach_dev(domain, dev);
+
+	if (!ret)
+		trace_attach_device_to_domain(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
+
+void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
+{
+	if (domain->ops->aux_detach_dev) {
+		domain->ops->aux_detach_dev(domain, dev);
+		trace_detach_device_from_domain(dev);
+	}
+}
+EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
+
+int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+	int ret = -ENODEV;
+
+	if (domain->ops->aux_get_pasid)
+		ret = domain->ops->aux_get_pasid(domain, dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
+
+/**
+ * iommu_sva_bind_device() - Bind a process address space to a device
+ * @dev: the device
+ * @mm: the mm to bind, caller must hold a reference to it
+ *
+ * Create a bond between device and address space, allowing the device to access
+ * the mm using the returned PASID. If a bond already exists between @device and
+ * @mm, it is returned and an additional reference is taken. Caller must call
+ * iommu_sva_unbind_device() to release each reference.
+ *
+ * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
+ * initialize the required SVA features.
+ *
+ * On error, returns an ERR_PTR value.
+ */
+struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+	struct iommu_group *group;
+	struct iommu_sva *handle = ERR_PTR(-EINVAL);
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (!ops || !ops->sva_bind)
+		return ERR_PTR(-ENODEV);
+
+	group = iommu_group_get(dev);
+	if (!group)
+		return ERR_PTR(-ENODEV);
+
+	/* Ensure device count and domain don't change while we're binding */
+	mutex_lock(&group->mutex);
+
+	/*
+	 * To keep things simple, SVA currently doesn't support IOMMU groups
+	 * with more than one device. Existing SVA-capable systems are not
+	 * affected by the problems that required IOMMU groups (lack of ACS
+	 * isolation, device ID aliasing and other hardware issues).
+	 */
+	if (iommu_group_device_count(group) != 1)
+		goto out_unlock;
+
+	handle = ops->sva_bind(dev, mm, drvdata);
+
+out_unlock:
+	mutex_unlock(&group->mutex);
+	iommu_group_put(group);
+
+	return handle;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
+
+/**
+ * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
+ * @handle: the handle returned by iommu_sva_bind_device()
+ *
+ * Put reference to a bond between device and address space. The device should
+ * not be issuing any more transaction for this PASID. All outstanding page
+ * requests for this PASID must have been flushed to the IOMMU.
+ *
+ * Returns 0 on success, or an error value
+ */
+void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+	struct iommu_group *group;
+	struct device *dev = handle->dev;
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (!ops || !ops->sva_unbind)
+		return;
+
+	group = iommu_group_get(dev);
+	if (!group)
+		return;
+
+	mutex_lock(&group->mutex);
+	ops->sva_unbind(handle);
+	mutex_unlock(&group->mutex);
+
+	iommu_group_put(group);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
+
+int iommu_sva_set_ops(struct iommu_sva *handle,
+		      const struct iommu_sva_ops *sva_ops)
+{
+	if (handle->ops && handle->ops != sva_ops)
+		return -EEXIST;
+
+	handle->ops = sva_ops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
+
+int iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
+
+	if (!ops || !ops->sva_get_pasid)
+		return IOMMU_PASID_INVALID;
+
+	return ops->sva_get_pasid(handle);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
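To illustrate how the aux-domain interface added above is meant to be consumed, here is a hedged sketch of a caller, for example a mediated-device parent driver; only the iommu_* calls are real, the example_* names are made up:

	/* Sketch: give a device an auxiliary DMA address space and learn its PASID. */
	static int example_setup_aux_domain(struct device *dev,
					    struct iommu_domain **out)
	{
		struct iommu_domain *domain;
		int ret, pasid;

		if (!iommu_dev_has_feature(dev, IOMMU_DEV_FEAT_AUX))
			return -ENODEV;

		ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
		if (ret)
			return ret;

		/* an unmanaged domain, which is what the aux attach path expects */
		domain = iommu_domain_alloc(dev->bus);
		if (!domain)
			return -ENOMEM;

		ret = iommu_aux_attach_device(domain, dev);
		if (ret) {
			iommu_domain_free(domain);
			return ret;
		}

		/* the PASID that tags DMA belonging to this auxiliary address space */
		pasid = iommu_aux_get_pasid(domain, dev);
		if (pasid < 0) {
			iommu_aux_detach_device(domain, dev);
			iommu_domain_free(domain);
			return pasid;
		}

		*out = domain;
		return 0;
	}

Mappings are then established with iommu_map() on the returned domain, and teardown reverses the steps: iommu_aux_detach_device(), iommu_domain_free(), then iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX).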
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -632,16 +632,20 @@ static int mtk_iommu_probe(struct platform_device *pdev)
 		if (!larbnode)
 			return -EINVAL;
 
-		if (!of_device_is_available(larbnode))
+		if (!of_device_is_available(larbnode)) {
+			of_node_put(larbnode);
 			continue;
+		}
 
 		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
 		if (ret)/* The id is consecutive if there is no this property */
 			id = i;
 
 		plarbdev = of_find_device_by_node(larbnode);
-		if (!plarbdev)
+		if (!plarbdev) {
+			of_node_put(larbnode);
 			return -EPROBE_DEFER;
+		}
 		data->smi_imu.larb_imu[id].dev = &plarbdev->dev;
 
 		component_match_add_release(dev, &match, release_of,
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -102,7 +102,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
 #define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
 #define SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
-#define SMMU_TLB_FLUSH_ASID(x)          (((x) & 0x7f) << 24)
 #define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
 					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
 #define SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
@@ -146,8 +145,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 
 #define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
 				 SMMU_PDE_NONSECURE)
-#define SMMU_PTE_ATTR		(SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
-				 SMMU_PTE_NONSECURE)
 
 static unsigned int iova_pd_index(unsigned long iova)
 {
@@ -205,8 +202,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
 {
 	u32 value;
 
-	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
-		SMMU_TLB_FLUSH_VA_MATCH_ALL;
+	if (smmu->soc->num_asids == 4)
+		value = (asid & 0x3) << 29;
+	else
+		value = (asid & 0x7f) << 24;
+
+	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
 	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
 }
 
@@ -216,8 +217,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
 {
 	u32 value;
 
-	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
-		SMMU_TLB_FLUSH_VA_SECTION(iova);
+	if (smmu->soc->num_asids == 4)
+		value = (asid & 0x3) << 29;
+	else
+		value = (asid & 0x7f) << 24;
+
+	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
 	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
 }
 
@@ -227,8 +232,12 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
 {
 	u32 value;
 
-	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
-		SMMU_TLB_FLUSH_VA_GROUP(iova);
+	if (smmu->soc->num_asids == 4)
+		value = (asid & 0x3) << 29;
+	else
+		value = (asid & 0x7f) << 24;
+
+	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
 	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
 }
 
@@ -316,6 +325,9 @@ static void tegra_smmu_domain_free(struct iommu_domain *domain)
 
 	/* TODO: free page directory and page tables */
 
+	WARN_ON_ONCE(as->use_count);
+	kfree(as->count);
+	kfree(as->pts);
 	kfree(as);
 }
 
@@ -645,6 +657,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
 {
 	struct tegra_smmu_as *as = to_smmu_as(domain);
 	dma_addr_t pte_dma;
+	u32 pte_attrs;
 	u32 *pte;
 
 	pte = as_get_pte(as, iova, &pte_dma);
@@ -655,8 +668,16 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	if (*pte == 0)
 		tegra_smmu_pte_get_use(as, iova);
 
+	pte_attrs = SMMU_PTE_NONSECURE;
+
+	if (prot & IOMMU_READ)
+		pte_attrs |= SMMU_PTE_READABLE;
+
+	if (prot & IOMMU_WRITE)
+		pte_attrs |= SMMU_PTE_WRITABLE;
+
 	tegra_smmu_set_pte(as, iova, pte, pte_dma,
-			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
+			   __phys_to_pfn(paddr) | pte_attrs);
 
 	return 0;
 }
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -388,6 +388,24 @@ int mdev_device_remove(struct device *dev, bool force_remove)
 	return 0;
 }
 
+int mdev_set_iommu_device(struct device *dev, struct device *iommu_device)
+{
+	struct mdev_device *mdev = to_mdev_device(dev);
+
+	mdev->iommu_device = iommu_device;
+
+	return 0;
+}
+EXPORT_SYMBOL(mdev_set_iommu_device);
+
+struct device *mdev_get_iommu_device(struct device *dev)
+{
+	struct mdev_device *mdev = to_mdev_device(dev);
+
+	return mdev->iommu_device;
+}
+EXPORT_SYMBOL(mdev_get_iommu_device);
+
 static int __init mdev_init(void)
 {
 	return mdev_bus_register();

--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -32,6 +32,7 @@ struct mdev_device {
 	void *driver_data;
 	struct list_head next;
 	struct kobject *type_kobj;
+	struct device *iommu_device;
 	bool active;
 };
 
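For illustration, the new mdev hook is expected to be called from the parent (vendor) driver's mdev create path, so that the VFIO code further below can discover which physical device does DMA on behalf of the mediated device; everything here apart from the mdev_* helpers is a hypothetical sketch:

	/* Sketch of a parent driver's create() callback (mdev_parent_ops). */
	static int example_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
	{
		/* ... vendor-specific mdev setup would go here ... */

		/*
		 * Record the physical device backing this mdev; VFIO uses it
		 * to choose between aux-domain and ordinary domain attach.
		 */
		return mdev_set_iommu_device(mdev_dev(mdev), mdev_parent_dev(mdev));
	}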
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -97,6 +97,7 @@ struct vfio_dma {
 struct vfio_group {
 	struct iommu_group	*iommu_group;
 	struct list_head	next;
+	bool			mdev_group;	/* An mdev group */
 };
 
 /*
@@ -564,7 +565,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 	mutex_lock(&iommu->lock);
 
 	/* Fail if notifier list is empty */
-	if ((!iommu->external_domain) || (!iommu->notifier.head)) {
+	if (!iommu->notifier.head) {
 		ret = -EINVAL;
 		goto pin_done;
 	}
@@ -646,11 +647,6 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
 
 	mutex_lock(&iommu->lock);
 
-	if (!iommu->external_domain) {
-		mutex_unlock(&iommu->lock);
-		return -EINVAL;
-	}
-
 	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
 	for (i = 0; i < npage; i++) {
 		struct vfio_dma *dma;
@@ -1311,13 +1307,109 @@ static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
 	return ret;
 }
 
+static struct device *vfio_mdev_get_iommu_device(struct device *dev)
+{
+	struct device *(*fn)(struct device *dev);
+	struct device *iommu_device;
+
+	fn = symbol_get(mdev_get_iommu_device);
+	if (fn) {
+		iommu_device = fn(dev);
+		symbol_put(mdev_get_iommu_device);
+
+		return iommu_device;
+	}
+
+	return NULL;
+}
+
+static int vfio_mdev_attach_domain(struct device *dev, void *data)
+{
+	struct iommu_domain *domain = data;
+	struct device *iommu_device;
+
+	iommu_device = vfio_mdev_get_iommu_device(dev);
+	if (iommu_device) {
+		if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
+			return iommu_aux_attach_device(domain, iommu_device);
+		else
+			return iommu_attach_device(domain, iommu_device);
+	}
+
+	return -EINVAL;
+}
+
+static int vfio_mdev_detach_domain(struct device *dev, void *data)
+{
+	struct iommu_domain *domain = data;
+	struct device *iommu_device;
+
+	iommu_device = vfio_mdev_get_iommu_device(dev);
+	if (iommu_device) {
+		if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
+			iommu_aux_detach_device(domain, iommu_device);
+		else
+			iommu_detach_device(domain, iommu_device);
+	}
+
+	return 0;
+}
+
+static int vfio_iommu_attach_group(struct vfio_domain *domain,
+				   struct vfio_group *group)
+{
+	if (group->mdev_group)
+		return iommu_group_for_each_dev(group->iommu_group,
+						domain->domain,
+						vfio_mdev_attach_domain);
+	else
+		return iommu_attach_group(domain->domain, group->iommu_group);
+}
+
+static void vfio_iommu_detach_group(struct vfio_domain *domain,
+				    struct vfio_group *group)
+{
+	if (group->mdev_group)
+		iommu_group_for_each_dev(group->iommu_group, domain->domain,
+					 vfio_mdev_detach_domain);
+	else
+		iommu_detach_group(domain->domain, group->iommu_group);
+}
+
+static bool vfio_bus_is_mdev(struct bus_type *bus)
+{
+	struct bus_type *mdev_bus;
+	bool ret = false;
+
+	mdev_bus = symbol_get(mdev_bus_type);
+	if (mdev_bus) {
+		ret = (bus == mdev_bus);
+		symbol_put(mdev_bus_type);
+	}
+
+	return ret;
+}
+
+static int vfio_mdev_iommu_device(struct device *dev, void *data)
+{
+	struct device **old = data, *new;
+
+	new = vfio_mdev_get_iommu_device(dev);
+	if (!new || (*old && *old != new))
+		return -EINVAL;
+
+	*old = new;
+
+	return 0;
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
 					 struct iommu_group *iommu_group)
 {
 	struct vfio_iommu *iommu = iommu_data;
 	struct vfio_group *group;
 	struct vfio_domain *domain, *d;
-	struct bus_type *bus = NULL, *mdev_bus;
+	struct bus_type *bus = NULL;
 	int ret;
 	bool resv_msi, msi_remap;
 	phys_addr_t resv_msi_base;
@@ -1352,23 +1444,30 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	if (ret)
 		goto out_free;
 
-	mdev_bus = symbol_get(mdev_bus_type);
+	if (vfio_bus_is_mdev(bus)) {
+		struct device *iommu_device = NULL;
 
-	if (mdev_bus) {
-		if ((bus == mdev_bus) && !iommu_present(bus)) {
-			symbol_put(mdev_bus_type);
+		group->mdev_group = true;
+
+		/* Determine the isolation type */
+		ret = iommu_group_for_each_dev(iommu_group, &iommu_device,
+					       vfio_mdev_iommu_device);
+		if (ret || !iommu_device) {
 			if (!iommu->external_domain) {
 				INIT_LIST_HEAD(&domain->group_list);
 				iommu->external_domain = domain;
-			} else
+			} else {
 				kfree(domain);
+			}
 
 			list_add(&group->next,
 				 &iommu->external_domain->group_list);
 			mutex_unlock(&iommu->lock);
+
 			return 0;
 		}
-		symbol_put(mdev_bus_type);
+
+		bus = iommu_device->bus;
 	}
 
 	domain->domain = iommu_domain_alloc(bus);
@@ -1386,7 +1485,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		goto out_domain;
 	}
 
-	ret = iommu_attach_group(domain->domain, iommu_group);
+	ret = vfio_iommu_attach_group(domain, group);
 	if (ret)
 		goto out_domain;
 
@@ -1418,8 +1517,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	list_for_each_entry(d, &iommu->domain_list, next) {
 		if (d->domain->ops == domain->domain->ops &&
 		    d->prot == domain->prot) {
-			iommu_detach_group(domain->domain, iommu_group);
-			if (!iommu_attach_group(d->domain, iommu_group)) {
+			vfio_iommu_detach_group(domain, group);
+			if (!vfio_iommu_attach_group(d, group)) {
 				list_add(&group->next, &d->group_list);
 				iommu_domain_free(domain->domain);
 				kfree(domain);
@@ -1427,7 +1526,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 				return 0;
 			}
 
-			ret = iommu_attach_group(domain->domain, iommu_group);
+			ret = vfio_iommu_attach_group(domain, group);
 			if (ret)
 				goto out_domain;
 		}
@@ -1453,7 +1552,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	return 0;
 
 out_detach:
-	iommu_detach_group(domain->domain, iommu_group);
+	vfio_iommu_detach_group(domain, group);
 out_domain:
 	iommu_domain_free(domain->domain);
 out_free:
@@ -1544,7 +1643,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 		if (!group)
 			continue;
 
-		iommu_detach_group(domain->domain, iommu_group);
+		vfio_iommu_detach_group(domain, group);
 		list_del(&group->next);
 		kfree(group);
 		/*
@@ -1610,7 +1709,7 @@ static void vfio_release_domain(struct vfio_domain *domain, bool external)
 	list_for_each_entry_safe(group, group_tmp,
 				 &domain->group_list, next) {
 		if (!external)
-			iommu_detach_group(domain->domain, group->iommu_group);
+			vfio_iommu_detach_group(domain, group);
 		list_del(&group->next);
 		kfree(group);
 	}
@ -489,9 +489,11 @@ struct dmar_domain {
|
||||||
/* Domain ids per IOMMU. Use u16 since
|
/* Domain ids per IOMMU. Use u16 since
|
||||||
* domain ids are 16 bit wide according
|
* domain ids are 16 bit wide according
|
||||||
* to VT-d spec, section 9.3 */
|
* to VT-d spec, section 9.3 */
|
||||||
|
unsigned int auxd_refcnt; /* Refcount of auxiliary attaching */
|
||||||
|
|
||||||
bool has_iotlb_device;
|
bool has_iotlb_device;
|
||||||
struct list_head devices; /* all devices' list */
|
struct list_head devices; /* all devices' list */
|
||||||
|
struct list_head auxd; /* link to device's auxiliary list */
|
||||||
struct iova_domain iovad; /* iova's that belong to this domain */
|
struct iova_domain iovad; /* iova's that belong to this domain */
|
||||||
|
|
||||||
struct dma_pte *pgd; /* virtual address */
|
struct dma_pte *pgd; /* virtual address */
|
||||||
|
@ -510,6 +512,11 @@ struct dmar_domain {
|
||||||
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
|
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
|
||||||
u64 max_addr; /* maximum mapped address */
|
u64 max_addr; /* maximum mapped address */
|
||||||
|
|
||||||
|
int default_pasid; /*
|
||||||
|
* The default pasid used for non-SVM
|
||||||
|
* traffic on mediated devices.
|
||||||
|
*/
|
||||||
|
|
||||||
struct iommu_domain domain; /* generic domain data structure for
|
struct iommu_domain domain; /* generic domain data structure for
|
||||||
iommu core */
|
iommu core */
|
||||||
};
|
};
|
||||||
|
@ -559,6 +566,9 @@ struct device_domain_info {
|
||||||
struct list_head link; /* link to domain siblings */
|
struct list_head link; /* link to domain siblings */
|
||||||
struct list_head global; /* link to global list */
|
struct list_head global; /* link to global list */
|
||||||
struct list_head table; /* link to pasid table */
|
struct list_head table; /* link to pasid table */
|
||||||
|
struct list_head auxiliary_domains; /* auxiliary domains
|
||||||
|
* attached to this device
|
||||||
|
*/
|
||||||
u8 bus; /* PCI bus number */
|
u8 bus; /* PCI bus number */
|
||||||
u8 devfn; /* PCI devfn number */
|
u8 devfn; /* PCI devfn number */
|
||||||
u16 pfsid; /* SRIOV physical function source ID */
|
u16 pfsid; /* SRIOV physical function source ID */
|
||||||
|
@ -568,6 +578,7 @@ struct device_domain_info {
|
||||||
u8 pri_enabled:1;
|
u8 pri_enabled:1;
|
||||||
u8 ats_supported:1;
|
u8 ats_supported:1;
|
||||||
u8 ats_enabled:1;
|
u8 ats_enabled:1;
|
||||||
|
u8 auxd_enabled:1; /* Multiple domains per device */
|
||||||
u8 ats_qdep;
|
u8 ats_qdep;
|
||||||
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
|
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
|
||||||
struct intel_iommu *iommu; /* IOMMU used by this device */
|
struct intel_iommu *iommu; /* IOMMU used by this device */
|
||||||
|
@ -650,6 +661,7 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
|
||||||
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
|
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
|
||||||
void *data), void *data);
|
void *data), void *data);
|
||||||
void iommu_flush_write_buffer(struct intel_iommu *iommu);
|
void iommu_flush_write_buffer(struct intel_iommu *iommu);
|
||||||
|
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
|
||||||
|
|
||||||
#ifdef CONFIG_INTEL_IOMMU_SVM
|
#ifdef CONFIG_INTEL_IOMMU_SVM
|
||||||
int intel_svm_init(struct intel_iommu *iommu);
|
int intel_svm_init(struct intel_iommu *iommu);
|
||||||
|
@ -679,7 +691,6 @@ struct intel_svm {
|
||||||
struct list_head list;
|
struct list_head list;
|
||||||
};
|
};
|
||||||
|
|
||||||
extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
|
|
||||||
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
|
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
|
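The new fields in struct dmar_domain and struct device_domain_info (include/linux/intel-iommu.h) carry the aux-domain bookkeeping: the first auxiliary attach of a domain reserves a default PASID, links the domain onto the device's auxiliary_domains list and bumps auxd_refcnt. A rough, simplified sketch of that attach-side bookkeeping, with locking, PASID-table setup and error unwinding omitted; intel_pasid_alloc_id() and PASID_MIN are assumed from the Intel PASID allocator:

/*
 * Simplified illustration only; the real logic lives in
 * drivers/iommu/intel-iommu.c.
 */
static int aux_domain_attach_sketch(struct dmar_domain *domain,
				    struct device *dev)
{
	/* device_domain_info is stashed in archdata on VT-d platforms. */
	struct device_domain_info *info = dev->archdata.iommu;

	if (!info || !info->auxd_enabled)
		return -ENODEV;

	if (domain->default_pasid <= 0) {
		/* First aux attachment of this domain: reserve a PASID. */
		domain->default_pasid =
			intel_pasid_alloc_id(domain, PASID_MIN,
					     pci_max_pasids(to_pci_dev(dev)),
					     GFP_KERNEL);
		if (domain->default_pasid < 0)
			return -ENOSPC;
	}

	list_add(&domain->auxd, &info->auxiliary_domains);
	domain->auxd_refcnt++;

	return 0;
}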
@@ -48,6 +48,7 @@ struct bus_type;
 struct device;
 struct iommu_domain;
 struct notifier_block;
+struct iommu_sva;

 /* iommu fault flags */
 #define IOMMU_FAULT_READ	0x0
@@ -55,6 +56,8 @@ struct notifier_block;

 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
 			struct device *, unsigned long, int, void *);
+typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
+				       void *);

 struct iommu_domain_geometry {
 	dma_addr_t aperture_start; /* First address that can be mapped    */
@@ -156,6 +159,33 @@ struct iommu_resv_region {
 	enum iommu_resv_type	type;
 };

+/* Per device IOMMU features */
+enum iommu_dev_features {
+	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
+	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
+};
+
+#define IOMMU_PASID_INVALID	(-1U)
+
+/**
+ * struct iommu_sva_ops - device driver callbacks for an SVA context
+ *
+ * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
+ *           @mm_exit returns, the device must not issue any more transaction
+ *           with the PASID given as argument.
+ *
+ *           The @mm_exit handler is allowed to sleep. Be careful about the
+ *           locks taken in @mm_exit, because they might lead to deadlocks if
+ *           they are also held when dropping references to the mm. Consider the
+ *           following call chain:
+ *           mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
+ *           Using mmput_async() prevents this scenario.
+ *
+ */
+struct iommu_sva_ops {
+	iommu_mm_exit_handler_t mm_exit;
+};
+
 #ifdef CONFIG_IOMMU_API

 /**
@@ -186,6 +216,14 @@ struct iommu_resv_region {
  * @of_xlate: add OF master IDs to iommu grouping
  * @is_attach_deferred: Check if domain attach should be deferred from iommu
  *                      driver init to device driver init (default no)
+ * @dev_has/enable/disable_feat: per device entries to check/enable/disable
+ *                               iommu specific features.
+ * @dev_feat_enabled: check enabled feature
+ * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
+ * @aux_get_pasid: get the pasid given an aux-domain
+ * @sva_bind: Bind process address space to device
+ * @sva_unbind: Unbind process address space from device
+ * @sva_get_pasid: Get PASID associated to a SVA handle
  * @pgsize_bitmap: bitmap of all possible supported page sizes
  */
 struct iommu_ops {
@@ -230,6 +268,22 @@ struct iommu_ops {
 	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
 	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

+	/* Per device IOMMU features */
+	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
+	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
+	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
+	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
+
+	/* Aux-domain specific attach/detach entries */
+	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
+	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
+	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);
+
+	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
+				      void *drvdata);
+	void (*sva_unbind)(struct iommu_sva *handle);
+	int (*sva_get_pasid)(struct iommu_sva *handle);
+
 	unsigned long pgsize_bitmap;
 };

@@ -392,10 +446,22 @@ struct iommu_fwspec {
 	const struct iommu_ops	*ops;
 	struct fwnode_handle	*iommu_fwnode;
 	void			*iommu_priv;
+	u32			flags;
 	unsigned int		num_ids;
 	u32			ids[1];
 };

+/* ATS is supported */
+#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)
+
+/**
+ * struct iommu_sva - handle to a device-mm bond
+ */
+struct iommu_sva {
+	struct device			*dev;
+	const struct iommu_sva_ops	*ops;
+};
+
 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
 		      const struct iommu_ops *ops);
 void iommu_fwspec_free(struct device *dev);
@@ -416,6 +482,22 @@ static inline void dev_iommu_fwspec_set(struct device *dev,
 int iommu_probe_device(struct device *dev);
 void iommu_release_device(struct device *dev);

+bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
+int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
+int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
+bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
+int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
+void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
+int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);
+
+struct iommu_sva *iommu_sva_bind_device(struct device *dev,
+					struct mm_struct *mm,
+					void *drvdata);
+void iommu_sva_unbind_device(struct iommu_sva *handle);
+int iommu_sva_set_ops(struct iommu_sva *handle,
+		      const struct iommu_sva_ops *ops);
+int iommu_sva_get_pasid(struct iommu_sva *handle);
+
 #else /* CONFIG_IOMMU_API */

 struct iommu_ops {};
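From a consumer's point of view, the per-device feature and aux-domain entry points declared above (include/linux/iommu.h) compose as follows: check and enable the AUX feature, allocate an extra unmanaged domain, attach it in aux mode, and retrieve the PASID that tags its traffic. This is an illustrative sequence, not code from the series:

#include <linux/err.h>
#include <linux/iommu.h>

/* Illustrative consumer of the aux-domain API. */
static struct iommu_domain *example_setup_aux_domain(struct device *dev,
						     struct bus_type *bus,
						     int *pasid)
{
	struct iommu_domain *domain;
	int ret;

	if (!iommu_dev_has_feature(dev, IOMMU_DEV_FEAT_AUX))
		return ERR_PTR(-ENODEV);

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ERR_PTR(ret);

	domain = iommu_domain_alloc(bus);		/* extra DMA address space */
	if (!domain) {
		ret = -ENOMEM;
		goto out_disable;
	}

	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		goto out_free;

	*pasid = iommu_aux_get_pasid(domain, dev);	/* tags this address space */
	return domain;

out_free:
	iommu_domain_free(domain);
out_disable:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
	return ERR_PTR(ret);
}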
@@ -700,6 +782,68 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 	return NULL;
 }

+static inline bool
+iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
+{
+	return false;
+}
+
+static inline bool
+iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+	return false;
+}
+
+static inline int
+iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+	return -ENODEV;
+}
+
+static inline int
+iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+	return -ENODEV;
+}
+
+static inline int
+iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
+{
+	return -ENODEV;
+}
+
+static inline void
+iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
+{
+}
+
+static inline int
+iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+	return -ENODEV;
+}
+
+static inline struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+	return NULL;
+}
+
+static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+}
+
+static inline int iommu_sva_set_ops(struct iommu_sva *handle,
+				    const struct iommu_sva_ops *ops)
+{
+	return -EINVAL;
+}
+
+static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+	return IOMMU_PASID_INVALID;
+}
+
 #endif /* CONFIG_IOMMU_API */

 #ifdef CONFIG_IOMMU_DEBUGFS
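The SVA half of the API is used analogously: a driver binds a process address space, installs an mm_exit callback through iommu_sva_set_ops(), and programs the returned PASID into the device. A hedged sketch; the example_* names are invented for illustration and the stubs above mean the call degrades gracefully when CONFIG_IOMMU_API is off:

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

/* Hypothetical callback: stop issuing transactions with this PASID. */
static int example_mm_exit(struct device *dev, struct iommu_sva *handle,
			   void *drvdata)
{
	/* Quiesce the device context associated with drvdata here. */
	return 0;
}

static const struct iommu_sva_ops example_sva_ops = {
	.mm_exit = example_mm_exit,
};

static int example_bind_current_mm(struct device *dev, void *ctx, int *pasid)
{
	struct iommu_sva *handle;
	int ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	handle = iommu_sva_bind_device(dev, current->mm, ctx);
	if (IS_ERR_OR_NULL(handle))
		return handle ? PTR_ERR(handle) : -ENODEV;

	ret = iommu_sva_set_ops(handle, &example_sva_ops);
	if (ret)
		goto out_unbind;

	*pasid = iommu_sva_get_pasid(handle);
	if (*pasid == IOMMU_PASID_INVALID) {
		ret = -ENODEV;
		goto out_unbind;
	}

	return 0;

out_unbind:
	iommu_sva_unbind_device(handle);
	return ret;
}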
@@ -76,6 +76,14 @@ struct iova_domain {
 	unsigned long	start_pfn;	/* Lower limit for this domain */
 	unsigned long	dma_32bit_pfn;
 	unsigned long	max32_alloc_size; /* Size of last failed allocation */
+	struct iova_fq __percpu *fq;	/* Flush Queue */
+
+	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
+						   have been started */
+
+	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
+						   have been finished */
+
 	struct iova	anchor;		/* rbtree lookup anchor */
 	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

@@ -85,14 +93,6 @@ struct iova_domain {
 	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
 					   iova entry */

-	struct iova_fq __percpu *fq;	/* Flush Queue */
-
-	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
-						   have been started */
-
-	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
-						   have been finished */
-
 	struct timer_list fq_timer;		/* Timer to regularily empty the
 						   flush-queues */
 	atomic_t fq_timer_on;			/* 1 when timer is active, 0
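The include/linux/iova.h hunk only moves the flush-queue members within struct iova_domain; the flush-queue API itself is unchanged. For context, a driver sets the queue up roughly like this (the example_* callback names are hypothetical):

#include <linux/iova.h>

/* Hypothetical callbacks: flush the IOTLB, then release deferred entries. */
static void example_flush_iotlb(struct iova_domain *iovad)
{
	/* Issue the domain-wide IOTLB invalidation here. */
}

static void example_entry_dtor(unsigned long data)
{
	/* Free whatever was stashed in 'data' when the IOVA was queued. */
}

static int example_iovad_init(struct iova_domain *iovad)
{
	init_iova_domain(iovad, PAGE_SIZE, 1);	/* granule, start_pfn */
	return init_iova_flush_queue(iovad, example_flush_iotlb,
				     example_entry_dtor);
}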
@@ -15,6 +15,20 @@

 struct mdev_device;

+/*
+ * Called by the parent device driver to set the device which represents
+ * this mdev in iommu protection scope. By default, the iommu device is
+ * NULL, that indicates using vendor defined isolation.
+ *
+ * @dev: the mediated device that iommu will isolate.
+ * @iommu_device: a pci device which represents the iommu for @dev.
+ *
+ * Return 0 for success, otherwise negative error value.
+ */
+int mdev_set_iommu_device(struct device *dev, struct device *iommu_device);
+
+struct device *mdev_get_iommu_device(struct device *dev);
+
 /**
  * struct mdev_parent_ops - Structure to be registered for each parent device to
  *			    register the device to mdev module.
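mdev_set_iommu_device() lets a parent driver nominate the physical device that provides IOMMU isolation for an mdev, so VFIO can route the mdev's mappings through that device's aux-domain instead of relying on vendor-defined isolation. A sketch of a parent driver wiring this up at create time; the example_* names and parent_pdev are invented, and the remaining mdev_parent_ops callbacks are elided:

#include <linux/mdev.h>
#include <linux/pci.h>

/* Invented example: the physical PCI device backing all mdevs of this parent. */
static struct pci_dev *parent_pdev;

static int example_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	/* ... allocate vendor-specific mdev state here ... */

	/*
	 * Tell the mdev/VFIO core that DMA from this mdev is isolated by
	 * the parent PCI device's IOMMU (via an aux-domain).
	 */
	return mdev_set_iommu_device(mdev_dev(mdev), &parent_pdev->dev);
}

static const struct mdev_parent_ops example_mdev_ops = {
	.owner  = THIS_MODULE,
	.create = example_mdev_create,
	/* .remove, .supported_type_groups, rw/ioctl callbacks, ... */
};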
@@ -1521,21 +1521,6 @@ static inline void pcie_ecrc_get_policy(char *str) { }

 bool pci_ats_disabled(void);

-#ifdef CONFIG_PCI_ATS
-/* Address Translation Service */
-void pci_ats_init(struct pci_dev *dev);
-int pci_enable_ats(struct pci_dev *dev, int ps);
-void pci_disable_ats(struct pci_dev *dev);
-int pci_ats_queue_depth(struct pci_dev *dev);
-int pci_ats_page_aligned(struct pci_dev *dev);
-#else
-static inline void pci_ats_init(struct pci_dev *d) { }
-static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
-static inline void pci_disable_ats(struct pci_dev *d) { }
-static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
-static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
-#endif
-
 #ifdef CONFIG_PCIE_PTM
 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
 #else
@@ -1728,8 +1713,24 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
 							 struct pci_dev *dev)
 { return NULL; }
+static inline bool pci_ats_disabled(void) { return true; }
 #endif /* CONFIG_PCI */

+#ifdef CONFIG_PCI_ATS
+/* Address Translation Service */
+void pci_ats_init(struct pci_dev *dev);
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+int pci_ats_page_aligned(struct pci_dev *dev);
+#else
+static inline void pci_ats_init(struct pci_dev *d) { }
+static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
+static inline void pci_disable_ats(struct pci_dev *d) { }
+static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
+static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
+#endif
+
 /* Include architecture-dependent settings and functions */

 #include <asm/pci.h>
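Moving the CONFIG_PCI_ATS block out from under CONFIG_PCI lets IOMMU drivers call the ATS helpers (or their no-op stubs) regardless of how PCI itself is configured. A simplified sketch of caller-side usage, roughly in the spirit of how an IOMMU driver enables ATS on an endpoint; error policy here is illustrative only:

#include <linux/pci.h>

static int example_enable_ats(struct pci_dev *pdev, int page_shift)
{
	int qdep;
	int ret;

	if (pci_ats_disabled())		/* honoured globally, e.g. pci=noats */
		return -EPERM;

	ret = pci_enable_ats(pdev, page_shift);
	if (ret)
		return ret;

	/* The invalidate queue depth sizes the IOMMU's completion tracking. */
	qdep = pci_ats_queue_depth(pdev);
	if (qdep < 0) {
		pci_disable_ats(pdev);
		return qdep;
	}

	return 0;
}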