iommu: remove DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE

Instead, make the global iommu_dma_strict parameter in iommu.c canonical by
exporting helpers to get and set it and use those directly in the drivers.

This makes sure that the iommu.strict parameter also works for the AMD and
Intel IOMMU drivers on x86.  As those default to lazy flushing, a new
IOMMU_CMD_LINE_STRICT flag is used to turn the value into a tristate that
represents the default when it is not overridden by an explicit parameter.

[ported on top of the other iommu_attr changes and added a few small
 missing bits]
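
For illustration only (not part of the patch): a minimal sketch of how an
IOMMU driver is expected to feed its own default into the core with the new
helper.  foo_iommu_init() and foo_lazy_flush_default are hypothetical names,
not symbols introduced here.

  /* Sketch: hypothetical driver init path using the new helper. */
  #include <linux/init.h>
  #include <linux/iommu.h>

  static bool foo_lazy_flush_default = true; /* driver's historical default */

  static int __init foo_iommu_init(void)
  {
  	/*
  	 * Passing true always forces strict invalidation; passing false only
  	 * takes effect if the user did not pass iommu.strict= on the kernel
  	 * command line (i.e. IOMMU_CMD_LINE_STRICT is not set).
  	 */
  	iommu_set_dma_strict(!foo_lazy_flush_default);
  	return 0;
  }

The DMA layer then queries the effective value per domain through
iommu_get_dma_strict(domain), which only ever allows lazy flushing for
IOMMU_DOMAIN_DMA domains.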

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20210401155256.298656-19-hch@lst.de
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Robin Murphy authored on 2021-04-01 17:52:54 +02:00, committed by Joerg Roedel
Parent: 3189713a1b
Commit: a250c23f15
8 changed files: 43 additions, 165 deletions


@@ -1771,26 +1771,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
 	return acpihid_device_group(dev);
 }
 
-static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
-				     enum iommu_attr attr, void *data)
-{
-	switch (domain->type) {
-	case IOMMU_DOMAIN_UNMANAGED:
-		return -ENODEV;
-	case IOMMU_DOMAIN_DMA:
-		switch (attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			*(int *)data = !amd_iommu_unmap_flush;
-			return 0;
-		default:
-			return -ENODEV;
-		}
-		break;
-	default:
-		return -EINVAL;
-	}
-}
-
 /*****************************************************************************
  *
  * The next functions belong to the dma_ops mapping/unmapping code.
@@ -1855,7 +1835,7 @@ int __init amd_iommu_init_dma_ops(void)
 		pr_info("IO/TLB flush on unmap enabled\n");
 	else
 		pr_info("Lazy IO/TLB flushing enabled\n");
+	iommu_set_dma_strict(amd_iommu_unmap_flush);
 	return 0;
 }
@@ -2257,7 +2237,6 @@ const struct iommu_ops amd_iommu_ops = {
 	.release_device = amd_iommu_release_device,
 	.probe_finalize = amd_iommu_probe_finalize,
 	.device_group = amd_iommu_device_group,
-	.domain_get_attr = amd_iommu_domain_get_attr,
 	.get_resv_regions = amd_iommu_get_resv_regions,
 	.put_resv_regions = generic_iommu_put_resv_regions,
 	.is_attach_deferred = amd_iommu_is_attach_deferred,


@@ -2040,7 +2040,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
 		.iommu_dev = smmu->dev,
 	};
 
-	if (smmu_domain->non_strict)
+	if (!iommu_get_dma_strict(domain))
 		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
 
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
@@ -2549,52 +2549,6 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
 	return group;
 }
 
-static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
-				    enum iommu_attr attr, void *data)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-	switch (domain->type) {
-	case IOMMU_DOMAIN_DMA:
-		switch (attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			*(int *)data = smmu_domain->non_strict;
-			return 0;
-		default:
-			return -ENODEV;
-		}
-		break;
-	default:
-		return -EINVAL;
-	}
-}
-
-static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
-				    enum iommu_attr attr, void *data)
-{
-	int ret = 0;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-	mutex_lock(&smmu_domain->init_mutex);
-
-	switch (domain->type) {
-	case IOMMU_DOMAIN_DMA:
-		switch(attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			smmu_domain->non_strict = *(int *)data;
-			break;
-		default:
-			ret = -ENODEV;
-		}
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	mutex_unlock(&smmu_domain->init_mutex);
-	return ret;
-}
-
 static int arm_smmu_enable_nesting(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2707,8 +2661,6 @@ static struct iommu_ops arm_smmu_ops = {
 	.probe_device = arm_smmu_probe_device,
 	.release_device = arm_smmu_release_device,
 	.device_group = arm_smmu_device_group,
-	.domain_get_attr = arm_smmu_domain_get_attr,
-	.domain_set_attr = arm_smmu_domain_set_attr,
 	.enable_nesting = arm_smmu_enable_nesting,
 	.of_xlate = arm_smmu_of_xlate,
 	.get_resv_regions = arm_smmu_get_resv_regions,


@@ -677,7 +677,6 @@ struct arm_smmu_domain {
 	struct mutex init_mutex; /* Protects smmu pointer */
 
 	struct io_pgtable_ops *pgtbl_ops;
-	bool non_strict;
 	atomic_t nr_ats_masters;
 
 	enum arm_smmu_domain_stage stage;


@@ -761,6 +761,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		.iommu_dev = smmu->dev,
 	};
 
+	if (!iommu_get_dma_strict(domain))
+		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
 	if (smmu->impl && smmu->impl->init_context) {
 		ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
 		if (ret)
@@ -1499,18 +1502,6 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 			return -ENODEV;
 		}
 		break;
-	case IOMMU_DOMAIN_DMA:
-		switch (attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: {
-			bool non_strict = smmu_domain->pgtbl_cfg.quirks &
-					  IO_PGTABLE_QUIRK_NON_STRICT;
-			*(int *)data = non_strict;
-			return 0;
-		}
-		default:
-			return -ENODEV;
-		}
-		break;
 	default:
 		return -EINVAL;
 	}
@@ -1557,18 +1548,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 			ret = -ENODEV;
 		}
 		break;
-	case IOMMU_DOMAIN_DMA:
-		switch (attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			if (*(int *)data)
-				smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
-			else
-				smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT;
-			break;
-		default:
-			ret = -ENODEV;
-		}
-		break;
 	default:
 		ret = -EINVAL;
 	}


@@ -306,10 +306,7 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
 
 	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
 	domain = cookie->fq_domain;
-	/*
-	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
-	 * implies that ops->flush_iotlb_all must be non-NULL.
-	 */
+
 	domain->ops->flush_iotlb_all(domain);
 }
 
@@ -336,7 +333,6 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	unsigned long order, base_pfn;
 	struct iova_domain *iovad;
-	int attr;
 
 	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
 		return -EINVAL;
@@ -373,8 +369,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	init_iova_domain(iovad, 1UL << order, base_pfn);
 
 	if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) &&
-	    !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) &&
-	    attr) {
+	    domain->ops->flush_iotlb_all && !iommu_get_dma_strict(domain)) {
 		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
 					  iommu_dma_entry_dtor))
 			pr_warn("iova flush queue initialization failed\n");


@@ -4347,6 +4347,17 @@ int __init intel_iommu_init(void)
 
 	down_read(&dmar_global_lock);
 	for_each_active_iommu(iommu, drhd) {
+		/*
+		 * The flush queue implementation does not perform
+		 * page-selective invalidations that are required for efficient
+		 * TLB flushes in virtual environments. The benefit of batching
+		 * is likely to be much lower than the overhead of synchronizing
+		 * the virtual and physical IOMMU page-tables.
+		 */
+		if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
+			pr_warn("IOMMU batching is disabled due to virtualization");
+			intel_iommu_strict = 1;
+		}
 		iommu_device_sysfs_add(&iommu->iommu, NULL,
 				       intel_iommu_groups,
 				       "%s", iommu->name);
@@ -4355,6 +4366,7 @@ int __init intel_iommu_init(void)
 	}
 	up_read(&dmar_global_lock);
 
+	iommu_set_dma_strict(intel_iommu_strict);
 	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 	if (si_domain && !hw_pass_through)
 		register_memory_notifier(&intel_iommu_memory_nb);
@@ -5413,57 +5425,6 @@ intel_iommu_enable_nesting(struct iommu_domain *domain)
 	return ret;
 }
 
-static bool domain_use_flush_queue(void)
-{
-	struct dmar_drhd_unit *drhd;
-	struct intel_iommu *iommu;
-	bool r = true;
-
-	if (intel_iommu_strict)
-		return false;
-
-	/*
-	 * The flush queue implementation does not perform page-selective
-	 * invalidations that are required for efficient TLB flushes in virtual
-	 * environments. The benefit of batching is likely to be much lower than
-	 * the overhead of synchronizing the virtual and physical IOMMU
-	 * page-tables.
-	 */
-	rcu_read_lock();
-	for_each_active_iommu(iommu, drhd) {
-		if (!cap_caching_mode(iommu->cap))
-			continue;
-
-		pr_warn_once("IOMMU batching is disabled due to virtualization");
-		r = false;
-		break;
-	}
-	rcu_read_unlock();
-
-	return r;
-}
-
-static int
-intel_iommu_domain_get_attr(struct iommu_domain *domain,
-			    enum iommu_attr attr, void *data)
-{
-	switch (domain->type) {
-	case IOMMU_DOMAIN_UNMANAGED:
-		return -ENODEV;
-	case IOMMU_DOMAIN_DMA:
-		switch (attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			*(int *)data = domain_use_flush_queue();
-			return 0;
-		default:
-			return -ENODEV;
-		}
-		break;
-	default:
-		return -EINVAL;
-	}
-}
-
 /*
  * Check that the device does not live on an external facing PCI port that is
  * marked as untrusted. Such devices should not be able to apply quirks and
@@ -5536,7 +5497,6 @@ const struct iommu_ops intel_iommu_ops = {
 	.capable = intel_iommu_capable,
 	.domain_alloc = intel_iommu_domain_alloc,
 	.domain_free = intel_iommu_domain_free,
-	.domain_get_attr = intel_iommu_domain_get_attr,
 	.enable_nesting = intel_iommu_enable_nesting,
 	.attach_dev = intel_iommu_attach_device,
 	.detach_dev = intel_iommu_detach_device,


@@ -69,6 +69,7 @@ static const char * const iommu_group_resv_type_string[] = {
 };
 
 #define IOMMU_CMD_LINE_DMA_API BIT(0)
+#define IOMMU_CMD_LINE_STRICT BIT(1)
 
 static int iommu_alloc_default_domain(struct iommu_group *group,
 				      struct device *dev);
@@ -318,10 +319,29 @@ early_param("iommu.passthrough", iommu_set_def_domain_type);
 
 static int __init iommu_dma_setup(char *str)
 {
-	return kstrtobool(str, &iommu_dma_strict);
+	int ret = kstrtobool(str, &iommu_dma_strict);
+
+	if (!ret)
+		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
+	return ret;
 }
 early_param("iommu.strict", iommu_dma_setup);
 
+void iommu_set_dma_strict(bool strict)
+{
+	if (strict || !(iommu_cmd_line & IOMMU_CMD_LINE_STRICT))
+		iommu_dma_strict = strict;
+}
+
+bool iommu_get_dma_strict(struct iommu_domain *domain)
+{
+	/* only allow lazy flushing for DMA domains */
+	if (domain->type == IOMMU_DOMAIN_DMA)
+		return iommu_dma_strict;
+	return true;
+}
+EXPORT_SYMBOL_GPL(iommu_get_dma_strict);
+
 static ssize_t iommu_group_attr_show(struct kobject *kobj,
 				     struct attribute *__attr, char *buf)
 {
@@ -1500,14 +1520,6 @@ static int iommu_group_alloc_default_domain(struct bus_type *bus,
 	group->default_domain = dom;
 	if (!group->domain)
 		group->domain = dom;
 
-	if (!iommu_dma_strict) {
-		int attr = 1;
-
-		iommu_domain_set_attr(dom,
-				      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
-				      &attr);
-	}
-
 	return 0;
 }


@@ -107,7 +107,6 @@ enum iommu_cap {
  */
 
 enum iommu_attr {
-	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
 	DOMAIN_ATTR_IO_PGTABLE_CFG,
 	DOMAIN_ATTR_MAX,
 };
@@ -514,6 +513,9 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
 				 void *data);
 int iommu_enable_nesting(struct iommu_domain *domain);
 
+void iommu_set_dma_strict(bool val);
+bool iommu_get_dma_strict(struct iommu_domain *domain);
+
 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
 			      unsigned long iova, int flags);