iommu: Rename iommu_tlb_* functions to iommu_iotlb_*

To keep naming consistent, we should stick with *iotlb*. This patch
renames a few remaining functions.

Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Link: https://lore.kernel.org/r/20200817210051.13546-1-murphyt7@tcd.ie
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author:    Tom Murphy, 2020-08-17 22:00:49 +01:00
Committer: Joerg Roedel
Parent:    f75aef392f
Commit:    aae4c8e27b
5 changed files with 10 additions and 10 deletions

drivers/iommu/dma-iommu.c

@@ -471,7 +471,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 	WARN_ON(unmapped != size);
 	if (!cookie->fq_domain)
-		iommu_tlb_sync(domain, &iotlb_gather);
+		iommu_iotlb_sync(domain, &iotlb_gather);
 	iommu_dma_free_iova(cookie, dma_addr, size);
 }

drivers/iommu/iommu.c

@@ -762,7 +762,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
 	}
-	iommu_flush_tlb_all(domain);
+	iommu_flush_iotlb_all(domain);
 out:
 	iommu_put_resv_regions(dev, &mappings);
@@ -2316,7 +2316,7 @@ size_t iommu_unmap(struct iommu_domain *domain,
 	iommu_iotlb_gather_init(&iotlb_gather);
 	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
-	iommu_tlb_sync(domain, &iotlb_gather);
+	iommu_iotlb_sync(domain, &iotlb_gather);
 	return ret;
 }

drivers/vfio/vfio_iommu_type1.c

@@ -774,7 +774,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
 	long unlocked = 0;
 	struct vfio_regions *entry, *next;
-	iommu_tlb_sync(domain->domain, iotlb_gather);
+	iommu_iotlb_sync(domain->domain, iotlb_gather);
 	list_for_each_entry_safe(entry, next, regions, list) {
 		unlocked += vfio_unpin_pages_remote(dma,

include/linux/io-pgtable.h

@@ -31,7 +31,7 @@ enum io_pgtable_fmt {
  *          single page.  IOMMUs that cannot batch TLB invalidation
  *          operations efficiently will typically issue them here, but
  *          others may decide to update the iommu_iotlb_gather structure
- *          and defer the invalidation until iommu_tlb_sync() instead.
+ *          and defer the invalidation until iommu_iotlb_sync() instead.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
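
For context, the deferred-invalidation pattern described in the comment above
looks roughly like this from the caller's side. This is a minimal sketch, not
part of the patch; it assumes a valid struct iommu_domain and uses the
post-rename names together with the existing iommu_unmap_fast() and
iommu_iotlb_gather_init() helpers:

#include <linux/iommu.h>

/* Batched ("gathered") IOTLB invalidation: unmap first, then flush once via
 * iommu_iotlb_sync() instead of per page. Error handling trimmed.
 */
static void example_unmap_batched(struct iommu_domain *domain,
				  unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);

	/* The driver may queue ranges into 'gather' (e.g. via
	 * iommu_iotlb_gather_add_page()) rather than flushing here.
	 */
	iommu_unmap_fast(domain, iova, size, &gather);

	/* Single deferred invalidation for everything gathered above. */
	iommu_iotlb_sync(domain, &gather);
}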

include/linux/iommu.h

@@ -514,13 +514,13 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
 			      unsigned long iova, int flags);
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
 	if (domain->ops->flush_iotlb_all)
 		domain->ops->flush_iotlb_all(domain);
 }
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
 				  struct iommu_iotlb_gather *iotlb_gather)
 {
 	if (domain->ops->iotlb_sync)
@@ -543,7 +543,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
 	if (gather->pgsize != size ||
 	    end < gather->start || start > gather->end) {
 		if (gather->pgsize)
-			iommu_tlb_sync(domain, gather);
+			iommu_iotlb_sync(domain, gather);
 		gather->pgsize = size;
 	}
@@ -725,11 +725,11 @@ static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
 	return 0;
 }
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
 }
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
 				  struct iommu_iotlb_gather *iotlb_gather)
 {
 }
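
As a quick illustration of what the renamed wrappers above dispatch to: an
IOMMU driver of this era supplies ->flush_iotlb_all() and ->iotlb_sync() in
its struct iommu_ops, and iommu_flush_iotlb_all()/iommu_iotlb_sync() simply
forward to them when present. The my_iommu_* names below are hypothetical;
this is a sketch of the callback shape, not code from any real driver:

#include <linux/iommu.h>

/* Reached via iommu_flush_iotlb_all(): drop every IOTLB entry of the domain. */
static void my_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	/* Issue a domain-wide invalidation on the hardware here. */
}

/* Reached via iommu_iotlb_sync(): flush only what was gathered. */
static void my_iommu_iotlb_sync(struct iommu_domain *domain,
				struct iommu_iotlb_gather *gather)
{
	/* Invalidate the range gather->start..gather->end, then wait for
	 * the hardware to finish the invalidation.
	 */
}

static const struct iommu_ops my_iommu_ops = {
	.flush_iotlb_all = my_iommu_flush_iotlb_all,
	.iotlb_sync	 = my_iommu_iotlb_sync,
	/* ... map/unmap and the other mandatory callbacks ... */
};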