iommu/virtio: Pass end address to viommu_add_mapping()

To support identity mappings, the virtio-iommu driver must be able to
represent full 64-bit ranges internally. Pass (start, end) instead of
(start, size) to viommu_add/del_mapping().
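
As a minimal illustration (not driver code; the names below are made up
for the sketch), an inclusive end can encode the full 64-bit space,
while a 64-bit size cannot, which is why the old interface needed the
size == 0 special case:

    /* Illustrative sketch only, not the virtio-iommu driver code. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Encoding the whole space as a size needs 2^64, which wraps
             * to 0 in 64 bits (hence the old "size == 0" convention). */
            uint64_t size_full = UINT64_MAX + (uint64_t)1; /* wraps to 0 */

            /* An inclusive end encodes the same range without overflow. */
            uint64_t start = 0, end = UINT64_MAX;

            printf("size encoding of full space: %llu\n",
                   (unsigned long long)size_full);
            printf("end encoding of full space: [%llu, %llu]\n",
                   (unsigned long long)start, (unsigned long long)end);
            return 0;
    }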

Clean up the comments. The one about the returned size was never true:
when sweeping the whole address space, the returned size will necessarily
be smaller than 2^64 (a 64-bit size_t cannot even represent 2^64).

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Link: https://lore.kernel.org/r/20211201173323.1045819-5-jean-philippe@linaro.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Jean-Philippe Brucker 2021-12-01 17:33:24 +00:00, committed by Joerg Roedel
Parent 5610979415
Commit c0c7635989
1 changed file with 15 additions and 16 deletions

--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -311,8 +311,8 @@ out_unlock:
  *
  * On success, return the new mapping. Otherwise return NULL.
  */
-static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
-                              phys_addr_t paddr, size_t size, u32 flags)
+static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
+                              phys_addr_t paddr, u32 flags)
 {
         unsigned long irqflags;
         struct viommu_mapping *mapping;
@@ -323,7 +323,7 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
 
         mapping->paddr = paddr;
         mapping->iova.start = iova;
-        mapping->iova.last = iova + size - 1;
+        mapping->iova.last = end;
         mapping->flags = flags;
 
         spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
@@ -338,26 +338,24 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
  *
  * @vdomain: the domain
  * @iova: start of the range
- * @size: size of the range. A size of 0 corresponds to the entire address
- *        space.
+ * @end: end of the range
  *
- * On success, returns the number of unmapped bytes (>= size)
+ * On success, returns the number of unmapped bytes
  */
 static size_t viommu_del_mappings(struct viommu_domain *vdomain,
-                                  unsigned long iova, size_t size)
+                                  u64 iova, u64 end)
 {
         size_t unmapped = 0;
         unsigned long flags;
-        unsigned long last = iova + size - 1;
         struct viommu_mapping *mapping = NULL;
         struct interval_tree_node *node, *next;
 
         spin_lock_irqsave(&vdomain->mappings_lock, flags);
-        next = interval_tree_iter_first(&vdomain->mappings, iova, last);
+        next = interval_tree_iter_first(&vdomain->mappings, iova, end);
         while (next) {
                 node = next;
                 mapping = container_of(node, struct viommu_mapping, iova);
-                next = interval_tree_iter_next(node, iova, last);
+                next = interval_tree_iter_next(node, iova, end);
 
                 /* Trying to split a mapping? */
                 if (mapping->iova.start < iova)
@@ -656,8 +654,8 @@ static void viommu_domain_free(struct iommu_domain *domain)
 {
         struct viommu_domain *vdomain = to_viommu_domain(domain);
 
-        /* Free all remaining mappings (size 2^64) */
-        viommu_del_mappings(vdomain, 0, 0);
+        /* Free all remaining mappings */
+        viommu_del_mappings(vdomain, 0, ULLONG_MAX);
 
         if (vdomain->viommu)
                 ida_free(&vdomain->viommu->domain_ids, vdomain->id);
@@ -742,6 +740,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
 {
         int ret;
         u32 flags;
+        u64 end = iova + size - 1;
         struct virtio_iommu_req_map map;
         struct viommu_domain *vdomain = to_viommu_domain(domain);
 
@@ -752,7 +751,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
         if (flags & ~vdomain->map_flags)
                 return -EINVAL;
 
-        ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
+        ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
         if (ret)
                 return ret;
 
@@ -761,7 +760,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
                 .domain = cpu_to_le32(vdomain->id),
                 .virt_start = cpu_to_le64(iova),
                 .phys_start = cpu_to_le64(paddr),
-                .virt_end = cpu_to_le64(iova + size - 1),
+                .virt_end = cpu_to_le64(end),
                 .flags = cpu_to_le32(flags),
         };
 
@@ -770,7 +769,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
 
         ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
         if (ret)
-                viommu_del_mappings(vdomain, iova, size);
+                viommu_del_mappings(vdomain, iova, end);
 
         return ret;
 }
@@ -783,7 +782,7 @@ static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
         struct virtio_iommu_req_unmap unmap;
         struct viommu_domain *vdomain = to_viommu_domain(domain);
 
-        unmapped = viommu_del_mappings(vdomain, iova, size);
+        unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
         if (unmapped < size)
                 return 0;
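
The interval tree comparisons above treat the range end as inclusive
(interval_tree_node.last), which is what lets ULLONG_MAX stand in for
"the whole space". A standalone sketch of that convention, using a mock
range type rather than the kernel's interval tree:

    /* Standalone sketch of the inclusive-end overlap test; struct range
     * is a mock standing in for interval_tree_node, not kernel code. */
    #include <stdint.h>
    #include <stdio.h>

    struct range {
            uint64_t start;
            uint64_t last;  /* inclusive, like interval_tree_node.last */
    };

    /* True if [r->start, r->last] intersects [iova, end], end inclusive. */
    static int overlaps(const struct range *r, uint64_t iova, uint64_t end)
    {
            return r->start <= end && r->last >= iova;
    }

    int main(void)
    {
            /* A mapping at the very top of the 64-bit address space. */
            struct range m = { 0xffffffff00000000ULL, UINT64_MAX };

            /* end = UINT64_MAX sweeps the whole space, as
             * viommu_domain_free() now does with ULLONG_MAX. */
            printf("matched: %d\n", overlaps(&m, 0, UINT64_MAX));
            return 0;
    }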