vdpasim: protect concurrent access to iommu iotlb

Iommu iotlb can be accessed by different cores for performing IO using
multiple virt queues. Add a spinlock to synchronize iotlb accesses.

This could be easily reproduced when using more than one pktgen thread
to inject traffic into the vdpa simulator.

Fixes: 2c53d0f64c06 ("vdpasim: vDPA device simulator")
Cc: stable@vger.kernel.org
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20200731073822.13326-1-jasowang@redhat.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
Max Gurtovoy authored 2020-07-31 15:38:22 +08:00, committed by Michael S. Tsirkin
Parent 6234f80574
Commit 0ea9ee430e
1 changed file with 27 additions and 4 deletions

View file

@ -71,6 +71,8 @@ struct vdpasim {
u32 status; u32 status;
u32 generation; u32 generation;
u64 features; u64 features;
/* spinlock to synchronize iommu table */
spinlock_t iommu_lock;
}; };
/* TODO: cross-endian support */ /* TODO: cross-endian support */
@ -136,7 +138,9 @@ static void vdpasim_reset(struct vdpasim *vdpasim)
for (i = 0; i < VDPASIM_VQ_NUM; i++) for (i = 0; i < VDPASIM_VQ_NUM; i++)
vdpasim_vq_reset(&vdpasim->vqs[i]); vdpasim_vq_reset(&vdpasim->vqs[i]);
spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_reset(vdpasim->iommu); vhost_iotlb_reset(vdpasim->iommu);
spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0; vdpasim->features = 0;
vdpasim->status = 0; vdpasim->status = 0;
@ -254,8 +258,10 @@ static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
/* For simplicity, use identical mapping to avoid e.g iova /* For simplicity, use identical mapping to avoid e.g iova
* allocator. * allocator.
*/ */
spin_lock(&vdpasim->iommu_lock);
ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1, ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
pa, dir_to_perm(dir)); pa, dir_to_perm(dir));
spin_unlock(&vdpasim->iommu_lock);
if (ret) if (ret)
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
@ -269,8 +275,10 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
struct vdpasim *vdpasim = dev_to_sim(dev); struct vdpasim *vdpasim = dev_to_sim(dev);
struct vhost_iotlb *iommu = vdpasim->iommu; struct vhost_iotlb *iommu = vdpasim->iommu;
spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_del_range(iommu, (u64)dma_addr, vhost_iotlb_del_range(iommu, (u64)dma_addr,
(u64)dma_addr + size - 1); (u64)dma_addr + size - 1);
spin_unlock(&vdpasim->iommu_lock);
} }
static void *vdpasim_alloc_coherent(struct device *dev, size_t size, static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@ -282,9 +290,10 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
void *addr = kmalloc(size, flag); void *addr = kmalloc(size, flag);
int ret; int ret;
if (!addr) spin_lock(&vdpasim->iommu_lock);
if (!addr) {
*dma_addr = DMA_MAPPING_ERROR; *dma_addr = DMA_MAPPING_ERROR;
else { } else {
u64 pa = virt_to_phys(addr); u64 pa = virt_to_phys(addr);
ret = vhost_iotlb_add_range(iommu, (u64)pa, ret = vhost_iotlb_add_range(iommu, (u64)pa,
@ -297,6 +306,7 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
} else } else
*dma_addr = (dma_addr_t)pa; *dma_addr = (dma_addr_t)pa;
} }
spin_unlock(&vdpasim->iommu_lock);
return addr; return addr;
} }
@ -308,8 +318,11 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
struct vdpasim *vdpasim = dev_to_sim(dev); struct vdpasim *vdpasim = dev_to_sim(dev);
struct vhost_iotlb *iommu = vdpasim->iommu; struct vhost_iotlb *iommu = vdpasim->iommu;
spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_del_range(iommu, (u64)dma_addr, vhost_iotlb_del_range(iommu, (u64)dma_addr,
(u64)dma_addr + size - 1); (u64)dma_addr + size - 1);
spin_unlock(&vdpasim->iommu_lock);
kfree(phys_to_virt((uintptr_t)dma_addr)); kfree(phys_to_virt((uintptr_t)dma_addr));
} }
@ -555,6 +568,7 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
u64 start = 0ULL, last = 0ULL - 1; u64 start = 0ULL, last = 0ULL - 1;
int ret; int ret;
spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_reset(vdpasim->iommu); vhost_iotlb_reset(vdpasim->iommu);
for (map = vhost_iotlb_itree_first(iotlb, start, last); map; for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
@ -564,10 +578,12 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
if (ret) if (ret)
goto err; goto err;
} }
spin_unlock(&vdpasim->iommu_lock);
return 0; return 0;
err: err:
vhost_iotlb_reset(vdpasim->iommu); vhost_iotlb_reset(vdpasim->iommu);
spin_unlock(&vdpasim->iommu_lock);
return ret; return ret;
} }
@ -575,16 +591,23 @@ static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
u64 pa, u32 perm) u64 pa, u32 perm)
{ {
struct vdpasim *vdpasim = vdpa_to_sim(vdpa); struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
int ret;
return vhost_iotlb_add_range(vdpasim->iommu, iova, spin_lock(&vdpasim->iommu_lock);
iova + size - 1, pa, perm); ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
perm);
spin_unlock(&vdpasim->iommu_lock);
return ret;
} }
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size) static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{ {
struct vdpasim *vdpasim = vdpa_to_sim(vdpa); struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1); vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
spin_unlock(&vdpasim->iommu_lock);
return 0; return 0;
} }