drm: msm: fix common struct sg_table related issues
Documentation/DMA-API-HOWTO.txt states that dma_map_sg() returns the number of entries created in the DMA address space. However, the subsequent calls to dma_sync_sg_for_{device,cpu}() and dma_unmap_sg() must be made with the original number of entries passed to dma_map_sg().

struct sg_table is a common structure used for describing a non-contiguous memory buffer; it is widely used in the DRM and graphics subsystems. It consists of a scatterlist with memory pages and DMA addresses (sgl entry), as well as the number of scatterlist entries: CPU pages (orig_nents entry) and DMA mapped pages (nents entry).

It turned out that it was a common mistake to misuse the nents and orig_nents entries, calling DMA-mapping functions with the wrong number of entries or ignoring the number of mapped entries returned by dma_map_sg().

To avoid such issues, let's use the common dma-mapping wrappers that operate directly on struct sg_table objects, and use scatterlist page iterators where possible. This almost always hides references to the nents and orig_nents entries, making the code robust, easier to follow and copy/paste safe.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Rob Clark <robdclark@gmail.com>
Parent: 90dcf44490
Commit: 7690a33f22
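To illustrate the pitfall the commit message describes, here is a minimal sketch, not part of the patch; broken_map() and fixed_map() are hypothetical helpers used only to contrast the open-coded calls with the sgtable wrappers:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* BROKEN: reuses the value returned by dma_map_sg() for unmapping. */
	static int broken_map(struct device *dev, struct sg_table *sgt)
	{
		int nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents,
				       DMA_BIDIRECTIONAL);
		if (nents == 0)
			return -ENOMEM;
		/*
		 * An IOMMU may coalesce entries, so nents can be smaller
		 * than orig_nents; unmapping with nents skips the tail
		 * entries of the scatterlist.
		 */
		dma_unmap_sg(dev, sgt->sgl, nents, DMA_BIDIRECTIONAL);
		return 0;
	}

	/* FIXED: the wrappers track nents/orig_nents inside struct sg_table. */
	static int fixed_map(struct device *dev, struct sg_table *sgt)
	{
		int ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);

		if (ret)
			return ret;
		dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
		return 0;
	}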
@@ -53,11 +53,10 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
 	struct device *dev = msm_obj->base.dev->dev;
 
 	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		dma_sync_sgtable_for_device(dev, msm_obj->sgt,
+				DMA_BIDIRECTIONAL);
 	} else {
-		dma_map_sg(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 	}
 }
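For reference, dma_map_sgtable() used above roughly expands to the following, a simplified sketch of the generic helper rather than code from this patch: it maps all orig_nents CPU entries, records the number of DMA mapped entries in nents, and returns an error code instead of the legacy zero-on-failure count.

	static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
			enum dma_data_direction dir, unsigned long attrs)
	{
		int nents;

		nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
		if (nents <= 0)
			return -EINVAL;
		sgt->nents = nents;	/* number of DMA mapped entries */
		return 0;
	}

Note that sync_for_device() keeps ignoring the mapping result here, just as the open-coded dma_map_sg() call did before.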
@@ -66,11 +65,9 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
 	struct device *dev = msm_obj->base.dev->dev;
 
 	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		dma_sync_sgtable_for_cpu(dev, msm_obj->sgt, DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 	}
 }
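The unmap and sync wrappers are simpler still: they always pass orig_nents, the number of CPU entries originally handed to dma_map_sg(), which is exactly the rule from DMA-API-HOWTO.txt that open-coded callers kept getting wrong. A rough sketch of the helpers:

	static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
			enum dma_data_direction dir, unsigned long attrs)
	{
		/* Always orig_nents: the count that was passed to dma_map_sg(). */
		dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	}

	static inline void dma_sync_sgtable_for_cpu(struct device *dev,
			struct sg_table *sgt, enum dma_data_direction dir)
	{
		dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
	}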
@@ -30,21 +30,20 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
 {
 	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
 	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
-	struct scatterlist *sg;
+	struct sg_dma_page_iter dma_iter;
 	unsigned prot_bits = 0;
-	unsigned i, j;
 
 	if (prot & IOMMU_WRITE)
 		prot_bits |= 1;
 	if (prot & IOMMU_READ)
 		prot_bits |= 2;
 
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		dma_addr_t addr = sg->dma_address;
-		for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
-			gpummu->table[idx] = addr | prot_bits;
-			addr += GPUMMU_PAGE_SIZE;
-		}
+	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
+		int i;
+
+		for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
+			gpummu->table[idx++] = (addr + i) | prot_bits;
 	}
 
 	/* we can improve by deferring flush for multiple map() */
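for_each_sgtable_dma_page() walks the DMA-mapped side of the table in PAGE_SIZE steps, regardless of how many scatterlist entries the IOMMU coalesced, which is why the new inner loop splits each step into GPUMMU_PAGE_SIZE chunks. A self-contained sketch of the iteration pattern; collect_dma_pages() is a hypothetical helper, not from this patch:

	#include <linux/scatterlist.h>

	/* Hypothetical helper: record every DMA page address of a mapped table. */
	static void collect_dma_pages(struct sg_table *sgt, dma_addr_t *out)
	{
		struct sg_dma_page_iter dma_iter;
		unsigned int n = 0;

		/* Iterates the mapped (nents) entries in PAGE_SIZE steps. */
		for_each_sgtable_dma_page(sgt, &dma_iter, 0)
			out[n++] = sg_page_iter_dma_address(&dma_iter);
	}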
@@ -36,7 +36,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	size_t ret;
 
-	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
 	WARN_ON(!ret);
 
 	return (ret == len) ? 0 : -EINVAL;
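This hunk fixes a subtler instance of the same mistake: iommu_map_sg() maps CPU pages, so it needs the CPU entry count, while the old code passed the DMA-mapped count nents. The wrapper roughly expands to the following simplified sketch:

	static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
	{
		/* IOMMU mapping operates on CPU pages, hence orig_nents. */
		return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
	}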