drm/exynos: remove 'pages' and 'page_size' elements in exynos gem buffer
Changelog v2: Removed redundant check for invalid sgl. Added check for valid page_offset in the beginning of exynos_drm_gem_map_buf. Changelog v1: The 'pages' structure is not required since we can use the 'sgt'. Even for CONTIG buffers, a SGT is created (which will have just one sgl). This SGT can be used during mmap instead of 'pages'. The 'page_size' element of the structure is also not used anywhere and is removed. This patch also fixes a memory leak where the 'pages' structure was being allocated during gem buffer allocation but not being freed during deallocation. Signed-off-by: Prathyush K <prathyush.k@samsung.com> Signed-off-by: Inki Dae <inki.dae@samsung.com> Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
This commit is contained in:
Родитель
dd265850f1
Коммит
465ed66060
|
@ -34,8 +34,6 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
|
|||
unsigned int flags, struct exynos_drm_gem_buf *buf)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned int npages, i = 0;
|
||||
struct scatterlist *sgl;
|
||||
enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
|
||||
|
||||
DRM_DEBUG_KMS("%s\n", __FILE__);
|
||||
|
@ -73,22 +71,6 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
|
|||
goto err_free_sgt;
|
||||
}
|
||||
|
||||
npages = buf->sgt->nents;
|
||||
|
||||
buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
|
||||
if (!buf->pages) {
|
||||
DRM_ERROR("failed to allocate pages.\n");
|
||||
ret = -ENOMEM;
|
||||
goto err_free_table;
|
||||
}
|
||||
|
||||
sgl = buf->sgt->sgl;
|
||||
while (i < npages) {
|
||||
buf->pages[i] = sg_page(sgl);
|
||||
sgl = sg_next(sgl);
|
||||
i++;
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
|
||||
(unsigned long)buf->kvaddr,
|
||||
(unsigned long)buf->dma_addr,
|
||||
|
@ -96,8 +78,6 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
|
|||
|
||||
return ret;
|
||||
|
||||
err_free_table:
|
||||
sg_free_table(buf->sgt);
|
||||
err_free_sgt:
|
||||
kfree(buf->sgt);
|
||||
buf->sgt = NULL;
|
||||
|
|
|
@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
|
|||
void exynos_drm_fini_buf(struct drm_device *dev,
|
||||
struct exynos_drm_gem_buf *buffer);
|
||||
|
||||
/* allocate physical memory region and setup sgt and pages. */
|
||||
/* allocate physical memory region and setup sgt. */
|
||||
int exynos_drm_alloc_buf(struct drm_device *dev,
|
||||
struct exynos_drm_gem_buf *buf,
|
||||
unsigned int flags);
|
||||
|
||||
/* release physical memory region, sgt and pages. */
|
||||
/* release physical memory region, and sgt. */
|
||||
void exynos_drm_free_buf(struct drm_device *dev,
|
||||
unsigned int flags,
|
||||
struct exynos_drm_gem_buf *buffer);
|
||||
|
|
|
@ -87,8 +87,7 @@ static struct sg_table *
|
|||
goto err_unlock;
|
||||
}
|
||||
|
||||
DRM_DEBUG_PRIME("buffer size = 0x%lx page_size = 0x%lx\n",
|
||||
buf->size, buf->page_size);
|
||||
DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
|
||||
|
||||
err_unlock:
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
|
|
@ -99,33 +99,22 @@ static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
|
|||
unsigned long pfn;
|
||||
int i;
|
||||
|
||||
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
|
||||
if (!buf->sgt)
|
||||
return -EINTR;
|
||||
|
||||
if (page_offset >= (buf->size >> PAGE_SHIFT)) {
|
||||
DRM_ERROR("invalid page offset\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
sgl = buf->sgt->sgl;
|
||||
for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
|
||||
if (!sgl) {
|
||||
DRM_ERROR("invalid SG table\n");
|
||||
return -EINTR;
|
||||
}
|
||||
if (page_offset < (sgl->length >> PAGE_SHIFT))
|
||||
break;
|
||||
page_offset -= (sgl->length >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
if (i >= buf->sgt->nents) {
|
||||
DRM_ERROR("invalid page offset\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
|
||||
} else {
|
||||
if (!buf->pages)
|
||||
return -EINTR;
|
||||
|
||||
pfn = page_to_pfn(buf->pages[0]) + page_offset;
|
||||
}
|
||||
|
||||
return vm_insert_mixed(vma, f_vaddr, pfn);
|
||||
}
|
||||
|
|
|
@ -41,8 +41,6 @@
|
|||
* device address with IOMMU.
|
||||
* @write: whether pages will be written to by the caller.
|
||||
* @sgt: sg table to transfer page data.
|
||||
* @pages: contain all pages to allocated memory region.
|
||||
* @page_size: could be 4K, 64K or 1MB.
|
||||
* @size: size of allocated memory region.
|
||||
* @pfnmap: indicate whether memory region from userptr is mmaped with
|
||||
* VM_PFNMAP or not.
|
||||
|
@ -54,8 +52,6 @@ struct exynos_drm_gem_buf {
|
|||
struct dma_attrs dma_attrs;
|
||||
unsigned int write;
|
||||
struct sg_table *sgt;
|
||||
struct page **pages;
|
||||
unsigned long page_size;
|
||||
unsigned long size;
|
||||
bool pfnmap;
|
||||
};
|
||||
|
|
Загрузка…
Ссылка в новой задаче