media updates for v4.3-rc1
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJV8WvjAAoJEAhfPr2O5OEV5wIP/AjmqOau99ms4FvOQ932sO57
kKDM4CYeTBkYY2Xz2eGStgxhcEj538JTf6SXdrceEEYJHb/GNCb2iBM1TnB4YciF
rqhFv+n3R8h4Yn5KmhEhYzEfO7HUoyHPrOhcmTLzDoTO5wyrhAlPZxDWHohmfU84
uQ8WyGPYLxwm8hdZ+/NkB8PXsGbWN65EoKzN6tt2kA6HUP52UxE0Cw7Qu7Iu5zmO
y/x03mMbjhCBFFE41EeM76J+xKBhuaS4cyf8g08DJy5Zpf6ic8bKFmVg1tAFOZRD
mCETLrUlPYhglHqOoVS25bCI5kCw9xTAyjPZdQnwCTwgHl5gG3E4oJYKASrmZlps
igMSmLJEpQilsLy1Ze+K+Ci8EILmZzwbi21X0sbjq74Jd+tJZ+C8ZuWHVmPEF9j7
iHtZNIRzkzufNBJZn3DsmlGBb/Xc/UqfZVnJAB9gu3Ktav6dmtEIHrGRPpL19iYH
WtJWLt/Bpyb318K+fnxL8SzUqUxZJ4+8DrMtlgTqHmIRwVQ4CczyeWi0utQmBXEF
CaNp00S2V9N1hn8OIc+gaf7LTYJn0LkHFsskoiUZ5aZQd9ai0ql0IT1xLe0r8lMi
+ieB0Vp4wJtaodWIXOPeFugDqQXIb0Mh2M8J9FIJ116FLIai6btzO2iyVCtlR9Bg
1uPztCfJ/nusPPHnE26R
=TEFw
-----END PGP SIGNATURE-----

Merge tag 'media/v4.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media

Pull media updates from Mauro Carvalho Chehab:
 "A series of patches that move part of the code used to allocate
  memory from the media subsystem to the mm subsystem"

[ The mm parts have been acked by VM people, and the series was
  apparently in -mm for a while - Linus ]

* tag 'media/v4.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media:
  [media] drm/exynos: Convert g2d_userptr_get_dma_addr() to use get_vaddr_frames()
  [media] media: vb2: Remove unused functions
  [media] media: vb2: Convert vb2_dc_get_userptr() to use frame vector
  [media] media: vb2: Convert vb2_vmalloc_get_userptr() to use frame vector
  [media] media: vb2: Convert vb2_dma_sg_get_userptr() to use frame vector
  [media] vb2: Provide helpers for mapping virtual addresses
  [media] media: omap_vout: Convert omap_vout_uservirt_to_phys() to use get_vaddr_pfns()
  [media] mm: Provide new get_vaddr_frames() helper
  [media] vb2: Push mmap_sem down to memops
This commit is contained in:
commit 06a660ada2
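For orientation, the sketch below shows the pinning convention this series introduces. It is assembled from the helpers added in this pull (get_vaddr_frames() and friends); demo_pin_user_buffer() itself is hypothetical and not code from the merge.

#include <linux/mm.h>
#include <linux/err.h>

/* Pin nr_pages of a user buffer at uaddr for writing, use them, release. */
static int demo_pin_user_buffer(unsigned long uaddr, unsigned int nr_pages)
{
	struct frame_vector *vec;
	struct page **pages;
	unsigned int i;
	int ret;

	vec = frame_vector_create(nr_pages);
	if (!vec)
		return -ENOMEM;

	/* write = true, force = false; mmap_sem is taken internally */
	ret = get_vaddr_frames(uaddr & PAGE_MASK, nr_pages, true, false, vec);
	if (ret < 0)
		goto destroy;
	if (ret != nr_pages) {		/* partial pin: treat as failure */
		ret = -EFAULT;
		goto release;
	}

	pages = frame_vector_pages(vec);
	if (IS_ERR(pages)) {		/* VM_IO / VM_PFNMAP area: pfns only */
		ret = PTR_ERR(pages);
		goto release;
	}

	/* ... hand pages[0..nr_pages-1] to the device here ... */
	for (i = 0; i < frame_vector_count(vec); i++)
		set_page_dirty_lock(pages[i]);
	ret = 0;

release:
	put_vaddr_frames(vec);
destroy:
	frame_vector_destroy(vec);
	return ret;
}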
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -77,6 +77,7 @@ config DRM_EXYNOS_VIDI
 config DRM_EXYNOS_G2D
 	bool "Exynos DRM G2D"
 	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
+	select FRAME_VECTOR
 	help
 	  Choose this option if you want to use Exynos G2D for DRM.
 
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -194,10 +194,8 @@ struct g2d_cmdlist_userptr {
 	dma_addr_t		dma_addr;
 	unsigned long		userptr;
 	unsigned long		size;
-	struct page		**pages;
-	unsigned int		npages;
+	struct frame_vector	*vec;
 	struct sg_table		*sgt;
-	struct vm_area_struct	*vma;
 	atomic_t		refcount;
 	bool			in_pool;
 	bool			out_of_list;
@@ -367,6 +365,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
 {
 	struct g2d_cmdlist_userptr *g2d_userptr =
 					(struct g2d_cmdlist_userptr *)obj;
+	struct page **pages;
 
 	if (!obj)
 		return;
@@ -386,19 +385,21 @@ out:
 	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
 					DMA_BIDIRECTIONAL);
 
-	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-					g2d_userptr->npages,
-					g2d_userptr->vma);
-
-	exynos_gem_put_vma(g2d_userptr->vma);
+	pages = frame_vector_pages(g2d_userptr->vec);
+	if (!IS_ERR(pages)) {
+		int i;
+
+		for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
+			set_page_dirty_lock(pages[i]);
+	}
+	put_vaddr_frames(g2d_userptr->vec);
+	frame_vector_destroy(g2d_userptr->vec);
 
 	if (!g2d_userptr->out_of_list)
 		list_del_init(&g2d_userptr->list);
 
 	sg_free_table(g2d_userptr->sgt);
 	kfree(g2d_userptr->sgt);
 
-	drm_free_large(g2d_userptr->pages);
 	kfree(g2d_userptr);
 }
 
@@ -412,9 +413,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
 	struct g2d_cmdlist_userptr *g2d_userptr;
 	struct g2d_data *g2d;
-	struct page **pages;
 	struct sg_table	*sgt;
-	struct vm_area_struct *vma;
 	unsigned long start, end;
 	unsigned int npages, offset;
 	int ret;
@@ -460,65 +459,40 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 		return ERR_PTR(-ENOMEM);
 
 	atomic_set(&g2d_userptr->refcount, 1);
+	g2d_userptr->size = size;
 
 	start = userptr & PAGE_MASK;
 	offset = userptr & ~PAGE_MASK;
 	end = PAGE_ALIGN(userptr + size);
 	npages = (end - start) >> PAGE_SHIFT;
-	g2d_userptr->npages = npages;
-
-	pages = drm_calloc_large(npages, sizeof(struct page *));
-	if (!pages) {
-		DRM_ERROR("failed to allocate pages.\n");
+	g2d_userptr->vec = frame_vector_create(npages);
+	if (!g2d_userptr->vec) {
 		ret = -ENOMEM;
 		goto err_free;
 	}
 
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm, userptr);
-	if (!vma) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("failed to get vm region.\n");
-		ret = -EFAULT;
-		goto err_free_pages;
-	}
-
-	if (vma->vm_end < userptr + size) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("vma is too small.\n");
-		ret = -EFAULT;
-		goto err_free_pages;
-	}
-
-	g2d_userptr->vma = exynos_gem_get_vma(vma);
-	if (!g2d_userptr->vma) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("failed to copy vma.\n");
-		ret = -ENOMEM;
-		goto err_free_pages;
-	}
-
-	g2d_userptr->size = size;
-
-	ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
-						npages, pages, vma);
-	if (ret < 0) {
-		up_read(&current->mm->mmap_sem);
+	ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+	if (ret != npages) {
 		DRM_ERROR("failed to get user pages from userptr.\n");
-		goto err_put_vma;
+		if (ret < 0)
+			goto err_destroy_framevec;
+		ret = -EFAULT;
+		goto err_put_framevec;
+	}
+	if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
+		ret = -EFAULT;
+		goto err_put_framevec;
 	}
 
-	up_read(&current->mm->mmap_sem);
-	g2d_userptr->pages = pages;
-
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt) {
 		ret = -ENOMEM;
-		goto err_free_userptr;
+		goto err_put_framevec;
 	}
 
-	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
-					size, GFP_KERNEL);
+	ret = sg_alloc_table_from_pages(sgt,
+			frame_vector_pages(g2d_userptr->vec),
+			npages, offset, size, GFP_KERNEL);
 	if (ret < 0) {
 		DRM_ERROR("failed to get sgt from pages.\n");
 		goto err_free_sgt;
@@ -553,16 +527,11 @@ err_sg_free_table:
 err_free_sgt:
 	kfree(sgt);
 
-err_free_userptr:
-	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-					g2d_userptr->npages,
-					g2d_userptr->vma);
-
-err_put_vma:
-	exynos_gem_put_vma(g2d_userptr->vma);
-
-err_free_pages:
-	drm_free_large(pages);
+err_put_framevec:
+	put_vaddr_frames(g2d_userptr->vec);
+
+err_destroy_framevec:
+	frame_vector_destroy(g2d_userptr->vec);
 
 err_free:
 	kfree(g2d_userptr);
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -366,103 +366,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
-{
-	struct vm_area_struct *vma_copy;
-
-	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
-	if (!vma_copy)
-		return NULL;
-
-	if (vma->vm_ops && vma->vm_ops->open)
-		vma->vm_ops->open(vma);
-
-	if (vma->vm_file)
-		get_file(vma->vm_file);
-
-	memcpy(vma_copy, vma, sizeof(*vma));
-
-	vma_copy->vm_mm = NULL;
-	vma_copy->vm_next = NULL;
-	vma_copy->vm_prev = NULL;
-
-	return vma_copy;
-}
-
-void exynos_gem_put_vma(struct vm_area_struct *vma)
-{
-	if (!vma)
-		return;
-
-	if (vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
-
-	if (vma->vm_file)
-		fput(vma->vm_file);
-
-	kfree(vma);
-}
-
-int exynos_gem_get_pages_from_userptr(unsigned long start,
-						unsigned int npages,
-						struct page **pages,
-						struct vm_area_struct *vma)
-{
-	int get_npages;
-
-	/* the memory region mmaped with VM_PFNMAP. */
-	if (vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
-			unsigned long pfn;
-			int ret = follow_pfn(vma, start, &pfn);
-			if (ret)
-				return ret;
-
-			pages[i] = pfn_to_page(pfn);
-		}
-
-		if (i != npages) {
-			DRM_ERROR("failed to get user_pages.\n");
-			return -EINVAL;
-		}
-
-		return 0;
-	}
-
-	get_npages = get_user_pages(current, current->mm, start,
-					npages, 1, 1, pages, NULL);
-	get_npages = max(get_npages, 0);
-	if (get_npages != npages) {
-		DRM_ERROR("failed to get user_pages.\n");
-		while (get_npages)
-			put_page(pages[--get_npages]);
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-void exynos_gem_put_pages_to_userptr(struct page **pages,
-					unsigned int npages,
-					struct vm_area_struct *vma)
-{
-	if (!vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; i++) {
-			set_page_dirty_lock(pages[i]);
-
-			/*
-			 * undo the reference we took when populating
-			 * the table.
-			 */
-			put_page(pages[i]);
-		}
-	}
-}
-
 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
 				struct sg_table *sgt,
 				enum dma_data_direction dir)
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -10,6 +10,7 @@ config VIDEO_OMAP2_VOUT
 	select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
 	select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
 	select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
+	select FRAME_VECTOR
 	default n
 	---help---
 	  V4L2 Display driver support for OMAP2/3 based boards.
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -195,46 +195,34 @@ static int omap_vout_try_format(struct v4l2_pix_format *pix)
 }
 
 /*
- * omap_vout_uservirt_to_phys: This inline function is used to convert user
- * space virtual address to physical address.
+ * omap_vout_get_userptr: Convert user space virtual address to physical
+ * address.
  */
-static unsigned long omap_vout_uservirt_to_phys(unsigned long virtp)
+static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
+				 u32 *physp)
 {
-	unsigned long physp = 0;
-	struct vm_area_struct *vma;
-	struct mm_struct *mm = current->mm;
+	struct frame_vector *vec;
+	int ret;
 
 	/* For kernel direct-mapped memory, take the easy way */
-	if (virtp >= PAGE_OFFSET)
-		return virt_to_phys((void *) virtp);
-
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(mm, virtp);
-	if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
-		/* this will catch, kernel-allocated, mmaped-to-usermode
-		   addresses */
-		physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
-		up_read(&current->mm->mmap_sem);
-	} else {
-		/* otherwise, use get_user_pages() for general userland pages */
-		int res, nr_pages = 1;
-		struct page *pages;
-
-		res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
-				0, &pages, NULL);
-		up_read(&current->mm->mmap_sem);
-
-		if (res == nr_pages) {
-			physp = __pa(page_address(&pages[0]) +
-					(virtp & ~PAGE_MASK));
-		} else {
-			printk(KERN_WARNING VOUT_NAME
-					"get_user_pages failed\n");
-			return 0;
-		}
+	if (virtp >= PAGE_OFFSET) {
+		*physp = virt_to_phys((void *)virtp);
+		return 0;
 	}
 
-	return physp;
+	vec = frame_vector_create(1);
+	if (!vec)
+		return -ENOMEM;
+
+	ret = get_vaddr_frames(virtp, 1, true, false, vec);
+	if (ret != 1) {
+		frame_vector_destroy(vec);
+		return -EINVAL;
+	}
+	*physp = __pfn_to_phys(frame_vector_pfns(vec)[0]);
+	vb->priv = vec;
+
+	return 0;
 }
 
 /*
@@ -784,11 +772,15 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
 	 * address of the buffer
 	 */
 	if (V4L2_MEMORY_USERPTR == vb->memory) {
+		int ret;
+
 		if (0 == vb->baddr)
 			return -EINVAL;
-		/* Physical address */
-		vout->queued_buf_addr[vb->i] = (u8 *)
-			omap_vout_uservirt_to_phys(vb->baddr);
+		ret = omap_vout_get_userptr(vb, vb->baddr,
+				(u32 *)&vout->queued_buf_addr[vb->i]);
+		if (ret < 0)
+			return ret;
 	} else {
 		unsigned long addr, dma_addr;
 		unsigned long size;
@@ -834,12 +826,13 @@ static void omap_vout_buffer_queue(struct videobuf_queue *q,
 static void omap_vout_buffer_release(struct videobuf_queue *q,
 			    struct videobuf_buffer *vb)
 {
-	struct omap_vout_device *vout = q->priv_data;
-
 	vb->state = VIDEOBUF_NEEDS_INIT;
+	if (vb->memory == V4L2_MEMORY_USERPTR && vb->priv) {
+		struct frame_vector *vec = vb->priv;
 
-	if (V4L2_MEMORY_MMAP != vout->memory)
-		return;
+		put_vaddr_frames(vec);
+		frame_vector_destroy(vec);
+	}
 }
 
 /*
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -84,6 +84,7 @@ config VIDEOBUF2_CORE
 
 config VIDEOBUF2_MEMOPS
 	tristate
+	select FRAME_VECTOR
 
 config VIDEOBUF2_DMA_CONTIG
 	tristate
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1691,9 +1691,7 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 		ret = __qbuf_mmap(vb, b);
 		break;
 	case V4L2_MEMORY_USERPTR:
-		down_read(&current->mm->mmap_sem);
 		ret = __qbuf_userptr(vb, b);
-		up_read(&current->mm->mmap_sem);
 		break;
 	case V4L2_MEMORY_DMABUF:
 		ret = __qbuf_dmabuf(vb, b);
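The hunk above is the "vb2: Push mmap_sem down to memops" part of the series: the core stops wrapping __qbuf_userptr() in mmap_sem because get_vaddr_frames() (added in mm/frame_vector.c below) takes the lock itself. A minimal sketch of the resulting locking contract; demo_qbuf_userptr() is hypothetical:

#include <linux/mm.h>

static int demo_qbuf_userptr(unsigned long vaddr, unsigned int nr_pages,
			     struct frame_vector *vec)
{
	/* no down_read(&current->mm->mmap_sem) in the caller anymore;
	 * the pinning helper grabs and drops it as needed */
	return get_vaddr_frames(vaddr & PAGE_MASK, nr_pages, true, false, vec);
}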
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -32,15 +32,13 @@ struct vb2_dc_buf {
 	dma_addr_t			dma_addr;
 	enum dma_data_direction		dma_dir;
 	struct sg_table			*dma_sgt;
+	struct frame_vector		*vec;
 
 	/* MMAP related */
 	struct vb2_vmarea_handler	handler;
 	atomic_t			refcount;
 	struct sg_table			*sgt_base;
 
-	/* USERPTR related */
-	struct vm_area_struct		*vma;
-
 	/* DMABUF related */
 	struct dma_buf_attachment	*db_attach;
 };
@@ -49,24 +47,6 @@ struct vb2_dc_buf {
 /*        scatterlist table functions       */
 /*********************************************/
 
-
-static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
-	void (*cb)(struct page *pg))
-{
-	struct scatterlist *s;
-	unsigned int i;
-
-	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-		struct page *page = sg_page(s);
-		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
-			>> PAGE_SHIFT;
-		unsigned int j;
-
-		for (j = 0; j < n_pages; ++j, ++page)
-			cb(page);
-	}
-}
-
 static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
 {
 	struct scatterlist *s;
@@ -429,92 +409,12 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
 /*       callbacks for USERPTR buffers      */
 /*********************************************/
 
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
-static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
-	struct vm_area_struct *vma, unsigned long *res)
-{
-	unsigned long pfn, start_pfn, prev_pfn;
-	unsigned int i;
-	int ret;
-
-	if (!vma_is_io(vma))
-		return -EFAULT;
-
-	ret = follow_pfn(vma, start, &pfn);
-	if (ret)
-		return ret;
-
-	start_pfn = pfn;
-	start += PAGE_SIZE;
-
-	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
-		prev_pfn = pfn;
-		ret = follow_pfn(vma, start, &pfn);
-
-		if (ret) {
-			pr_err("no page for address %lu\n", start);
-			return ret;
-		}
-		if (pfn != prev_pfn + 1)
-			return -EINVAL;
-	}
-
-	*res = start_pfn;
-	return 0;
-}
-
-static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
-	int n_pages, struct vm_area_struct *vma,
-	enum dma_data_direction dma_dir)
-{
-	if (vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
-			unsigned long pfn;
-			int ret = follow_pfn(vma, start, &pfn);
-
-			if (!pfn_valid(pfn))
-				return -EINVAL;
-
-			if (ret) {
-				pr_err("no page for address %lu\n", start);
-				return ret;
-			}
-			pages[i] = pfn_to_page(pfn);
-		}
-	} else {
-		int n;
-
-		n = get_user_pages(current, current->mm, start & PAGE_MASK,
-			n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
-		/* negative error means that no page was pinned */
-		n = max(n, 0);
-		if (n != n_pages) {
-			pr_err("got only %d of %d user pages\n", n, n_pages);
-			while (n)
-				put_page(pages[--n]);
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void vb2_dc_put_dirty_page(struct page *page)
-{
-	set_page_dirty_lock(page);
-	put_page(page);
-}
-
 static void vb2_dc_put_userptr(void *buf_priv)
 {
 	struct vb2_dc_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
+	int i;
+	struct page **pages;
 
 	if (sgt) {
 		DEFINE_DMA_ATTRS(attrs);
@@ -526,13 +426,15 @@ static void vb2_dc_put_userptr(void *buf_priv)
 		 */
 		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 				   buf->dma_dir, &attrs);
-		if (!vma_is_io(buf->vma))
-			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
-
+		pages = frame_vector_pages(buf->vec);
+		/* sgt should exist only if vector contains pages... */
+		BUG_ON(IS_ERR(pages));
+		for (i = 0; i < frame_vector_count(buf->vec); i++)
+			set_page_dirty_lock(pages[i]);
 		sg_free_table(sgt);
 		kfree(sgt);
 	}
-	vb2_put_vma(buf->vma);
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
 
@@ -572,13 +474,10 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 {
 	struct vb2_dc_conf *conf = alloc_ctx;
 	struct vb2_dc_buf *buf;
-	unsigned long start;
-	unsigned long end;
+	struct frame_vector *vec;
 	unsigned long offset;
-	struct page **pages;
-	int n_pages;
+	int n_pages, i;
 	int ret = 0;
-	struct vm_area_struct *vma;
 	struct sg_table *sgt;
 	unsigned long contig_size;
 	unsigned long dma_align = dma_get_cache_alignment();
@@ -604,72 +503,43 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->dev = conf->dev;
 	buf->dma_dir = dma_dir;
 
-	start = vaddr & PAGE_MASK;
 	offset = vaddr & ~PAGE_MASK;
-	end = PAGE_ALIGN(vaddr + size);
-	n_pages = (end - start) >> PAGE_SHIFT;
-
-	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
-	if (!pages) {
-		ret = -ENOMEM;
-		pr_err("failed to allocate pages table\n");
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec)) {
+		ret = PTR_ERR(vec);
 		goto fail_buf;
 	}
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	ret = frame_vector_to_pages(vec);
+	if (ret < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);
 
-	/* current->mm->mmap_sem is taken by videobuf2 core */
-	vma = find_vma(current->mm, vaddr);
-	if (!vma) {
-		pr_err("no vma for address %lu\n", vaddr);
-		ret = -EFAULT;
-		goto fail_pages;
-	}
-
-	if (vma->vm_end < vaddr + size) {
-		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
-		ret = -EFAULT;
-		goto fail_pages;
-	}
-
-	buf->vma = vb2_get_vma(vma);
-	if (!buf->vma) {
-		pr_err("failed to copy vma\n");
-		ret = -ENOMEM;
-		goto fail_pages;
-	}
-
-	/* extract page list from userspace mapping */
-	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
-	if (ret) {
-		unsigned long pfn;
-		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
-			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
-			buf->size = size;
-			kfree(pages);
-			return buf;
-		}
-
-		pr_err("failed to get user pages\n");
-		goto fail_vma;
+		/*
+		 * Failed to convert to pages... Check the memory is physically
+		 * contiguous and use direct mapping
+		 */
+		for (i = 1; i < n_pages; i++)
+			if (nums[i-1] + 1 != nums[i])
+				goto fail_pfnvec;
+		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
+		goto out;
 	}
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt) {
 		pr_err("failed to allocate sg table\n");
 		ret = -ENOMEM;
-		goto fail_get_user_pages;
+		goto fail_pfnvec;
 	}
 
-	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
 		offset, size, GFP_KERNEL);
 	if (ret) {
 		pr_err("failed to initialize sg table\n");
 		goto fail_sgt;
 	}
 
-	/* pages are no longer needed */
-	kfree(pages);
-	pages = NULL;
-
 	/*
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
@@ -691,8 +561,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	}
 
 	buf->dma_addr = sg_dma_address(sgt->sgl);
-	buf->size = size;
 	buf->dma_sgt = sgt;
+out:
+	buf->size = size;
 
 	return buf;
 
@@ -701,23 +572,13 @@ fail_map_sg:
 			   buf->dma_dir, &attrs);
 
 fail_sgt_init:
-	if (!vma_is_io(buf->vma))
-		vb2_dc_sgt_foreach_page(sgt, put_page);
 	sg_free_table(sgt);
 
 fail_sgt:
 	kfree(sgt);
 
-fail_get_user_pages:
-	if (pages && !vma_is_io(buf->vma))
-		while (n_pages)
-			put_page(pages[--n_pages]);
-
-fail_vma:
-	vb2_put_vma(buf->vma);
-
-fail_pages:
-	kfree(pages); /* kfree is NULL-proof */
+fail_pfnvec:
+	vb2_destroy_framevec(vec);
 
 fail_buf:
 	kfree(buf);
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -38,6 +38,7 @@ struct vb2_dma_sg_buf {
 	struct device			*dev;
 	void				*vaddr;
 	struct page			**pages;
+	struct frame_vector		*vec;
 	int				offset;
 	enum dma_data_direction		dma_dir;
 	struct sg_table			sg_table;
@@ -51,7 +52,6 @@ struct vb2_dma_sg_buf {
 	unsigned int			num_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
-	struct vm_area_struct		*vma;
 
 	struct dma_buf_attachment	*db_attach;
 };
@@ -225,25 +225,17 @@ static void vb2_dma_sg_finish(void *buf_priv)
 		dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 }
 
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 				    unsigned long size,
 				    enum dma_data_direction dma_dir)
 {
 	struct vb2_dma_sg_conf *conf = alloc_ctx;
 	struct vb2_dma_sg_buf *buf;
-	unsigned long first, last;
-	int num_pages_from_user;
-	struct vm_area_struct *vma;
 	struct sg_table *sgt;
 	DEFINE_DMA_ATTRS(attrs);
+	struct frame_vector *vec;
 
 	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return NULL;
@@ -254,61 +246,19 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
 	buf->dma_sgt = &buf->sg_table;
+	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec))
+		goto userptr_fail_pfnvec;
+	buf->vec = vec;
 
-	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
-	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
-	buf->num_pages = last - first + 1;
-
-	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
-			     GFP_KERNEL);
-	if (!buf->pages)
-		goto userptr_fail_alloc_pages;
-
-	vma = find_vma(current->mm, vaddr);
-	if (!vma) {
-		dprintk(1, "no vma for address %lu\n", vaddr);
-		goto userptr_fail_find_vma;
-	}
-
-	if (vma->vm_end < vaddr + size) {
-		dprintk(1, "vma at %lu is too small for %lu bytes\n",
-			vaddr, size);
-		goto userptr_fail_find_vma;
-	}
-
-	buf->vma = vb2_get_vma(vma);
-	if (!buf->vma) {
-		dprintk(1, "failed to copy vma\n");
-		goto userptr_fail_find_vma;
-	}
-
-	if (vma_is_io(buf->vma)) {
-		for (num_pages_from_user = 0;
-		     num_pages_from_user < buf->num_pages;
-		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
-			unsigned long pfn;
-
-			if (follow_pfn(vma, vaddr, &pfn)) {
-				dprintk(1, "no page for address %lu\n", vaddr);
-				break;
-			}
-			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
-		}
-	} else
-		num_pages_from_user = get_user_pages(current, current->mm,
-					     vaddr & PAGE_MASK,
-					     buf->num_pages,
-					     buf->dma_dir == DMA_FROM_DEVICE,
-					     1, /* force */
-					     buf->pages,
-					     NULL);
-
-	if (num_pages_from_user != buf->num_pages)
-		goto userptr_fail_get_user_pages;
+	buf->pages = frame_vector_pages(vec);
+	if (IS_ERR(buf->pages))
+		goto userptr_fail_sgtable;
+	buf->num_pages = frame_vector_count(vec);
 
 	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
 			buf->num_pages, buf->offset, size, 0))
-		goto userptr_fail_alloc_table_from_pages;
+		goto userptr_fail_sgtable;
 
 	sgt = &buf->sg_table;
 	/*
@@ -324,17 +274,9 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 
 userptr_fail_map:
 	sg_free_table(&buf->sg_table);
-userptr_fail_alloc_table_from_pages:
-userptr_fail_get_user_pages:
-	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
-		buf->num_pages, num_pages_from_user);
-	if (!vma_is_io(buf->vma))
-		while (--num_pages_from_user >= 0)
-			put_page(buf->pages[num_pages_from_user]);
-	vb2_put_vma(buf->vma);
-userptr_fail_find_vma:
-	kfree(buf->pages);
-userptr_fail_alloc_pages:
+userptr_fail_sgtable:
+	vb2_destroy_framevec(vec);
+userptr_fail_pfnvec:
 	kfree(buf);
 	return NULL;
 }
@@ -362,11 +304,8 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 		while (--i >= 0) {
 			if (buf->dma_dir == DMA_FROM_DEVICE)
 				set_page_dirty_lock(buf->pages[i]);
-			if (!vma_is_io(buf->vma))
-				put_page(buf->pages[i]);
 		}
-		kfree(buf->pages);
-		vb2_put_vma(buf->vma);
+		vb2_destroy_framevec(buf->vec);
 		kfree(buf);
 	}
 
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -23,118 +23,62 @@
 #include <media/videobuf2-memops.h>
 
 /**
- * vb2_get_vma() - acquire and lock the virtual memory area
- * @vma:	given virtual memory area
+ * vb2_create_framevec() - map virtual addresses to pfns
+ * @start:	Virtual user address where we start mapping
+ * @length:	Length of a range to map
+ * @write:	Should we map for writing into the area
  *
- * This function attempts to acquire an area mapped in the userspace for
- * the duration of a hardware operation. The area is "locked" by performing
- * the same set of operation that are done when process calls fork() and
- * memory areas are duplicated.
- *
- * Returns a copy of a virtual memory region on success or NULL.
+ * This function allocates and fills in a vector with pfns corresponding to
+ * virtual address range passed in arguments. If pfns have corresponding pages,
+ * page references are also grabbed to pin pages in memory. The function
+ * returns pointer to the vector on success and error pointer in case of
+ * failure. Returned vector needs to be freed via vb2_destroy_pfnvec().
  */
-struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
+struct frame_vector *vb2_create_framevec(unsigned long start,
+					 unsigned long length,
+					 bool write)
 {
-	struct vm_area_struct *vma_copy;
+	int ret;
+	unsigned long first, last;
+	unsigned long nr;
+	struct frame_vector *vec;
 
-	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
-	if (vma_copy == NULL)
-		return NULL;
-
-	if (vma->vm_ops && vma->vm_ops->open)
-		vma->vm_ops->open(vma);
-
-	if (vma->vm_file)
-		get_file(vma->vm_file);
-
-	memcpy(vma_copy, vma, sizeof(*vma));
-
-	vma_copy->vm_mm = NULL;
-	vma_copy->vm_next = NULL;
-	vma_copy->vm_prev = NULL;
-
-	return vma_copy;
-}
-EXPORT_SYMBOL_GPL(vb2_get_vma);
-
-/**
- * vb2_put_userptr() - release a userspace virtual memory area
- * @vma:	virtual memory region associated with the area to be released
- *
- * This function releases the previously acquired memory area after a hardware
- * operation.
- */
-void vb2_put_vma(struct vm_area_struct *vma)
-{
-	if (!vma)
-		return;
-
-	if (vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
-
-	if (vma->vm_file)
-		fput(vma->vm_file);
-
-	kfree(vma);
-}
-EXPORT_SYMBOL_GPL(vb2_put_vma);
-
-/**
- * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
- * @vaddr:	starting virtual address of the area to be verified
- * @size:	size of the area
- * @res_paddr:	will return physical address for the given vaddr
- * @res_vma:	will return locked copy of struct vm_area for the given area
- *
- * This function will go through memory area of size @size mapped at @vaddr and
- * verify that the underlying physical pages are contiguous. If they are
- * contiguous the virtual memory area is locked and a @res_vma is filled with
- * the copy and @res_pa set to the physical address of the buffer.
- *
- * Returns 0 on success.
- */
-int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
-			   struct vm_area_struct **res_vma, dma_addr_t *res_pa)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long offset, start, end;
-	unsigned long this_pfn, prev_pfn;
-	dma_addr_t pa = 0;
-
-	start = vaddr;
-	offset = start & ~PAGE_MASK;
-	end = start + size;
-
-	vma = find_vma(mm, start);
-
-	if (vma == NULL || vma->vm_end < end)
-		return -EFAULT;
-
-	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
-		int ret = follow_pfn(vma, start, &this_pfn);
-		if (ret)
-			return ret;
-
-		if (prev_pfn == 0)
-			pa = this_pfn << PAGE_SHIFT;
-		else if (this_pfn != prev_pfn + 1)
-			return -EFAULT;
-
-		prev_pfn = this_pfn;
-	}
-
-	/*
-	 * Memory is contiguous, lock vma and return to the caller
-	 */
-	*res_vma = vb2_get_vma(vma);
-	if (*res_vma == NULL)
-		return -ENOMEM;
-
-	*res_pa = pa + offset;
-	return 0;
+	first = start >> PAGE_SHIFT;
+	last = (start + length - 1) >> PAGE_SHIFT;
+	nr = last - first + 1;
+	vec = frame_vector_create(nr);
+	if (!vec)
+		return ERR_PTR(-ENOMEM);
+	ret = get_vaddr_frames(start, nr, write, 1, vec);
+	if (ret < 0)
+		goto out_destroy;
+	/* We accept only complete set of PFNs */
+	if (ret != nr) {
+		ret = -EFAULT;
+		goto out_release;
+	}
+	return vec;
+out_release:
+	put_vaddr_frames(vec);
+out_destroy:
+	frame_vector_destroy(vec);
+	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
+EXPORT_SYMBOL(vb2_create_framevec);
+
+/**
+ * vb2_destroy_framevec() - release vector of mapped pfns
+ * @vec:	vector of pfns / pages to release
+ *
+ * This releases references to all pages in the vector @vec (if corresponding
+ * pfns are backed by pages) and frees the passed vector.
+ */
+void vb2_destroy_framevec(struct frame_vector *vec)
+{
+	put_vaddr_frames(vec);
+	frame_vector_destroy(vec);
+}
+EXPORT_SYMBOL(vb2_destroy_framevec);
 
 /**
  * vb2_common_vm_open() - increase refcount of the vma
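Taken together, vb2_create_framevec() and vb2_destroy_framevec() replace the old vma-copying helpers as the entire USERPTR acquire/release surface of a vb2 memops backend. A minimal sketch of a backend built on them, assuming a hypothetical driver-private struct demo_buf (not code from the patch):

#include <linux/err.h>
#include <linux/dma-direction.h>
#include <media/videobuf2-memops.h>

struct demo_buf {
	struct frame_vector *vec;
};

static int demo_get_userptr(struct demo_buf *buf, unsigned long vaddr,
			    unsigned long size, enum dma_data_direction dir)
{
	struct frame_vector *vec;

	/* pin the user range; write = true when the device writes to it */
	vec = vb2_create_framevec(vaddr, size, dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec))
		return PTR_ERR(vec);
	buf->vec = vec;
	return 0;
}

static void demo_put_userptr(struct demo_buf *buf)
{
	/* drops page references (if any were taken) and frees the vector */
	vb2_destroy_framevec(buf->vec);
	buf->vec = NULL;
}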
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -23,11 +23,9 @@
 
 struct vb2_vmalloc_buf {
 	void				*vaddr;
-	struct page			**pages;
-	struct vm_area_struct		*vma;
+	struct frame_vector		*vec;
 	enum dma_data_direction		dma_dir;
 	unsigned long			size;
-	unsigned int			n_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
 	struct dma_buf			*dbuf;
@@ -76,10 +74,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 				     enum dma_data_direction dma_dir)
 {
 	struct vb2_vmalloc_buf *buf;
-	unsigned long first, last;
-	int n_pages, offset;
-	struct vm_area_struct *vma;
-	dma_addr_t physp;
+	struct frame_vector *vec;
+	int n_pages, offset, i;
 
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 	if (!buf)
@@ -88,51 +84,36 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->dma_dir = dma_dir;
 	offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
-
-	vma = find_vma(current->mm, vaddr);
-	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
-		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
-			goto fail_pages_array_alloc;
-		buf->vma = vma;
-		buf->vaddr = (__force void *)ioremap_nocache(physp, size);
-		if (!buf->vaddr)
-			goto fail_pages_array_alloc;
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec))
+		goto fail_pfnvec_create;
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	if (frame_vector_to_pages(vec) < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);
+
+		/*
+		 * We cannot get page pointers for these pfns. Check memory is
+		 * physically contiguous and use direct mapping.
+		 */
+		for (i = 1; i < n_pages; i++)
+			if (nums[i-1] + 1 != nums[i])
+				goto fail_map;
+		buf->vaddr = (__force void *)
+				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
 	} else {
-		first = vaddr >> PAGE_SHIFT;
-		last = (vaddr + size - 1) >> PAGE_SHIFT;
-		buf->n_pages = last - first + 1;
-		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
-				     GFP_KERNEL);
-		if (!buf->pages)
-			goto fail_pages_array_alloc;
-
-		/* current->mm->mmap_sem is taken by videobuf2 core */
-		n_pages = get_user_pages(current, current->mm,
-					 vaddr & PAGE_MASK, buf->n_pages,
-					 dma_dir == DMA_FROM_DEVICE,
-					 1, /* force */
-					 buf->pages, NULL);
-		if (n_pages != buf->n_pages)
-			goto fail_get_user_pages;
-
-		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
+		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
 					PAGE_KERNEL);
-		if (!buf->vaddr)
-			goto fail_get_user_pages;
 	}
 
+	if (!buf->vaddr)
+		goto fail_map;
 	buf->vaddr += offset;
 	return buf;
 
-fail_get_user_pages:
-	pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages,
-		 buf->n_pages);
-	while (--n_pages >= 0)
-		put_page(buf->pages[n_pages]);
-	kfree(buf->pages);
-
-fail_pages_array_alloc:
+fail_map:
+	vb2_destroy_framevec(vec);
+fail_pfnvec_create:
 	kfree(buf);
 
 	return NULL;
@@ -143,20 +124,21 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
 	struct vb2_vmalloc_buf *buf = buf_priv;
 	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
 	unsigned int i;
+	struct page **pages;
+	unsigned int n_pages;
 
-	if (buf->pages) {
+	if (!buf->vec->is_pfns) {
+		n_pages = frame_vector_count(buf->vec);
+		pages = frame_vector_pages(buf->vec);
 		if (vaddr)
-			vm_unmap_ram((void *)vaddr, buf->n_pages);
-		for (i = 0; i < buf->n_pages; ++i) {
-			if (buf->dma_dir == DMA_FROM_DEVICE)
-				set_page_dirty_lock(buf->pages[i]);
-			put_page(buf->pages[i]);
-		}
-		kfree(buf->pages);
+			vm_unmap_ram((void *)vaddr, n_pages);
+		if (buf->dma_dir == DMA_FROM_DEVICE)
+			for (i = 0; i < n_pages; i++)
+				set_page_dirty_lock(pages[i]);
 	} else {
-		vb2_put_vma(buf->vma);
 		iounmap((__force void __iomem *)buf->vaddr);
 	}
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
 
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -20,6 +20,7 @@
 #include <linux/shrinker.h>
 #include <linux/resource.h>
 #include <linux/page_ext.h>
+#include <linux/err.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -1214,6 +1215,49 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 		    int write, int force, struct page **pages);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
+
+/* Container for pinned pfns / pages */
+struct frame_vector {
+	unsigned int nr_allocated;	/* Number of frames we have space for */
+	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
+	bool got_ref;		/* Did we pin pages by getting page ref? */
+	bool is_pfns;		/* Does array contain pages or pfns? */
+	void *ptrs[0];		/* Array of pinned pfns / pages. Use
+				 * pfns_vector_pages() or pfns_vector_pfns()
+				 * for access */
+};
+
+struct frame_vector *frame_vector_create(unsigned int nr_frames);
+void frame_vector_destroy(struct frame_vector *vec);
+int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
+		     bool write, bool force, struct frame_vector *vec);
+void put_vaddr_frames(struct frame_vector *vec);
+int frame_vector_to_pages(struct frame_vector *vec);
+void frame_vector_to_pfns(struct frame_vector *vec);
+
+static inline unsigned int frame_vector_count(struct frame_vector *vec)
+{
+	return vec->nr_frames;
+}
+
+static inline struct page **frame_vector_pages(struct frame_vector *vec)
+{
+	if (vec->is_pfns) {
+		int err = frame_vector_to_pages(vec);
+
+		if (err)
+			return ERR_PTR(err);
+	}
+	return (struct page **)(vec->ptrs);
+}
+
+static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
+{
+	if (!vec->is_pfns)
+		frame_vector_to_pfns(vec);
+	return (unsigned long *)(vec->ptrs);
+}
+
 struct kvec;
 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
 		     struct page **pages);
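A frame_vector over a VM_IO/VM_PFNMAP mapping carries raw pfns rather than page pointers, so backends that need a single DMA base address first check that the pfns are contiguous, as the converted vb2-dma-contig and vb2-vmalloc code above does. A sketch of that test; demo_vec_is_contiguous() is hypothetical:

#include <linux/mm.h>

/* Return true if the pinned frames form one physically contiguous range. */
static bool demo_vec_is_contiguous(struct frame_vector *vec)
{
	unsigned long *nums = frame_vector_pfns(vec);
	unsigned int i;

	for (i = 1; i < frame_vector_count(vec); i++)
		if (nums[i - 1] + 1 != nums[i])
			return false;
	return true;	/* nums[0] << PAGE_SHIFT is the physical base */
}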
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -15,6 +15,7 @@
 #define _MEDIA_VIDEOBUF2_MEMOPS_H
 
 #include <media/videobuf2-core.h>
+#include <linux/mm.h>
 
 /**
  * struct vb2_vmarea_handler - common vma refcount tracking handler
@@ -31,11 +32,9 @@ struct vb2_vmarea_handler {
 
 extern const struct vm_operations_struct vb2_common_vm_ops;
 
-int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
-			   struct vm_area_struct **res_vma, dma_addr_t *res_pa);
-
-struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma);
-void vb2_put_vma(struct vm_area_struct *vma);
+struct frame_vector *vb2_create_framevec(unsigned long start,
+					 unsigned long length,
+					 bool write);
+void vb2_destroy_framevec(struct frame_vector *vec);
 
 #endif
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -677,3 +677,6 @@ config ZONE_DEVICE
 	  mapping in an O_DIRECT operation, among other things.
 
 	  If FS_DAX is enabled, then say Y.
+
+config FRAME_VECTOR
+	bool
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -80,3 +80,4 @@ obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
 obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
 obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
+obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
--- /dev/null
+++ b/mm/frame_vector.c
@@ -0,0 +1,230 @@
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+
+/*
+ * get_vaddr_frames() - map virtual addresses to pfns
+ * @start:	starting user address
+ * @nr_frames:	number of pages / pfns from start to map
+ * @write:	whether pages will be written to by the caller
+ * @force:	whether to force write access even if user mapping is
+ *		readonly. See description of the same argument of
+ *		get_user_pages().
+ * @vec:	structure which receives pages / pfns of the addresses mapped.
+ *		It should have space for at least nr_frames entries.
+ *
+ * This function maps virtual addresses from @start and fills @vec structure
+ * with page frame numbers or page pointers to corresponding pages (choice
+ * depends on the type of the vma underlying the virtual address). If @start
+ * belongs to a normal vma, the function grabs reference to each of the pages
+ * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't
+ * touch page structures and the caller must make sure pfns aren't reused for
+ * anything else while he is using them.
+ *
+ * The function returns number of pages mapped which may be less than
+ * @nr_frames. In particular we stop mapping if there are more vmas of
+ * different type underlying the specified range of virtual addresses.
+ * When the function isn't able to map a single page, it returns error.
+ *
+ * This function takes care of grabbing mmap_sem as necessary.
+ */
+int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
+		     bool write, bool force, struct frame_vector *vec)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int ret = 0;
+	int err;
+	int locked;
+
+	if (nr_frames == 0)
+		return 0;
+
+	if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
+		nr_frames = vec->nr_allocated;
+
+	down_read(&mm->mmap_sem);
+	locked = 1;
+	vma = find_vma_intersection(mm, start, start + 1);
+	if (!vma) {
+		ret = -EFAULT;
+		goto out;
+	}
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
+		vec->got_ref = true;
+		vec->is_pfns = false;
+		ret = get_user_pages_locked(current, mm, start, nr_frames,
+			write, force, (struct page **)(vec->ptrs), &locked);
+		goto out;
+	}
+
+	vec->got_ref = false;
+	vec->is_pfns = true;
+	do {
+		unsigned long *nums = frame_vector_pfns(vec);
+
+		while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
+			err = follow_pfn(vma, start, &nums[ret]);
+			if (err) {
+				if (ret == 0)
+					ret = err;
+				goto out;
+			}
+			start += PAGE_SIZE;
+			ret++;
+		}
+		/*
+		 * We stop if we have enough pages or if VMA doesn't completely
+		 * cover the tail page.
+		 */
+		if (ret >= nr_frames || start < vma->vm_end)
+			break;
+		vma = find_vma_intersection(mm, start, start + 1);
+	} while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
+out:
+	if (locked)
+		up_read(&mm->mmap_sem);
+	if (!ret)
+		ret = -EFAULT;
+	if (ret > 0)
+		vec->nr_frames = ret;
+	return ret;
+}
+EXPORT_SYMBOL(get_vaddr_frames);
+
+/**
+ * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired
+ *			them
+ * @vec:	frame vector to put
+ *
+ * Drop references to pages if get_vaddr_frames() acquired them. We also
+ * invalidate the frame vector so that it is prepared for the next call into
+ * get_vaddr_frames().
+ */
+void put_vaddr_frames(struct frame_vector *vec)
+{
+	int i;
+	struct page **pages;
+
+	if (!vec->got_ref)
+		goto out;
+	pages = frame_vector_pages(vec);
+	/*
+	 * frame_vector_pages() might needed to do a conversion when
+	 * get_vaddr_frames() got pages but vec was later converted to pfns.
+	 * But it shouldn't really fail to convert pfns back...
+	 */
+	if (WARN_ON(IS_ERR(pages)))
+		goto out;
+	for (i = 0; i < vec->nr_frames; i++)
+		put_page(pages[i]);
+	vec->got_ref = false;
+out:
+	vec->nr_frames = 0;
+}
+EXPORT_SYMBOL(put_vaddr_frames);
+
+/**
+ * frame_vector_to_pages - convert frame vector to contain page pointers
+ * @vec:	frame vector to convert
+ *
+ * Convert @vec to contain array of page pointers. If the conversion is
+ * successful, return 0. Otherwise return an error. Note that we do not grab
+ * page references for the page structures.
+ */
+int frame_vector_to_pages(struct frame_vector *vec)
+{
+	int i;
+	unsigned long *nums;
+	struct page **pages;
+
+	if (!vec->is_pfns)
+		return 0;
+	nums = frame_vector_pfns(vec);
+	for (i = 0; i < vec->nr_frames; i++)
+		if (!pfn_valid(nums[i]))
+			return -EINVAL;
+	pages = (struct page **)nums;
+	for (i = 0; i < vec->nr_frames; i++)
+		pages[i] = pfn_to_page(nums[i]);
+	vec->is_pfns = false;
+	return 0;
+}
+EXPORT_SYMBOL(frame_vector_to_pages);
+
+/**
+ * frame_vector_to_pfns - convert frame vector to contain pfns
+ * @vec:	frame vector to convert
+ *
+ * Convert @vec to contain array of pfns.
+ */
+void frame_vector_to_pfns(struct frame_vector *vec)
+{
+	int i;
+	unsigned long *nums;
+	struct page **pages;
+
+	if (vec->is_pfns)
+		return;
+	pages = (struct page **)(vec->ptrs);
+	nums = (unsigned long *)pages;
+	for (i = 0; i < vec->nr_frames; i++)
+		nums[i] = page_to_pfn(pages[i]);
+	vec->is_pfns = true;
+}
+EXPORT_SYMBOL(frame_vector_to_pfns);
+
+/**
+ * frame_vector_create() - allocate & initialize structure for pinned pfns
+ * @nr_frames:	number of pfns slots we should reserve
+ *
+ * Allocate and initialize struct pinned_pfns to be able to hold @nr_pfns
+ * pfns.
+ */
+struct frame_vector *frame_vector_create(unsigned int nr_frames)
+{
+	struct frame_vector *vec;
+	int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames;
+
+	if (WARN_ON_ONCE(nr_frames == 0))
+		return NULL;
+	/*
+	 * This is absurdly high. It's here just to avoid strange effects when
+	 * arithmetics overflows.
+	 */
+	if (WARN_ON_ONCE(nr_frames > INT_MAX / sizeof(void *) / 2))
+		return NULL;
+	/*
+	 * Avoid higher order allocations, use vmalloc instead. It should
+	 * be rare anyway.
+	 */
+	if (size <= PAGE_SIZE)
+		vec = kmalloc(size, GFP_KERNEL);
+	else
+		vec = vmalloc(size);
+	if (!vec)
+		return NULL;
+	vec->nr_allocated = nr_frames;
+	vec->nr_frames = 0;
+	return vec;
+}
+EXPORT_SYMBOL(frame_vector_create);
+
+/**
+ * frame_vector_destroy() - free memory allocated to carry frame vector
+ * @vec:	Frame vector to free
+ *
+ * Free structure allocated by frame_vector_create() to carry frames.
+ */
+void frame_vector_destroy(struct frame_vector *vec)
+{
+	/* Make sure put_vaddr_frames() got called properly... */
+	VM_BUG_ON(vec->nr_frames > 0);
+	kvfree(vec);
+}
+EXPORT_SYMBOL(frame_vector_destroy);
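Note the return convention of get_vaddr_frames(): a positive short count is not an error code but means mapping stopped at a vma of a different type, and every converted caller in this merge turns an incomplete pin into a failure. A hypothetical wrapper making that convention explicit (compare vb2_create_framevec() above):

#include <linux/mm.h>

/* Pin exactly nr frames starting at start, or fail cleanly. */
static int demo_pin_exact(unsigned long start, unsigned int nr,
			  struct frame_vector *vec)
{
	int ret = get_vaddr_frames(start, nr, true, false, vec);

	if (ret < 0)
		return ret;		/* nothing could be mapped at all */
	if (ret != nr) {		/* stopped at a vma type change */
		put_vaddr_frames(vec);
		return -EFAULT;
	}
	return 0;
}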