media: atomisp: use pin_user_pages() for memory allocation
Instead of using a hacked version of an old copy of get_user_pages(), use pin_user_pages().

Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
Parent
19ae08554f
Commit
14a638ab96
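For reference, the pin/unpin pairing this patch adopts looks roughly like the sketch below. It is a minimal illustration rather than driver code: the demo_* helper names are made up, the caller is assumed to hold the task's mmap_lock for read, and the five-argument pin_user_pages() signature (with a NULL vmas parameter) matches the kernel this patch was written against.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical helper: pin pgnr user pages starting at userptr. */
static struct page **demo_pin_user_buffer(unsigned long userptr, int pgnr)
{
	struct page **pages;
	int page_nr;

	pages = kmalloc_array(pgnr, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	/*
	 * FOLL_LONGTERM: the pages stay pinned for an unbounded time
	 * (they serve as DMA targets); FOLL_WRITE: the device writes
	 * into them.
	 */
	page_nr = pin_user_pages(userptr, pgnr,
				 FOLL_LONGTERM | FOLL_WRITE, pages, NULL);
	if (page_nr != pgnr) {
		/* A partial pin must be undone before bailing out. */
		if (page_nr > 0)
			unpin_user_pages(pages, page_nr);
		kfree(pages);
		return ERR_PTR(-ENOMEM);	/* sketch: error code simplified */
	}

	return pages;
}

/* Hypothetical helper: release what demo_pin_user_buffer() pinned. */
static void demo_unpin_user_buffer(struct page **pages, int pgnr)
{
	/* Pinned pages are dropped with unpin_user_pages(), not put_page(). */
	unpin_user_pages(pages, pgnr);
	kfree(pages);
}

The invariant the patch establishes is visible in free_user_pages() in the diff: pages obtained through pin_user_pages() must be released through unpin_user_pages(), while pages taken on the ordinary user-memory path still go through put_page(). That is why struct hmm_buffer_object now keeps a pages array and consults its mem_type to pick the right release path.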
@@ -130,6 +130,8 @@ struct hmm_buffer_object {
 	struct list_head list;
 	struct kref kref;
 
+	struct page **pages;
+
 	/* mutex protecting this BO */
 	struct mutex mutex;
 	enum hmm_bo_type type;
@@ -854,109 +854,20 @@ static void free_private_pages(struct hmm_buffer_object *bo,
 	kfree(bo->page_obj);
 }
 
-/*
- * Hacked from kernel function __get_user_pages in mm/memory.c
- *
- * Handle buffers allocated by other kernel space driver and mmaped into user
- * space, function Ignore the VM_PFNMAP and VM_IO flag in VMA structure
- *
- * Get physical pages from user space virtual address and update into page list
- */
-static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
-			      unsigned long start, int nr_pages,
-			      unsigned int gup_flags, struct page **pages,
-			      struct vm_area_struct **vmas)
+static void free_user_pages(struct hmm_buffer_object *bo)
 {
-	int i, ret;
-	unsigned long vm_flags;
+	int i;
 
-	if (nr_pages <= 0)
-		return 0;
+	hmm_mem_stat.usr_size -= bo->pgnr;
 
-	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
-
-	/*
-	 * Require read or write permissions.
-	 * If FOLL_FORCE is set, we only require the "MAY" flags.
-	 */
-	vm_flags = (gup_flags & FOLL_WRITE) ?
-		   (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= (gup_flags & FOLL_FORCE) ?
-		   (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-	i = 0;
-
-	do {
-		struct vm_area_struct *vma;
-
-		vma = find_vma(mm, start);
-		if (!vma) {
-			dev_err(atomisp_dev, "find_vma failed\n");
-			return i ? : -EFAULT;
-		}
-
-		if (is_vm_hugetlb_page(vma)) {
-			/*
-			i = follow_hugetlb_page(mm, vma, pages, vmas,
-					&start, &nr_pages, i, gup_flags);
-			*/
-			continue;
-		}
-
-		do {
-			struct page *page;
-			unsigned long pfn;
-
-			/*
-			 * If we have a pending SIGKILL, don't keep faulting
-			 * pages and potentially allocating memory.
-			 */
-			if (unlikely(fatal_signal_pending(current))) {
-				dev_err(atomisp_dev,
-					"fatal_signal_pending in %s\n",
-					__func__);
-				return i ? i : -ERESTARTSYS;
-			}
-
-			ret = follow_pfn(vma, start, &pfn);
-			if (ret) {
-				dev_err(atomisp_dev, "follow_pfn() failed\n");
-				return i ? : -EFAULT;
-			}
-
-			page = pfn_to_page(pfn);
-			if (IS_ERR(page))
-				return i ? i : PTR_ERR(page);
-			if (pages) {
-				pages[i] = page;
-				get_page(page);
-				flush_anon_page(vma, page, start);
-				flush_dcache_page(page);
-			}
-			if (vmas)
-				vmas[i] = vma;
-			i++;
-			start += PAGE_SIZE;
-			nr_pages--;
-		} while (nr_pages && start < vma->vm_end);
-	} while (nr_pages);
-
-	return i;
-}
-
-static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
-			    unsigned long start, int nr_pages, int write, int force,
-			    struct page **pages, struct vm_area_struct **vmas)
-{
-	int flags = FOLL_TOUCH;
-
-	if (pages)
-		flags |= FOLL_GET;
-	if (write)
-		flags |= FOLL_WRITE;
-	if (force)
-		flags |= FOLL_FORCE;
-
-	return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+	if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
+		unpin_user_pages(bo->pages, bo->pgnr);
+	} else {
+		for (i = 0; i < bo->pgnr; i++)
+			put_page(bo->pages[i]);
+	}
+	kfree(bo->pages);
+	kfree(bo->page_obj);
 }
 
 /*
@@ -1000,11 +911,12 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 
 	userptr = untagged_addr(userptr);
 
+	bo->pages = pages;
+
 	if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
-		page_nr = get_pfnmap_pages(current, current->mm,
-					   (unsigned long)userptr,
-					   (int)(bo->pgnr), 1, 0,
-					   pages, NULL);
+		page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr,
+					 FOLL_LONGTERM | FOLL_WRITE,
+					 pages, NULL);
+		bo->mem_type = HMM_BO_MEM_TYPE_PFN;
 	} else {
 		/*Handle frame buffer allocated in user space*/
@@ -1020,6 +932,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 		bo->pgnr,
 		bo->mem_type == HMM_BO_MEM_TYPE_USER ? "user" : "pfn", page_nr);
 
+	hmm_mem_stat.usr_size += bo->pgnr;
+
 	/* can be written by caller, not forced */
 	if (page_nr != bo->pgnr) {
 		dev_err(atomisp_dev,
@@ -1032,31 +946,16 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 		bo->page_obj[i].page = pages[i];
 		bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
 	}
-	hmm_mem_stat.usr_size += bo->pgnr;
-	kfree(pages);
 
 	return 0;
 
 out_of_mem:
-	for (i = 0; i < page_nr; i++)
-		put_page(pages[i]);
-	kfree(pages);
-	kfree(bo->page_obj);
+
+	free_user_pages(bo);
 
 	return -ENOMEM;
 }
 
-static void free_user_pages(struct hmm_buffer_object *bo)
-{
-	int i;
-
-	for (i = 0; i < bo->pgnr; i++)
-		put_page(bo->page_obj[i].page);
-	hmm_mem_stat.usr_size -= bo->pgnr;
-
-	kfree(bo->page_obj);
-}
-
 /*
  * allocate/free physical pages for the bo.
  *