IB/{core,hw,umem}: set FOLL_PIN via pin_user_pages*(), fix up ODP
Convert infiniband to use the new pin_user_pages*() calls.

Also, revert earlier changes to Infiniband ODP that had it using
put_user_page(). ODP is "Case 3" in
Documentation/core-api/pin_user_pages.rst, which is to say, normal
get_user_pages() and put_page() is the API to use there.

The new pin_user_pages*() calls replace corresponding get_user_pages*()
calls, and set the FOLL_PIN flag. The FOLL_PIN flag requires that the
caller must return the pages via put_user_page*() calls, but infiniband
was already doing that as part of an earlier commit.

Link: http://lkml.kernel.org/r/20200107224558.2362728-14-jhubbard@nvidia.com
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Björn Töpel <bjorn.topel@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Leon Romanovsky <leonro@mellanox.com>
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 57459435cf
Commit: dfa0a4fff1
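As a reading aid (not part of the patch), here is a minimal sketch of the two pairing rules the commit message describes. The demo_* helpers and their parameters are invented for illustration, and the sketch assumes the v5.6-era API in which the FOLL_PIN release calls are still named put_user_page()/put_user_pages():

#include <linux/mm.h>

/*
 * Long-term DMA pinning (what the drivers below are converted to):
 * pin_user_pages*() sets FOLL_PIN, so every page it returns must be
 * released with put_user_page*(), never with a bare put_page().
 */
static int demo_pin_for_dma(unsigned long uaddr, struct page **pages, int n)
{
	int ret = pin_user_pages_fast(uaddr, n,
				      FOLL_WRITE | FOLL_LONGTERM, pages);

	if (ret <= 0)
		return ret;
	/* ... map the pages for DMA and let the hardware use them ... */
	put_user_pages(pages, ret);
	return 0;
}

/*
 * "Case 3" (ODP-style hardware that responds to MMU notifier
 * invalidations): plain get_user_pages*() plus an ordinary put_page()
 * reference is enough, which is why this commit moves ODP back to
 * put_page()/release_pages().
 */
static int demo_odp_fault_in(unsigned long uaddr, struct page **pages, int n)
{
	int i, ret = get_user_pages_fast(uaddr, n, FOLL_WRITE, pages);

	if (ret <= 0)
		return ret;
	for (i = 0; i < ret; i++)
		put_page(pages[i]);
	return 0;
}

The actual hunks of the commit follow.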
@@ -257,7 +257,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 	sg = umem->sg_head.sgl;
 
 	while (npages) {
-		ret = get_user_pages_fast(cur_base,
+		ret = pin_user_pages_fast(cur_base,
 					  min_t(unsigned long, npages,
 						PAGE_SIZE /
 						sizeof(struct page *)),
@@ -293,9 +293,8 @@ EXPORT_SYMBOL(ib_umem_odp_release);
  * The function returns -EFAULT if the DMA mapping operation fails. It returns
  * -EAGAIN if a concurrent invalidation prevents us from updating the page.
  *
- * The page is released via put_user_page even if the operation failed. For
- * on-demand pinning, the page is released whenever it isn't stored in the
- * umem.
+ * The page is released via put_page even if the operation failed. For on-demand
+ * pinning, the page is released whenever it isn't stored in the umem.
  */
 static int ib_umem_odp_map_dma_single_page(
 		struct ib_umem_odp *umem_odp,
@@ -348,7 +347,7 @@ static int ib_umem_odp_map_dma_single_page(
 	}
 
 out:
-	put_user_page(page);
+	put_page(page);
 	return ret;
 }
 
@@ -458,7 +457,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 				ret = -EFAULT;
 				break;
 			}
-			put_user_page(local_page_list[j]);
+			put_page(local_page_list[j]);
 			continue;
 		}
 
@@ -485,8 +484,8 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 				 * ib_umem_odp_map_dma_single_page().
 				 */
 				if (npages - (j + 1) > 0)
-					put_user_pages(&local_page_list[j+1],
-						       npages - (j + 1));
+					release_pages(&local_page_list[j+1],
+						      npages - (j + 1));
 				break;
 			}
 		}
@@ -106,7 +106,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
 	int ret;
 	unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
 
-	ret = get_user_pages_fast(vaddr, npages, gup_flags, pages);
+	ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
 	if (ret < 0)
 		return ret;
 
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		goto out;
 	}
 
-	ret = get_user_pages_fast(uaddr & PAGE_MASK, 1,
+	ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1,
 				  FOLL_WRITE | FOLL_LONGTERM, pages);
 	if (ret < 0)
 		goto out;
@@ -108,7 +108,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
 	down_read(&current->mm->mmap_sem);
 	for (got = 0; got < num_pages; got += ret) {
-		ret = get_user_pages(start_page + got * PAGE_SIZE,
+		ret = pin_user_pages(start_page + got * PAGE_SIZE,
 				     num_pages - got,
 				     FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
 				     p + got, NULL);
@@ -670,7 +670,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
 	else
 		j = npages;
 
-	ret = get_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
+	ret = pin_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
 	if (ret != j) {
 		i = 0;
 		j = ret;
@@ -141,7 +141,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	ret = 0;
 
 	while (npages) {
-		ret = get_user_pages(cur_base,
+		ret = pin_user_pages(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof(struct page *)),
 				     gup_flags | FOLL_LONGTERM,
@@ -426,7 +426,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 		while (nents) {
 			struct page **plist = &umem->page_chunk[i].plist[got];
 
-			rv = get_user_pages(first_page_va, nents,
+			rv = pin_user_pages(first_page_va, nents,
 					    foll_flags | FOLL_LONGTERM,
 					    plist, NULL);
 			if (rv < 0)