dma-iommu: remove __iommu_dma_mmap
The function has a single caller, so open code it there and take advantage of the precalculated page count variable.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20201209112019.2625029-1-hch@lst.de
Signed-off-by: Will Deacon <will@kernel.org>
Parent: fefe8527a1
Commit: 71fe89ceb5
drivers/iommu/dma-iommu.c

@@ -752,21 +752,6 @@ out_free_pages:
 	return NULL;
 }
 
-/**
- * __iommu_dma_mmap - Map a buffer into provided user VMA
- * @pages: Array representing buffer from __iommu_dma_alloc()
- * @size: Size of buffer in bytes
- * @vma: VMA describing requested userspace mapping
- *
- * Maps the pages of the buffer in @pages into @vma. The caller is responsible
- * for verifying the correct size and protection of @vma beforehand.
- */
-static int __iommu_dma_mmap(struct page **pages, size_t size,
-		struct vm_area_struct *vma)
-{
-	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
-}
-
 static void iommu_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
@@ -1287,7 +1272,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		struct page **pages = dma_common_find_pages(cpu_addr);
 
 		if (pages)
-			return __iommu_dma_mmap(pages, size, vma);
+			return vm_map_pages(vma, pages, nr_pages);
 		pfn = vmalloc_to_pfn(cpu_addr);
 	} else {
 		pfn = page_to_pfn(virt_to_page(cpu_addr));
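For context, vm_map_pages() maps an entire array of pages into a user VMA, catering for the user's requested vm_pgoff and validating the requested length against the page count, which is why the open-coded call above can simply reuse the nr_pages value that iommu_dma_mmap() has already computed instead of recomputing PAGE_ALIGN(size) >> PAGE_SHIFT. The following is a minimal sketch of the same pattern in a hypothetical character driver's mmap handler; the demo_buf structure and demo_mmap name are illustrative only and are not part of this commit.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state: a buffer backed by an array of pages. */
struct demo_buf {
	struct page **pages;
	size_t size;
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct demo_buf *buf = file->private_data;
	unsigned long nr_pages = PAGE_ALIGN(buf->size) >> PAGE_SHIFT;

	/*
	 * Reject mappings larger than the buffer up front;
	 * vm_map_pages() also checks vm_pgoff and the requested
	 * length against nr_pages internally.
	 */
	if (vma_pages(vma) > nr_pages)
		return -EINVAL;

	/* Map the whole page array into the user VMA in one call. */
	return vm_map_pages(vma, buf->pages, nr_pages);
}

Compared with inserting pages one by one with vm_insert_page(), a single vm_map_pages() call keeps the size and offset validation in one place, which is all the removed __iommu_dma_mmap() wrapper did on top of forwarding its arguments.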