common: dma-mapping: add support for generic dma_mmap_* calls
Commit 9adc5374 ('common: dma-mapping: introduce mmap method') added a
generic method for implementing an mmap user call to the dma_map_ops
structure.

This patch converts the ARM and PowerPC architectures (the only providers
of dma_mmap_coherent/dma_mmap_writecombine calls) to use this generic
dma_map_ops-based call and adds generic, cross-architecture definitions
for the dma_mmap_attrs, dma_mmap_coherent and dma_mmap_writecombine
functions.
A generic, virt_to_page-based fallback implementation is provided for
architectures which do not supply their own mmap method.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
Parent: 9fa8af91f0
Commit: 64ccc9c033
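For reference, a minimal driver-side sketch of the now-generic call
(hypothetical names: my_dev, my_mmap and the buffer fields are
illustrative, not part of this patch):

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct my_dev {
	struct device	*dev;		/* device the buffer was allocated for */
	void		*buf_cpu;	/* kernel address from dma_alloc_coherent() */
	dma_addr_t	buf_dma;	/* matching device (DMA) address */
	size_t		buf_size;	/* size passed to dma_alloc_coherent() */
};

/* Hypothetical file_operations.mmap handler: with this patch the same
 * dma_mmap_coherent() call works on every dma_map_ops architecture,
 * dispatching to ops->mmap or to the dma_common_mmap() fallback. */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *priv = file->private_data;

	return dma_mmap_coherent(priv->dev, vma, priv->buf_cpu,
				 priv->buf_dma, priv->buf_size);
}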
arch/arm/include/asm/dma-mapping.h

@@ -186,17 +186,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		    struct dma_attrs *attrs);
 
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-
-static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
-				 void *cpu_addr, dma_addr_t dma_addr,
-				 size_t size, struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
 static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
 					   dma_addr_t *dma_handle, gfp_t flag)
 {
@@ -213,14 +202,6 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
 	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }
 
-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-					void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-	DEFINE_DMA_ATTRS(attrs);
-	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
 /*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
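With the ARM-private definitions removed, dma_mmap_coherent() and
dma_mmap_writecombine() come from the generic header extended at the end
of this patch; the resulting dispatch, sketched as a call path rather than
literal code:

/*
 * dma_mmap_coherent(dev, vma, cpu_addr, handle, size)
 *   -> dma_mmap_attrs(dev, vma, cpu_addr, handle, size, NULL)
 *        -> ops->mmap(...)        if the architecture/IOMMU provides one
 *                                 (arm_dma_mmap on ARM)
 *        -> dma_common_mmap(...)  otherwise: the generic virt_to_page-based
 *                                 fallback added in drivers/base/dma-mapping.c
 */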
arch/powerpc/include/asm/dma-mapping.h

@@ -27,7 +27,10 @@ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 extern void dma_direct_free_coherent(struct device *dev, size_t size,
 				     void *vaddr, dma_addr_t dma_handle,
 				     struct dma_attrs *attrs);
+extern int dma_direct_mmap_coherent(struct device *dev,
+				    struct vm_area_struct *vma,
+				    void *cpu_addr, dma_addr_t handle,
+				    size_t size, struct dma_attrs *attrs);
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
@@ -207,11 +210,8 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
-		void *, dma_addr_t, size_t);
 #define ARCH_HAS_DMA_MMAP_COHERENT
 
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
arch/powerpc/kernel/dma-iommu.c

@@ -109,6 +109,7 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
 struct dma_map_ops dma_iommu_ops = {
 	.alloc			= dma_iommu_alloc_coherent,
 	.free			= dma_iommu_free_coherent,
+	.mmap			= dma_direct_mmap_coherent,
 	.map_sg			= dma_iommu_map_sg,
 	.unmap_sg		= dma_iommu_unmap_sg,
 	.dma_supported		= dma_iommu_dma_supported,
arch/powerpc/kernel/dma-swiotlb.c

@@ -49,6 +49,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
 struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = dma_direct_alloc_coherent,
 	.free = dma_direct_free_coherent,
+	.mmap = dma_direct_mmap_coherent,
 	.map_sg = swiotlb_map_sg_attrs,
 	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.dma_supported = swiotlb_dma_supported,
arch/powerpc/kernel/dma.c

@@ -65,6 +65,24 @@ void dma_direct_free_coherent(struct device *dev, size_t size,
 #endif
 }
 
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+			     void *cpu_addr, dma_addr_t handle, size_t size,
+			     struct dma_attrs *attrs)
+{
+	unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+	pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+	return remap_pfn_range(vma, vma->vm_start,
+			       pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 			     int nents, enum dma_data_direction direction,
 			     struct dma_attrs *attrs)
@@ -154,6 +172,7 @@ static inline void dma_direct_sync_single(struct device *dev,
 struct dma_map_ops dma_direct_ops = {
 	.alloc				= dma_direct_alloc_coherent,
 	.free				= dma_direct_free_coherent,
+	.mmap				= dma_direct_mmap_coherent,
 	.map_sg				= dma_direct_map_sg,
 	.unmap_sg			= dma_direct_unmap_sg,
 	.dma_supported			= dma_direct_dma_supported,
@@ -211,20 +230,3 @@ static int __init dma_init(void)
 }
 fs_initcall(dma_init);
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size)
-{
-	unsigned long pfn;
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
-#else
-	pfn = page_to_pfn(virt_to_page(cpu_addr));
-#endif
-	return remap_pfn_range(vma, vma->vm_start,
-			       pfn + vma->vm_pgoff,
-			       vma->vm_end - vma->vm_start,
-			       vma->vm_page_prot);
-}
-EXPORT_SYMBOL_GPL(dma_mmap_coherent);
arch/powerpc/kernel/vio.c

@@ -613,6 +613,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
 struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc             = vio_dma_iommu_alloc_coherent,
 	.free              = vio_dma_iommu_free_coherent,
+	.mmap              = dma_direct_mmap_coherent,
 	.map_sg            = vio_dma_iommu_map_sg,
 	.unmap_sg          = vio_dma_iommu_unmap_sg,
 	.map_page          = vio_dma_iommu_map_page,
drivers/base/dma-mapping.c

@@ -10,6 +10,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
+#include <asm-generic/dma-coherent.h>
 
 /*
  * Managed DMA API
@@ -218,3 +219,33 @@ void dmam_release_declared_memory(struct device *dev)
 EXPORT_SYMBOL(dmam_release_declared_memory);
 
 #endif
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	int ret = -ENXIO;
+#ifdef CONFIG_MMU
+	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+	unsigned long off = vma->vm_pgoff;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+#endif	/* CONFIG_MMU */
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_common_mmap);
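The off/count check above allows a user mapping of any page-aligned
sub-range of the allocation and rejects anything extending past it. A
worked example from user space, assuming a hypothetical /dev/mydma node
whose driver forwards mmap to dma_mmap_coherent(), a 16 KiB coherent
buffer and 4 KiB pages:

/* Maps pages 1..3 of the 16 KiB buffer: off = 1, count = 4, so a
 * user_count of up to 3 pages passes the bounds check; requesting
 * 4 pages at this offset would fail it and mmap() would return -ENXIO. */
#include <fcntl.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/dev/mydma", O_RDWR);
	void *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, 1 * 4096);	/* file offset selects vm_pgoff = 1 */
	return p == MAP_FAILED;
}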
include/asm-generic/dma-coherent.h

@@ -29,6 +29,7 @@ dma_mark_declared_memory_occupied(struct device *dev,
 #else
 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
 #define dma_release_from_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
 #endif
 
 #endif
include/asm-generic/dma-mapping-common.h

@@ -176,4 +176,41 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
 
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space.  The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	if (ops->mmap)
+		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+					void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
 #endif
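One behavioural note follows from the new generic code: dma_common_mmap()
takes no attrs argument, so when a device has no ops->mmap the
DMA_ATTR_WRITE_COMBINE hint from dma_mmap_writecombine() is silently
dropped and the fallback maps the buffer pgprot_noncached() instead. A
short sketch of the attrs-based call that dma_mmap_writecombine() expands
to (my_mmap_wc is a hypothetical helper, not part of the patch):

/* Hypothetical helper: request a write-combined user mapping. Honoured
 * only where ops->mmap exists; the dma_common_mmap() fallback ignores
 * attrs and maps the range uncached. */
static int my_mmap_wc(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t handle, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, handle, size, &attrs);
}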