Merge branch 'for-linus-for-3.6-rc1' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull DMA-mapping updates from Marek Szyprowski:
 "Those patches are a continuation of my earlier work.  They contain
  extensions to the DMA-mapping framework to remove limitations of the
  current ARM implementation (like the limited total size of DMA
  coherent/write-combine buffers), improve performance of buffer sharing
  between devices (attributes to skip CPU cache operations or creation
  of an additional kernel mapping for some specific use cases), as well
  as some unification of the common code for the dma_mmap_attrs() and
  dma_mmap_coherent() functions.  All extensions have been implemented
  and tested for the ARM architecture."

* 'for-linus-for-3.6-rc1' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  ARM: dma-mapping: add support for DMA_ATTR_SKIP_CPU_SYNC attribute
  common: DMA-mapping: add DMA_ATTR_SKIP_CPU_SYNC attribute
  ARM: dma-mapping: add support for dma_get_sgtable()
  common: dma-mapping: introduce dma_get_sgtable() function
  ARM: dma-mapping: add support for DMA_ATTR_NO_KERNEL_MAPPING attribute
  common: DMA-mapping: add DMA_ATTR_NO_KERNEL_MAPPING attribute
  common: dma-mapping: add support for generic dma_mmap_* calls
  ARM: dma-mapping: fix error path for memory allocation failure
  ARM: dma-mapping: add more sanity checks in arm_dma_mmap()
  ARM: dma-mapping: remove custom consistent dma region
  mm: vmalloc: use const void * for caller argument
  scatterlist: add sg_alloc_table_from_pages function
commit 6f51f51582
@@ -49,3 +49,45 @@ DMA_ATTR_NON_CONSISTENT lets the platform to choose to return either
 consistent or non-consistent memory as it sees fit.  By using this API,
 you are guaranteeing to the platform that you have all the correct and
 necessary sync points for this memory in the driver.
+
+DMA_ATTR_NO_KERNEL_MAPPING
+--------------------------
+
+DMA_ATTR_NO_KERNEL_MAPPING lets the platform avoid creating a kernel
+virtual mapping for the allocated buffer.  On some architectures creating
+such a mapping is a non-trivial task and consumes very limited resources
+(like kernel virtual address space or dma consistent address space).
+
+Buffers allocated with this attribute can only be passed to user space
+by calling dma_mmap_attrs().  By using this API, you are guaranteeing
+that you won't dereference the pointer returned by dma_alloc_attrs().
+You can treat it as a cookie that must be passed to dma_mmap_attrs()
+and dma_free_attrs().  Make sure that both of these also get this
+attribute set on each call.
+
+Since it is optional for platforms to implement
+DMA_ATTR_NO_KERNEL_MAPPING, those that do not will simply ignore the
+attribute and exhibit default behavior.
+
+DMA_ATTR_SKIP_CPU_SYNC
+----------------------
+
+By default the dma_map_{single,page,sg} family of functions transfers a
+given buffer from CPU domain to device domain.  Some advanced use cases
+might require sharing a buffer between more than one device.  This
+requires having a mapping created separately for each device and is
+usually performed by calling dma_map_{single,page,sg} more than once
+for the given buffer, with a device pointer to each device taking part
+in the buffer sharing.  The first call transfers the buffer from 'CPU'
+domain to 'device' domain, which synchronizes CPU caches for the given
+region (usually it means that the cache has been flushed or invalidated
+depending on the dma direction).  However, subsequent calls to
+dma_map_{single,page,sg}() for other devices will perform exactly the
+same synchronization operation on the CPU cache.  CPU cache
+synchronization might be a time-consuming operation, especially if the
+buffers are large, so it is highly recommended to avoid it if possible.
+
+DMA_ATTR_SKIP_CPU_SYNC allows platform code to skip synchronization of
+the CPU cache for the given buffer, assuming that it has already been
+transferred to the 'device' domain.  This attribute can also be used
+for the dma_unmap_{single,page,sg} family of functions, to force the
+buffer to stay in device domain after releasing a mapping for it.  Use
+this attribute with care!
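(Not part of the patch.)  A minimal sketch of the alloc/mmap/free pairing the new
DMA_ATTR_NO_KERNEL_MAPPING text describes, assuming a hypothetical driver that
already holds a struct device and is servicing a user mmap() request; the helper
name and error handling are illustrative only:

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Allocate a buffer the kernel never maps and hand it to user space. */
	static int export_buffer_to_user(struct device *dev,
					 struct vm_area_struct *vma, size_t size)
	{
		DEFINE_DMA_ATTRS(attrs);
		dma_addr_t handle;
		void *cookie;
		int ret;

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

		/* 'cookie' must never be dereferenced, only passed back below. */
		cookie = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, &attrs);
		if (!cookie)
			return -ENOMEM;

		/* The same attrs are used for mmap and (on error) free. */
		ret = dma_mmap_attrs(dev, vma, cookie, handle, size, &attrs);
		if (ret)
			dma_free_attrs(dev, size, cookie, handle, &attrs);
		return ret;
	}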
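(Not part of the patch.)  A sketch of the buffer-sharing case described for
DMA_ATTR_SKIP_CPU_SYNC, assuming two hypothetical devices that stream from the
same kernel buffer and a platform that honours the attribute; only the first
mapping pays for the CPU cache maintenance:

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	static int map_for_both_devices(struct device *dev_a, struct device *dev_b,
					void *buf, size_t size,
					dma_addr_t *addr_a, dma_addr_t *addr_b)
	{
		DEFINE_DMA_ATTRS(attrs);

		/* First mapping: CPU caches are flushed/invalidated as needed. */
		*addr_a = dma_map_single(dev_a, buf, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev_a, *addr_a))
			return -EIO;

		/* Buffer is already in 'device' domain, skip the redundant sync. */
		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		*addr_b = dma_map_single_attrs(dev_b, buf, size, DMA_TO_DEVICE,
					       &attrs);
		if (dma_mapping_error(dev_b, *addr_b)) {
			dma_unmap_single(dev_a, *addr_a, size, DMA_TO_DEVICE);
			return -EIO;
		}
		return 0;
	}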
@@ -526,7 +526,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 	coherent_pool=nn[KMG]	[ARM,KNL]
 			Sets the size of memory pool for coherent, atomic dma
-			allocations if Contiguous Memory Allocator (CMA) is used.
+			allocations, by default set to 256K.
 
 	code_bytes	[X86] How many bytes of object code to print
 			in an oops report.
@@ -452,6 +452,7 @@ static struct dma_map_ops dmabounce_ops = {
 	.alloc			= arm_dma_alloc,
 	.free			= arm_dma_free,
 	.mmap			= arm_dma_mmap,
+	.get_sgtable		= arm_dma_get_sgtable,
 	.map_page		= dmabounce_map_page,
 	.unmap_page		= dmabounce_unmap_page,
 	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
@@ -186,17 +186,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		    struct dma_attrs *attrs);
 
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-
-static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
-				  void *cpu_addr, dma_addr_t dma_addr,
-				  size_t size, struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
 static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag)
 {

@@ -213,20 +202,12 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
 	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }
 
-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-	DEFINE_DMA_ATTRS(attrs);
-	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
 /*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
  * memory allocator is initialised, i.e. before any core_initcall.
  */
-extern void __init init_consistent_dma_size(unsigned long size);
+static inline void init_consistent_dma_size(unsigned long size) { }
 
 /*
  * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"

@@ -280,6 +261,9 @@ extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
 extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
+extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		struct dma_attrs *attrs);
 
 #endif /* __KERNEL__ */
 #endif
@@ -22,6 +22,7 @@
 #include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/iommu.h>
+#include <linux/io.h>
 #include <linux/vmalloc.h>
 #include <linux/sizes.h>
 

@@ -72,7 +73,7 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent())
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }

@@ -95,7 +96,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent())
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
 				      handle & ~PAGE_MASK, size, dir);
 }

@@ -124,6 +125,7 @@ struct dma_map_ops arm_dma_ops = {
 	.alloc			= arm_dma_alloc,
 	.free			= arm_dma_free,
 	.mmap			= arm_dma_mmap,
+	.get_sgtable		= arm_dma_get_sgtable,
 	.map_page		= arm_dma_map_page,
 	.unmap_page		= arm_dma_unmap_page,
 	.map_sg			= arm_dma_map_sg,
@ -217,115 +219,70 @@ static void __dma_free_buffer(struct page *page, size_t size)
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_MMU
|
#ifdef CONFIG_MMU
|
||||||
|
|
||||||
#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
|
|
||||||
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* These are the page tables (2MB each) covering uncached, DMA consistent allocations
|
|
||||||
*/
|
|
||||||
static pte_t **consistent_pte;
|
|
||||||
|
|
||||||
#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
|
|
||||||
|
|
||||||
static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
|
|
||||||
|
|
||||||
void __init init_consistent_dma_size(unsigned long size)
|
|
||||||
{
|
|
||||||
unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);
|
|
||||||
|
|
||||||
BUG_ON(consistent_pte); /* Check we're called before DMA region init */
|
|
||||||
BUG_ON(base < VMALLOC_END);
|
|
||||||
|
|
||||||
/* Grow region to accommodate specified size */
|
|
||||||
if (base < consistent_base)
|
|
||||||
consistent_base = base;
|
|
||||||
}
|
|
||||||
|
|
||||||
#include "vmregion.h"
|
|
||||||
|
|
||||||
static struct arm_vmregion_head consistent_head = {
|
|
||||||
.vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
|
|
||||||
.vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
|
|
||||||
.vm_end = CONSISTENT_END,
|
|
||||||
};
|
|
||||||
|
|
||||||
#ifdef CONFIG_HUGETLB_PAGE
|
#ifdef CONFIG_HUGETLB_PAGE
|
||||||
#error ARM Coherent DMA allocator does not (yet) support huge TLB
|
#error ARM Coherent DMA allocator does not (yet) support huge TLB
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
|
||||||
* Initialise the consistent memory allocation.
|
|
||||||
*/
|
|
||||||
static int __init consistent_init(void)
|
|
||||||
{
|
|
||||||
int ret = 0;
|
|
||||||
pgd_t *pgd;
|
|
||||||
pud_t *pud;
|
|
||||||
pmd_t *pmd;
|
|
||||||
pte_t *pte;
|
|
||||||
int i = 0;
|
|
||||||
unsigned long base = consistent_base;
|
|
||||||
unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
|
|
||||||
|
|
||||||
if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
|
|
||||||
if (!consistent_pte) {
|
|
||||||
pr_err("%s: no memory\n", __func__);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
|
|
||||||
pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
|
|
||||||
consistent_head.vm_start = base;
|
|
||||||
|
|
||||||
do {
|
|
||||||
pgd = pgd_offset(&init_mm, base);
|
|
||||||
|
|
||||||
pud = pud_alloc(&init_mm, pgd, base);
|
|
||||||
if (!pud) {
|
|
||||||
pr_err("%s: no pud tables\n", __func__);
|
|
||||||
ret = -ENOMEM;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
pmd = pmd_alloc(&init_mm, pud, base);
|
|
||||||
if (!pmd) {
|
|
||||||
pr_err("%s: no pmd tables\n", __func__);
|
|
||||||
ret = -ENOMEM;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
WARN_ON(!pmd_none(*pmd));
|
|
||||||
|
|
||||||
pte = pte_alloc_kernel(pmd, base);
|
|
||||||
if (!pte) {
|
|
||||||
pr_err("%s: no pte tables\n", __func__);
|
|
||||||
ret = -ENOMEM;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
consistent_pte[i++] = pte;
|
|
||||||
base += PMD_SIZE;
|
|
||||||
} while (base < CONSISTENT_END);
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
core_initcall(consistent_init);
|
|
||||||
|
|
||||||
static void *__alloc_from_contiguous(struct device *dev, size_t size,
|
static void *__alloc_from_contiguous(struct device *dev, size_t size,
|
||||||
pgprot_t prot, struct page **ret_page);
|
pgprot_t prot, struct page **ret_page);
|
||||||
|
|
||||||
static struct arm_vmregion_head coherent_head = {
|
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
|
||||||
.vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
|
pgprot_t prot, struct page **ret_page,
|
||||||
.vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
|
const void *caller);
|
||||||
|
|
||||||
|
static void *
|
||||||
|
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
|
||||||
|
const void *caller)
|
||||||
|
{
|
||||||
|
struct vm_struct *area;
|
||||||
|
unsigned long addr;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* DMA allocation can be mapped to user space, so lets
|
||||||
|
* set VM_USERMAP flags too.
|
||||||
|
*/
|
||||||
|
area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
|
||||||
|
caller);
|
||||||
|
if (!area)
|
||||||
|
return NULL;
|
||||||
|
addr = (unsigned long)area->addr;
|
||||||
|
area->phys_addr = __pfn_to_phys(page_to_pfn(page));
|
||||||
|
|
||||||
|
if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
|
||||||
|
vunmap((void *)addr);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
return (void *)addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void __dma_free_remap(void *cpu_addr, size_t size)
|
||||||
|
{
|
||||||
|
unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
|
||||||
|
struct vm_struct *area = find_vm_area(cpu_addr);
|
||||||
|
if (!area || (area->flags & flags) != flags) {
|
||||||
|
WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
unmap_kernel_range((unsigned long)cpu_addr, size);
|
||||||
|
vunmap(cpu_addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct dma_pool {
|
||||||
|
size_t size;
|
||||||
|
spinlock_t lock;
|
||||||
|
unsigned long *bitmap;
|
||||||
|
unsigned long nr_pages;
|
||||||
|
void *vaddr;
|
||||||
|
struct page *page;
|
||||||
};
|
};
|
||||||
|
|
||||||
static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
|
static struct dma_pool atomic_pool = {
|
||||||
|
.size = SZ_256K,
|
||||||
|
};
|
||||||
|
|
||||||
static int __init early_coherent_pool(char *p)
|
static int __init early_coherent_pool(char *p)
|
||||||
{
|
{
|
||||||
coherent_pool_size = memparse(p, &p);
|
atomic_pool.size = memparse(p, &p);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
early_param("coherent_pool", early_coherent_pool);
|
early_param("coherent_pool", early_coherent_pool);
|
||||||
|
@ -333,32 +290,45 @@ early_param("coherent_pool", early_coherent_pool);
|
||||||
/*
|
/*
|
||||||
* Initialise the coherent pool for atomic allocations.
|
* Initialise the coherent pool for atomic allocations.
|
||||||
*/
|
*/
|
||||||
static int __init coherent_init(void)
|
static int __init atomic_pool_init(void)
|
||||||
{
|
{
|
||||||
|
struct dma_pool *pool = &atomic_pool;
|
||||||
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
|
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
|
||||||
size_t size = coherent_pool_size;
|
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
|
||||||
|
unsigned long *bitmap;
|
||||||
struct page *page;
|
struct page *page;
|
||||||
void *ptr;
|
void *ptr;
|
||||||
|
int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
|
||||||
|
|
||||||
if (!IS_ENABLED(CONFIG_CMA))
|
bitmap = kzalloc(bitmap_size, GFP_KERNEL);
|
||||||
return 0;
|
if (!bitmap)
|
||||||
|
goto no_bitmap;
|
||||||
|
|
||||||
ptr = __alloc_from_contiguous(NULL, size, prot, &page);
|
if (IS_ENABLED(CONFIG_CMA))
|
||||||
|
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
|
||||||
|
else
|
||||||
|
ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
|
||||||
|
&page, NULL);
|
||||||
if (ptr) {
|
if (ptr) {
|
||||||
coherent_head.vm_start = (unsigned long) ptr;
|
spin_lock_init(&pool->lock);
|
||||||
coherent_head.vm_end = (unsigned long) ptr + size;
|
pool->vaddr = ptr;
|
||||||
printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
|
pool->page = page;
|
||||||
(unsigned)size / 1024);
|
pool->bitmap = bitmap;
|
||||||
|
pool->nr_pages = nr_pages;
|
||||||
|
pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
|
||||||
|
(unsigned)pool->size / 1024);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
|
kfree(bitmap);
|
||||||
(unsigned)size / 1024);
|
no_bitmap:
|
||||||
|
pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
|
||||||
|
(unsigned)pool->size / 1024);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* CMA is activated by core_initcall, so we must be called after it.
|
* CMA is activated by core_initcall, so we must be called after it.
|
||||||
*/
|
*/
|
||||||
postcore_initcall(coherent_init);
|
postcore_initcall(atomic_pool_init);
|
||||||
|
|
||||||
struct dma_contig_early_reserve {
|
struct dma_contig_early_reserve {
|
||||||
phys_addr_t base;
|
phys_addr_t base;
|
||||||
|
@ -406,112 +376,6 @@ void __init dma_contiguous_remap(void)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *
|
|
||||||
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
|
|
||||||
const void *caller)
|
|
||||||
{
|
|
||||||
struct arm_vmregion *c;
|
|
||||||
size_t align;
|
|
||||||
int bit;
|
|
||||||
|
|
||||||
if (!consistent_pte) {
|
|
||||||
pr_err("%s: not initialised\n", __func__);
|
|
||||||
dump_stack();
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Align the virtual region allocation - maximum alignment is
|
|
||||||
* a section size, minimum is a page size. This helps reduce
|
|
||||||
* fragmentation of the DMA space, and also prevents allocations
|
|
||||||
* smaller than a section from crossing a section boundary.
|
|
||||||
*/
|
|
||||||
bit = fls(size - 1);
|
|
||||||
if (bit > SECTION_SHIFT)
|
|
||||||
bit = SECTION_SHIFT;
|
|
||||||
align = 1 << bit;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Allocate a virtual address in the consistent mapping region.
|
|
||||||
*/
|
|
||||||
c = arm_vmregion_alloc(&consistent_head, align, size,
|
|
||||||
gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
|
|
||||||
if (c) {
|
|
||||||
pte_t *pte;
|
|
||||||
int idx = CONSISTENT_PTE_INDEX(c->vm_start);
|
|
||||||
u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
|
|
||||||
|
|
||||||
pte = consistent_pte[idx] + off;
|
|
||||||
c->priv = page;
|
|
||||||
|
|
||||||
do {
|
|
||||||
BUG_ON(!pte_none(*pte));
|
|
||||||
|
|
||||||
set_pte_ext(pte, mk_pte(page, prot), 0);
|
|
||||||
page++;
|
|
||||||
pte++;
|
|
||||||
off++;
|
|
||||||
if (off >= PTRS_PER_PTE) {
|
|
||||||
off = 0;
|
|
||||||
pte = consistent_pte[++idx];
|
|
||||||
}
|
|
||||||
} while (size -= PAGE_SIZE);
|
|
||||||
|
|
||||||
dsb();
|
|
||||||
|
|
||||||
return (void *)c->vm_start;
|
|
||||||
}
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __dma_free_remap(void *cpu_addr, size_t size)
|
|
||||||
{
|
|
||||||
struct arm_vmregion *c;
|
|
||||||
unsigned long addr;
|
|
||||||
pte_t *ptep;
|
|
||||||
int idx;
|
|
||||||
u32 off;
|
|
||||||
|
|
||||||
c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
|
|
||||||
if (!c) {
|
|
||||||
pr_err("%s: trying to free invalid coherent area: %p\n",
|
|
||||||
__func__, cpu_addr);
|
|
||||||
dump_stack();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if ((c->vm_end - c->vm_start) != size) {
|
|
||||||
pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
|
|
||||||
__func__, c->vm_end - c->vm_start, size);
|
|
||||||
dump_stack();
|
|
||||||
size = c->vm_end - c->vm_start;
|
|
||||||
}
|
|
||||||
|
|
||||||
idx = CONSISTENT_PTE_INDEX(c->vm_start);
|
|
||||||
off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
|
|
||||||
ptep = consistent_pte[idx] + off;
|
|
||||||
addr = c->vm_start;
|
|
||||||
do {
|
|
||||||
pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
|
|
||||||
|
|
||||||
ptep++;
|
|
||||||
addr += PAGE_SIZE;
|
|
||||||
off++;
|
|
||||||
if (off >= PTRS_PER_PTE) {
|
|
||||||
off = 0;
|
|
||||||
ptep = consistent_pte[++idx];
|
|
||||||
}
|
|
||||||
|
|
||||||
if (pte_none(pte) || !pte_present(pte))
|
|
||||||
pr_crit("%s: bad page in kernel page table\n",
|
|
||||||
__func__);
|
|
||||||
} while (size -= PAGE_SIZE);
|
|
||||||
|
|
||||||
flush_tlb_kernel_range(c->vm_start, c->vm_end);
|
|
||||||
|
|
||||||
arm_vmregion_free(&consistent_head, c);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
|
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
|
||||||
void *data)
|
void *data)
|
||||||
{
|
{
|
||||||
|
@ -552,16 +416,17 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
|
||||||
return ptr;
|
return ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *__alloc_from_pool(struct device *dev, size_t size,
|
static void *__alloc_from_pool(size_t size, struct page **ret_page)
|
||||||
struct page **ret_page, const void *caller)
|
|
||||||
{
|
{
|
||||||
struct arm_vmregion *c;
|
struct dma_pool *pool = &atomic_pool;
|
||||||
|
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||||
|
unsigned int pageno;
|
||||||
|
unsigned long flags;
|
||||||
|
void *ptr = NULL;
|
||||||
size_t align;
|
size_t align;
|
||||||
|
|
||||||
if (!coherent_head.vm_start) {
|
if (!pool->vaddr) {
|
||||||
printk(KERN_ERR "%s: coherent pool not initialised!\n",
|
WARN(1, "coherent pool not initialised!\n");
|
||||||
__func__);
|
|
||||||
dump_stack();
|
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -571,35 +436,41 @@ static void *__alloc_from_pool(struct device *dev, size_t size,
|
||||||
* size. This helps reduce fragmentation of the DMA space.
|
* size. This helps reduce fragmentation of the DMA space.
|
||||||
*/
|
*/
|
||||||
align = PAGE_SIZE << get_order(size);
|
align = PAGE_SIZE << get_order(size);
|
||||||
c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
|
|
||||||
if (c) {
|
spin_lock_irqsave(&pool->lock, flags);
|
||||||
void *ptr = (void *)c->vm_start;
|
pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
|
||||||
struct page *page = virt_to_page(ptr);
|
0, count, (1 << align) - 1);
|
||||||
*ret_page = page;
|
if (pageno < pool->nr_pages) {
|
||||||
return ptr;
|
bitmap_set(pool->bitmap, pageno, count);
|
||||||
|
ptr = pool->vaddr + PAGE_SIZE * pageno;
|
||||||
|
*ret_page = pool->page + pageno;
|
||||||
}
|
}
|
||||||
return NULL;
|
spin_unlock_irqrestore(&pool->lock, flags);
|
||||||
|
|
||||||
|
return ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __free_from_pool(void *cpu_addr, size_t size)
|
static int __free_from_pool(void *start, size_t size)
|
||||||
{
|
{
|
||||||
unsigned long start = (unsigned long)cpu_addr;
|
struct dma_pool *pool = &atomic_pool;
|
||||||
unsigned long end = start + size;
|
unsigned long pageno, count;
|
||||||
struct arm_vmregion *c;
|
unsigned long flags;
|
||||||
|
|
||||||
if (start < coherent_head.vm_start || end > coherent_head.vm_end)
|
if (start < pool->vaddr || start > pool->vaddr + pool->size)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
|
if (start + size > pool->vaddr + pool->size) {
|
||||||
|
WARN(1, "freeing wrong coherent size from pool\n");
|
||||||
if ((c->vm_end - c->vm_start) != size) {
|
return 0;
|
||||||
printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
|
|
||||||
__func__, c->vm_end - c->vm_start, size);
|
|
||||||
dump_stack();
|
|
||||||
size = c->vm_end - c->vm_start;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
arm_vmregion_free(&coherent_head, c);
|
pageno = (start - pool->vaddr) >> PAGE_SHIFT;
|
||||||
|
count = size >> PAGE_SHIFT;
|
||||||
|
|
||||||
|
spin_lock_irqsave(&pool->lock, flags);
|
||||||
|
bitmap_clear(pool->bitmap, pageno, count);
|
||||||
|
spin_unlock_irqrestore(&pool->lock, flags);
|
||||||
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -644,7 +515,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 
 #define __get_dma_pgprot(attrs, prot)				__pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
-#define __alloc_from_pool(dev, size, ret_page, c)		NULL
+#define __alloc_from_pool(size, ret_page)			NULL
 #define __alloc_from_contiguous(dev, size, prot, ret)		NULL
 #define __free_from_pool(cpu_addr, size)			0
 #define __free_from_contiguous(dev, page, size)			do { } while (0)

@@ -702,10 +573,10 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	if (arch_is_coherent() || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (gfp & GFP_ATOMIC)
+		addr = __alloc_from_pool(size, &page);
 	else if (!IS_ENABLED(CONFIG_CMA))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
-	else if (gfp & GFP_ATOMIC)
-		addr = __alloc_from_pool(dev, size, &page, caller);
 	else
 		addr = __alloc_from_contiguous(dev, size, prot, &page);
 
@@ -741,16 +612,22 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
 	int ret = -ENXIO;
 #ifdef CONFIG_MMU
+	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long pfn = dma_to_pfn(dev, dma_addr);
+	unsigned long off = vma->vm_pgoff;
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
 
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
-	ret = remap_pfn_range(vma, vma->vm_start,
-			      pfn + vma->vm_pgoff,
-			      vma->vm_end - vma->vm_start,
-			      vma->vm_page_prot);
+	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      vma->vm_end - vma->vm_start,
+				      vma->vm_page_prot);
+	}
 #endif	/* CONFIG_MMU */
 
 	return ret;
@@ -785,6 +662,21 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	}
 }
 
+int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+		 void *cpu_addr, dma_addr_t handle, size_t size,
+		 struct dma_attrs *attrs)
+{
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return 0;
+}
+
 static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
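(Not part of the patch.)  A sketch of how a driver built on top of these ops might
use the new call to describe an existing coherent allocation as a scatter-list,
e.g. to hand it to another device or subsystem; the function name is illustrative:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int describe_coherent_buffer(struct device *dev, void *cpu_addr,
					    dma_addr_t handle, size_t size,
					    struct sg_table *sgt)
	{
		/* Fills 'sgt' with entries covering the coherent buffer. */
		int ret = dma_get_sgtable(dev, sgt, cpu_addr, handle, size);

		if (ret)
			return ret;
		/* sgt->sgl is now valid; release it later with sg_free_table(). */
		return 0;
	}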
@@ -998,9 +890,6 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 
 static int __init dma_debug_do_init(void)
 {
-#ifdef CONFIG_MMU
-	arm_vmregion_create_proc("dma-mappings", &consistent_head);
-#endif
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 	return 0;
 }

@@ -1088,7 +977,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
 
 	return pages;
 error:
-	while (--i)
+	while (i--)
 		if (pages[i])
 			__free_pages(pages[i], 0);
 	if (array_size <= PAGE_SIZE)
@ -1117,61 +1006,32 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
|
||||||
* Create a CPU mapping for a specified pages
|
* Create a CPU mapping for a specified pages
|
||||||
*/
|
*/
|
||||||
static void *
|
static void *
|
||||||
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
|
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
|
||||||
|
const void *caller)
|
||||||
{
|
{
|
||||||
struct arm_vmregion *c;
|
unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||||
size_t align;
|
struct vm_struct *area;
|
||||||
size_t count = size >> PAGE_SHIFT;
|
unsigned long p;
|
||||||
int bit;
|
|
||||||
|
|
||||||
if (!consistent_pte[0]) {
|
area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
|
||||||
pr_err("%s: not initialised\n", __func__);
|
caller);
|
||||||
dump_stack();
|
if (!area)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
area->pages = pages;
|
||||||
|
area->nr_pages = nr_pages;
|
||||||
|
p = (unsigned long)area->addr;
|
||||||
|
|
||||||
|
for (i = 0; i < nr_pages; i++) {
|
||||||
|
phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
|
||||||
|
if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
|
||||||
|
goto err;
|
||||||
|
p += PAGE_SIZE;
|
||||||
}
|
}
|
||||||
|
return area->addr;
|
||||||
/*
|
err:
|
||||||
* Align the virtual region allocation - maximum alignment is
|
unmap_kernel_range((unsigned long)area->addr, size);
|
||||||
* a section size, minimum is a page size. This helps reduce
|
vunmap(area->addr);
|
||||||
* fragmentation of the DMA space, and also prevents allocations
|
|
||||||
* smaller than a section from crossing a section boundary.
|
|
||||||
*/
|
|
||||||
bit = fls(size - 1);
|
|
||||||
if (bit > SECTION_SHIFT)
|
|
||||||
bit = SECTION_SHIFT;
|
|
||||||
align = 1 << bit;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Allocate a virtual address in the consistent mapping region.
|
|
||||||
*/
|
|
||||||
c = arm_vmregion_alloc(&consistent_head, align, size,
|
|
||||||
gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
|
|
||||||
if (c) {
|
|
||||||
pte_t *pte;
|
|
||||||
int idx = CONSISTENT_PTE_INDEX(c->vm_start);
|
|
||||||
int i = 0;
|
|
||||||
u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
|
|
||||||
|
|
||||||
pte = consistent_pte[idx] + off;
|
|
||||||
c->priv = pages;
|
|
||||||
|
|
||||||
do {
|
|
||||||
BUG_ON(!pte_none(*pte));
|
|
||||||
|
|
||||||
set_pte_ext(pte, mk_pte(pages[i], prot), 0);
|
|
||||||
pte++;
|
|
||||||
off++;
|
|
||||||
i++;
|
|
||||||
if (off >= PTRS_PER_PTE) {
|
|
||||||
off = 0;
|
|
||||||
pte = consistent_pte[++idx];
|
|
||||||
}
|
|
||||||
} while (i < count);
|
|
||||||
|
|
||||||
dsb();
|
|
||||||
|
|
||||||
return (void *)c->vm_start;
|
|
||||||
}
|
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1230,6 +1090,19 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
|
||||||
|
{
|
||||||
|
struct vm_struct *area;
|
||||||
|
|
||||||
|
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
|
||||||
|
return cpu_addr;
|
||||||
|
|
||||||
|
area = find_vm_area(cpu_addr);
|
||||||
|
if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
|
||||||
|
return area->pages;
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||||
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
|
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
|
||||||
{
|
{
|
||||||
|
@ -1248,7 +1121,11 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||||
if (*handle == DMA_ERROR_CODE)
|
if (*handle == DMA_ERROR_CODE)
|
||||||
goto err_buffer;
|
goto err_buffer;
|
||||||
|
|
||||||
addr = __iommu_alloc_remap(pages, size, gfp, prot);
|
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
|
||||||
|
return pages;
|
||||||
|
|
||||||
|
addr = __iommu_alloc_remap(pages, size, gfp, prot,
|
||||||
|
__builtin_return_address(0));
|
||||||
if (!addr)
|
if (!addr)
|
||||||
goto err_mapping;
|
goto err_mapping;
|
||||||
|
|
||||||
|
@ -1265,31 +1142,25 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
|
||||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||||
struct dma_attrs *attrs)
|
struct dma_attrs *attrs)
|
||||||
{
|
{
|
||||||
struct arm_vmregion *c;
|
unsigned long uaddr = vma->vm_start;
|
||||||
|
unsigned long usize = vma->vm_end - vma->vm_start;
|
||||||
|
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
|
||||||
|
|
||||||
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
|
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
|
||||||
c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
|
|
||||||
|
|
||||||
if (c) {
|
if (!pages)
|
||||||
struct page **pages = c->priv;
|
return -ENXIO;
|
||||||
|
|
||||||
unsigned long uaddr = vma->vm_start;
|
do {
|
||||||
unsigned long usize = vma->vm_end - vma->vm_start;
|
int ret = vm_insert_page(vma, uaddr, *pages++);
|
||||||
int i = 0;
|
if (ret) {
|
||||||
|
pr_err("Remapping memory failed: %d\n", ret);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
uaddr += PAGE_SIZE;
|
||||||
|
usize -= PAGE_SIZE;
|
||||||
|
} while (usize > 0);
|
||||||
|
|
||||||
do {
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
ret = vm_insert_page(vma, uaddr, pages[i++]);
|
|
||||||
if (ret) {
|
|
||||||
pr_err("Remapping memory, error: %d\n", ret);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
uaddr += PAGE_SIZE;
|
|
||||||
usize -= PAGE_SIZE;
|
|
||||||
} while (usize > 0);
|
|
||||||
}
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1300,16 +1171,35 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
|
||||||
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
||||||
dma_addr_t handle, struct dma_attrs *attrs)
|
dma_addr_t handle, struct dma_attrs *attrs)
|
||||||
{
|
{
|
||||||
struct arm_vmregion *c;
|
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
|
||||||
size = PAGE_ALIGN(size);
|
size = PAGE_ALIGN(size);
|
||||||
|
|
||||||
c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
|
if (!pages) {
|
||||||
if (c) {
|
WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
|
||||||
struct page **pages = c->priv;
|
return;
|
||||||
__dma_free_remap(cpu_addr, size);
|
|
||||||
__iommu_remove_mapping(dev, handle, size);
|
|
||||||
__iommu_free_buffer(dev, pages, size);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
|
||||||
|
unmap_kernel_range((unsigned long)cpu_addr, size);
|
||||||
|
vunmap(cpu_addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
__iommu_remove_mapping(dev, handle, size);
|
||||||
|
__iommu_free_buffer(dev, pages, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
|
||||||
|
void *cpu_addr, dma_addr_t dma_addr,
|
||||||
|
size_t size, struct dma_attrs *attrs)
|
||||||
|
{
|
||||||
|
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||||
|
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
|
||||||
|
|
||||||
|
if (!pages)
|
||||||
|
return -ENXIO;
|
||||||
|
|
||||||
|
return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
|
||||||
|
GFP_KERNEL);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1317,7 +1207,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
||||||
*/
|
*/
|
||||||
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
||||||
size_t size, dma_addr_t *handle,
|
size_t size, dma_addr_t *handle,
|
||||||
enum dma_data_direction dir)
|
enum dma_data_direction dir, struct dma_attrs *attrs)
|
||||||
{
|
{
|
||||||
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
|
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
|
||||||
dma_addr_t iova, iova_base;
|
dma_addr_t iova, iova_base;
|
||||||
|
@ -1336,7 +1226,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
||||||
phys_addr_t phys = page_to_phys(sg_page(s));
|
phys_addr_t phys = page_to_phys(sg_page(s));
|
||||||
unsigned int len = PAGE_ALIGN(s->offset + s->length);
|
unsigned int len = PAGE_ALIGN(s->offset + s->length);
|
||||||
|
|
||||||
if (!arch_is_coherent())
|
if (!arch_is_coherent() &&
|
||||||
|
!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
|
||||||
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
|
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
|
||||||
|
|
||||||
ret = iommu_map(mapping->domain, iova, phys, len, 0);
|
ret = iommu_map(mapping->domain, iova, phys, len, 0);
|
||||||
|
@ -1383,7 +1274,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||||
|
|
||||||
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
|
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
|
||||||
if (__map_sg_chunk(dev, start, size, &dma->dma_address,
|
if (__map_sg_chunk(dev, start, size, &dma->dma_address,
|
||||||
dir) < 0)
|
dir, attrs) < 0)
|
||||||
goto bad_mapping;
|
goto bad_mapping;
|
||||||
|
|
||||||
dma->dma_address += offset;
|
dma->dma_address += offset;
|
||||||
|
@ -1396,7 +1287,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||||
}
|
}
|
||||||
size += s->length;
|
size += s->length;
|
||||||
}
|
}
|
||||||
if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
|
if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
|
||||||
goto bad_mapping;
|
goto bad_mapping;
|
||||||
|
|
||||||
dma->dma_address += offset;
|
dma->dma_address += offset;
|
||||||
|
@ -1430,7 +1321,8 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||||
if (sg_dma_len(s))
|
if (sg_dma_len(s))
|
||||||
__iommu_remove_mapping(dev, sg_dma_address(s),
|
__iommu_remove_mapping(dev, sg_dma_address(s),
|
||||||
sg_dma_len(s));
|
sg_dma_len(s));
|
||||||
if (!arch_is_coherent())
|
if (!arch_is_coherent() &&
|
||||||
|
!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
|
||||||
__dma_page_dev_to_cpu(sg_page(s), s->offset,
|
__dma_page_dev_to_cpu(sg_page(s), s->offset,
|
||||||
s->length, dir);
|
s->length, dir);
|
||||||
}
|
}
|
||||||
|
@ -1492,7 +1384,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
|
||||||
dma_addr_t dma_addr;
|
dma_addr_t dma_addr;
|
||||||
int ret, len = PAGE_ALIGN(size + offset);
|
int ret, len = PAGE_ALIGN(size + offset);
|
||||||
|
|
||||||
if (!arch_is_coherent())
|
if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
|
||||||
__dma_page_cpu_to_dev(page, offset, size, dir);
|
__dma_page_cpu_to_dev(page, offset, size, dir);
|
||||||
|
|
||||||
dma_addr = __alloc_iova(mapping, len);
|
dma_addr = __alloc_iova(mapping, len);
|
||||||
|
@ -1531,7 +1423,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
|
||||||
if (!iova)
|
if (!iova)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (!arch_is_coherent())
|
if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
|
||||||
__dma_page_dev_to_cpu(page, offset, size, dir);
|
__dma_page_dev_to_cpu(page, offset, size, dir);
|
||||||
|
|
||||||
iommu_unmap(mapping->domain, iova, len);
|
iommu_unmap(mapping->domain, iova, len);
|
||||||
|
@ -1571,6 +1463,7 @@ struct dma_map_ops iommu_ops = {
|
||||||
.alloc = arm_iommu_alloc_attrs,
|
.alloc = arm_iommu_alloc_attrs,
|
||||||
.free = arm_iommu_free_attrs,
|
.free = arm_iommu_free_attrs,
|
||||||
.mmap = arm_iommu_mmap_attrs,
|
.mmap = arm_iommu_mmap_attrs,
|
||||||
|
.get_sgtable = arm_iommu_get_sgtable,
|
||||||
|
|
||||||
.map_page = arm_iommu_map_page,
|
.map_page = arm_iommu_map_page,
|
||||||
.unmap_page = arm_iommu_unmap_page,
|
.unmap_page = arm_iommu_unmap_page,
|
||||||
|
|
|
@@ -59,6 +59,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 #define VM_ARM_MTYPE(mt)	((mt) << 20)
 #define VM_ARM_MTYPE_MASK	(0x1f << 20)
 
+/* consistent regions used by dma_alloc_attrs() */
+#define VM_ARM_DMA_CONSISTENT	0x20000000
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
@@ -27,7 +27,10 @@ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 extern void dma_direct_free_coherent(struct device *dev, size_t size,
 				     void *vaddr, dma_addr_t dma_handle,
 				     struct dma_attrs *attrs);
+extern int dma_direct_mmap_coherent(struct device *dev,
+				    struct vm_area_struct *vma,
+				    void *cpu_addr, dma_addr_t handle,
+				    size_t size, struct dma_attrs *attrs);
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*

@@ -207,11 +210,8 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
-			     void *, dma_addr_t, size_t);
 #define ARCH_HAS_DMA_MMAP_COHERENT
 
 
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction direction)
 {
@@ -109,6 +109,7 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
 struct dma_map_ops dma_iommu_ops = {
 	.alloc			= dma_iommu_alloc_coherent,
 	.free			= dma_iommu_free_coherent,
+	.mmap			= dma_direct_mmap_coherent,
 	.map_sg			= dma_iommu_map_sg,
 	.unmap_sg		= dma_iommu_unmap_sg,
 	.dma_supported		= dma_iommu_dma_supported,

@@ -49,6 +49,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
 struct dma_map_ops swiotlb_dma_ops = {
 	.alloc			= dma_direct_alloc_coherent,
 	.free			= dma_direct_free_coherent,
+	.mmap			= dma_direct_mmap_coherent,
 	.map_sg			= swiotlb_map_sg_attrs,
 	.unmap_sg		= swiotlb_unmap_sg_attrs,
 	.dma_supported		= swiotlb_dma_supported,
@@ -67,6 +67,24 @@ void dma_direct_free_coherent(struct device *dev, size_t size,
 #endif
 }
 
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+			     void *cpu_addr, dma_addr_t handle, size_t size,
+			     struct dma_attrs *attrs)
+{
+	unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+	pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+	return remap_pfn_range(vma, vma->vm_start,
+			       pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 			     int nents, enum dma_data_direction direction,
 			     struct dma_attrs *attrs)

@@ -156,6 +174,7 @@ static inline void dma_direct_sync_single(struct device *dev,
 struct dma_map_ops dma_direct_ops = {
 	.alloc				= dma_direct_alloc_coherent,
 	.free				= dma_direct_free_coherent,
+	.mmap				= dma_direct_mmap_coherent,
 	.map_sg				= dma_direct_map_sg,
 	.unmap_sg			= dma_direct_unmap_sg,
 	.dma_supported			= dma_direct_dma_supported,

@@ -219,20 +238,3 @@ static int __init dma_init(void)
 }
 fs_initcall(dma_init);
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-		      void *cpu_addr, dma_addr_t handle, size_t size)
-{
-	unsigned long pfn;
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
-#else
-	pfn = page_to_pfn(virt_to_page(cpu_addr));
-#endif
-	return remap_pfn_range(vma, vma->vm_start,
-			       pfn + vma->vm_pgoff,
-			       vma->vm_end - vma->vm_start,
-			       vma->vm_page_prot);
-}
-EXPORT_SYMBOL_GPL(dma_mmap_coherent);
@@ -611,6 +611,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
 struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc             = vio_dma_iommu_alloc_coherent,
 	.free              = vio_dma_iommu_free_coherent,
+	.mmap              = dma_direct_mmap_coherent,
 	.map_sg            = vio_dma_iommu_map_sg,
 	.unmap_sg          = vio_dma_iommu_unmap_sg,
 	.map_page          = vio_dma_iommu_map_page,
@@ -10,6 +10,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
+#include <asm-generic/dma-coherent.h>
 
 /*
  * Managed DMA API

@@ -217,4 +218,52 @@ void dmam_release_declared_memory(struct device *dev)
 }
 EXPORT_SYMBOL(dmam_release_declared_memory);
 
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ */
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+		 void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	struct page *page = virt_to_page(cpu_addr);
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return 0;
+}
+EXPORT_SYMBOL(dma_common_get_sgtable);
+
 #endif
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	int ret = -ENXIO;
+#ifdef CONFIG_MMU
+	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+	unsigned long off = vma->vm_pgoff;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+#endif	/* CONFIG_MMU */
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_common_mmap);
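(Not part of the patch.)  With dma_common_mmap() in place as the generic fallback,
a hypothetical character-device driver can expose a coherent buffer through its
mmap() file operation with a single call; the structure and field names are
illustrative:

	#include <linux/dma-mapping.h>
	#include <linux/fs.h>
	#include <linux/mm.h>

	struct my_dma_buffer {
		struct device	*dev;
		void		*cpu_addr;
		dma_addr_t	handle;
		size_t		size;
	};

	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct my_dma_buffer *buf = file->private_data;

		/* Dispatches to ops->mmap if set, else to dma_common_mmap(). */
		return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
					 buf->handle, buf->size);
	}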
@@ -29,6 +29,7 @@ dma_mark_declared_memory_occupied(struct device *dev,
 #else
 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
 #define dma_release_from_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
 #endif
 
 #endif
@@ -176,4 +176,59 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
 
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space.  The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	if (ops->mmap)
+		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+			 void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+		       void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	if (ops->get_sgtable)
+		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+					attrs);
+	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
 #endif
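(Not part of the patch.)  The same generic path also backs dma_mmap_writecombine();
a hypothetical frame-buffer-style driver whose pixel buffer came from
dma_alloc_writecombine() could map it to user space like this, with all names
illustrative:

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	static int my_fb_mmap_buffer(struct device *dev, struct vm_area_struct *vma,
				     void *fb_virt, dma_addr_t fb_dma,
				     size_t fb_size)
	{
		/* Expands to dma_mmap_attrs() with DMA_ATTR_WRITE_COMBINE set. */
		return dma_mmap_writecombine(dev, vma, fb_virt, fb_dma, fb_size);
	}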
@@ -15,6 +15,8 @@ enum dma_attr {
 	DMA_ATTR_WEAK_ORDERING,
 	DMA_ATTR_WRITE_COMBINE,
 	DMA_ATTR_NON_CONSISTENT,
+	DMA_ATTR_NO_KERNEL_MAPPING,
+	DMA_ATTR_SKIP_CPU_SYNC,
 	DMA_ATTR_MAX,
 };

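A hedged sketch of requesting one of the new attributes at allocation time; the device pointer and size are placeholders, while DEFINE_DMA_ATTRS(), dma_set_attr() and dma_alloc_attrs() are the existing attribute helpers.

    #include <linux/dma-attrs.h>
    #include <linux/dma-mapping.h>

    static void *my_alloc_no_mapping(struct device *dev, size_t size,
    				 dma_addr_t *handle)
    {
    	DEFINE_DMA_ATTRS(attrs);

    	/* Request DMA_ATTR_NO_KERNEL_MAPPING for this allocation. */
    	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
    	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
    }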
@@ -18,6 +18,9 @@ struct dma_map_ops {
 	int (*mmap)(struct device *, struct vm_area_struct *,
 			  void *, dma_addr_t, size_t, struct dma_attrs *attrs);

+	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
+			   dma_addr_t, size_t, struct dma_attrs *attrs);
+
 	dma_addr_t (*map_page)(struct device *dev, struct page *page,
 			       unsigned long offset, size_t size,
 			       enum dma_data_direction dir,
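The optional get_sgtable callback backs the new dma_get_sgtable() interface. A hedged sketch of exporting the pages behind a coherent allocation as a scatter/gather table follows; all names other than dma_get_sgtable() and sg_free_table() are placeholders.

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int my_export_buffer(struct device *dev, void *cpu_addr,
    			    dma_addr_t handle, size_t size,
    			    struct sg_table *sgt)
    {
    	int ret;

    	ret = dma_get_sgtable(dev, sgt, cpu_addr, handle, size);
    	if (ret < 0)
    		return ret;

    	/* ... pass sgt to the importing side, later sg_free_table(sgt) ... */
    	return 0;
    }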
@@ -214,6 +214,10 @@ void sg_free_table(struct sg_table *);
 int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
 		     sg_alloc_fn *);
 int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
+int sg_alloc_table_from_pages(struct sg_table *sgt,
+	struct page **pages, unsigned int n_pages,
+	unsigned long offset, unsigned long size,
+	gfp_t gfp_mask);

 size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
 			   void *buf, size_t buflen);
@@ -32,7 +32,7 @@ struct vm_struct {
 	struct page		**pages;
 	unsigned int		nr_pages;
 	phys_addr_t		phys_addr;
-	void			*caller;
+	const void		*caller;
 };

 /*
@@ -62,7 +62,7 @@ extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
-			pgprot_t prot, int node, void *caller);
+			pgprot_t prot, int node, const void *caller);
 extern void vfree(const void *addr);

 extern void *vmap(struct page **pages, unsigned int count,
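The caller argument records the allocation site (as shown, for example, in /proc/vmallocinfo) and is never written through, which is what the const qualifier now expresses. A hedged sketch of the usual convention, with a placeholder wrapper name:

    #include <linux/gfp.h>
    #include <linux/vmalloc.h>

    /* Hypothetical wrapper: attribute the allocation to our own caller. */
    static void *my_vmalloc(unsigned long size)
    {
    	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
    				    GFP_KERNEL, PAGE_KERNEL, -1,
    				    __builtin_return_address(0));
    }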
@@ -85,14 +85,15 @@ static inline size_t get_vm_area_size(const struct vm_struct *area)

 extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 extern struct vm_struct *get_vm_area_caller(unsigned long size,
-					unsigned long flags, void *caller);
+					unsigned long flags, const void *caller);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 					unsigned long start, unsigned long end);
 extern struct vm_struct *__get_vm_area_caller(unsigned long size,
 					unsigned long flags,
 					unsigned long start, unsigned long end,
-					void *caller);
+					const void *caller);
 extern struct vm_struct *remove_vm_area(const void *addr);
+extern struct vm_struct *find_vm_area(const void *addr);

 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
@@ -318,6 +318,70 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(sg_alloc_table);

+/**
+ * sg_alloc_table_from_pages - Allocate and initialize an sg table from
+ *			       an array of pages
+ * @sgt:	The sg table header to use
+ * @pages:	Pointer to an array of page pointers
+ * @n_pages:	Number of pages in the pages array
+ * @offset:	Offset from start of the first page to the start of a buffer
+ * @size:	Number of valid bytes in the buffer (after offset)
+ * @gfp_mask:	GFP allocation mask
+ *
+ * Description:
+ *    Allocate and initialize an sg table from a list of pages. Contiguous
+ *    ranges of the pages are squashed into a single scatterlist node. A user
+ *    may provide an offset at a start and a size of valid data in a buffer
+ *    specified by the page array. The returned sg table is released by
+ *    sg_free_table.
+ *
+ * Returns:
+ *   0 on success, negative error on failure
+ */
+int sg_alloc_table_from_pages(struct sg_table *sgt,
+	struct page **pages, unsigned int n_pages,
+	unsigned long offset, unsigned long size,
+	gfp_t gfp_mask)
+{
+	unsigned int chunks;
+	unsigned int i;
+	unsigned int cur_page;
+	int ret;
+	struct scatterlist *s;
+
+	/* compute number of contiguous chunks */
+	chunks = 1;
+	for (i = 1; i < n_pages; ++i)
+		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
+			++chunks;
+
+	ret = sg_alloc_table(sgt, chunks, gfp_mask);
+	if (unlikely(ret))
+		return ret;
+
+	/* merging chunks and putting them into the scatterlist */
+	cur_page = 0;
+	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+		unsigned long chunk_size;
+		unsigned int j;
+
+		/* look for the end of the current chunk */
+		for (j = cur_page + 1; j < n_pages; ++j)
+			if (page_to_pfn(pages[j]) !=
+			    page_to_pfn(pages[j - 1]) + 1)
+				break;
+
+		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
+		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+		size -= chunk_size;
+		offset = 0;
+		cur_page = j;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(sg_alloc_table_from_pages);
+
 /**
  * sg_miter_start - start mapping iteration over a sg list
  * @miter: sg mapping iter to be started
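A hedged usage sketch for the new helper: turning an already-pinned page array (for example from get_user_pages(), not shown) into a scatter/gather table. The wrapper name is a placeholder; only sg_alloc_table_from_pages() and sg_free_table() come from the API above.

    #include <linux/scatterlist.h>

    static int my_pages_to_sgt(struct sg_table *sgt, struct page **pages,
    			   unsigned int n_pages, unsigned long offset,
    			   unsigned long size)
    {
    	int ret;

    	/* Physically contiguous page runs end up as single sg entries. */
    	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
    					GFP_KERNEL);
    	if (ret)
    		return ret;

    	/* ... map or walk sgt->sgl, then sg_free_table(sgt) when done ... */
    	return 0;
    }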
mm/vmalloc.c
@@ -1280,7 +1280,7 @@ DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;

 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-			      unsigned long flags, void *caller)
+			      unsigned long flags, const void *caller)
 {
 	vm->flags = flags;
 	vm->addr = (void *)va->va_start;
@@ -1306,7 +1306,7 @@ static void insert_vmalloc_vmlist(struct vm_struct *vm)
 }

 static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-			      unsigned long flags, void *caller)
+			      unsigned long flags, const void *caller)
 {
 	setup_vmalloc_vm(vm, va, flags, caller);
 	insert_vmalloc_vmlist(vm);
@@ -1314,7 +1314,7 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,

 static struct vm_struct *__get_vm_area_node(unsigned long size,
 		unsigned long align, unsigned long flags, unsigned long start,
-		unsigned long end, int node, gfp_t gfp_mask, void *caller)
+		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
 {
 	struct vmap_area *va;
 	struct vm_struct *area;
@@ -1375,7 +1375,7 @@ EXPORT_SYMBOL_GPL(__get_vm_area);

 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 				       unsigned long start, unsigned long end,
-				       void *caller)
+				       const void *caller)
 {
 	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
 				  caller);
@@ -1397,13 +1397,21 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 }

 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
-				void *caller)
+				const void *caller)
 {
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  -1, GFP_KERNEL, caller);
 }

-static struct vm_struct *find_vm_area(const void *addr)
+/**
+ * find_vm_area  -  find a continuous kernel virtual area
+ * @addr:	  base address
+ *
+ * Search for the kernel VM area starting at @addr, and return it.
+ * It is up to the caller to do all required locking to keep the returned
+ * pointer valid.
+ */
+struct vm_struct *find_vm_area(const void *addr)
 {
 	struct vmap_area *va;

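With find_vm_area() now non-static and declared in linux/vmalloc.h, code outside mm/vmalloc.c can look up the vm_struct backing a vmalloc address. A hedged sketch with a placeholder helper name:

    #include <linux/vmalloc.h>

    static unsigned int my_pages_behind(const void *vaddr)
    {
    	struct vm_struct *area = find_vm_area(vaddr);

    	/* find_vm_area() returns NULL if no matching area is found. */
    	return area ? area->nr_pages : 0;
    }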
@@ -1568,9 +1576,9 @@ EXPORT_SYMBOL(vmap);

 static void *__vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller);
+			    int node, const void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-				 pgprot_t prot, int node, void *caller)
+				 pgprot_t prot, int node, const void *caller)
 {
 	const int order = 0;
 	struct page **pages;
@@ -1643,7 +1651,7 @@ fail:
  */
 void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
-			pgprot_t prot, int node, void *caller)
+			pgprot_t prot, int node, const void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1699,7 +1707,7 @@ fail:
  */
 static void *__vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller)
+			    int node, const void *caller)
 {
 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
 				    gfp_mask, prot, node, caller);