Merge branch 'for-v3.16' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping into next

Pull CMA and DMA-mapping fixes from Marek Szyprowski:
 "A few fixes for dma-mapping and CMA subsystems"

* 'for-v3.16' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  CMA: correct unlock target
  drivers/base/dma-contiguous.c: erratum of dev_get_cma_area
  arm: dma-mapping: add checking cma area initialized
  arm: dma-iommu: Clean up redundant variable
  cma: Remove potential deadlock situation
commit ff806d034e
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -18,7 +18,6 @@ struct dma_iommu_mapping {
         unsigned int    extensions;
         size_t          bitmap_size;    /* size of a single bitmap */
         size_t          bits;           /* per bitmap */
-        unsigned int    size;           /* per bitmap */
         dma_addr_t      base;
 
         spinlock_t      lock;
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -390,7 +390,7 @@ static int __init atomic_pool_init(void)
         if (!pages)
                 goto no_pages;
 
-        if (IS_ENABLED(CONFIG_DMA_CMA))
+        if (dev_get_cma_area(NULL))
                 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
                                               atomic_pool_init);
         else
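The change above swaps a build-time test for a run-time one: CONFIG_DMA_CMA=y only says the code was compiled in, not that a CMA region was actually reserved and activated at boot, which is exactly what dev_get_cma_area(NULL) reports. For reference, the v3.16-era helper reads roughly as below (a paraphrase of include/linux/dma-contiguous.h from memory, not a verbatim quote):

static inline struct cma *dev_get_cma_area(struct device *dev)
{
        /* prefer the per-device CMA area when one has been assigned ... */
        if (dev && dev->cma_area)
                return dev->cma_area;
        /* ... else fall back to the global default, which remains NULL
         * when no area was successfully reserved at boot */
        return dma_contiguous_default_area;
}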
@@ -701,7 +701,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                 addr = __alloc_simple_buffer(dev, size, gfp, &page);
         else if (!(gfp & __GFP_WAIT))
                 addr = __alloc_from_pool(size, &page);
-        else if (!IS_ENABLED(CONFIG_DMA_CMA))
+        else if (!dev_get_cma_area(dev))
                 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
         else
                 addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
@@ -790,7 +790,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                 __dma_free_buffer(page, size);
         } else if (__free_from_pool(cpu_addr, size)) {
                 return;
-        } else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
+        } else if (!dev_get_cma_area(dev)) {
                 __dma_free_remap(cpu_addr, size);
                 __dma_free_buffer(page, size);
         } else {
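Note that __dma_alloc() and __arm_dma_free() now branch on the same run-time predicate, dev_get_cma_area(dev). Keeping the two dispatch chains mirrored is what guarantees a buffer is released by the same backend that allocated it, even on a kernel where CONFIG_DMA_CMA is built in but no CMA area came up.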
@@ -1074,6 +1074,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
         unsigned int order = get_order(size);
         unsigned int align = 0;
         unsigned int count, start;
+        size_t mapping_size = mapping->bits << PAGE_SHIFT;
         unsigned long flags;
         dma_addr_t iova;
         int i;
@@ -1119,7 +1120,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
         }
         spin_unlock_irqrestore(&mapping->lock, flags);
 
-        iova = mapping->base + (mapping->size * i);
+        iova = mapping->base + (mapping_size * i);
         iova += start << PAGE_SHIFT;
 
         return iova;
@@ -1129,6 +1130,7 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
                                dma_addr_t addr, size_t size)
 {
         unsigned int start, count;
+        size_t mapping_size = mapping->bits << PAGE_SHIFT;
         unsigned long flags;
         dma_addr_t bitmap_base;
         u32 bitmap_index;
@@ -1136,14 +1138,14 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
         if (!size)
                 return;
 
-        bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size;
+        bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
         BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
 
-        bitmap_base = mapping->base + mapping->size * bitmap_index;
+        bitmap_base = mapping->base + mapping_size * bitmap_index;
 
         start = (addr - bitmap_base) >> PAGE_SHIFT;
 
-        if (addr + size > bitmap_base + mapping->size) {
+        if (addr + size > bitmap_base + mapping_size) {
                 /*
                  * The address range to be freed reaches into the iova
                  * range of the next bitmap. This should not happen as
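mapping->size always held the derived value mapping->bits << PAGE_SHIFT, which is why the cleanup can drop the cached field and recompute a local mapping_size at each use; the address arithmetic itself is unchanged. A standalone sketch of that arithmetic, with made-up base/bits values (builds with any C99 compiler):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint64_t base = 0x80000000;            /* hypothetical mapping->base */
        uint64_t bits = 32768;                 /* hypothetical mapping->bits */
        uint64_t mapping_size = bits << PAGE_SHIFT;   /* bytes per bitmap */

        /* forward, as in __alloc_iova(): extension i, bit 'start' -> iova */
        uint64_t i = 2, start = 100;
        uint64_t iova = base + mapping_size * i + (start << PAGE_SHIFT);

        /* inverse, as in __free_iova(): iova -> extension index and bit */
        uint64_t bitmap_index = (iova - base) / mapping_size;
        uint64_t bitmap_base = base + mapping_size * bitmap_index;
        uint64_t bit = (iova - bitmap_base) >> PAGE_SHIFT;

        printf("iova=%#llx index=%llu bit=%llu\n",
               (unsigned long long)iova,
               (unsigned long long)bitmap_index,
               (unsigned long long)bit);
        return 0;                              /* prints index=2 bit=100 */
}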
@@ -1964,7 +1966,6 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
         mapping->extensions = extensions;
         mapping->base = base;
         mapping->bits = BITS_PER_BYTE * bitmap_size;
-        mapping->size = mapping->bits << PAGE_SHIFT;
 
         spin_lock_init(&mapping->lock);
 
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -37,6 +37,7 @@ struct cma {
         unsigned long   base_pfn;
         unsigned long   count;
         unsigned long   *bitmap;
+        struct mutex    lock;
 };
 
 struct cma *dma_contiguous_default_area;
@@ -161,6 +162,7 @@ static int __init cma_activate_area(struct cma *cma)
                 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
         } while (--i);
 
+        mutex_init(&cma->lock);
         return 0;
 }
 
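Placing mutex_init() in cma_activate_area() means each area's lock is set up during early kernel init, before any caller can reach dma_alloc_from_contiguous(), so the lock is always valid by the time it is first taken.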
@@ -261,6 +263,13 @@ err:
         return ret;
 }
 
+static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
+{
+        mutex_lock(&cma->lock);
+        bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+        mutex_unlock(&cma->lock);
+}
+
 /**
  * dma_alloc_from_contiguous() - allocate pages from contiguous area
  * @dev:   Pointer to device for which the allocation is performed.
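clear_cma_bitmap() is the rollback half of the new scheme: every bitmap_clear() now runs under the per-area cma->lock, and both the allocation failure paths and dma_release_from_contiguous() below funnel through this one helper.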
@@ -269,7 +278,7 @@ err:
  *
  * This function allocates memory buffer for specified device. It uses
  * device specific contiguous memory area if available or the default
- * global one. Requires architecture specific get_dev_cma_area() helper
+ * global one. Requires architecture specific dev_get_cma_area() helper
  * function.
  */
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
@@ -294,30 +303,41 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 
         mask = (1 << align) - 1;
 
-        mutex_lock(&cma_mutex);
-
         for (;;) {
+                mutex_lock(&cma->lock);
                 pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
                                                     start, count, mask);
-                if (pageno >= cma->count)
+                if (pageno >= cma->count) {
+                        mutex_unlock(&cma->lock);
                         break;
+                }
+                bitmap_set(cma->bitmap, pageno, count);
+                /*
+                 * It's safe to drop the lock here. We've marked this region for
+                 * our exclusive use. If the migration fails we will take the
+                 * lock again and unmark it.
+                 */
+                mutex_unlock(&cma->lock);
 
                 pfn = cma->base_pfn + pageno;
+                mutex_lock(&cma_mutex);
                 ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+                mutex_unlock(&cma_mutex);
                 if (ret == 0) {
-                        bitmap_set(cma->bitmap, pageno, count);
                         page = pfn_to_page(pfn);
                         break;
                 } else if (ret != -EBUSY) {
+                        clear_cma_bitmap(cma, pfn, count);
                         break;
                 }
+                clear_cma_bitmap(cma, pfn, count);
                 pr_debug("%s(): memory range at %p is busy, retrying\n",
                          __func__, pfn_to_page(pfn));
                 /* try again with a bit different memory target */
                 start = pageno + mask + 1;
         }
 
-        mutex_unlock(&cma_mutex);
         pr_debug("%s(): returned %p\n", __func__, page);
         return page;
 }
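This hunk carries the substance of the deadlock fix: the bitmap is guarded by the short-lived per-area cma->lock, the bits are reserved before the slow migration step so the lock can be dropped while alloc_contig_range() runs, the global cma_mutex now serializes only the migration itself, and a failed migration rolls the reservation back. A minimal userspace sketch of that reserve/work-unlocked/rollback discipline follows; everything in it (names, sizes, the failure stub) is invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NBITS 64

static unsigned char bitmap[NBITS];     /* one byte per "page" */
static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* find a free run and claim it, all under the short-lived bitmap lock */
static int reserve_range(int count)
{
        pthread_mutex_lock(&bitmap_lock);
        for (int start = 0; start + count <= NBITS; start++) {
                int i;
                for (i = 0; i < count && !bitmap[start + i]; i++)
                        ;
                if (i == count) {       /* free run found: mark it ours */
                        memset(bitmap + start, 1, count);
                        pthread_mutex_unlock(&bitmap_lock);
                        return start;
                }
        }
        pthread_mutex_unlock(&bitmap_lock);
        return -1;
}

/* the rollback: mirrors clear_cma_bitmap(), clearing only under the lock */
static void unreserve_range(int start, int count)
{
        pthread_mutex_lock(&bitmap_lock);
        memset(bitmap + start, 0, count);
        pthread_mutex_unlock(&bitmap_lock);
}

/* stand-in for alloc_contig_range(): slow, may fail, runs unlocked here */
static bool migrate_stub(int start)
{
        return (start % 2) == 0;        /* arbitrary failure pattern */
}

int main(void)
{
        int start = reserve_range(8);   /* the bits are ours once this returns */
        if (start >= 0 && !migrate_stub(start)) {
                unreserve_range(start, 8);      /* failed: give the bits back */
                start = -1;
        }
        printf("allocation %s\n", start >= 0 ? "succeeded" : "failed");
        return 0;
}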
@@ -350,10 +370,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
         VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
-        mutex_lock(&cma_mutex);
-        bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
         free_contig_range(pfn, count);
-        mutex_unlock(&cma_mutex);
+        clear_cma_bitmap(cma, pfn, count);
 
         return true;
 }
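The ordering in the release path matters too: free_contig_range() returns the pages first, and only then is the bitmap cleared, so a concurrent allocator cannot see the bits as free while the underlying pages are still allocated. Together with the allocation-side change this drops cma_mutex from the release path entirely, removing the lock dependency targeted by "cma: Remove potential deadlock situation".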