dma-debug: teach add_dma_entry() about DMA_ATTR_SKIP_CPU_SYNC

Mapping something twice should be possible as long as
DMA_ATTR_SKIP_CPU_SYNC is passed to the second (and any later) mapping
operation that maps the same thing. So don't warn about overlapping
mappings in add_dma_entry() when that attribute is set.

Signed-off-by: Hamza Mahfooz <someguy@effective-light.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Hamza Mahfooz authored on 2021-10-12 01:54:03 -04:00, committed by Christoph Hellwig
Parent: 293d92cbbd
Commit: c2bbf9d1e9
3 changed files, 36 insertions, 24 deletions
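To illustrate the case the commit message describes, here is a minimal sketch (not taken from the patch; the map_same_page_twice() helper and the dev/page setup are hypothetical) of a driver that deliberately maps the same page twice. With this change, CONFIG_DMA_API_DEBUG no longer prints "cacheline tracking EEXIST, overlapping mappings aren't supported" for the second mapping, because it passes DMA_ATTR_SKIP_CPU_SYNC:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical helper, for illustration only. */
static int map_same_page_twice(struct device *dev, struct page *page)
{
        dma_addr_t first, second;

        /* First mapping: regular attrs, CPU cache maintenance happens here. */
        first = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE, 0);
        if (dma_mapping_error(dev, first))
                return -ENOMEM;

        /*
         * Second, overlapping mapping of the same page. Because it passes
         * DMA_ATTR_SKIP_CPU_SYNC, add_dma_entry() now tolerates the -EEXIST
         * from the cacheline tracking instead of warning about it.
         */
        second = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE,
                                    DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(dev, second)) {
                dma_unmap_page_attrs(dev, first, PAGE_SIZE, DMA_TO_DEVICE, 0);
                return -ENOMEM;
        }

        /* ... hand both DMA addresses to the hardware, then unmap ... */
        dma_unmap_page_attrs(dev, second, PAGE_SIZE, DMA_TO_DEVICE,
                             DMA_ATTR_SKIP_CPU_SYNC);
        dma_unmap_page_attrs(dev, first, PAGE_SIZE, DMA_TO_DEVICE, 0);
        return 0;
}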

kernel/dma/debug.c

@@ -552,7 +552,7 @@ static void active_cacheline_remove(struct dma_debug_entry *entry)
  * Wrapper function for adding an entry to the hash.
  * This function takes care of locking itself.
  */
-static void add_dma_entry(struct dma_debug_entry *entry)
+static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 {
         struct hash_bucket *bucket;
         unsigned long flags;
@@ -566,7 +566,7 @@ static void add_dma_entry(struct dma_debug_entry *entry)
         if (rc == -ENOMEM) {
                 pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
                 global_disable = true;
-        } else if (rc == -EEXIST) {
+        } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                 err_printk(entry->dev, entry,
                            "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
         }
@@ -1191,7 +1191,8 @@ void debug_dma_map_single(struct device *dev, const void *addr,
 EXPORT_SYMBOL(debug_dma_map_single);
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-                        size_t size, int direction, dma_addr_t dma_addr)
+                        size_t size, int direction, dma_addr_t dma_addr,
+                        unsigned long attrs)
 {
         struct dma_debug_entry *entry;
@@ -1222,7 +1223,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                 check_for_illegal_area(dev, addr, size);
         }
-        add_dma_entry(entry);
+        add_dma_entry(entry, attrs);
 }
 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -1280,7 +1281,8 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 }
 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                      int nents, int mapped_ents, int direction)
+                      int nents, int mapped_ents, int direction,
+                      unsigned long attrs)
 {
         struct dma_debug_entry *entry;
         struct scatterlist *s;
@@ -1312,7 +1314,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                 check_sg_segment(dev, s);
-                add_dma_entry(entry);
+                add_dma_entry(entry, attrs);
         }
 }
@@ -1368,7 +1370,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 }
 void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                              dma_addr_t dma_addr, void *virt)
+                              dma_addr_t dma_addr, void *virt,
+                              unsigned long attrs)
 {
         struct dma_debug_entry *entry;
@@ -1398,7 +1401,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
         else
                 entry->pfn = page_to_pfn(virt_to_page(virt));
-        add_dma_entry(entry);
+        add_dma_entry(entry, attrs);
 }
 void debug_dma_free_coherent(struct device *dev, size_t size,
@@ -1429,7 +1432,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 }
 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
-                            int direction, dma_addr_t dma_addr)
+                            int direction, dma_addr_t dma_addr,
+                            unsigned long attrs)
 {
         struct dma_debug_entry *entry;
@@ -1449,7 +1453,7 @@ void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
         entry->direction = direction;
         entry->map_err_type = MAP_ERR_NOT_CHECKED;
-        add_dma_entry(entry);
+        add_dma_entry(entry, attrs);
 }
 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,

kernel/dma/debug.h

@@ -11,26 +11,30 @@
 #ifdef CONFIG_DMA_API_DEBUG
 extern void debug_dma_map_page(struct device *dev, struct page *page,
                                size_t offset, size_t size,
-                               int direction, dma_addr_t dma_addr);
+                               int direction, dma_addr_t dma_addr,
+                               unsigned long attrs);
 extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction);
 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                             int nents, int mapped_ents, int direction);
+                             int nents, int mapped_ents, int direction,
+                             unsigned long attrs);
 extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                                int nelems, int dir);
 extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                                     dma_addr_t dma_addr, void *virt);
+                                     dma_addr_t dma_addr, void *virt,
+                                     unsigned long attrs);
 extern void debug_dma_free_coherent(struct device *dev, size_t size,
                                     void *virt, dma_addr_t addr);
 extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
                                    size_t size, int direction,
-                                   dma_addr_t dma_addr);
+                                   dma_addr_t dma_addr,
+                                   unsigned long attrs);
 extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
                                      size_t size, int direction);
@@ -53,7 +57,8 @@ extern void debug_dma_sync_sg_for_device(struct device *dev,
 #else /* CONFIG_DMA_API_DEBUG */
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
                                       size_t offset, size_t size,
-                                      int direction, dma_addr_t dma_addr)
+                                      int direction, dma_addr_t dma_addr,
+                                      unsigned long attrs)
 {
 }
@@ -63,7 +68,8 @@ static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 }
 static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                                    int nents, int mapped_ents, int direction)
+                                    int nents, int mapped_ents, int direction,
+                                    unsigned long attrs)
 {
 }
@@ -74,7 +80,8 @@ static inline void debug_dma_unmap_sg(struct device *dev,
 }
 static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                                            dma_addr_t dma_addr, void *virt)
+                                            dma_addr_t dma_addr, void *virt,
+                                            unsigned long attrs)
 {
 }
@@ -85,7 +92,8 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
 static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
                                           size_t size, int direction,
-                                          dma_addr_t dma_addr)
+                                          dma_addr_t dma_addr,
+                                          unsigned long attrs)
 {
 }

kernel/dma/mapping.c

@@ -156,7 +156,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
                 addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
         else
                 addr = ops->map_page(dev, page, offset, size, dir, attrs);
-        debug_dma_map_page(dev, page, offset, size, dir, addr);
+        debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
         return addr;
 }
@@ -195,7 +195,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                 ents = ops->map_sg(dev, sg, nents, dir, attrs);
         if (ents > 0)
-                debug_dma_map_sg(dev, sg, nents, ents, dir);
+                debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
         else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
                               ents != -EIO))
                 return -EIO;
@@ -305,7 +305,7 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
         else if (ops->map_resource)
                 addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
-        debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+        debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
         return addr;
 }
 EXPORT_SYMBOL(dma_map_resource);
@@ -510,7 +510,7 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
         else
                 return NULL;
-        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
         return cpu_addr;
 }
 EXPORT_SYMBOL(dma_alloc_attrs);
@@ -566,7 +566,7 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
         struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
         if (page)
-                debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
+                debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
         return page;
 }
 EXPORT_SYMBOL_GPL(dma_alloc_pages);
@@ -644,7 +644,7 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
         if (sgt) {
                 sgt->nents = 1;
-                debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir);
+                debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
         }
         return sgt;
 }