Merge branch 'stable/for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb
Pull swiotlb updates from Konrad Rzeszutek Wilk:

 - minor fixes (rate limiting), remove certain functions

 - support for DMA_ATTR_SKIP_CPU_SYNC which is an optimization in the
   DMA API

* 'stable/for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb: Minor fix-ups for DMA_ATTR_SKIP_CPU_SYNC support
  swiotlb: Add support for DMA_ATTR_SKIP_CPU_SYNC
  swiotlb-xen: Enforce return of DMA_ERROR_CODE in mapping function
  swiotlb: Drop unused functions swiotlb_map_sg and swiotlb_unmap_sg
  swiotlb: Rate-limit printing when running out of SW-IOMMU space
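For context on the DMA_ATTR_SKIP_CPU_SYNC optimization pulled in here: the attribute lets a caller map a buffer without the implicit CPU sync (and, under swiotlb, without the bounce-buffer copy) and perform the sync itself once the data is actually in place. The sketch below is illustrative only and is not part of this merge; example_tx and its parameters are made-up names, and it simply assumes the generic DMA API of this kernel release.

    /*
     * Hypothetical driver-side use of DMA_ATTR_SKIP_CPU_SYNC (not part of
     * this commit). Map without the implicit sync, fill the buffer, then
     * hand the data to the device with an explicit sync call.
     */
    #include <linux/dma-mapping.h>
    #include <linux/device.h>
    #include <linux/errno.h>

    static int example_tx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* Map without the implicit sync/copy to the bounce buffer. */
            handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
                                          DMA_ATTR_SKIP_CPU_SYNC);
            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... fill buf with the payload ... */

            /* Now make the data visible to the device explicitly. */
            dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

            /* ... start the transfer, wait for completion ... */

            /* Skip the sync on unmap as well; nothing to copy back. */
            dma_unmap_single_attrs(dev, handle, len, DMA_TO_DEVICE,
                                   DMA_ATTR_SKIP_CPU_SYNC);
            return 0;
    }

Skipping the implicit sync at map time avoids copying stale data into the swiotlb bounce buffer; the explicit dma_sync_single_for_device() then performs the one copy that is actually needed.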
commit b5cab0da75
@@ -186,7 +186,6 @@ struct dma_map_ops *xen_dma_ops;
 EXPORT_SYMBOL(xen_dma_ops);
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
-        .mapping_error = xen_swiotlb_dma_mapping_error,
         .alloc = xen_swiotlb_alloc_coherent,
         .free = xen_swiotlb_free_coherent,
         .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
@@ -19,7 +19,6 @@
 int xen_swiotlb __read_mostly;
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
-        .mapping_error = xen_swiotlb_dma_mapping_error,
         .alloc = xen_swiotlb_alloc_coherent,
         .free = xen_swiotlb_free_coherent,
         .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
@@ -405,7 +405,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
          */
         trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
+        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
+                                     attrs);
         if (map == SWIOTLB_MAP_ERROR)
                 return DMA_ERROR_CODE;
 
@@ -416,11 +417,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         /*
          * Ensure that the address returned is DMA'ble
          */
-        if (!dma_capable(dev, dev_addr, size)) {
-                swiotlb_tbl_unmap_single(dev, map, size, dir);
-                dev_addr = 0;
-        }
-        return dev_addr;
+        if (dma_capable(dev, dev_addr, size))
+                return dev_addr;
+
+        attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+        swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+        return DMA_ERROR_CODE;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
 
@@ -444,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
         /* NOTE: We use dev_addr here, not paddr! */
         if (is_xen_swiotlb_buffer(dev_addr)) {
-                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
                 return;
         }
 
@@ -557,11 +560,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                                                  start_dma_addr,
                                                  sg_phys(sg),
                                                  sg->length,
-                                                 dir);
+                                                 dir, attrs);
                 if (map == SWIOTLB_MAP_ERROR) {
                         dev_warn(hwdev, "swiotlb buffer is full\n");
                         /* Don't panic here, we expect map_sg users
                            to do proper error handling. */
+                        attrs |= DMA_ATTR_SKIP_CPU_SYNC;
                         xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                    attrs);
                         sg_dma_len(sgl) = 0;
@@ -648,13 +652,6 @@ xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
 
-int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-        return !dma_addr;
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly. For example, if your device can only drive the low 24-bits
@@ -44,11 +44,13 @@ enum dma_sync_target {
 extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                                           dma_addr_t tbl_dma_addr,
                                           phys_addr_t phys, size_t size,
-                                          enum dma_data_direction dir);
+                                          enum dma_data_direction dir,
+                                          unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
                                      phys_addr_t tlb_addr,
-                                     size_t size, enum dma_data_direction dir);
+                                     size_t size, enum dma_data_direction dir,
+                                     unsigned long attrs);
 
 extern void swiotlb_tbl_sync_single(struct device *hwdev,
                                     phys_addr_t tlb_addr,
@@ -72,14 +74,6 @@ extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir,
                                unsigned long attrs);
 
-extern int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir);
-
-extern void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-                 enum dma_data_direction dir);
-
 extern int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                      enum dma_data_direction dir,
@@ -50,9 +50,6 @@ extern void
 xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                                int nelems, enum dma_data_direction dir);
 
-extern int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
 extern int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
@@ -425,7 +425,8 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                                    dma_addr_t tbl_dma_addr,
                                    phys_addr_t orig_addr, size_t size,
-                                   enum dma_data_direction dir)
+                                   enum dma_data_direction dir,
+                                   unsigned long attrs)
 {
         unsigned long flags;
         phys_addr_t tlb_addr;
@@ -526,7 +527,8 @@ found:
          */
         for (i = 0; i < nslots; i++)
                 io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
-        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+            (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
                 swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
 
         return tlb_addr;
@@ -539,18 +541,20 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
 
 static phys_addr_t
 map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-           enum dma_data_direction dir)
+           enum dma_data_direction dir, unsigned long attrs)
 {
         dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
-        return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+        return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
+                                      dir, attrs);
 }
 
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-                              size_t size, enum dma_data_direction dir)
+                              size_t size, enum dma_data_direction dir,
+                              unsigned long attrs)
 {
         unsigned long flags;
         int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -561,6 +565,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
          * First, sync the memory before unmapping the entry
          */
         if (orig_addr != INVALID_PHYS_ADDR &&
+            !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
             ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                 swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
@@ -654,7 +659,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                  * GFP_DMA memory; fall back on map_single(), which
                  * will grab memory from the lowest available address range.
                  */
-                phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
+                phys_addr_t paddr = map_single(hwdev, 0, size,
+                                               DMA_FROM_DEVICE, 0);
                 if (paddr == SWIOTLB_MAP_ERROR)
                         goto err_warn;
 
@@ -667,9 +673,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                         (unsigned long long)dma_mask,
                         (unsigned long long)dev_addr);
 
-                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+                /*
+                 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+                 * The DMA_ATTR_SKIP_CPU_SYNC is optional.
+                 */
                 swiotlb_tbl_unmap_single(hwdev, paddr,
-                                         size, DMA_TO_DEVICE);
+                                         size, DMA_TO_DEVICE,
+                                         DMA_ATTR_SKIP_CPU_SYNC);
                 goto err_warn;
         }
 }
@@ -698,8 +708,12 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
         if (!is_swiotlb_buffer(paddr))
                 free_pages((unsigned long)vaddr, get_order(size));
         else
-                /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
-                swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
+                /*
+                 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
+                 * DMA_ATTR_SKIP_CPU_SYNC is optional.
+                 */
+                swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
+                                         DMA_ATTR_SKIP_CPU_SYNC);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
@@ -714,8 +728,8 @@ swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
          * When the mapping is small enough return a static buffer to limit
          * the damage, or panic when the transfer is too big.
          */
-        printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
-               "device %s\n", size, dev ? dev_name(dev) : "?");
+        dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
+                            size);
 
         if (size <= io_tlb_overflow || !do_panic)
                 return;
@@ -755,7 +769,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
         trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
         /* Oh well, have to allocate and map a bounce buffer. */
-        map = map_single(dev, phys, size, dir);
+        map = map_single(dev, phys, size, dir, attrs);
         if (map == SWIOTLB_MAP_ERROR) {
                 swiotlb_full(dev, size, dir, 1);
                 return phys_to_dma(dev, io_tlb_overflow_buffer);
@@ -764,12 +778,13 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
         dev_addr = phys_to_dma(dev, map);
 
         /* Ensure that the address returned is DMA'ble */
-        if (!dma_capable(dev, dev_addr, size)) {
-                swiotlb_tbl_unmap_single(dev, map, size, dir);
-                return phys_to_dma(dev, io_tlb_overflow_buffer);
-        }
+        if (dma_capable(dev, dev_addr, size))
+                return dev_addr;
 
-        return dev_addr;
+        attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+        swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+        return phys_to_dma(dev, io_tlb_overflow_buffer);
 }
 EXPORT_SYMBOL_GPL(swiotlb_map_page);
 
@@ -782,14 +797,15 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-                         size_t size, enum dma_data_direction dir)
+                         size_t size, enum dma_data_direction dir,
+                         unsigned long attrs)
 {
         phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
         BUG_ON(dir == DMA_NONE);
 
         if (is_swiotlb_buffer(paddr)) {
-                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
                 return;
         }
 
@@ -809,7 +825,7 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                         size_t size, enum dma_data_direction dir,
                         unsigned long attrs)
 {
-        unmap_single(hwdev, dev_addr, size, dir);
+        unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 
@@ -891,11 +907,12 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                 if (swiotlb_force ||
                     !dma_capable(hwdev, dev_addr, sg->length)) {
                         phys_addr_t map = map_single(hwdev, sg_phys(sg),
-                                                     sg->length, dir);
+                                                     sg->length, dir, attrs);
                         if (map == SWIOTLB_MAP_ERROR) {
                                 /* Don't panic here, we expect map_sg users
                                    to do proper error handling. */
                                 swiotlb_full(hwdev, sg->length, dir, 0);
+                                attrs |= DMA_ATTR_SKIP_CPU_SYNC;
                                 swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                        attrs);
                                 sg_dma_len(sgl) = 0;
@@ -910,14 +927,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 }
 EXPORT_SYMBOL(swiotlb_map_sg_attrs);
 
-int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-               enum dma_data_direction dir)
-{
-        return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0);
-}
-EXPORT_SYMBOL(swiotlb_map_sg);
-
 /*
  * Unmap a set of streaming mode DMA translations. Again, cpu read rules
  * concerning calls here are the same as for swiotlb_unmap_page() above.
@@ -933,19 +942,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
         BUG_ON(dir == DMA_NONE);
 
         for_each_sg(sgl, sg, nelems, i)
-                unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
-
+                unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
+                             attrs);
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
-void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-                 enum dma_data_direction dir)
-{
-        return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0);
-}
-EXPORT_SYMBOL(swiotlb_unmap_sg);
-
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
  * after a transfer.