dma-mapping fixes for 5.6
- give command line cma= precedence over the CONFIG_ option (Nicolas
  Saenz Julienne)
- always allow 32-bit DMA, even for weirdly placed ZONE_DMA
- improve the debug printks when memory is not addressable, to help
  find problems with swiotlb initialization

-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAl5MU2MLHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYMITg/6A7iEBPKTR5ugKKHoYiKfg7xrslOHZHHdWh2qXKMf
C+vjeZzIeIWWFWVvCYAfbueSTAPeyfE1qMTwHjBakH1DOIpl2TgtLmBqZT+hb4WJ
Me4a48dNyX5Ngk5e6l4iNfkaa0P0SsbjRyZQyWpXCRDOAdudqmUOrgjA1MJPZrdS
dkzB6RdcKUdUJZ/METQ0+i2knbznMcfHn0WsqPjD8A1OMvgslUg3ky4t1dpfCjC9
hfmwkaWQmfPbW1SKmkuuOXc4im2WI8TeVNJ5N1+qAr91AtEVELTSBToZkEP4FoFj
iTCfwimt77qF3qeD7kIU6WGbFrnEcV9VTWW3YilMFgVUF6f0bW0HZBq4fQdI9HvS
O/aZteZTz5ukp9tJ8gILN1CNuuayJtaRFMeZv+A2+lsI31ITC2GKYsoNAepdwtHq
w1d4OrJciUy92h4VOrGtWFDIyr+tFrwqUjUR1WElz3tvPE4fkaLaK4lzEof7Z5B9
trzUHBmiiFnjAVpEIADEjRHXcNJQPBuWi07UT7ZuJ3bMa+neKUcsSwm3RZf46Kx+
yh45HsmjjHcBmHbbpTS3BGkog44cXB/+hRxtntBOwsvAaP4Ip8SHeyDrMH1Ay6xr
4xZmt9c3kQySgrywjIP//WmQ601HMjPmddkrJj8kZNNE7HxQYQlIyyf9IpEeSxVK
Seg=
=NfNc
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.6' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - give command line cma= precedence over the CONFIG_ option (Nicolas
   Saenz Julienne)

 - always allow 32-bit DMA, even for weirdly placed ZONE_DMA

 - improve the debug printks when memory is not addressable, to help
   find problems with swiotlb initialization

* tag 'dma-mapping-5.6' of git://git.infradead.org/users/hch/dma-mapping:
  dma-direct: improve DMA mask overflow reporting
  dma-direct: improve swiotlb error reporting
  dma-direct: relax addressability checks in dma_direct_supported
  dma-contiguous: CMA: give precedence to cmdline
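(For illustration only, not part of the pull: with the cmdline
precedence fix, booting with e.g.

	cma=64M

now overrides both the CONFIG_CMA_SIZE_MBYTES build-time default and a
devicetree reserved-memory node marked linux,cma-default, instead of
being silently overridden by the latter.)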
Commit 0a44cac810
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -64,6 +64,9 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
 				    size_t size, enum dma_data_direction dir,
 				    enum dma_sync_target target);
 
+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+
 #ifdef CONFIG_SWIOTLB
 extern enum swiotlb_force swiotlb_force;
 extern phys_addr_t io_tlb_start, io_tlb_end;
@@ -73,8 +76,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
 	return paddr >= io_tlb_start && paddr < io_tlb_end;
 }
 
-bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
 void __init swiotlb_exit(void);
 unsigned int swiotlb_max_segment(void);
 size_t swiotlb_max_mapping_size(struct device *dev);
@@ -85,12 +86,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
 {
 	return false;
 }
-static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
-		dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	return false;
-}
 static inline void swiotlb_exit(void)
 {
 }
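The header change above turns swiotlb_map() from a bool helper with
out-parameters into one that returns the DMA address directly. A
minimal caller sketch under the new contract (hypothetical caller; the
real in-tree user is dma_direct_map_page() further down):

	dma_addr_t dma_addr;

	dma_addr = swiotlb_map(dev, phys, size, DMA_TO_DEVICE, 0);
	if (dma_addr == DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;	/* bounce buffering failed */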
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -302,9 +302,16 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
 	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
 	phys_addr_t mask = align - 1;
 	unsigned long node = rmem->fdt_node;
+	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
 	struct cma *cma;
 	int err;
 
+	if (size_cmdline != -1 && default_cma) {
+		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
+			rmem->name);
+		return -EBUSY;
+	}
+
 	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
 	    of_get_flat_dt_prop(node, "no-map", NULL))
 		return -EINVAL;
@@ -322,7 +329,7 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
 	/* Architecture specific contiguous memory fixup. */
 	dma_contiguous_early_fixup(rmem->base, rmem->size);
 
-	if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
+	if (default_cma)
 		dma_contiguous_set_default(cma);
 
 	rmem->ops = &rmem_cma_ops;
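The precedence decision itself reduces to a two-term test. A standalone
sketch (plain userspace C; the names mirror the kernel's size_cmdline
and default_cma, but nothing here is kernel API):

	#include <stdbool.h>
	#include <stdio.h>

	/* -1 mirrors the kernel's "no cma= given" sentinel */
	static bool bypass_dt_default(long long size_cmdline, bool default_cma)
	{
		return size_cmdline != -1 && default_cma;
	}

	int main(void)
	{
		printf("%d\n", bypass_dt_default(64 << 20, true));  /* 1: cmdline wins */
		printf("%d\n", bypass_dt_default(-1, true));        /* 0: DT default kept */
		printf("%d\n", bypass_dt_default(64 << 20, false)); /* 0: non-default node */
		return 0;
	}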
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -23,18 +23,6 @@
  */
 unsigned int zone_dma_bits __ro_after_init = 24;
 
-static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	if (!dev->dma_mask) {
-		dev_err_once(dev, "DMA map on device without dma_mask\n");
-	} else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_limit) {
-		dev_err_once(dev,
-			"overflow %pad+%zu of DMA mask %llx bus limit %llx\n",
-			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
-	}
-	WARN_ON_ONCE(1);
-}
-
 static inline dma_addr_t phys_to_dma_direct(struct device *dev,
 		phys_addr_t phys)
 {
@@ -357,13 +345,6 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
 EXPORT_SYMBOL(dma_direct_unmap_sg);
 #endif
 
-static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
-		size_t size)
-{
-	return swiotlb_force != SWIOTLB_FORCE &&
-		dma_capable(dev, dma_addr, size, true);
-}
-
 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
@@ -371,9 +352,16 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 	phys_addr_t phys = page_to_phys(page) + offset;
 	dma_addr_t dma_addr = phys_to_dma(dev, phys);
 
-	if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
-	    !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
-		report_addr(dev, dma_addr, size);
+	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
+		return swiotlb_map(dev, phys, size, dir, attrs);
+
+	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+		if (swiotlb_force != SWIOTLB_NO_FORCE)
+			return swiotlb_map(dev, phys, size, dir, attrs);
+
+		dev_WARN_ONCE(dev, 1,
+			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
 		return DMA_MAPPING_ERROR;
 	}
 
@@ -411,7 +399,10 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
 	dma_addr_t dma_addr = paddr;
 
 	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
-		report_addr(dev, dma_addr, size);
+		dev_err_once(dev,
+			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+		WARN_ON_ONCE(1);
 		return DMA_MAPPING_ERROR;
 	}
 
@@ -472,28 +463,26 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 }
 #endif /* CONFIG_MMU */
 
-/*
- * Because 32-bit DMA masks are so common we expect every architecture to be
- * able to satisfy them - either by not supporting more physical memory, or by
- * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
- * use an IOMMU instead of the direct mapping.
- */
 int dma_direct_supported(struct device *dev, u64 mask)
 {
-	u64 min_mask;
+	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;
 
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		min_mask = DMA_BIT_MASK(zone_dma_bits);
-	else
-		min_mask = DMA_BIT_MASK(32);
-
-	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
+	/*
+	 * Because 32-bit DMA masks are so common we expect every architecture
+	 * to be able to satisfy them - either by not supporting more physical
+	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
+	 * architecture needs to use an IOMMU instead of the direct mapping.
+	 */
+	if (mask >= DMA_BIT_MASK(32))
+		return 1;
 
 	/*
 	 * This check needs to be against the actual bit mask value, so
 	 * use __phys_to_dma() here so that the SME encryption mask isn't
 	 * part of the check.
 	 */
+	if (IS_ENABLED(CONFIG_ZONE_DMA))
+		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
 	return mask >= __phys_to_dma(dev, min_mask);
 }
 
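The relaxed check is easy to exercise outside the kernel. A userspace
sketch (DMA_BIT_MASK is copied from the kernel macro; the
__phys_to_dma()/SME-encryption step is deliberately left out):

	#include <stdint.h>
	#include <stdio.h>

	#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	/* max_phys stands in for (max_pfn - 1) << PAGE_SHIFT */
	static int supported(uint64_t mask, uint64_t max_phys,
			     unsigned int zone_dma_bits, int have_zone_dma)
	{
		uint64_t min_mask = max_phys;

		if (mask >= DMA_BIT_MASK(32))
			return 1;	/* 32-bit masks are now always accepted */
		if (have_zone_dma && DMA_BIT_MASK(zone_dma_bits) < min_mask)
			min_mask = DMA_BIT_MASK(zone_dma_bits);
		return mask >= min_mask;
	}

	int main(void)
	{
		/* any mask >= 32 bits passes outright: prints 1 */
		printf("%d\n", supported(DMA_BIT_MASK(32), DMA_BIT_MASK(36), 24, 1));
		/* a 20-bit mask cannot reach a 24-bit ZONE_DMA: prints 0 */
		printf("%d\n", supported(DMA_BIT_MASK(20), DMA_BIT_MASK(36), 24, 1));
		return 0;
	}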
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -22,6 +22,7 @@
 
 #include <linux/cache.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
@@ -656,35 +657,38 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 }
 
 /*
- * Create a swiotlb mapping for the buffer at @phys, and in case of DMAing
+ * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
  * to the device copy the data into it as well.
  */
-bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
 {
-	trace_swiotlb_bounced(dev, *dma_addr, size, swiotlb_force);
+	phys_addr_t swiotlb_addr;
+	dma_addr_t dma_addr;
 
-	if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
-		dev_warn_ratelimited(dev,
-			"Cannot do DMA to address %pa\n", phys);
-		return false;
-	}
+	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
+			      swiotlb_force);
 
-	/* Oh well, have to allocate and map a bounce buffer. */
-	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
-			*phys, size, size, dir, attrs);
-	if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
-		return false;
+	swiotlb_addr = swiotlb_tbl_map_single(dev,
+			__phys_to_dma(dev, io_tlb_start),
+			paddr, size, size, dir, attrs);
+	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
+		return DMA_MAPPING_ERROR;
 
 	/* Ensure that the address returned is DMA'ble */
-	*dma_addr = __phys_to_dma(dev, *phys);
-	if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
-		swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
+	dma_addr = __phys_to_dma(dev, swiotlb_addr);
+	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
 			attrs | DMA_ATTR_SKIP_CPU_SYNC);
-		return false;
+		dev_WARN_ONCE(dev, 1,
+			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+		return DMA_MAPPING_ERROR;
 	}
 
-	return true;
+	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(swiotlb_addr, size, dir);
+	return dma_addr;
 }
 
 size_t swiotlb_max_mapping_size(struct device *dev)
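Two behavioral details are folded into the new swiotlb_map() above: it
returns the dma_addr_t (or DMA_MAPPING_ERROR) instead of filling
out-parameters, and it now performs the arch_sync_dma_for_device()
cache maintenance for non-coherent devices itself, which is what the
added dma-noncoherent.h include is for. One way to exercise this bounce
path and the new overflow warnings is the existing boot parameter

	swiotlb=force

which makes dma_direct_map_page() route every mapping through
swiotlb_map(); an unaddressable bounce buffer should then hit the
"swiotlb addr ... overflow" dev_WARN_ONCE() above instead of failing
silently.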